aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/intel
diff options
context:
space:
mode:
authorKalle Valo <kvalo@codeaurora.org>2015-11-17 13:57:38 -0500
committerKalle Valo <kvalo@codeaurora.org>2015-11-18 07:28:30 -0500
commite705c12146aa9c69ca498d4ebb83ba7138f9b41f (patch)
treeb55d4eb7a83c2ec117f460684eb71c89eee6a709 /drivers/net/wireless/intel
parent7ac9a364c1721a863ecc6cc9aba66e10114908db (diff)
iwlwifi: move under intel vendor directory
Part of reorganising wireless drivers directory and Kconfig. Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Diffstat (limited to 'drivers/net/wireless/intel')
-rw-r--r--drivers/net/wireless/intel/Kconfig1
-rw-r--r--drivers/net/wireless/intel/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h485
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c1113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.h74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h4008
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c2441
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h949
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c690
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c223
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c1300
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c1655
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c2077
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c395
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c3338
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h426
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c1101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c1572
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c1075
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c1442
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c685
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h128
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c1412
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c452
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-1000.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-2000.c216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-5000.c178
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-6000.c389
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c346
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h437
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h552
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h225
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h209
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c1706
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c947
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c464
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h535
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h320
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h768
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw.h322
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c289
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c193
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c844
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h271
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c471
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h401
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-scd.h143
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h1125
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c211
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c1005
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c1315
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c1483
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c1516
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h103
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h476
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h425
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h387
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h467
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h389
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h238
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h730
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h414
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h284
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h646
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h1773
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c1166
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c1452
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c4260
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1535
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c864
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c217
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1434
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c295
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c1040
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c328
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c3983
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h392
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c612
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c1552
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c340
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c1810
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h426
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c732
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/testmode.h97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c872
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h249
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c306
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.h94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c460
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c1115
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c1083
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c685
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h569
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c1548
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c2825
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c1988
124 files changed, 89832 insertions, 0 deletions
diff --git a/drivers/net/wireless/intel/Kconfig b/drivers/net/wireless/intel/Kconfig
index 0a7cd61e528c..5b14f2f64a8a 100644
--- a/drivers/net/wireless/intel/Kconfig
+++ b/drivers/net/wireless/intel/Kconfig
@@ -13,5 +13,6 @@ if WLAN_VENDOR_INTEL
13 13
14source "drivers/net/wireless/intel/ipw2x00/Kconfig" 14source "drivers/net/wireless/intel/ipw2x00/Kconfig"
15source "drivers/net/wireless/intel/iwlegacy/Kconfig" 15source "drivers/net/wireless/intel/iwlegacy/Kconfig"
16source "drivers/net/wireless/intel/iwlwifi/Kconfig"
16 17
17endif # WLAN_VENDOR_INTEL 18endif # WLAN_VENDOR_INTEL
diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile
index cec507d3c6bf..c9cbcc85b569 100644
--- a/drivers/net/wireless/intel/Makefile
+++ b/drivers/net/wireless/intel/Makefile
@@ -2,3 +2,5 @@ obj-$(CONFIG_IPW2100) += ipw2x00/
2obj-$(CONFIG_IPW2200) += ipw2x00/ 2obj-$(CONFIG_IPW2200) += ipw2x00/
3 3
4obj-$(CONFIG_IWLEGACY) += iwlegacy/ 4obj-$(CONFIG_IWLEGACY) += iwlegacy/
5
6obj-$(CONFIG_IWLWIFI) += iwlwifi/
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
new file mode 100644
index 000000000000..6e949df399d6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -0,0 +1,161 @@
1config IWLWIFI
2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
3 depends on PCI && MAC80211 && HAS_IOMEM
4 select FW_LOADER
5 ---help---
6 Select to build the driver supporting the:
7
8 Intel Wireless WiFi Link Next-Gen AGN
9
10 This option enables support for use with the following hardware:
11 Intel Wireless WiFi Link 6250AGN Adapter
12 Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
13 Intel WiFi Link 1000BGN
14 Intel Wireless WiFi 5150AGN
15 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
16 Intel 6005 Series Wi-Fi Adapters
17 Intel 6030 Series Wi-Fi Adapters
18 Intel Wireless WiFi Link 6150BGN 2 Adapter
19 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
20 Intel 2000 Series Wi-Fi Adapters
21 Intel 7260 Wi-Fi Adapter
22 Intel 3160 Wi-Fi Adapter
23 Intel 7265 Wi-Fi Adapter
24 Intel 8260 Wi-Fi Adapter
25 Intel 3165 Wi-Fi Adapter
26
27
28 This driver uses the kernel's mac80211 subsystem.
29
30 In order to use this driver, you will need a firmware
31 image for it. You can obtain the microcode from:
32
33 <http://wireless.kernel.org/en/users/Drivers/iwlwifi>.
34
35 The firmware is typically installed in /lib/firmware. You can
36 look in the hotplug script /etc/hotplug/firmware.agent to
37 determine which directory FIRMWARE_DIR is set to when the script
38 runs.
39
40 If you want to compile the driver as a module ( = code which can be
41 inserted in and removed from the running kernel whenever you want),
42 say M here and read <file:Documentation/kbuild/modules.txt>. The
43 module will be called iwlwifi.
44
45if IWLWIFI
46
47config IWLWIFI_LEDS
48 bool
49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
50 select LEDS_TRIGGERS
51 select MAC80211_LEDS
52 default y
53
54config IWLDVM
55 tristate "Intel Wireless WiFi DVM Firmware support"
56 default IWLWIFI
57 help
58 This is the driver that supports the DVM firmware. The list
59 of the devices that use this firmware is available here:
60 https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
61
62config IWLMVM
63 tristate "Intel Wireless WiFi MVM Firmware support"
64 select WANT_DEV_COREDUMP
65 help
66 This is the driver that supports the MVM firmware. The list
67 of the devices that use this firmware is available here:
68 https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
69
70# don't call it _MODULE -- will confuse Kconfig/fixdep/...
71config IWLWIFI_OPMODE_MODULAR
72 bool
73 default y if IWLDVM=m
74 default y if IWLMVM=m
75
76comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
77 depends on IWLDVM=n && IWLMVM=n
78
79config IWLWIFI_BCAST_FILTERING
80 bool "Enable broadcast filtering"
81 depends on IWLMVM
82 help
83 Say Y here to enable default bcast filtering configuration.
84
85 Enabling broadcast filtering will drop any incoming wireless
86 broadcast frames, except some very specific predefined
87 patterns (e.g. incoming arp requests).
88
89 If unsure, don't enable this option, as some programs might
90 expect incoming broadcasts for their normal operations.
91
92config IWLWIFI_UAPSD
93 bool "enable U-APSD by default"
94 depends on IWLMVM
95 help
96 Say Y here to enable U-APSD by default. This may cause
97 interoperability problems with some APs, manifesting in lower than
98 expected throughput due to those APs not enabling aggregation
99
100 If unsure, say N.
101
102menu "Debugging Options"
103
104config IWLWIFI_DEBUG
105 bool "Enable full debugging output in the iwlwifi driver"
106 ---help---
107 This option will enable debug tracing output for the iwlwifi drivers
108
109 This will result in the kernel module being ~100k larger. You can
110 control which debug output is sent to the kernel log by setting the
111 value in
112
113 /sys/module/iwlwifi/parameters/debug
114
115 This entry will only exist if this option is enabled.
116
117 To set a value, simply echo an 8-byte hex value to the same file:
118
119 % echo 0x43fff > /sys/module/iwlwifi/parameters/debug
120
121 You can find the list of debug mask values in:
122 drivers/net/wireless/iwlwifi/iwl-debug.h
123
124 If this is your first time using this driver, you should say Y here
125 as the debug information can assist others in helping you resolve
126 any problems you may encounter.
127
128config IWLWIFI_DEBUGFS
129 bool "iwlwifi debugfs support"
130 depends on MAC80211_DEBUGFS
131 ---help---
132 Enable creation of debugfs files for the iwlwifi drivers. This
133 is a low-impact option that allows getting insight into the
134 driver's state at runtime.
135
136config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
137 bool "Experimental uCode support"
138 depends on IWLWIFI_DEBUG
139 ---help---
140 Enable use of experimental ucode for testing and debugging.
141
142config IWLWIFI_DEVICE_TRACING
143 bool "iwlwifi device access tracing"
144 depends on EVENT_TRACING
145 default y
146 help
147 Say Y here to trace all commands, including TX frames and IO
148 accesses, sent to the device. If you say yes, iwlwifi will
149 register with the ftrace framework for event tracing and dump
150 all this information to the ringbuffer, you may need to
151 increase the ringbuffer size. See the ftrace documentation
152 for more information.
153
154 When tracing is not enabled, this option still has some
155 (though rather small) overhead.
156
157 If unsure, say Y so we can help you better when problems
158 occur.
159endmenu
160
161endif
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
new file mode 100644
index 000000000000..dbfc5b18bcb7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -0,0 +1,23 @@
1# common
2obj-$(CONFIG_IWLWIFI) += iwlwifi.o
3iwlwifi-objs += iwl-io.o
4iwlwifi-objs += iwl-drv.o
5iwlwifi-objs += iwl-debug.o
6iwlwifi-objs += iwl-notif-wait.o
7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
10iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
11iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
12iwlwifi-objs += iwl-trans.o
13
14iwlwifi-objs += $(iwlwifi-m)
15
16iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
17
18ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
19
20obj-$(CONFIG_IWLDVM) += dvm/
21obj-$(CONFIG_IWLMVM) += mvm/
22
23CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
new file mode 100644
index 000000000000..4d19685f31c3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -0,0 +1,13 @@
1# DVM
2obj-$(CONFIG_IWLDVM) += iwldvm.o
3iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
4iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
5
6iwldvm-objs += power.o
7iwldvm-objs += scan.o
8iwldvm-objs += rxon.o devices.o
9
10iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
11iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
12
13ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
new file mode 100644
index 000000000000..991def878881
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -0,0 +1,485 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_agn_h__
64#define __iwl_agn_h__
65
66#include "iwl-config.h"
67
68#include "dev.h"
69
70/* The first 11 queues (0-10) are used otherwise */
71#define IWLAGN_FIRST_AMPDU_QUEUE 11
72
73/* AUX (TX during scan dwell) queue */
74#define IWL_AUX_QUEUE 10
75
76#define IWL_INVALID_STATION 255
77
78/* device operations */
79extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg;
80extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg;
81extern const struct iwl_dvm_cfg iwl_dvm_105_cfg;
82extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg;
83extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg;
84extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg;
85extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg;
86extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg;
87extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg;
88extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
89
90
91#define TIME_UNIT 1024
92
93/*****************************************************
94* DRIVER STATUS FUNCTIONS
95******************************************************/
96#define STATUS_RF_KILL_HW 0
97#define STATUS_CT_KILL 1
98#define STATUS_ALIVE 2
99#define STATUS_READY 3
100#define STATUS_EXIT_PENDING 5
101#define STATUS_STATISTICS 6
102#define STATUS_SCANNING 7
103#define STATUS_SCAN_ABORTING 8
104#define STATUS_SCAN_HW 9
105#define STATUS_FW_ERROR 10
106#define STATUS_CHANNEL_SWITCH_PENDING 11
107#define STATUS_SCAN_COMPLETE 12
108#define STATUS_POWER_PMI 13
109
110struct iwl_ucode_capabilities;
111
112extern const struct ieee80211_ops iwlagn_hw_ops;
113
114static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
115{
116 hdr->op_code = cmd;
117 hdr->first_group = 0;
118 hdr->groups_num = 1;
119 hdr->data_valid = 1;
120}
121
122void iwl_down(struct iwl_priv *priv);
123void iwl_cancel_deferred_work(struct iwl_priv *priv);
124void iwlagn_prepare_restart(struct iwl_priv *priv);
125void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
126 struct iwl_rx_cmd_buffer *rxb);
127
128bool iwl_check_for_ct_kill(struct iwl_priv *priv);
129
130void iwlagn_lift_passive_no_rx(struct iwl_priv *priv);
131
132/* MAC80211 */
133struct ieee80211_hw *iwl_alloc_all(void);
134int iwlagn_mac_setup_register(struct iwl_priv *priv,
135 const struct iwl_ucode_capabilities *capa);
136void iwlagn_mac_unregister(struct iwl_priv *priv);
137
138/* commands */
139int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
140int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
141 u32 flags, u16 len, const void *data);
142
143/* RXON */
144void iwl_connection_init_rx_config(struct iwl_priv *priv,
145 struct iwl_rxon_context *ctx);
146int iwlagn_set_pan_params(struct iwl_priv *priv);
147int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
148void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
149int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
150void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
151 struct ieee80211_vif *vif,
152 struct ieee80211_bss_conf *bss_conf,
153 u32 changes);
154void iwlagn_config_ht40(struct ieee80211_conf *conf,
155 struct iwl_rxon_context *ctx);
156void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
157void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
158 struct iwl_rxon_context *ctx);
159void iwl_set_flags_for_band(struct iwl_priv *priv,
160 struct iwl_rxon_context *ctx,
161 enum ieee80211_band band,
162 struct ieee80211_vif *vif);
163
164/* uCode */
165int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
166void iwl_send_prio_tbl(struct iwl_priv *priv);
167int iwl_init_alive_start(struct iwl_priv *priv);
168int iwl_run_init_ucode(struct iwl_priv *priv);
169int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
170 enum iwl_ucode_type ucode_type);
171int iwl_send_calib_results(struct iwl_priv *priv);
172int iwl_calib_set(struct iwl_priv *priv,
173 const struct iwl_calib_hdr *cmd, int len);
174void iwl_calib_free_results(struct iwl_priv *priv);
175int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
176 char **buf);
177int iwlagn_hw_valid_rtc_data_addr(u32 addr);
178
179/* lib */
180int iwlagn_send_tx_power(struct iwl_priv *priv);
181void iwlagn_temperature(struct iwl_priv *priv);
182int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk);
183void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
184int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
185int iwl_send_statistics_request(struct iwl_priv *priv,
186 u8 flags, bool clear);
187
188static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
189 struct iwl_priv *priv, enum ieee80211_band band)
190{
191 return priv->hw->wiphy->bands[band];
192}
193
194#ifdef CONFIG_PM_SLEEP
195int iwlagn_send_patterns(struct iwl_priv *priv,
196 struct cfg80211_wowlan *wowlan);
197int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
198#endif
199
200/* rx */
201int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
202void iwl_setup_rx_handlers(struct iwl_priv *priv);
203void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
204
205
206/* tx */
207int iwlagn_tx_skb(struct iwl_priv *priv,
208 struct ieee80211_sta *sta,
209 struct sk_buff *skb);
210int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
211 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
212int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
213 struct ieee80211_sta *sta, u16 tid, u8 buf_size);
214int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
215 struct ieee80211_sta *sta, u16 tid);
216int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
217 struct ieee80211_sta *sta, u16 tid);
218void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
219 struct iwl_rx_cmd_buffer *rxb);
220void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
221
222static inline u32 iwl_tx_status_to_mac80211(u32 status)
223{
224 status &= TX_STATUS_MSK;
225
226 switch (status) {
227 case TX_STATUS_SUCCESS:
228 case TX_STATUS_DIRECT_DONE:
229 return IEEE80211_TX_STAT_ACK;
230 case TX_STATUS_FAIL_DEST_PS:
231 case TX_STATUS_FAIL_PASSIVE_NO_RX:
232 return IEEE80211_TX_STAT_TX_FILTERED;
233 default:
234 return 0;
235 }
236}
237
238static inline bool iwl_is_tx_success(u32 status)
239{
240 status &= TX_STATUS_MSK;
241 return (status == TX_STATUS_SUCCESS) ||
242 (status == TX_STATUS_DIRECT_DONE);
243}
244
245u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
246
247/* scan */
248void iwlagn_post_scan(struct iwl_priv *priv);
249int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
250void iwl_init_scan_params(struct iwl_priv *priv);
251int iwl_scan_cancel(struct iwl_priv *priv);
252void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
253void iwl_force_scan_end(struct iwl_priv *priv);
254void iwl_internal_short_hw_scan(struct iwl_priv *priv);
255void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
256void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
257void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
258int __must_check iwl_scan_initiate(struct iwl_priv *priv,
259 struct ieee80211_vif *vif,
260 enum iwl_scan_type scan_type,
261 enum ieee80211_band band);
262
263/* For faster active scanning, scan will move to the next channel if fewer than
264 * PLCP_QUIET_THRESH packets are heard on this channel within
265 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
266 * time if it's a quiet channel (nothing responded to our probe, and there's
267 * no other traffic).
268 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
269#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
270#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
271
272#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15)
273
274
275/* bt coex */
276void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
277void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
278void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
279void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
280void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
281void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
282
283static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
284{
285 return priv->lib->bt_params &&
286 priv->lib->bt_params->advanced_bt_coexist;
287}
288
289#ifdef CONFIG_IWLWIFI_DEBUG
290const char *iwl_get_tx_fail_reason(u32 status);
291const char *iwl_get_agg_tx_fail_reason(u16 status);
292#else
293static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
294static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
295#endif
296
297
298/* station management */
299int iwlagn_manage_ibss_station(struct iwl_priv *priv,
300 struct ieee80211_vif *vif, bool add);
301#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
302#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
303#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
304 being activated */
305#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
306 (this is for the IBSS BSSID stations) */
307#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
308
309
310void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
311void iwl_clear_ucode_stations(struct iwl_priv *priv,
312 struct iwl_rxon_context *ctx);
313void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
314int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
315int iwl_send_add_sta(struct iwl_priv *priv,
316 struct iwl_addsta_cmd *sta, u8 flags);
317int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
318 const u8 *addr, bool is_ap,
319 struct ieee80211_sta *sta, u8 *sta_id_r);
320int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
321 const u8 *addr);
322void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
323 const u8 *addr);
324u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
325 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
326
327int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
328 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
329void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
330int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
331 struct ieee80211_sta *sta);
332
333bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
334 struct iwl_rxon_context *ctx,
335 struct ieee80211_sta *sta);
336
337static inline int iwl_sta_id(struct ieee80211_sta *sta)
338{
339 if (WARN_ON(!sta))
340 return IWL_INVALID_STATION;
341
342 return ((struct iwl_station_priv *)sta->drv_priv)->sta_id;
343}
344
345int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
346 struct iwl_rxon_context *ctx);
347int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
348 const u8 *addr, u8 *sta_id_r);
349int iwl_remove_default_wep_key(struct iwl_priv *priv,
350 struct iwl_rxon_context *ctx,
351 struct ieee80211_key_conf *key);
352int iwl_set_default_wep_key(struct iwl_priv *priv,
353 struct iwl_rxon_context *ctx,
354 struct ieee80211_key_conf *key);
355int iwl_restore_default_wep_keys(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx);
357int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
358 struct ieee80211_key_conf *key,
359 struct ieee80211_sta *sta);
360int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
361 struct ieee80211_key_conf *key,
362 struct ieee80211_sta *sta);
363void iwl_update_tkip_key(struct iwl_priv *priv,
364 struct ieee80211_vif *vif,
365 struct ieee80211_key_conf *keyconf,
366 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
367int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
368int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
369 int tid, u16 ssn);
370int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
371 int tid);
372void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
373int iwl_update_bcast_station(struct iwl_priv *priv,
374 struct iwl_rxon_context *ctx);
375int iwl_update_bcast_stations(struct iwl_priv *priv);
376
377/* rate */
378static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
379{
380 return BIT(ant_idx) << RATE_MCS_ANT_POS;
381}
382
383static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
384{
385 return le32_to_cpu(rate_n_flags) & RATE_MCS_RATE_MSK;
386}
387
388static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
389{
390 return cpu_to_le32(flags|(u32)rate);
391}
392
393int iwl_alive_start(struct iwl_priv *priv);
394
395#ifdef CONFIG_IWLWIFI_DEBUG
396void iwl_print_rx_config_cmd(struct iwl_priv *priv,
397 enum iwl_rxon_context_id ctxid);
398#else
399static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
400 enum iwl_rxon_context_id ctxid)
401{
402}
403#endif
404
405/* status checks */
406
407static inline int iwl_is_ready(struct iwl_priv *priv)
408{
409 /* The adapter is 'ready' if READY EXIT_PENDING is not set */
410 return test_bit(STATUS_READY, &priv->status) &&
411 !test_bit(STATUS_EXIT_PENDING, &priv->status);
412}
413
414static inline int iwl_is_alive(struct iwl_priv *priv)
415{
416 return test_bit(STATUS_ALIVE, &priv->status);
417}
418
419static inline int iwl_is_rfkill(struct iwl_priv *priv)
420{
421 return test_bit(STATUS_RF_KILL_HW, &priv->status);
422}
423
424static inline int iwl_is_ctkill(struct iwl_priv *priv)
425{
426 return test_bit(STATUS_CT_KILL, &priv->status);
427}
428
429static inline int iwl_is_ready_rf(struct iwl_priv *priv)
430{
431 if (iwl_is_rfkill(priv))
432 return 0;
433
434 return iwl_is_ready(priv);
435}
436
437static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
438{
439 if (state)
440 set_bit(STATUS_POWER_PMI, &priv->status);
441 else
442 clear_bit(STATUS_POWER_PMI, &priv->status);
443 iwl_trans_set_pmi(priv->trans, state);
444}
445
446#ifdef CONFIG_IWLWIFI_DEBUGFS
447int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
448#else
449static inline int iwl_dbgfs_register(struct iwl_priv *priv,
450 struct dentry *dbgfs_dir)
451{
452 return 0;
453}
454#endif /* CONFIG_IWLWIFI_DEBUGFS */
455
456#ifdef CONFIG_IWLWIFI_DEBUG
457#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
458do { \
459 if (!iwl_is_rfkill((m))) \
460 IWL_ERR(m, fmt, ##args); \
461 else \
462 __iwl_err((m)->dev, true, \
463 !iwl_have_debug_level(IWL_DL_RADIO), \
464 fmt, ##args); \
465} while (0)
466#else
467#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
468do { \
469 if (!iwl_is_rfkill((m))) \
470 IWL_ERR(m, fmt, ##args); \
471 else \
472 __iwl_err((m)->dev, true, true, fmt, ##args); \
473} while (0)
474#endif /* CONFIG_IWLWIFI_DEBUG */
475
476extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1];
477
478static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
479{
480 const char *s = iwl_dvm_cmd_strings[cmd];
481 if (s)
482 return s;
483 return "UNKNOWN";
484}
485#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
new file mode 100644
index 000000000000..20e6aa910700
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -0,0 +1,1113 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/slab.h>
64#include <net/mac80211.h>
65
66#include "iwl-trans.h"
67
68#include "dev.h"
69#include "calib.h"
70#include "agn.h"
71
72/*****************************************************************************
73 * INIT calibrations framework
74 *****************************************************************************/
75
76/* Opaque calibration results */
/* Opaque calibration result, stored on priv->calib_results.
 * The uCode payload starts at @hdr and is @cmd_len bytes in total;
 * bytes beyond the header are allocated directly after the struct
 * (see iwl_calib_set()). */
struct iwl_calib_result {
	struct list_head list;	/* link in priv->calib_results */
	size_t cmd_len;		/* length of hdr + trailing data */
	struct iwl_calib_hdr hdr;
	/* data follows */
};
83
/* Host-endian snapshot of the beacon silence/energy statistics used
 * by the CCK sensitivity algorithm, one value per rx chain (A/B/C).
 * Filled from the le32 uCode statistics in
 * iwl_sensitivity_calibration(). */
struct statistics_general_data {
	u32 beacon_silence_rssi_a;
	u32 beacon_silence_rssi_b;
	u32 beacon_silence_rssi_c;
	u32 beacon_energy_a;
	u32 beacon_energy_b;
	u32 beacon_energy_c;
};
92
93int iwl_send_calib_results(struct iwl_priv *priv)
94{
95 struct iwl_host_cmd hcmd = {
96 .id = REPLY_PHY_CALIBRATION_CMD,
97 };
98 struct iwl_calib_result *res;
99
100 list_for_each_entry(res, &priv->calib_results, list) {
101 int ret;
102
103 hcmd.len[0] = res->cmd_len;
104 hcmd.data[0] = &res->hdr;
105 hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
106 ret = iwl_dvm_send_cmd(priv, &hcmd);
107 if (ret) {
108 IWL_ERR(priv, "Error %d on calib cmd %d\n",
109 ret, res->hdr.op_code);
110 return ret;
111 }
112 }
113
114 return 0;
115}
116
117int iwl_calib_set(struct iwl_priv *priv,
118 const struct iwl_calib_hdr *cmd, int len)
119{
120 struct iwl_calib_result *res, *tmp;
121
122 res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
123 GFP_ATOMIC);
124 if (!res)
125 return -ENOMEM;
126 memcpy(&res->hdr, cmd, len);
127 res->cmd_len = len;
128
129 list_for_each_entry(tmp, &priv->calib_results, list) {
130 if (tmp->hdr.op_code == res->hdr.op_code) {
131 list_replace(&tmp->list, &res->list);
132 kfree(tmp);
133 return 0;
134 }
135 }
136
137 /* wasn't in list already */
138 list_add_tail(&res->list, &priv->calib_results);
139
140 return 0;
141}
142
143void iwl_calib_free_results(struct iwl_priv *priv)
144{
145 struct iwl_calib_result *res, *tmp;
146
147 list_for_each_entry_safe(res, tmp, &priv->calib_results, list) {
148 list_del(&res->list);
149 kfree(res);
150 }
151}
152
153/*****************************************************************************
154 * RUNTIME calibrations framework
155 *****************************************************************************/
156
/* "false alarms" are signals that our DSP tries to lock onto,
 * but then determines that they are either noise, or transmissions
 * from a distant wireless network (also "noise", really) that get
 * "stepped on" by stronger transmissions within our own network.
 * This algorithm attempts to set a sensitivity level that is high
 * enough to receive all of our own network traffic, but not so
 * high that our DSP gets too busy trying to lock onto non-network
 * activity/noise.
 *
 * Updates nrg_th_cck and the CCK auto-correlation thresholds in
 * priv->sensitivity_data in place; always returns 0. */
static int iwl_sens_energy_cck(struct iwl_priv *priv,
			       u32 norm_fa,
			       u32 rx_enable_time,
			       struct statistics_general_data *rx_info)
{
	u32 max_nrg_cck = 0;
	int i = 0;
	u8 max_silence_rssi = 0;
	u32 silence_ref = 0;
	u8 silence_rssi_a = 0;
	u8 silence_rssi_b = 0;
	u8 silence_rssi_c = 0;
	u32 val;

	/* "false_alarms" values below are cross-multiplications to assess the
	 * numbers of false alarms within the measured period of actual Rx
	 * (Rx is off when we're txing), vs the min/max expected false alarms
	 * (some should be expected if rx is sensitive enough) in a
	 * hypothetical listening period of 200 time units (TU), 204.8 msec:
	 *
	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
	 *
	 * */
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
	struct iwl_sensitivity_data *data = NULL;
	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;

	data = &(priv->sensitivity_data);

	data->nrg_auto_corr_silence_diff = 0;

	/* Find max silence rssi among all 3 receivers.
	 * This is background noise, which may include transmissions from other
	 * networks, measured during silence before our network's beacon */
	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
			    ALL_BAND_FILTER) >> 8);
	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
			    ALL_BAND_FILTER) >> 8);
	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
			    ALL_BAND_FILTER) >> 8);

	val = max(silence_rssi_b, silence_rssi_c);
	max_silence_rssi = max(silence_rssi_a, (u8) val);

	/* Store silence rssi in 20-beacon history table */
	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
	data->nrg_silence_idx++;
	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
		data->nrg_silence_idx = 0;

	/* Find max silence rssi across 20 beacon history */
	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
		val = data->nrg_silence_rssi[i];
		silence_ref = max(silence_ref, val);
	}
	IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
			silence_rssi_a, silence_rssi_b, silence_rssi_c,
			silence_ref);

	/* Find max rx energy (min value!) among all 3 receivers,
	 * measured during beacon frame.
	 * Save it in 10-beacon history table. */
	i = data->nrg_energy_idx;
	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);

	data->nrg_energy_idx++;
	if (data->nrg_energy_idx >= 10)
		data->nrg_energy_idx = 0;

	/* Find min rx energy (max value) across 10 beacon history.
	 * This is the minimum signal level that we want to receive well.
	 * Add backoff (margin so we don't miss slightly lower energy frames).
	 * This establishes an upper bound (min value) for energy threshold. */
	max_nrg_cck = data->nrg_value[0];
	for (i = 1; i < 10; i++)
		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
	max_nrg_cck += 6;

	IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
			rx_info->beacon_energy_c, max_nrg_cck - 6);

	/* Count number of consecutive beacons with fewer-than-desired
	 * false alarms. */
	if (false_alarms < min_false_alarms)
		data->num_in_cck_no_fa++;
	else
		data->num_in_cck_no_fa = 0;
	IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
			data->num_in_cck_no_fa);

	/* If we got too many false alarms this time, reduce sensitivity */
	if ((false_alarms > max_false_alarms) &&
		(data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
		     false_alarms, max_false_alarms);
		IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
		data->nrg_curr_state = IWL_FA_TOO_MANY;
		/* Store for "fewer than desired" on later beacon */
		data->nrg_silence_ref = silence_ref;

		/* increase energy threshold (reduce nrg value)
		 * to decrease sensitivity */
		data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
	/* Else if we got fewer than desired, increase sensitivity */
	} else if (false_alarms < min_false_alarms) {
		data->nrg_curr_state = IWL_FA_TOO_FEW;

		/* Compare silence level with silence level for most recent
		 * healthy number or too many false alarms */
		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
						   (s32)silence_ref;

		IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n",
			 false_alarms, min_false_alarms,
			 data->nrg_auto_corr_silence_diff);

		/* Increase value to increase sensitivity, but only if:
		 * 1a) previous beacon did *not* have *too many* false alarms
		 * 1b) AND there's a significant difference in Rx levels
		 *      from a previous beacon with too many, or healthy # FAs
		 * OR 2) We've seen a lot of beacons (100) with too few
		 *       false alarms */
		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
			((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
			(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

			IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
			/* Increase nrg value to increase sensitivity */
			val = data->nrg_th_cck + NRG_STEP_CCK;
			data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
		} else {
			IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n");
		}

	/* Else we got a healthy number of false alarms, keep status quo */
	} else {
		IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
		data->nrg_curr_state = IWL_FA_GOOD_RANGE;

		/* Store for use in "fewer than desired" with later beacon */
		data->nrg_silence_ref = silence_ref;

		/* If previous beacon had too many false alarms,
		 * give it some extra margin by reducing sensitivity again
		 * (but don't go below measured energy of desired Rx) */
		if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
			IWL_DEBUG_CALIB(priv, "... increasing margin\n");
			if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
				data->nrg_th_cck -= NRG_MARGIN;
			else
				data->nrg_th_cck = max_nrg_cck;
		}
	}

	/* Make sure the energy threshold does not go above the measured
	 * energy of the desired Rx signals (reduced by backoff margin),
	 * or else we might start missing Rx frames.
	 * Lower value is higher energy, so we use max()!
	 */
	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
	IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);

	data->nrg_prev_state = data->nrg_curr_state;

	/* Auto-correlation CCK algorithm */
	if (false_alarms > min_false_alarms) {

		/* increase auto_corr values to decrease sensitivity
		 * so the DSP won't be disturbed by the noise
		 */
		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
		else {
			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
			data->auto_corr_cck =
				min((u32)ranges->auto_corr_max_cck, val);
		}
		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc =
			min((u32)ranges->auto_corr_max_cck_mrc, val);
	} else if ((false_alarms < min_false_alarms) &&
	   ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
	   (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

		/* Decrease auto_corr values to increase sensitivity */
		val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
		data->auto_corr_cck =
			max((u32)ranges->auto_corr_min_cck, val);
		val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc =
			max((u32)ranges->auto_corr_min_cck_mrc, val);
	}

	return 0;
}
364
365
/*
 * OFDM counterpart of the CCK algorithm above: compare the measured
 * false-alarm rate against the allowed window for the measured rx
 * time and step the four OFDM auto-correlation thresholds up (less
 * sensitive) or down (more sensitive), clamped to the per-device
 * ranges.  Updates priv->sensitivity_data in place; always returns 0.
 */
static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
				   u32 norm_fa,
				   u32 rx_enable_time)
{
	u32 val;
	/* scale to a hypothetical 200 TU (204.8 ms) listening window,
	 * same cross-multiplication as in iwl_sens_energy_cck() */
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
	u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
	struct iwl_sensitivity_data *data = NULL;
	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;

	data = &(priv->sensitivity_data);

	/* If we got too many false alarms this time, reduce sensitivity */
	if (false_alarms > max_false_alarms) {

		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
			     false_alarms, max_false_alarms);

		val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm =
			min((u32)ranges->auto_corr_max_ofdm, val);

		val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc =
			min((u32)ranges->auto_corr_max_ofdm_mrc, val);

		val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_x1 =
			min((u32)ranges->auto_corr_max_ofdm_x1, val);

		val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc_x1 =
			min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
	}

	/* Else if we got fewer than desired, increase sensitivity */
	else if (false_alarms < min_false_alarms) {

		IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
			     false_alarms, min_false_alarms);

		val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm =
			max((u32)ranges->auto_corr_min_ofdm, val);

		val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc =
			max((u32)ranges->auto_corr_min_ofdm_mrc, val);

		val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_x1 =
			max((u32)ranges->auto_corr_min_ofdm_x1, val);

		val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
		data->auto_corr_ofdm_mrc_x1 =
			max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
	} else {
		/* healthy false-alarm rate: leave all thresholds alone */
		IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
			 min_false_alarms, false_alarms, max_false_alarms);
	}
	return 0;
}
429
/*
 * Serialize the current driver sensitivity state into the legacy
 * portion of a SENSITIVITY_CMD table (@tbl, little-endian u16 slots
 * indexed by the HD_*_INDEX constants), for both the classic and the
 * enhanced command layouts.
 */
static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
					       struct iwl_sensitivity_data *data,
					       __le16 *tbl)
{
	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm);
	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_x1);
	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);

	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
				cpu_to_le16((u16)data->auto_corr_cck);
	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16((u16)data->auto_corr_cck_mrc);

	tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
				cpu_to_le16((u16)data->nrg_th_cck);
	tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
				cpu_to_le16((u16)data->nrg_th_ofdm);

	tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
				cpu_to_le16(data->barker_corr_th_min);
	tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
				cpu_to_le16(data->barker_corr_th_min_mrc);
	tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
				cpu_to_le16(data->nrg_th_cca);

	IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
			data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
			data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
			data->nrg_th_ofdm);

	IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
			data->auto_corr_cck, data->auto_corr_cck_mrc,
			data->nrg_th_cck);
}
469
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed.
 * priv->sensitivity_tbl caches the last table sent, so unchanged
 * state never reaches the firmware.  NOTE(review): &cmd lives on the
 * stack while the command is CMD_ASYNC — assumes iwl_dvm_send_cmd()
 * copies the payload since DFL_NOCOPY is not set; confirm. */
static int iwl_sensitivity_write(struct iwl_priv *priv)
{
	struct iwl_sensitivity_cmd cmd;
	struct iwl_sensitivity_data *data = NULL;
	struct iwl_host_cmd cmd_out = {
		.id = SENSITIVITY_CMD,
		.len = { sizeof(struct iwl_sensitivity_cmd), },
		.flags = CMD_ASYNC,
		.data = { &cmd, },
	};

	data = &(priv->sensitivity_data);

	memset(&cmd, 0, sizeof(cmd));

	iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);

	/* Update uCode's "work" table, and copy it to DSP */
	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;

	/* Don't send command to uCode if nothing has changed */
	if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
		    sizeof(u16)*HD_TABLE_SIZE)) {
		IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
		return 0;
	}

	/* Copy table for comparison next time */
	memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
	       sizeof(u16)*HD_TABLE_SIZE);

	return iwl_dvm_send_cmd(priv, &cmd_out);
}
504
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed.
 * Enhanced-table variant: the legacy slots are filled from driver
 * state, the enhanced slots hold fixed per-version constants (v1 or
 * v2 selected by priv->lib->hd_v2).  Both the legacy and the
 * enhanced halves are cached so the command is skipped when nothing
 * changed. */
static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
{
	struct iwl_enhance_sensitivity_cmd cmd;
	struct iwl_sensitivity_data *data = NULL;
	struct iwl_host_cmd cmd_out = {
		.id = SENSITIVITY_CMD,
		.len = { sizeof(struct iwl_enhance_sensitivity_cmd), },
		.flags = CMD_ASYNC,
		.data = { &cmd, },
	};

	data = &(priv->sensitivity_data);

	memset(&cmd, 0, sizeof(cmd));

	iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);

	/* Fixed enhanced-detection constants, per uCode API version */
	if (priv->lib->hd_v2) {
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
			HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
			HD_INA_NON_SQUARE_DET_CCK_DATA_V2;
		cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
			HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2;
	} else {
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
			HD_INA_NON_SQUARE_DET_OFDM_DATA_V1;
		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
			HD_INA_NON_SQUARE_DET_CCK_DATA_V1;
		cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
			HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1;
		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
			HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1;
		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
			HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1;
	}

	/* Update uCode's "work" table, and copy it to DSP */
	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;

	/* Don't send command to uCode if nothing has changed */
	if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]),
		    sizeof(u16)*HD_TABLE_SIZE) &&
	    !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX],
		    &(priv->enhance_sensitivity_tbl[0]),
		    sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) {
		IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
		return 0;
	}

	/* Copy table for comparison next time */
	memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]),
	       sizeof(u16)*HD_TABLE_SIZE);
	memcpy(&(priv->enhance_sensitivity_tbl[0]),
	       &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
	       sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);

	return iwl_dvm_send_cmd(priv, &cmd_out);
}
593
/*
 * Reset the runtime sensitivity algorithm to the per-device defaults
 * from priv->hw_params.sens and push the initial table to the uCode.
 * No-op when sensitivity calibration is disabled or no ranges are
 * available.
 */
void iwl_init_sensitivity(struct iwl_priv *priv)
{
	int ret = 0;
	int i;
	struct iwl_sensitivity_data *data = NULL;
	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;

	if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
		return;

	IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");

	/* Clear driver's sensitivity algo data */
	data = &(priv->sensitivity_data);

	if (ranges == NULL)
		return;

	memset(data, 0, sizeof(struct iwl_sensitivity_data));

	/* explicit re-initialization of the algorithm state (history
	 * indices, counters, 10/20-beacon history tables) */
	data->num_in_cck_no_fa = 0;
	data->nrg_curr_state = IWL_FA_TOO_MANY;
	data->nrg_prev_state = IWL_FA_TOO_MANY;
	data->nrg_silence_ref = 0;
	data->nrg_silence_idx = 0;
	data->nrg_energy_idx = 0;

	for (i = 0; i < 10; i++)
		data->nrg_value[i] = 0;

	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
		data->nrg_silence_rssi[i] = 0;

	/* start from the most sensitive end of each range */
	data->auto_corr_ofdm =  ranges->auto_corr_min_ofdm;
	data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
	data->auto_corr_ofdm_x1  = ranges->auto_corr_min_ofdm_x1;
	data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
	data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
	data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
	data->nrg_th_cck = ranges->nrg_th_cck;
	data->nrg_th_ofdm = ranges->nrg_th_ofdm;
	data->barker_corr_th_min = ranges->barker_corr_th_min;
	data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
	data->nrg_th_cca = ranges->nrg_th_cca;

	data->last_bad_plcp_cnt_ofdm = 0;
	data->last_fa_cnt_ofdm = 0;
	data->last_bad_plcp_cnt_cck = 0;
	data->last_fa_cnt_cck = 0;

	/* send whichever command layout this firmware expects */
	if (priv->fw->enhance_sensitivity_table)
		ret |= iwl_enhance_sensitivity_write(priv);
	else
		ret |= iwl_sensitivity_write(priv);
	IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
}
650
/*
 * Periodic sensitivity calibration: snapshot the uCode rx statistics
 * under the statistics lock, convert the monotonically-increasing
 * false-alarm/plcp-error counters to per-interval deltas, then run
 * the OFDM and CCK adaptation algorithms and push any changed table
 * to the uCode.  Called from the statistics path while associated.
 */
void iwl_sensitivity_calibration(struct iwl_priv *priv)
{
	u32 rx_enable_time;
	u32 fa_cck;
	u32 fa_ofdm;
	u32 bad_plcp_cck;
	u32 bad_plcp_ofdm;
	u32 norm_fa_ofdm;
	u32 norm_fa_cck;
	struct iwl_sensitivity_data *data = NULL;
	struct statistics_rx_non_phy *rx_info;
	struct statistics_rx_phy *ofdm, *cck;
	struct statistics_general_data statis;

	if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
		return;

	data = &(priv->sensitivity_data);

	if (!iwl_is_any_associated(priv)) {
		IWL_DEBUG_CALIB(priv, "<< - not associated\n");
		return;
	}

	spin_lock_bh(&priv->statistics.lock);
	rx_info = &priv->statistics.rx_non_phy;
	ofdm = &priv->statistics.rx_ofdm;
	cck = &priv->statistics.rx_cck;
	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
		spin_unlock_bh(&priv->statistics.lock);
		return;
	}

	/* Extract Statistics: (le32 -> host endian, under the lock) */
	rx_enable_time = le32_to_cpu(rx_info->channel_load);
	fa_cck = le32_to_cpu(cck->false_alarm_cnt);
	fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
	bad_plcp_cck = le32_to_cpu(cck->plcp_err);
	bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);

	statis.beacon_silence_rssi_a =
			le32_to_cpu(rx_info->beacon_silence_rssi_a);
	statis.beacon_silence_rssi_b =
			le32_to_cpu(rx_info->beacon_silence_rssi_b);
	statis.beacon_silence_rssi_c =
			le32_to_cpu(rx_info->beacon_silence_rssi_c);
	statis.beacon_energy_a =
			le32_to_cpu(rx_info->beacon_energy_a);
	statis.beacon_energy_b =
			le32_to_cpu(rx_info->beacon_energy_b);
	statis.beacon_energy_c =
			le32_to_cpu(rx_info->beacon_energy_c);

	spin_unlock_bh(&priv->statistics.lock);

	IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);

	if (!rx_enable_time) {
		IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
		return;
	}

	/* These statistics increase monotonically, and do not reset
	 * at each beacon. Calculate difference from last value, or just
	 * use the new statistics value if it has reset or wrapped around. */
	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
	else {
		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
	}

	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
	else {
		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
	}

	if (data->last_fa_cnt_ofdm > fa_ofdm)
		data->last_fa_cnt_ofdm = fa_ofdm;
	else {
		fa_ofdm -= data->last_fa_cnt_ofdm;
		data->last_fa_cnt_ofdm += fa_ofdm;
	}

	if (data->last_fa_cnt_cck > fa_cck)
		data->last_fa_cnt_cck = fa_cck;
	else {
		fa_cck -= data->last_fa_cnt_cck;
		data->last_fa_cnt_cck += fa_cck;
	}

	/* Total aborted signal locks */
	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
	norm_fa_cck = fa_cck + bad_plcp_cck;

	IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
			bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);

	iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
	iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
	if (priv->fw->enhance_sensitivity_table)
		iwl_enhance_sensitivity_write(priv);
	else
		iwl_sensitivity_write(priv);
}
759
760static inline u8 find_first_chain(u8 mask)
761{
762 if (mask & ANT_A)
763 return CHAIN_A;
764 if (mask & ANT_B)
765 return CHAIN_B;
766 return CHAIN_C;
767}
768
/**
 * iwl_find_disconn_antenna - run disconnected antenna algorithm
 * @priv: driver private data
 * @average_sig: out, per-chain signal averaged over IWL_CAL_NUM_BEACONS
 * @data: chain-noise state; disconn_array[] and active_chains updated
 *
 * Find out which antennas are disconnected by comparing each chain's
 * averaged beacon signal against the strongest one; chains more than
 * MAXIMUM_ALLOWED_PATHLOSS below it are marked disconnected.  Includes
 * a workaround to keep at least one valid tx chain connected.
 */
static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
				     struct iwl_chain_noise_data *data)
{
	u32 active_chains = 0;
	u32 max_average_sig;
	u16 max_average_sig_antenna_i;
	u8 num_tx_chains;
	u8 first_chain;
	u16 i = 0;

	average_sig[0] = data->chain_signal_a / IWL_CAL_NUM_BEACONS;
	average_sig[1] = data->chain_signal_b / IWL_CAL_NUM_BEACONS;
	average_sig[2] = data->chain_signal_c / IWL_CAL_NUM_BEACONS;

	/* Pick the strongest chain as the reference */
	if (average_sig[0] >= average_sig[1]) {
		max_average_sig = average_sig[0];
		max_average_sig_antenna_i = 0;
		active_chains = (1 << max_average_sig_antenna_i);
	} else {
		max_average_sig = average_sig[1];
		max_average_sig_antenna_i = 1;
		active_chains = (1 << max_average_sig_antenna_i);
	}

	if (average_sig[2] >= max_average_sig) {
		max_average_sig = average_sig[2];
		max_average_sig_antenna_i = 2;
		active_chains = (1 << max_average_sig_antenna_i);
	}

	IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
		     average_sig[0], average_sig[1], average_sig[2]);
	IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
		     max_average_sig, max_average_sig_antenna_i);

	/* Compare signal strengths for all 3 receivers. */
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (i != max_average_sig_antenna_i) {
			s32 rssi_delta = (max_average_sig - average_sig[i]);

			/* If signal is very weak, compared with
			 * strongest, mark it as disconnected. */
			if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
				data->disconn_array[i] = 1;
			else
				active_chains |= (1 << i);
			IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
			     "disconn_array[i] = %d\n",
			     i, rssi_delta, data->disconn_array[i]);
		}
	}

	/*
	 * The above algorithm sometimes fails when the ucode
	 * reports 0 for all chains. It's not clear why that
	 * happens to start with, but it is then causing trouble
	 * because this can make us enable more chains than the
	 * hardware really has.
	 *
	 * To be safe, simply mask out any chains that we know
	 * are not on the device.
	 */
	active_chains &= priv->nvm_data->valid_rx_ant;

	num_tx_chains = 0;
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		/* loops on all the bits of
		 * priv->hw_setting.valid_tx_ant */
		u8 ant_msk = (1 << i);
		if (!(priv->nvm_data->valid_tx_ant & ant_msk))
			continue;

		num_tx_chains++;
		if (data->disconn_array[i] == 0)
			/* there is a Tx antenna connected */
			break;
		if (num_tx_chains == priv->hw_params.tx_chains_num &&
		    data->disconn_array[i]) {
			/*
			 * If all chains are disconnected
			 * connect the first valid tx chain
			 */
			first_chain =
				find_first_chain(priv->nvm_data->valid_tx_ant);
			data->disconn_array[first_chain] = 0;
			active_chains |= BIT(first_chain);
			IWL_DEBUG_CALIB(priv,
					"All Tx chains are disconnected W/A - declare %d as connected\n",
					first_chain);
			break;
		}
	}

	if (active_chains != priv->nvm_data->valid_rx_ant &&
	    active_chains != priv->chain_noise_data.active_chains)
		IWL_DEBUG_CALIB(priv,
				"Detected that not all antennas are connected! "
				"Connected: %#x, valid: %#x.\n",
				active_chains,
				priv->nvm_data->valid_rx_ant);

	/* Save for use within RXON, TX, SCAN commands, etc. */
	data->active_chains = active_chains;
	IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
			active_chains);
}
879
/*
 * Compute differential rx gain codes that balance the connected
 * chains against @default_chain, based on the per-chain average
 * noise, then send them once to the uCode as a chain-noise-gain
 * calibration command (radio_write guards against re-sending).
 */
static void iwlagn_gain_computation(struct iwl_priv *priv,
				    u32 average_noise[NUM_RX_CHAINS],
				    u8 default_chain)
{
	int i;
	s32 delta_g;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	/*
	 * Find Gain Code for the chains based on "default chain"
	 */
	for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
		if ((data->disconn_array[i])) {
			/* disconnected chains get no gain adjustment */
			data->delta_gain_code[i] = 0;
			continue;
		}

		delta_g = (priv->lib->chain_noise_scale *
			((s32)average_noise[default_chain] -
			(s32)average_noise[i])) / 1500;

		/* bound gain by 2 bits value max, 3rd bit is sign */
		data->delta_gain_code[i] =
			min(abs(delta_g),
			(long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

		if (delta_g < 0)
			/*
			 * set negative sign ...
			 * note to Intel developers:  This is uCode API format,
			 * not the format of any internal device registers.
			 * Do not change this format for e.g. 6050 or similar
			 * devices.  Change format only if more resolution
			 * (i.e. more than 2 bits magnitude) is needed.
			 */
			data->delta_gain_code[i] |= (1 << 2);
	}

	IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d  ANT_C = %d\n",
			data->delta_gain_code[1], data->delta_gain_code[2]);

	if (!data->radio_write) {
		struct iwl_calib_chain_noise_gain_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));

		iwl_set_calib_hdr(&cmd.hdr,
			priv->phy_calib_chain_noise_gain_cmd);
		cmd.delta_gain_1 = data->delta_gain_code[1];
		cmd.delta_gain_2 = data->delta_gain_code[2];
		iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
			CMD_ASYNC, sizeof(cmd), &cmd);

		data->radio_write = 1;
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
}
937
/*
 * Accumulate 16 beacons of signal and noise statistics for each of
 * 3 receivers/antennas/rx-chains, then figure out:
 * 1) Which antennas are connected.
 * 2) Differential rx gain settings to balance the 3 receivers.
 *
 * Called repeatedly while chain_noise_data.state is
 * IWL_CHAIN_NOISE_ACCUMULATE; once IWL_CAL_NUM_BEACONS samples have been
 * summed, it computes the per-chain averages, hands them to
 * iwlagn_gain_computation() and marks the calibration DONE.
 */
void iwl_chain_noise_calibration(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = NULL;

	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_sig_a;
	u32 chain_sig_b;
	u32 chain_sig_c;
	u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
	u16 i = 0;
	u16 rxon_chnum = INITIALIZATION_VALUE;
	u16 stat_chnum = INITIALIZATION_VALUE;
	u8 rxon_band24;
	u8 stat_band24;
	struct statistics_rx_non_phy *rx_info;

	/*
	 * MULTI-FIXME:
	 * When we support multiple interfaces on different channels,
	 * this must be modified/fixed.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
		return;

	data = &(priv->chain_noise_data);

	/*
	 * Accumulate just the first "chain_noise_num_beacons" after
	 * the first association, then we're done forever.
	 */
	if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
		if (data->state == IWL_CHAIN_NOISE_ALIVE)
			IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
		return;
	}

	/* Snapshot the latest uCode statistics under the statistics lock. */
	spin_lock_bh(&priv->statistics.lock);

	rx_info = &priv->statistics.rx_non_phy;

	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
		spin_unlock_bh(&priv->statistics.lock);
		return;
	}

	rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
	rxon_chnum = le16_to_cpu(ctx->staging.channel);
	stat_band24 =
		!!(priv->statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
	/* channel number is carried in the upper 16 bits of the flag word */
	stat_chnum = le32_to_cpu(priv->statistics.flag) >> 16;

	/* Make sure we accumulate data for just the associated channel
	 * (even if scanning). */
	if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
		IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
				rxon_chnum, rxon_band24);
		spin_unlock_bh(&priv->statistics.lock);
		return;
	}

	/*
	 * Accumulate beacon statistics values across
	 * "chain_noise_num_beacons"
	 */
	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
				IN_BAND_FILTER;
	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
				IN_BAND_FILTER;
	chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
				IN_BAND_FILTER;

	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;

	spin_unlock_bh(&priv->statistics.lock);

	data->beacon_count++;

	/* Running sums; averaged once IWL_CAL_NUM_BEACONS is reached. */
	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);

	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);

	IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
			rxon_chnum, rxon_band24, data->beacon_count);
	IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
			chain_sig_a, chain_sig_b, chain_sig_c);
	IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
			chain_noise_a, chain_noise_b, chain_noise_c);

	/* If this is the "chain_noise_num_beacons", determine:
	 * 1) Disconnected antennas (using signal strengths)
	 * 2) Differential gain (using silence noise) to balance receivers */
	if (data->beacon_count != IWL_CAL_NUM_BEACONS)
		return;

	/* Analyze signal for disconnected antenna */
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist) {
		/* Disable disconnected antenna algorithm for advanced
		   bt coex, assuming valid antennas are connected */
		data->active_chains = priv->nvm_data->valid_rx_ant;
		for (i = 0; i < NUM_RX_CHAINS; i++)
			if (!(data->active_chains & (1<<i)))
				data->disconn_array[i] = 1;
	} else
		iwl_find_disconn_antenna(priv, average_sig, data);

	/* Analyze noise for rx balance */
	average_noise[0] = data->chain_noise_a / IWL_CAL_NUM_BEACONS;
	average_noise[1] = data->chain_noise_b / IWL_CAL_NUM_BEACONS;
	average_noise[2] = data->chain_noise_c / IWL_CAL_NUM_BEACONS;

	/* Pick the connected chain with the lowest average noise. */
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (!(data->disconn_array[i]) &&
		    (average_noise[i] <= min_average_noise)) {
			/* This means that chain i is active and has
			 * lower noise values so far: */
			min_average_noise = average_noise[i];
			min_average_noise_antenna_i = i;
		}
	}

	IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
			average_noise[0], average_noise[1],
			average_noise[2]);

	IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
			min_average_noise, min_average_noise_antenna_i);

	iwlagn_gain_computation(
		priv, average_noise,
		find_first_chain(priv->nvm_data->valid_rx_ant));

	/* Some power changes may have been made during the calibration.
	 * Update and commit the RXON
	 */
	iwl_update_chain_flags(priv);

	data->state = IWL_CHAIN_NOISE_DONE;
	iwl_power_update_mode(priv, false);
}
1098
1099void iwl_reset_run_time_calib(struct iwl_priv *priv)
1100{
1101 int i;
1102 memset(&(priv->sensitivity_data), 0,
1103 sizeof(struct iwl_sensitivity_data));
1104 memset(&(priv->chain_noise_data), 0,
1105 sizeof(struct iwl_chain_noise_data));
1106 for (i = 0; i < NUM_RX_CHAINS; i++)
1107 priv->chain_noise_data.delta_gain_code[i] =
1108 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1109
1110 /* Ask for statistics now, the uCode will send notification
1111 * periodically after association */
1112 iwl_send_statistics_request(priv, CMD_ASYNC, true);
1113}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h
new file mode 100644
index 000000000000..aeae4e80ea40
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h
@@ -0,0 +1,74 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
#ifndef __iwl_calib_h__
#define __iwl_calib_h__

#include "dev.h"
#include "commands.h"

/* Run-time chain-noise calibration (accumulates beacon stats, balances
 * the rx chains); implemented in calib.c. */
void iwl_chain_noise_calibration(struct iwl_priv *priv);
/* Run-time rx sensitivity calibration entry point (see calib.c). */
void iwl_sensitivity_calibration(struct iwl_priv *priv);

/* Initialize sensitivity calibration state (see calib.c). */
void iwl_init_sensitivity(struct iwl_priv *priv);
/* Reset all run-time calibration state and re-request statistics. */
void iwl_reset_run_time_calib(struct iwl_priv *priv);

#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
new file mode 100644
index 000000000000..7a34e4d158d1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -0,0 +1,4008 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_commands_h__
70#define __iwl_commands_h__
71
72#include <linux/ieee80211.h>
73#include <linux/types.h>
74
75
/*
 * uCode host-command and notification IDs.  These values are part of the
 * driver<->uCode wire API and must never be renumbered.
 */
enum {
	REPLY_ALIVE = 0x1,
	REPLY_ERROR = 0x2,
	REPLY_ECHO = 0x3,		/* test command */

	/* RXON and QOS commands */
	REPLY_RXON = 0x10,
	REPLY_RXON_ASSOC = 0x11,
	REPLY_QOS_PARAM = 0x13,
	REPLY_RXON_TIMING = 0x14,

	/* Multi-Station support */
	REPLY_ADD_STA = 0x18,
	REPLY_REMOVE_STA = 0x19,
	REPLY_REMOVE_ALL_STA = 0x1a,	/* not used */
	REPLY_TXFIFO_FLUSH = 0x1e,

	/* Security */
	REPLY_WEPKEY = 0x20,

	/* RX, TX, LEDs */
	REPLY_TX = 0x1c,
	REPLY_LEDS_CMD = 0x48,
	REPLY_TX_LINK_QUALITY_CMD = 0x4e,

	/* WiMAX coexistence */
	COEX_PRIORITY_TABLE_CMD = 0x5a,
	COEX_MEDIUM_NOTIFICATION = 0x5b,
	COEX_EVENT_CMD = 0x5c,

	/* Calibration */
	TEMPERATURE_NOTIFICATION = 0x62,
	CALIBRATION_CFG_CMD = 0x65,
	CALIBRATION_RES_NOTIFICATION = 0x66,
	CALIBRATION_COMPLETE_NOTIFICATION = 0x67,

	/* 802.11h related */
	REPLY_QUIET_CMD = 0x71,		/* not used */
	REPLY_CHANNEL_SWITCH = 0x72,
	CHANNEL_SWITCH_NOTIFICATION = 0x73,
	REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
	SPECTRUM_MEASURE_NOTIFICATION = 0x75,

	/* Power Management */
	POWER_TABLE_CMD = 0x77,
	PM_SLEEP_NOTIFICATION = 0x7A,
	PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,

	/* Scan commands and notifications */
	REPLY_SCAN_CMD = 0x80,
	REPLY_SCAN_ABORT_CMD = 0x81,
	SCAN_START_NOTIFICATION = 0x82,
	SCAN_RESULTS_NOTIFICATION = 0x83,
	SCAN_COMPLETE_NOTIFICATION = 0x84,

	/* IBSS/AP commands */
	BEACON_NOTIFICATION = 0x90,
	REPLY_TX_BEACON = 0x91,
	WHO_IS_AWAKE_NOTIFICATION = 0x94,	/* not used */

	/* Miscellaneous commands */
	REPLY_TX_POWER_DBM_CMD = 0x95,
	QUIET_NOTIFICATION = 0x96,		/* not used */
	REPLY_TX_PWR_TABLE_CMD = 0x97,
	REPLY_TX_POWER_DBM_CMD_V1 = 0x98,	/* old version of API */
	/* intentionally shares 0x98 with the V1 tx-power command above */
	TX_ANT_CONFIGURATION_CMD = 0x98,
	MEASURE_ABORT_NOTIFICATION = 0x99,	/* not used */

	/* Bluetooth device coexistence config command */
	REPLY_BT_CONFIG = 0x9b,

	/* Statistics */
	REPLY_STATISTICS_CMD = 0x9c,
	STATISTICS_NOTIFICATION = 0x9d,

	/* RF-KILL commands and notifications */
	REPLY_CARD_STATE_CMD = 0xa0,
	CARD_STATE_NOTIFICATION = 0xa1,

	/* Missed beacons notification */
	MISSED_BEACONS_NOTIFICATION = 0xa2,

	REPLY_CT_KILL_CONFIG_CMD = 0xa4,
	SENSITIVITY_CMD = 0xa8,
	REPLY_PHY_CALIBRATION_CMD = 0xb0,
	REPLY_RX_PHY_CMD = 0xc0,
	REPLY_RX_MPDU_CMD = 0xc1,
	REPLY_RX = 0xc3,
	REPLY_COMPRESSED_BA = 0xc5,

	/* BT Coex */
	REPLY_BT_COEX_PRIO_TABLE = 0xcc,
	REPLY_BT_COEX_PROT_ENV = 0xcd,
	REPLY_BT_COEX_PROFILE_NOTIF = 0xce,

	/* PAN commands */
	REPLY_WIPAN_PARAMS = 0xb2,
	REPLY_WIPAN_RXON = 0xb3,	/* use REPLY_RXON structure */
	REPLY_WIPAN_RXON_TIMING = 0xb4,	/* use REPLY_RXON_TIMING structure */
	REPLY_WIPAN_RXON_ASSOC = 0xb6,	/* use REPLY_RXON_ASSOC structure */
	REPLY_WIPAN_QOS_PARAM = 0xb7,	/* use REPLY_QOS_PARAM structure */
	REPLY_WIPAN_WEPKEY = 0xb8,	/* use REPLY_WEPKEY structure */
	REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
	REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
	REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,

	REPLY_WOWLAN_PATTERNS = 0xe0,
	REPLY_WOWLAN_WAKEUP_FILTER = 0xe1,
	REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2,
	REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
	REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
	REPLY_WOWLAN_GET_STATUS = 0xe5,
	REPLY_D3_CONFIG = 0xd3,

	REPLY_MAX = 0xff
};
192
/*
 * Minimum number of queues. MAX_NUM is defined in hw specific files.
 * Set the minimum to accommodate
 *  - 4 standard TX queues
 *  - the command queue
 *  - 4 PAN TX queues
 *  - the PAN multicast queue, and
 *  - the AUX (TX during scan dwell) queue.
 */
#define IWL_MIN_NUM_QUEUES	11

/*
 * Command queue depends on iPAN support.
 */
#define IWL_DEFAULT_CMD_QUEUE_NUM	4
#define IWL_IPAN_CMD_QUEUE_NUM		9

/* Tx FIFO numbers per access category; BK/VI are shared with iPAN. */
#define IWL_TX_FIFO_BK		0	/* shared */
#define IWL_TX_FIFO_BE		1
#define IWL_TX_FIFO_VI		2	/* shared */
#define IWL_TX_FIFO_VO		3
#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN	4
#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN	5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX		5
#define IWL_TX_FIFO_UNUSED	255

#define IWLAGN_CMD_FIFO_NUM	7

/*
 * This queue number is required for proper operation
 * because the ucode will stop/start the scheduler as
 * required.
 */
#define IWL_IPAN_MCAST_QUEUE	8

/******************************************************************************
 * (0)
 * Commonly used structures and definitions:
 * Command header, rate_n_flags, txpower
 *
 *****************************************************************************/
237
/**
 * iwlagn rate_n_flags bit fields
 *
 * rate_n_flags format is used in following iwlagn commands:
 *  REPLY_RX (response only)
 *  REPLY_RX_MPDU (response only)
 *  REPLY_TX (both command and response)
 *  REPLY_TX_LINK_QUALITY_CMD
 *
 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
 *  2-0:  0)   6 Mbps
 *        1)  12 Mbps
 *        2)  18 Mbps
 *        3)  24 Mbps
 *        4)  36 Mbps
 *        5)  48 Mbps
 *        6)  54 Mbps
 *        7)  60 Mbps
 *
 *  4-3:  0)  Single stream (SISO)
 *        1)  Dual stream (MIMO)
 *        2)  Triple stream (MIMO)
 *
 *    5:  Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
 *
 * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
 *  3-0:  0xD)   6 Mbps
 *        0xF)   9 Mbps
 *        0x5)  12 Mbps
 *        0x7)  18 Mbps
 *        0x9)  24 Mbps
 *        0xB)  36 Mbps
 *        0x1)  48 Mbps
 *        0x3)  54 Mbps
 *
 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
 *  6-0:   10)  1 Mbps
 *         20)  2 Mbps
 *         55)  5.5 Mbps
 *        110)  11 Mbps
 */
#define RATE_MCS_CODE_MSK 0x7
#define RATE_MCS_SPATIAL_POS 3
#define RATE_MCS_SPATIAL_MSK 0x18
#define RATE_MCS_HT_DUP_POS 5
#define RATE_MCS_HT_DUP_MSK 0x20
/* Both legacy and HT use bits 7:0 as the CCK/OFDM rate or HT MCS */
#define RATE_MCS_RATE_MSK 0xff

/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
#define RATE_MCS_FLAGS_POS 8
#define RATE_MCS_HT_POS 8
#define RATE_MCS_HT_MSK 0x100

/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
#define RATE_MCS_CCK_POS 9
#define RATE_MCS_CCK_MSK 0x200

/* Bit 10: (1) Use Green Field preamble */
#define RATE_MCS_GF_POS 10
#define RATE_MCS_GF_MSK 0x400

/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
#define RATE_MCS_HT40_POS 11
#define RATE_MCS_HT40_MSK 0x800

/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
#define RATE_MCS_DUP_POS 12
#define RATE_MCS_DUP_MSK 0x1000

/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
#define RATE_MCS_SGI_POS 13
#define RATE_MCS_SGI_MSK 0x2000

/**
 * rate_n_flags Tx antenna masks (bits 16:14)
 * 4965 has 2 transmitters
 * 5100 has 1 transmitter B
 * 5150 has 1 transmitter A
 * 5300 has 3 transmitters
 * 5350 has 3 transmitters
 */
#define RATE_MCS_ANT_POS	14
#define RATE_MCS_ANT_A_MSK	0x04000
#define RATE_MCS_ANT_B_MSK	0x08000
#define RATE_MCS_ANT_C_MSK	0x10000
#define RATE_MCS_ANT_AB_MSK	(RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
#define RATE_MCS_ANT_ABC_MSK	(RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
#define RATE_ANT_NUM 3

/* Tx power table sizes; the CCK entry sits at the end of the table. */
#define POWER_TABLE_NUM_ENTRIES			33
#define POWER_TABLE_NUM_HT_OFDM_ENTRIES		32
#define POWER_TABLE_CCK_ENTRY			32

#define IWL_PWR_NUM_HT_OFDM_ENTRIES		24
#define IWL_PWR_CCK_ENTRIES			2
335
/**
 * struct tx_power_dual_stream
 *
 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
 *
 * Same format as iwl_tx_power_dual_stream, but __le32
 */
struct tx_power_dual_stream {
	__le32 dw;
} __packed;

/**
 * Command REPLY_TX_POWER_DBM_CMD = 0x98
 * struct iwlagn_tx_power_dbm_cmd
 */
#define IWLAGN_TX_POWER_AUTO 0x7f
#define IWLAGN_TX_POWER_NO_CLOSED (0x1 << 6)

/* Wire format is fixed (__packed); do not reorder or resize fields. */
struct iwlagn_tx_power_dbm_cmd {
	s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
	u8 flags;
	s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
	u8 reserved;
} __packed;

/**
 * Command TX_ANT_CONFIGURATION_CMD = 0x98
 * This command is used to configure valid Tx antenna.
 * By default uCode concludes the valid antenna according to the radio flavor.
 * This command enables the driver to override/modify this conclusion.
 */
struct iwl_tx_ant_config_cmd {
	__le32 valid;
} __packed;
370
/******************************************************************************
 * (0a)
 * Alive and Error Commands & Responses:
 *
 *****************************************************************************/

#define UCODE_VALID_OK	cpu_to_le32(0x1)

/**
 * REPLY_ALIVE = 0x1 (response only, not a command)
 *
 * uCode issues this "alive" notification once the runtime image is ready
 * to receive commands from the driver.  This is the *second* "alive"
 * notification that the driver will receive after rebooting uCode;
 * this "alive" is indicated by subtype field != 9.
 *
 * See comments documenting "BSM" (bootstrap state machine).
 *
 * This response includes two pointers to structures within the device's
 * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
 *
 * 1)  log_event_table_ptr indicates base of the event log.  This traces
 *     a 256-entry history of uCode execution within a circular buffer.
 *     Its header format is:
 *
 *	__le32 log_size;     log capacity (in number of entries)
 *	__le32 type;         (1) timestamp with each entry, (0) no timestamp
 *	__le32 wraps;        # times uCode has wrapped to top of circular buffer
 *	__le32 write_index;  next circular buffer entry that uCode would fill
 *
 *     The header is followed by the circular buffer of log entries.  Entries
 *     with timestamps have the following format:
 *
 *	__le32 event_id;     range 0 - 1500
 *	__le32 timestamp;    low 32 bits of TSF (of network, if associated)
 *	__le32 data;         event_id-specific data value
 *
 *     Entries without timestamps contain only event_id and data.
 *
 *
 * 2)  error_event_table_ptr indicates base of the error log.  This contains
 *     information about any uCode error that occurs.  For agn, the format
 *     of the error log is defined by struct iwl_error_event_table.
 *
 * The Linux driver can print both logs to the system log when a uCode error
 * occurs.
 */
/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 *
 * Layout mirrors the uCode's in-SRAM error log; do not reorder fields.
 */
struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 line;		/* source code line of error */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 ucode_ver;		/* uCode version */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
471
/* Payload of the REPLY_ALIVE (0x1) notification; fixed wire layout. */
struct iwl_alive_resp {
	u8 ucode_minor;
	u8 ucode_major;
	__le16 reserved1;
	u8 sw_rev[8];
	u8 ver_type;
	u8 ver_subtype;			/* not "9" for runtime alive */
	__le16 reserved2;
	__le32 log_event_table_ptr;	/* SRAM address for event log */
	__le32 error_event_table_ptr;	/* SRAM address for error log */
	__le32 timestamp;
	__le32 is_valid;		/* compare against UCODE_VALID_OK */
} __packed;
485
/*
 * REPLY_ERROR = 0x2 (response only, not a command)
 *
 * Sent by the uCode when it rejects or fails a host command.
 */
struct iwl_error_resp {
	__le32 error_type;
	u8 cmd_id;		/* ID of the offending host command */
	u8 reserved1;
	__le16 bad_cmd_seq_num;
	__le32 error_info;
	__le64 timestamp;
} __packed;
497
/******************************************************************************
 * (1)
 * RXON Commands & Responses:
 *
 *****************************************************************************/

/*
 * Rx config defines & structure
 */
/* rx_config device types (iwl_rxon_cmd.dev_type) */
enum {
	RXON_DEV_TYPE_AP = 1,
	RXON_DEV_TYPE_ESS = 3,
	RXON_DEV_TYPE_IBSS = 4,
	RXON_DEV_TYPE_SNIFFER = 6,
	RXON_DEV_TYPE_CP = 7,
	RXON_DEV_TYPE_2STA = 8,
	RXON_DEV_TYPE_P2P = 9,
};
517
518
/* iwl_rxon_cmd.rx_chain bit fields (__le16) */
#define RXON_RX_CHAIN_DRIVER_FORCE_MSK		cpu_to_le16(0x1 << 0)
#define RXON_RX_CHAIN_DRIVER_FORCE_POS		(0)
#define RXON_RX_CHAIN_VALID_MSK			cpu_to_le16(0x7 << 1)
#define RXON_RX_CHAIN_VALID_POS			(1)
#define RXON_RX_CHAIN_FORCE_SEL_MSK		cpu_to_le16(0x7 << 4)
#define RXON_RX_CHAIN_FORCE_SEL_POS		(4)
#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK	cpu_to_le16(0x7 << 7)
#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS	(7)
#define RXON_RX_CHAIN_CNT_MSK			cpu_to_le16(0x3 << 10)
#define RXON_RX_CHAIN_CNT_POS			(10)
#define RXON_RX_CHAIN_MIMO_CNT_MSK		cpu_to_le16(0x3 << 12)
#define RXON_RX_CHAIN_MIMO_CNT_POS		(12)
#define RXON_RX_CHAIN_MIMO_FORCE_MSK		cpu_to_le16(0x1 << 14)
#define RXON_RX_CHAIN_MIMO_FORCE_POS		(14)

/* rx_config flags (iwl_rxon_cmd.flags, __le32) */
/* band & modulation selection */
#define RXON_FLG_BAND_24G_MSK			cpu_to_le32(1 << 0)
#define RXON_FLG_CCK_MSK			cpu_to_le32(1 << 1)
/* auto detection enable */
#define RXON_FLG_AUTO_DETECT_MSK		cpu_to_le32(1 << 2)
/* TGg protection when tx */
#define RXON_FLG_TGG_PROTECT_MSK		cpu_to_le32(1 << 3)
/* cck short slot & preamble */
#define RXON_FLG_SHORT_SLOT_MSK			cpu_to_le32(1 << 4)
#define RXON_FLG_SHORT_PREAMBLE_MSK		cpu_to_le32(1 << 5)
/* antenna selection */
#define RXON_FLG_DIS_DIV_MSK			cpu_to_le32(1 << 7)
#define RXON_FLG_ANT_SEL_MSK			cpu_to_le32(0x0f00)
#define RXON_FLG_ANT_A_MSK			cpu_to_le32(1 << 8)
#define RXON_FLG_ANT_B_MSK			cpu_to_le32(1 << 9)
/* radar detection enable */
#define RXON_FLG_RADAR_DETECT_MSK		cpu_to_le32(1 << 12)
#define RXON_FLG_TGJ_NARROW_BAND_MSK		cpu_to_le32(1 << 13)
/* rx response to host with 8-byte TSF
 * (according to ON_AIR deassertion) */
#define RXON_FLG_TSF2HOST_MSK			cpu_to_le32(1 << 15)


/* HT flags */
#define RXON_FLG_CTRL_CHANNEL_LOC_POS		(22)
#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK	cpu_to_le32(0x1 << 22)

#define RXON_FLG_HT_OPERATING_MODE_POS		(23)

#define RXON_FLG_HT_PROT_MSK			cpu_to_le32(0x1 << 23)
#define RXON_FLG_HT40_PROT_MSK			cpu_to_le32(0x2 << 23)

#define RXON_FLG_CHANNEL_MODE_POS		(25)
#define RXON_FLG_CHANNEL_MODE_MSK		cpu_to_le32(0x3 << 25)

/* channel mode (encoded into flags bits 26:25) */
enum {
	CHANNEL_MODE_LEGACY = 0,
	CHANNEL_MODE_PURE_40 = 1,
	CHANNEL_MODE_MIXED = 2,
	CHANNEL_MODE_RESERVED = 3,
};
#define RXON_FLG_CHANNEL_MODE_LEGACY	cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
#define RXON_FLG_CHANNEL_MODE_PURE_40	cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
#define RXON_FLG_CHANNEL_MODE_MIXED	cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)

/* CTS to self (if spec allows) flag */
#define RXON_FLG_SELF_CTS_EN			cpu_to_le32(0x1<<30)

/* rx_config filter flags (iwl_rxon_cmd.filter_flags, __le32) */
/* accept all data frames */
#define RXON_FILTER_PROMISC_MSK		cpu_to_le32(1 << 0)
/* pass control & management to host */
#define RXON_FILTER_CTL2HOST_MSK	cpu_to_le32(1 << 1)
/* accept multi-cast */
#define RXON_FILTER_ACCEPT_GRP_MSK	cpu_to_le32(1 << 2)
/* don't decrypt uni-cast frames */
#define RXON_FILTER_DIS_DECRYPT_MSK	cpu_to_le32(1 << 3)
/* don't decrypt multi-cast frames */
#define RXON_FILTER_DIS_GRP_DECRYPT_MSK	cpu_to_le32(1 << 4)
/* STA is associated */
#define RXON_FILTER_ASSOC_MSK		cpu_to_le32(1 << 5)
/* transfer to host non bssid beacons in associated state */
#define RXON_FILTER_BCON_AWARE_MSK	cpu_to_le32(1 << 6)
599
/**
 * REPLY_RXON = 0x10 (command, has simple generic response)
 *
 * RXON tunes the radio tuner to a service channel, and sets up a number
 * of parameters that are used primarily for Rx, but also for Tx operations.
 *
 * NOTE: When tuning to a new channel, driver must set the
 * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
 * info within the device, including the station tables, tx retry
 * rate tables, and txpower tables. Driver must build a new station
 * table and txpower table before transmitting anything on the RXON
 * channel.
 *
 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
 */

struct iwl_rxon_cmd {
	u8 node_addr[6];	/* this station's MAC address */
	__le16 reserved1;
	u8 bssid_addr[6];	/* BSSID of the network to tune to */
	__le16 reserved2;
	u8 wlap_bssid_addr[6];
	__le16 reserved3;
	u8 dev_type;		/* operating mode; encoding (RXON_DEV_TYPE_*)
				 * is defined elsewhere in this header */
	u8 air_propagation;
	__le16 rx_chain;	/* Rx chain select/force flags */
	u8 ofdm_basic_rates;	/* legacy OFDM basic-rate bitmap */
	u8 cck_basic_rates;	/* legacy CCK basic-rate bitmap */
	__le16 assoc_id;	/* association ID given by the AP */
	__le32 flags;		/* RXON_FLG_* */
	__le32 filter_flags;	/* RXON_FILTER_* */
	__le16 channel;		/* channel number to tune to */
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
	u8 ofdm_ht_triple_stream_basic_rates;
	u8 reserved5;
	__le16 acquisition_data;
	__le16 reserved6;
} __packed;
641
/*
 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
 *
 * Updates the association-related subset of the RXON parameters
 * (rates, flags, rx chain) without re-tuning; the fields mirror the
 * like-named fields of struct iwl_rxon_cmd.
 */
struct iwl_rxon_assoc_cmd {
	__le32 flags;			/* RXON_FLG_* */
	__le32 filter_flags;		/* RXON_FILTER_* */
	u8 ofdm_basic_rates;
	u8 cck_basic_rates;
	__le16 reserved1;
	u8 ofdm_ht_single_stream_basic_rates;
	u8 ofdm_ht_dual_stream_basic_rates;
	u8 ofdm_ht_triple_stream_basic_rates;
	u8 reserved2;
	__le16 rx_chain_select_flags;
	__le16 acquisition_data;
	__le32 reserved3;
} __packed;
659
#define IWL_CONN_MAX_LISTEN_INTERVAL	10
#define IWL_MAX_UCODE_BEACON_INTERVAL	4 /* 4096 */

/*
 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
 *
 * Gives the device the network timing parameters (beacon interval,
 * listen interval, DTIM period) for the tuned channel.
 * NOTE(review): units of beacon_interval/atim_window (presumably TU)
 * are not visible in this header -- confirm against the uCode API.
 */
struct iwl_rxon_time_cmd {
	__le64 timestamp;
	__le16 beacon_interval;
	__le16 atim_window;
	__le32 beacon_init_val;
	__le16 listen_interval;
	u8 dtim_period;
	u8 delta_cp_bss_tbtts;
} __packed;
675
/*
 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
 */
/**
 * struct iwl5000_channel_switch_cmd
 * @band: 0- 5.2GHz, 1- 2.4GHz
 * @expect_beacon: 0- resume transmits after channel switch
 *                 1- wait for beacon to resume transmits
 * @channel: new channel number
 * @rxon_flags: Rx on flags
 * @rxon_filter_flags: filtering parameters
 * @switch_time: switch time in extended beacon format
 * @reserved: reserved bytes
 */
struct iwl5000_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;
	__le32 rxon_flags;		/* RXON_FLG_* */
	__le32 rxon_filter_flags;	/* RXON_FILTER_* */
	__le32 switch_time;
	/* sized by txpower-table constants defined elsewhere in this header;
	 * 2 rows on 5000-series hardware */
	__le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
} __packed;
699
/**
 * struct iwl6000_channel_switch_cmd
 * @band: 0- 5.2GHz, 1- 2.4GHz
 * @expect_beacon: 0- resume transmits after channel switch
 *                 1- wait for beacon to resume transmits
 * @channel: new channel number
 * @rxon_flags: Rx on flags
 * @rxon_filter_flags: filtering parameters
 * @switch_time: switch time in extended beacon format
 * @reserved: reserved bytes
 *
 * Same layout as the 5000-series variant above, except the trailing
 * reserved block has 3 rows instead of 2.
 */
struct iwl6000_channel_switch_cmd {
	u8 band;
	u8 expect_beacon;
	__le16 channel;
	__le32 rxon_flags;		/* RXON_FLG_* */
	__le32 rxon_filter_flags;	/* RXON_FILTER_* */
	__le32 switch_time;
	__le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
} __packed;
720
/*
 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
 *
 * Sent by the device when a channel switch completes (or fails).
 */
struct iwl_csa_notification {
	__le16 band;
	__le16 channel;
	__le32 status;		/* 0 - OK, 1 - fail */
} __packed;
729
/******************************************************************************
 * (2)
 * Quality-of-Service (QOS) Commands & Responses:
 *
 *****************************************************************************/

/**
 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
 *
 * @cw_min: Contention window, start value in numbers of slots.
 *          Should be a power-of-2, minus 1.  Device's default is 0x0f.
 * @cw_max: Contention window, max value in numbers of slots.
 *          Should be a power-of-2, minus 1.  Device's default is 0x3f.
 * @aifsn:  Number of slots in Arbitration Interframe Space (before
 *          performing random backoff timing prior to Tx).  Device default 1.
 * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
 *
 * Device will automatically increase contention window by (2*CW) + 1 for each
 * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
 * value, to cap the CW value.
 */
struct iwl_ac_qos {
	__le16 cw_min;
	__le16 cw_max;
	u8 aifsn;
	u8 reserved1;
	__le16 edca_txop;
} __packed;
759
/* QoS flags defines (for the qos_flags field below) */
#define QOS_PARAM_FLG_UPDATE_EDCA_MSK	cpu_to_le32(0x01)
#define QOS_PARAM_FLG_TGN_MSK		cpu_to_le32(0x02)
#define QOS_PARAM_FLG_TXOP_TYPE_MSK	cpu_to_le32(0x10)

/* Number of Access Categories (AC) (EDCA), queues 0..3 */
#define AC_NUM                4

/*
 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
 *
 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
 */
struct iwl_qosparam_cmd {
	__le32 qos_flags;		/* QOS_PARAM_FLG_* */
	struct iwl_ac_qos ac[AC_NUM];	/* one entry per access category */
} __packed;
778
/******************************************************************************
 * (3)
 * Add/Modify Stations Commands & Responses:
 *
 *****************************************************************************/
/*
 * Multi station support
 */

/* Special, dedicated locations within device's station table */
#define	IWL_AP_ID		0
#define	IWL_AP_ID_PAN		1
#define	IWL_STA_ID		2
#define IWLAGN_PAN_BCAST_ID	14
#define IWLAGN_BROADCAST_ID	15
#define IWLAGN_STATION_COUNT	16

/* pseudo-TID used for non-QoS frames */
#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT

/* station_flags / station_flags_msk bits in struct iwl_addsta_cmd */
#define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
#define STA_FLG_PAN_STATION		cpu_to_le32(1 << 13)
#define STA_FLG_RTS_MIMO_PROT_MSK	cpu_to_le32(1 << 17)
#define STA_FLG_AGG_MPDU_8US_MSK	cpu_to_le32(1 << 18)
#define STA_FLG_MAX_AGG_SIZE_POS	(19)
#define STA_FLG_MAX_AGG_SIZE_MSK	cpu_to_le32(3 << 19)
#define STA_FLG_HT40_EN_MSK		cpu_to_le32(1 << 21)
#define STA_FLG_MIMO_DIS_MSK		cpu_to_le32(1 << 22)
#define STA_FLG_AGG_MPDU_DENSITY_POS	(23)
#define STA_FLG_AGG_MPDU_DENSITY_MSK	cpu_to_le32(7 << 23)

/* Use in mode field.  1: modify existing entry, 0: add new station entry */
#define STA_CONTROL_MODIFY_MSK		0x01

/* key flags __le16*/
#define STA_KEY_FLG_ENCRYPT_MSK	cpu_to_le16(0x0007)
#define STA_KEY_FLG_NO_ENC	cpu_to_le16(0x0000)
#define STA_KEY_FLG_WEP		cpu_to_le16(0x0001)
#define STA_KEY_FLG_CCMP	cpu_to_le16(0x0002)
#define STA_KEY_FLG_TKIP	cpu_to_le16(0x0003)

#define STA_KEY_FLG_KEYID_POS	8
#define STA_KEY_FLG_INVALID 	cpu_to_le16(0x0800)
/* wep key is either from global key (0) or from station info array (1) */
#define STA_KEY_FLG_MAP_KEY_MSK	cpu_to_le16(0x0008)

/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
#define STA_KEY_FLG_KEY_SIZE_MSK	cpu_to_le16(0x1000)
#define STA_KEY_MULTICAST_MSK		cpu_to_le16(0x4000)
#define STA_KEY_MAX_NUM		8
#define STA_KEY_MAX_NUM_PAN	16
/* must not match WEP_INVALID_OFFSET */
#define IWLAGN_HW_KEY_DEFAULT	0xfe

/* Flags indicate whether to modify vs. don't change various station params */
#define	STA_MODIFY_KEY_MASK		0x01
#define	STA_MODIFY_TID_DISABLE_TX	0x02
#define	STA_MODIFY_TX_RATE_MSK		0x04
#define STA_MODIFY_ADDBA_TID_MSK	0x08
#define STA_MODIFY_DELBA_TID_MSK	0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK	0x20
840
/* agn */
/* Per-station security key material, embedded in REPLY_ADD_STA. */
struct iwl_keyinfo {
	__le16 key_flags;		/* STA_KEY_FLG_* */
	u8 tkip_rx_tsc_byte2;	/* TSC[2] for key mix ph1 detection */
	u8 reserved1;
	__le16 tkip_rx_ttak[5];	/* 10-byte unicast TKIP TTAK */
	u8 key_offset;		/* slot in hw key table */
	u8 reserved2;
	u8 key[16];		/* 16-byte unicast decryption key */
	__le64 tx_secur_seq_cnt;
	__le64 hw_tkip_mic_rx_key;
	__le64 hw_tkip_mic_tx_key;
} __packed;
854
/**
 * struct sta_id_modify
 * @addr[ETH_ALEN]: station's MAC address
 * @sta_id: index of station in uCode's station table
 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
 *
 * Driver selects unused table index when adding new station,
 * or the index to a pre-existing station entry when modifying that station.
 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
 *
 * modify_mask flags select which parameters to modify vs. leave alone.
 */
struct sta_id_modify {
	u8 addr[ETH_ALEN];
	__le16 reserved1;
	u8 sta_id;
	u8 modify_mask;
	__le16 reserved2;
} __packed;
874
/*
 * REPLY_ADD_STA = 0x18 (command)
 *
 * The device contains an internal table of per-station information,
 * with info on security keys, aggregation parameters, and Tx rates for
 * initial Tx attempt and any retries (agn devices uses
 * REPLY_TX_LINK_QUALITY_CMD,
 *
 * REPLY_ADD_STA sets up the table entry for one station, either creating
 * a new entry, or modifying a pre-existing one.
 *
 * NOTE: RXON command (without "associated" bit set) wipes the station table
 *       clean.  Moving into RF_KILL state does this also.  Driver must set up
 *       new station table before transmitting anything on the RXON channel
 *       (except active scans or active measurements; those commands carry
 *       their own txpower/rate setup data).
 *
 *       When getting started on a new channel, driver must set up the
 *       IWL_BROADCAST_ID entry (last entry in the table).  For a client
 *       station in a BSS, once an AP is selected, driver sets up the AP STA
 *       in the IWL_AP_ID entry (1st entry in the table).  BROADCAST and AP
 *       are all that are needed for a BSS client station.  If the device is
 *       used as AP, or in an IBSS network, driver must set up station table
 *       entries for all STAs in network, starting with index IWL_STA_ID.
 */

struct iwl_addsta_cmd {
	u8 mode;		/* 1: modify existing, 0: add new station */
	u8 reserved[3];
	struct sta_id_modify sta;	/* station id/addr and modify mask */
	struct iwl_keyinfo key;		/* security key material */
	__le32 station_flags;		/* STA_FLG_* */
	__le32 station_flags_msk;	/* STA_FLG_* */

	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
	 * corresponding to bit (e.g. bit 5 controls TID 5).
	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
	__le16 tid_disable_tx;
	__le16 legacy_reserved;

	/* TID for which to add block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	u8 add_immediate_ba_tid;

	/* TID for which to remove block-ack support.
	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
	u8 remove_immediate_ba_tid;

	/* Starting Sequence Number for added block-ack support.
	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
	__le16 add_immediate_ba_ssn;

	/*
	 * Number of packets OK to transmit to station even though
	 * it is asleep -- used to synchronise PS-poll and u-APSD
	 * responses while ucode keeps track of STA sleep state.
	 */
	__le16 sleep_tx_count;

	__le16 reserved2;
} __packed;
936
937
/* status codes returned in struct iwl_add_sta_resp below */
#define ADD_STA_SUCCESS_MSK		0x1
#define ADD_STA_NO_ROOM_IN_TABLE	0x2
#define ADD_STA_NO_BLOCK_ACK_RESOURCE	0x4
#define ADD_STA_MODIFY_NON_EXIST_STA	0x8
/*
 * REPLY_ADD_STA = 0x18 (response)
 */
struct iwl_add_sta_resp {
	u8 status;	/* ADD_STA_* */
} __packed;

#define REM_STA_SUCCESS_MSK              0x1
/*
 *  REPLY_REM_STA = 0x19 (response)
 */
struct iwl_rem_sta_resp {
	u8 status;	/* REM_STA_SUCCESS_MSK on success */
} __packed;

/*
 *  REPLY_REM_STA = 0x19 (command)
 */
struct iwl_rem_sta_cmd {
	u8 num_sta;     /* number of removed stations */
	u8 reserved[3];
	u8 addr[ETH_ALEN]; /* MAC addr of the first station */
	u8 reserved2[2];
} __packed;
966
967
/* WiFi queues mask */
#define IWL_SCD_BK_MSK			BIT(0)
#define IWL_SCD_BE_MSK			BIT(1)
#define IWL_SCD_VI_MSK			BIT(2)
#define IWL_SCD_VO_MSK			BIT(3)
/* NOTE(review): intentionally the same bit as IWL_SCD_VO_MSK --
 * management frames appear to share the VO queue; confirm before use */
#define IWL_SCD_MGMT_MSK		BIT(3)

/* PAN queues mask */
#define IWL_PAN_SCD_BK_MSK		BIT(4)
#define IWL_PAN_SCD_BE_MSK		BIT(5)
#define IWL_PAN_SCD_VI_MSK		BIT(6)
#define IWL_PAN_SCD_VO_MSK		BIT(7)
/* NOTE(review): same bit as IWL_PAN_SCD_VO_MSK, mirroring the non-PAN case */
#define IWL_PAN_SCD_MGMT_MSK		BIT(7)
#define IWL_PAN_SCD_MULTICAST_MSK	BIT(8)

/* queues 10..19: aggregation queues */
#define IWL_AGG_TX_QUEUE_MSK		0xffc00

#define IWL_DROP_ALL			BIT(1)
986
/*
 * REPLY_TXFIFO_FLUSH = 0x1e(command and response)
 *
 * When using full FIFO flush this command checks the scheduler HW block WR/RD
 * pointers to check if all the frames were transferred by DMA into the
 * relevant TX FIFO queue. Only when the DMA is finished and the queue is
 * empty the command can finish.
 * This command is used to flush the TXFIFO from transmit commands, it may
 * operate on single or multiple queues, the command queue can't be flushed by
 * this command. The command response is returned when all the queue flush
 * operations are done. Each TX command flushed return response with the FLUSH
 * status set in the TX response status. When FIFO flush operation is used,
 * the flush operation ends when both the scheduler DMA done and TXFIFO empty
 * are set.
 *
 * @queue_control: bit mask for which queues to flush
 * @flush_control: flush controls
 *	0: Dump single MSDU
 *	1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
 *	2: Dump all FIFO
 */
/* v3 widens queue_control to 32 bits vs. the 16-bit v2 layout below */
struct iwl_txfifo_flush_cmd_v3 {
	__le32 queue_control;
	__le16 flush_control;
	__le16 reserved;
} __packed;

struct iwl_txfifo_flush_cmd_v2 {
	__le16 queue_control;
	__le16 flush_control;
} __packed;
1018
/*
 * REPLY_WEP_KEY = 0x20
 */
struct iwl_wep_key {
	u8 key_index;		/* index of this key (0..WEP_KEYS_MAX-1) */
	u8 key_offset;		/* slot in hw key table; WEP_INVALID_OFFSET
				 * marks an unused slot */
	u8 reserved1[2];
	u8 key_size;		/* WEP_KEY_LEN_64 or WEP_KEY_LEN_128 */
	u8 reserved2[3];
	u8 key[16];
} __packed;

struct iwl_wep_cmd {
	u8 num_keys;		/* number of entries in key[] below */
	u8 global_key_type;	/* WEP_KEY_WEP_TYPE */
	u8 flags;
	u8 reserved;
	/* variable-length trailer (zero-length-array idiom) */
	struct iwl_wep_key key[0];
} __packed;

#define WEP_KEY_WEP_TYPE 1
#define WEP_KEYS_MAX 4
#define WEP_INVALID_OFFSET 0xff
#define WEP_KEY_LEN_64 5
#define WEP_KEY_LEN_128 13
1044
/******************************************************************************
 * (4)
 * Rx Responses:
 *
 *****************************************************************************/

/* rx status bits */
#define RX_RES_STATUS_NO_CRC32_ERROR	cpu_to_le32(1 << 0)
#define RX_RES_STATUS_NO_RXE_OVERFLOW	cpu_to_le32(1 << 1)

/* phy_flags bits (struct iwl_rx_phy_res below) */
#define RX_RES_PHY_FLAGS_BAND_24_MSK		cpu_to_le16(1 << 0)
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK		cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK	cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK	cpu_to_le16(1 << 3)
#define RX_RES_PHY_FLAGS_ANTENNA_MSK		0x70
#define RX_RES_PHY_FLAGS_ANTENNA_POS		4
#define RX_RES_PHY_FLAGS_AGG_MSK		cpu_to_le16(1 << 7)

/* security type of the received frame, bits 8..10 */
#define RX_RES_STATUS_SEC_TYPE_MSK	(0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE	(0x0 << 8)
#define RX_RES_STATUS_SEC_TYPE_WEP	(0x1 << 8)
#define RX_RES_STATUS_SEC_TYPE_CCMP	(0x2 << 8)
#define RX_RES_STATUS_SEC_TYPE_TKIP	(0x3 << 8)
#define	RX_RES_STATUS_SEC_TYPE_ERR	(0x7 << 8)

#define RX_RES_STATUS_STATION_FOUND	(1<<6)
#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH	(1<<7)

/* decryption result, bits 11..12 */
#define RX_RES_STATUS_DECRYPT_TYPE_MSK	(0x3 << 11)
#define RX_RES_STATUS_NOT_DECRYPT	(0x0 << 11)
#define RX_RES_STATUS_DECRYPT_OK	(0x3 << 11)
#define RX_RES_STATUS_BAD_ICV_MIC	(0x1 << 11)
#define RX_RES_STATUS_BAD_KEY_TTAK	(0x2 << 11)

#define RX_MPDU_RES_STATUS_ICV_OK	(0x20)
#define RX_MPDU_RES_STATUS_MIC_OK	(0x40)
#define RX_MPDU_RES_STATUS_TTAK_OK	(1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK	(0x800)


/* Layout of the non_cfg_phy words carried in struct iwl_rx_phy_res */
#define IWLAGN_RX_RES_PHY_CNT 8
#define IWLAGN_RX_RES_AGC_IDX     1
#define IWLAGN_RX_RES_RSSI_AB_IDX 2
#define IWLAGN_RX_RES_RSSI_C_IDX  3
#define IWLAGN_OFDM_AGC_MSK 0xfe00
#define IWLAGN_OFDM_AGC_BIT_POS 9
#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK 0x00ff
#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK 0xff00
#define IWLAGN_OFDM_RSSI_A_BIT_POS 0
#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK 0xff0000
#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK 0xff000000
#define IWLAGN_OFDM_RSSI_B_BIT_POS 16
/* C-chain masks reuse the A-chain values because RSSI C lives in a
 * separate phy word (IWLAGN_RX_RES_RSSI_C_IDX, not RSSI_AB_IDX) */
#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK 0x00ff
#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK 0xff00
#define IWLAGN_OFDM_RSSI_C_BIT_POS 0

struct iwlagn_non_cfg_phy {
	__le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT];  /* up to 8 phy entries */
} __packed;
1103
1104
/*
 * REPLY_RX = 0xc3 (response only, not a command)
 * Used only for legacy (non 11n) frames.
 */
struct iwl_rx_phy_res {
	u8 non_cfg_phy_cnt;     /* non configurable DSP phy data byte count */
	u8 cfg_phy_cnt;		/* configurable DSP phy data byte count */
	u8 stat_id;		/* configurable DSP phy data set ID */
	u8 reserved1;
	__le64 timestamp;	/* TSF at on air rise */
	__le32 beacon_time_stamp; /* beacon at on-air rise */
	__le16 phy_flags;	/* general phy flags: band, modulation, ... */
	__le16 channel;		/* channel number */
	u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
	__le32 rate_n_flags;	/* RATE_MCS_* */
	__le16 byte_count;	/* frame's byte-count */
	__le16 frame_time;	/* frame's time on the air */
} __packed;

/* Prefix of an Rx MPDU notification: length of the following frame data. */
struct iwl_rx_mpdu_res_start {
	__le16 byte_count;
	__le16 reserved;
} __packed;
1128
1129
/******************************************************************************
 * (5)
 * Tx Commands & Responses:
 *
 * Driver must place each REPLY_TX command into one of the prioritized Tx
 * queues in host DRAM, shared between driver and device (see comments for
 * SCD registers and Tx/Rx Queues).  When the device's Tx scheduler and uCode
 * are preparing to transmit, the device pulls the Tx command over the PCI
 * bus via one of the device's Tx DMA channels, to fill an internal FIFO
 * from which data will be transmitted.
 *
 * uCode handles all timing and protocol related to control frames
 * (RTS/CTS/ACK), based on flags in the Tx command.  uCode and Tx scheduler
 * handle reception of block-acks; uCode updates the host driver via
 * REPLY_COMPRESSED_BA.
 *
 * uCode handles retrying Tx when an ACK is expected but not received.
 * This includes trying lower data rates than the one requested in the Tx
 * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
 *
 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
 * This command must be executed after every RXON command, before Tx can occur.
 *****************************************************************************/

/* REPLY_TX Tx flags field (struct iwl_tx_cmd.tx_flags) */

/*
 * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
 * before this frame. if CTS-to-self required check
 * RXON_FLG_SELF_CTS_EN status.
 */
#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)

/* 1: Expect ACK from receiving station
 * 0: Don't expect ACK (MAC header's duration field s/b 0)
 * Set this for unicast frames, but not broadcast/multicast. */
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)

/* For agn devices:
 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
 *    Tx command's initial_rate_index indicates first rate to try;
 *    uCode walks through table for additional Tx attempts.
 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
 *    This rate will be used for all Tx attempts; it will not be scaled. */
#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)

/* 1: Expect immediate block-ack.
 * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)

/* Tx antenna selection field; reserved (0) for agn devices. */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)

/* 1: Ignore Bluetooth priority for this frame.
 * 0: Delay Tx until Bluetooth device is done (normal usage). */
#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)

/* 1: uCode overrides sequence control field in MAC header.
 * 0: Driver provides sequence control field in MAC header.
 * Set this for management frames, non-QOS data frames, non-unicast frames,
 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)

/* 1: This frame is non-last MPDU; more fragments are coming.
 * 0: Last fragment, or not using fragmentation. */
#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)

/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
 * 0: No TSF required in outgoing frame.
 * Set this for transmitting beacons and probe responses. */
#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)

/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
 *    alignment of frame's payload data field.
 * 0: No pad
 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
 * field (but not both).  Driver must align frame data (i.e. data following
 * MAC header) to DWORD boundary. */
#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)

/* accelerate aggregation support
 * 0 - no CCMP encryption; 1 - CCMP encryption */
#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)

/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)


/*
 * TX command security control (struct iwl_tx_cmd.sec_ctl)
 */
#define TX_CMD_SEC_WEP  	0x01
#define TX_CMD_SEC_CCM  	0x02
#define TX_CMD_SEC_TKIP		0x03
#define TX_CMD_SEC_MSK		0x03
#define TX_CMD_SEC_SHIFT	6
#define TX_CMD_SEC_KEY128	0x08
1227
/*
 * REPLY_TX = 0x1c (command)
 */

/*
 * 4965 uCode updates these Tx attempt count values in host DRAM.
 * Used for managing Tx retries when expecting block-acks.
 * Driver should set these fields to 0.
 */
struct iwl_dram_scratch {
	u8 try_cnt;		/* Tx attempts */
	u8 bt_kill_cnt;		/* Tx attempts blocked by Bluetooth device */
	__le16 reserved;
} __packed;
1242
/* Body of REPLY_TX (0x1c); the frame's MAC header and payload follow. */
struct iwl_tx_cmd {
	/*
	 * MPDU byte count:
	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
	 * + 8 byte IV for CCM or TKIP (not used for WEP)
	 * + Data payload
	 * + 8-byte MIC (not used for CCM/WEP)
	 * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
	 *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
	 * Range: 14-2342 bytes.
	 */
	__le16 len;

	/*
	 * MPDU or MSDU byte count for next frame.
	 * Used for fragmentation and bursting, but not 11n aggregation.
	 * Same as "len", but for next frame.  Set to 0 if not applicable.
	 */
	__le16 next_frame_len;

	__le32 tx_flags;	/* TX_CMD_FLG_* */

	/* uCode may modify this field of the Tx command (in host DRAM!).
	 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
	struct iwl_dram_scratch scratch;

	/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
	__le32 rate_n_flags;	/* RATE_MCS_* */

	/* Index of destination station in uCode's station table */
	u8 sta_id;

	/* Type of security encryption:  CCM or TKIP */
	u8 sec_ctl;		/* TX_CMD_SEC_* */

	/*
	 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
	 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
	 * data frames, this field may be used to selectively reduce initial
	 * rate (via non-0 value) for special frames (e.g. management), while
	 * still supporting rate scaling for all frames.
	 */
	u8 initial_rate_index;
	u8 reserved;
	u8 key[16];		/* encryption key material */
	__le16 next_frame_flags;
	__le16 reserved2;
	union {
		__le32 life_time;
		__le32 attempt;
	} stop_time;

	/* Host DRAM physical address pointer to "scratch" in this command.
	 * Must be dword aligned.  "0" in dram_lsb_ptr disables usage. */
	__le32 dram_lsb_ptr;
	u8 dram_msb_ptr;

	u8 rts_retry_limit;	/*byte 50 */
	u8 data_retry_limit;	/*byte 51 */
	u8 tid_tspec;
	union {
		__le16 pm_frame_timeout;
		__le16 attempt_duration;
	} timeout;

	/*
	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
	 */
	__le16 driver_txop;

	/*
	 * MAC header goes here, followed by 2 bytes padding if MAC header
	 * length is 26 or 30 bytes, followed by payload data
	 * (two views of the same trailing bytes; both stay zero-length)
	 */
	u8 payload[0];
	struct ieee80211_hdr hdr[0];
} __packed;
1321
/*
 * TX command response is sent after *agn* transmission attempts.
 *
 * both postpone and abort status are expected behavior from uCode. there is
 * no special operation required from driver; except for RFKILL_FLUSH,
 * which required tx flush host command to flush all the tx frames in queues
 */
enum {
	TX_STATUS_SUCCESS = 0x01,
	TX_STATUS_DIRECT_DONE = 0x02,
	/* postpone TX */
	TX_STATUS_POSTPONE_DELAY = 0x40,
	TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
	TX_STATUS_POSTPONE_BT_PRIO = 0x42,
	TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
	TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
	/* abort TX */
	TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
	TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
	TX_STATUS_FAIL_LONG_LIMIT = 0x83,
	TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
	TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
	TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
	TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
	TX_STATUS_FAIL_DEST_PS = 0x88,
	TX_STATUS_FAIL_HOST_ABORTED = 0x89,
	TX_STATUS_FAIL_BT_RETRY = 0x8a,
	TX_STATUS_FAIL_STA_INVALID = 0x8b,
	TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
	TX_STATUS_FAIL_TID_DISABLE = 0x8d,
	TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
	TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
	TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
	TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
};

/* packet mode, reported in bits 8..15 of the tx response status */
#define	TX_PACKET_MODE_REGULAR		0x0000
#define	TX_PACKET_MODE_BURST_SEQ	0x0100
#define	TX_PACKET_MODE_BURST_FIRST	0x0200

enum {
	TX_POWER_PA_NOT_ACTIVE = 0x0,
};

/* bit-field layout of the 32-bit tx response status word */
enum {
	TX_STATUS_MSK = 0x000000ff,		/* bits 0:7 */
	TX_STATUS_DELAY_MSK = 0x00000040,
	TX_STATUS_ABORT_MSK = 0x00000080,
	TX_PACKET_MODE_MSK = 0x0000ff00,	/* bits 8:15 */
	TX_FIFO_NUMBER_MSK = 0x00070000,	/* bits 16:18 */
	TX_RESERVED = 0x00780000,		/* bits 19:22 */
	TX_POWER_PA_DETECT_MSK = 0x7f800000,	/* bits 23:30 */
	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
};
1376
/* *******************************
 * TX aggregation status
 ******************************* */

/* per-frame status bits within an aggregation response */
enum {
	AGG_TX_STATE_TRANSMITTED = 0x00,
	AGG_TX_STATE_UNDERRUN_MSK = 0x01,
	AGG_TX_STATE_BT_PRIO_MSK = 0x02,
	AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
	AGG_TX_STATE_ABORT_MSK = 0x08,
	AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
	AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
	AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40,
	AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
	AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
	AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
	AGG_TX_STATE_DUMP_TX_MSK = 0x200,
	AGG_TX_STATE_DELAY_TX_MSK = 0x400
};

#define AGG_TX_STATUS_MSK	0x00000fff	/* bits 0:11 */
#define AGG_TX_TRY_MSK		0x0000f000	/* bits 12:15 */
#define AGG_TX_TRY_POS		12

/* any of the "this was the last frame sent" reasons */
#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
				     AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
				     AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)

/* # tx attempts for first frame in aggregation */
#define AGG_TX_STATE_TRY_CNT_POS 12
#define AGG_TX_STATE_TRY_CNT_MSK 0xf000

/* Command ID and sequence number of Tx command for this frame */
#define AGG_TX_STATE_SEQ_NUM_POS 16
#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1412
/*
 * REPLY_TX = 0x1c (response)
 *
 * This response may be in one of two slightly different formats, indicated
 * by the frame_count field:
 *
 * 1)  No aggregation (frame_count == 1).  This reports Tx results for
 *     a single frame.  Multiple attempts, at various bit rates, may have
 *     been made for this frame.
 *
 * 2)  Aggregation (frame_count > 1).  This reports Tx results for
 *     2 or more frames that used block-acknowledge.  All frames were
 *     transmitted at same rate.  Rate scaling may have been used if first
 *     frame in this new agg block failed in previous agg block(s).
 *
 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
 * block-ack has not been received by the time the agn device records
 * this status.
 * This status relates to reasons the tx might have been blocked or aborted
 * within the sending station (this agn device), rather than whether it was
 * received successfully by the destination station.
 */
struct agg_tx_status {
	__le16 status;		/* AGG_TX_STATE_* */
	__le16 sequence;	/* Tx cmd sequence number for this frame */
} __packed;
1439
/*
 * definitions for initial rate index field
 * bits [3:0] initial rate index
 * bits [6:4] rate table color, used for the initial rate
 * bit-7 invalid rate indication
 *   i.e. rate was not chosen from rate table
 *   or rate table color was changed during frame retries
 * refer tlc rate info
 */

#define IWL50_TX_RES_INIT_RATE_INDEX_POS	0
#define IWL50_TX_RES_INIT_RATE_INDEX_MSK	0x0f
#define IWL50_TX_RES_RATE_TABLE_COLOR_POS	4
#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK	0x70
#define IWL50_TX_RES_INV_RATE_INDEX_MSK	0x80

/* refer to ra_tid (struct iwlagn_tx_resp below): tid in low nibble,
 * station id in high nibble */
#define IWLAGN_TX_RES_TID_POS	0
#define IWLAGN_TX_RES_TID_MSK	0x0f
#define IWLAGN_TX_RES_RA_POS	4
#define IWLAGN_TX_RES_RA_MSK	0xf0
1461
/* REPLY_TX (0x1c) response payload for agn devices. */
struct iwlagn_tx_resp {
	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
	u8 bt_kill_count;	/* # blocked by bluetooth (unused for agg) */
	u8 failure_rts;		/* # failures due to unsuccessful RTS */
	u8 failure_frame;	/* # failures due to no ACK (unused for agg) */

	/* For non-agg:  Rate at which frame was successful.
	 * For agg:  Rate at which all frames were transmitted. */
	__le32 rate_n_flags;	/* RATE_MCS_* */

	/* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
	 * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
	__le16 wireless_media_time;	/* uSecs */

	u8 pa_status;		/* RF power amplifier measurement (not used) */
	u8 pa_integ_res_a[3];
	u8 pa_integ_res_b[3];
	u8 pa_integ_res_C[3];

	__le32 tfd_info;
	__le16 seq_ctl;
	__le16 byte_cnt;
	u8 tlc_info;
	u8 ra_tid;		/* tid (0:3), sta_id (4:7) */
	__le16 frame_ctrl;
	/*
	 * For non-agg:  frame status TX_STATUS_*
	 * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
	 *           fields follow this one, up to frame_count.
	 *           Bit fields:
	 *           11- 0:  AGG_TX_STATE_* status code
	 *           15-12:  Retry count for 1st frame in aggregation (retries
	 *                   occur if tx failed for this frame when it was a
	 *                   member of a previous aggregation block).  If rate
	 *                   scaling is used, retry count indicates the rate
	 *                   table entry used for all frames in the new agg.
	 *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
	 */
	struct agg_tx_status status;	/* TX status (in aggregation -
					 * status of 1st frame) */
} __packed;
/*
 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
 *
 * Reports Block-Acknowledge from recipient station
 */
struct iwl_compressed_ba_resp {
	__le32 sta_addr_lo32;	/* low 4 bytes of recipient MAC address */
	__le16 sta_addr_hi16;	/* high 2 bytes of recipient MAC address */
	__le16 reserved;

	/* Index of recipient (BA-sending) station in uCode's station table */
	u8 sta_id;
	u8 tid;
	__le16 seq_ctl;
	__le64 bitmap;		/* block-ack bitmap reported by recipient */
	__le16 scd_flow;	/* NOTE(review): presumably scheduler flow id — confirm */
	__le16 scd_ssn;		/* NOTE(review): presumably scheduler start seq # — confirm */
	u8 txed;	/* number of frames sent */
	u8 txed_2_done;	/* number of frames acked */
	__le16 reserved1;
} __packed;
1524
/*
 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
 *
 */

/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK	(1 << 0)

/* # of EDCA prioritized tx fifos (AC_NUM is defined elsewhere) */
#define LINK_QUAL_AC_NUM AC_NUM

/* # entries in rate scale table to support Tx retries */
#define LINK_QUAL_MAX_RETRY_NUM 16

/* Tx antenna selection values */
#define LINK_QUAL_ANT_A_MSK	(1 << 0)
#define LINK_QUAL_ANT_B_MSK	(1 << 1)
#define LINK_QUAL_ANT_MSK	(LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1543
1544
/**
 * struct iwl_link_qual_general_params
 *
 * Used in REPLY_TX_LINK_QUALITY_CMD
 */
struct iwl_link_qual_general_params {
	u8 flags;	/* LINK_QUAL_FLAGS_* */

	/* No entries at or above this (driver chosen) index contain MIMO */
	u8 mimo_delimiter;

	/* Best single antenna to use for single stream (legacy, SISO). */
	u8 single_stream_ant_msk;	/* LINK_QUAL_ANT_* */

	/* Best antennas to use for MIMO (unused for 4965, assumes both). */
	u8 dual_stream_ant_msk;		/* LINK_QUAL_ANT_* */

	/*
	 * If driver needs to use different initial rates for different
	 * EDCA QOS access categories (as implemented by tx fifos 0-3),
	 * this table will set that up, by indicating the indexes in the
	 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
	 * Otherwise, driver should set all entries to 0.
	 *
	 * Entry usage:
	 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
	 */
	u8 start_rate_index[LINK_QUAL_AC_NUM];
} __packed;
1575
/* Bounds/defaults for the struct iwl_link_qual_agg_params fields below */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(100)

#define LINK_QUAL_AGG_DISABLE_START_DEF	(3)
#define LINK_QUAL_AGG_DISABLE_START_MAX	(255)
#define LINK_QUAL_AGG_DISABLE_START_MIN	(0)

#define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)

/**
 * struct iwl_link_qual_agg_params
 *
 * Used in REPLY_TX_LINK_QUALITY_CMD
 */
struct iwl_link_qual_agg_params {

	/*
	 * Maximum number of uSec in aggregation.
	 * default set to 4000 (4 milliseconds) if not configured in .cfg
	 */
	__le16 agg_time_limit;

	/*
	 * Number of Tx retries allowed for a frame, before that frame will
	 * no longer be considered for the start of an aggregation sequence
	 * (scheduler will then try to tx it as single frame).
	 * Driver should set this to 3.
	 */
	u8 agg_dis_start_th;

	/*
	 * Maximum number of frames in aggregation.
	 * 0 = no limit (default).  1 = no aggregation.
	 * Other values = max # frames in aggregation.
	 */
	u8 agg_frame_cnt_limit;

	__le32 reserved;
} __packed;
1618
1619/*
1620 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1621 *
1622 * For agn devices
1623 *
1624 * Each station in the agn device's internal station table has its own table
1625 * of 16
1626 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
1627 * an ACK is not received. This command replaces the entire table for
1628 * one station.
1629 *
1630 * NOTE: Station must already be in agn device's station table.
1631 * Use REPLY_ADD_STA.
1632 *
1633 * The rate scaling procedures described below work well. Of course, other
1634 * procedures are possible, and may work better for particular environments.
1635 *
1636 *
1637 * FILLING THE RATE TABLE
1638 *
1639 * Given a particular initial rate and mode, as determined by the rate
1640 * scaling algorithm described below, the Linux driver uses the following
1641 * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
1642 * Link Quality command:
1643 *
1644 *
1645 * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
1646 * a) Use this same initial rate for first 3 entries.
1647 * b) Find next lower available rate using same mode (SISO or MIMO),
1648 * use for next 3 entries. If no lower rate available, switch to
1649 * legacy mode (no HT40 channel, no MIMO, no short guard interval).
1650 * c) If using MIMO, set command's mimo_delimiter to number of entries
1651 * using MIMO (3 or 6).
1652 * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
1653 * no MIMO, no short guard interval), at the next lower bit rate
1654 * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
1655 * legacy procedure for remaining table entries.
1656 *
1657 * 2) If using legacy initial rate:
1658 * a) Use the initial rate for only one entry.
1659 * b) For each following entry, reduce the rate to next lower available
1660 * rate, until reaching the lowest available rate.
1661 * c) When reducing rate, also switch antenna selection.
1662 * d) Once lowest available rate is reached, repeat this rate until
1663 * rate table is filled (16 entries), switching antenna each entry.
1664 *
1665 *
1666 * ACCUMULATING HISTORY
1667 *
1668 * The rate scaling algorithm for agn devices, as implemented in Linux driver,
1669 * uses two sets of frame Tx success history: One for the current/active
1670 * modulation mode, and one for a speculative/search mode that is being
1671 * attempted. If the speculative mode turns out to be more effective (i.e.
1672 * actual transfer rate is better), then the driver continues to use the
1673 * speculative mode as the new current active mode.
1674 *
1675 * Each history set contains, separately for each possible rate, data for a
1676 * sliding window of the 62 most recent tx attempts at that rate. The data
1677 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1678 * and attempted frames, from which the driver can additionally calculate a
1679 * success ratio (success / attempted) and number of failures
1680 * (attempted - success), and control the size of the window (attempted).
1681 * The driver uses the bit map to remove successes from the success sum, as
1682 * the oldest tx attempts fall out of the window.
1683 *
1684 * When the agn device makes multiple tx attempts for a given frame, each
1685 * attempt might be at a different rate, and have different modulation
1686 * characteristics (e.g. antenna, fat channel, short guard interval), as set
1687 * up in the rate scaling table in the Link Quality command. The driver must
1688 * determine which rate table entry was used for each tx attempt, to determine
1689 * which rate-specific history to update, and record only those attempts that
1690 * match the modulation characteristics of the history set.
1691 *
1692 * When using block-ack (aggregation), all frames are transmitted at the same
1693 * rate, since there is no per-attempt acknowledgment from the destination
1694 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1695 * rate_n_flags field. After receiving a block-ack, the driver can update
1696 * history for the entire block all at once.
1697 *
1698 *
1699 * FINDING BEST STARTING RATE:
1700 *
1701 * When working with a selected initial modulation mode (see below), the
1702 * driver attempts to find a best initial rate. The initial rate is the
1703 * first entry in the Link Quality command's rate table.
1704 *
1705 * 1) Calculate actual throughput (success ratio * expected throughput, see
1706 * table below) for current initial rate. Do this only if enough frames
1707 * have been attempted to make the value meaningful: at least 6 failed
1708 * tx attempts, or at least 8 successes. If not enough, don't try rate
1709 * scaling yet.
1710 *
1711 * 2) Find available rates adjacent to current initial rate. Available means:
1712 * a) supported by hardware &&
1713 * b) supported by association &&
1714 * c) within any constraints selected by user
1715 *
1716 * 3) Gather measured throughputs for adjacent rates. These might not have
1717 * enough history to calculate a throughput. That's okay, we might try
1718 * using one of them anyway!
1719 *
1720 * 4) Try decreasing rate if, for current rate:
1721 * a) success ratio is < 15% ||
1722 * b) lower adjacent rate has better measured throughput ||
1723 * c) higher adjacent rate has worse throughput, and lower is unmeasured
1724 *
1725 * As a sanity check, if decrease was determined above, leave rate
1726 * unchanged if:
1727 * a) lower rate unavailable
1728 * b) success ratio at current rate > 85% (very good)
1729 * c) current measured throughput is better than expected throughput
1730 * of lower rate (under perfect 100% tx conditions, see table below)
1731 *
1732 * 5) Try increasing rate if, for current rate:
 *     a) success ratio is < 15% ||
 *     b) both adjacent rates' throughputs are unmeasured (try it!) ||
 *     c) higher adjacent rate has better measured throughput ||
 *     d) lower adjacent rate has worse throughput, and higher is unmeasured
1737 *
1738 * As a sanity check, if increase was determined above, leave rate
1739 * unchanged if:
1740 * a) success ratio at current rate < 70%. This is not particularly
1741 * good performance; higher rate is sure to have poorer success.
1742 *
1743 * 6) Re-evaluate the rate after each tx frame. If working with block-
1744 * acknowledge, history and statistics may be calculated for the entire
1745 * block (including prior history that fits within the history windows),
1746 * before re-evaluation.
1747 *
1748 * FINDING BEST STARTING MODULATION MODE:
1749 *
1750 * After working with a modulation mode for a "while" (and doing rate scaling),
1751 * the driver searches for a new initial mode in an attempt to improve
1752 * throughput. The "while" is measured by numbers of attempted frames:
1753 *
1754 * For legacy mode, search for new mode after:
1755 * 480 successful frames, or 160 failed frames
1756 * For high-throughput modes (SISO or MIMO), search for new mode after:
1757 * 4500 successful frames, or 400 failed frames
1758 *
1759 * Mode switch possibilities are (3 for each mode):
1760 *
1761 * For legacy:
1762 * Change antenna, try SISO (if HT association), try MIMO (if HT association)
1763 * For SISO:
1764 * Change antenna, try MIMO, try shortened guard interval (SGI)
1765 * For MIMO:
1766 * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
1767 *
1768 * When trying a new mode, use the same bit rate as the old/current mode when
1769 * trying antenna switches and shortened guard interval. When switching to
1770 * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
1771 * for which the expected throughput (under perfect conditions) is about the
1772 * same or slightly better than the actual measured throughput delivered by
1773 * the old/current mode.
1774 *
1775 * Actual throughput can be estimated by multiplying the expected throughput
1776 * by the success ratio (successful / attempted tx frames). Frame size is
1777 * not considered in this calculation; it assumes that frame size will average
1778 * out to be fairly consistent over several samples. The following are
1779 * metric values for expected throughput assuming 100% success ratio.
1780 * Only G band has support for CCK rates:
1781 *
1782 * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60
1783 *
1784 * G: 7 13 35 58 40 57 72 98 121 154 177 186 186
1785 * A: 0 0 0 0 40 57 72 98 121 154 177 186 186
1786 * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202
1787 * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211
1788 * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251
1789 * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257
1790 * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257
1791 * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264
1792 * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289
1793 * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293
1794 *
1795 * After the new mode has been tried for a short while (minimum of 6 failed
1796 * frames or 8 successful frames), compare success ratio and actual throughput
1797 * estimate of the new mode with the old. If either is better with the new
1798 * mode, continue to use the new mode.
1799 *
1800 * Continue comparing modes until all 3 possibilities have been tried.
1801 * If moving from legacy to HT, try all 3 possibilities from the new HT
1802 * mode. After trying all 3, a best mode is found. Continue to use this mode
1803 * for the longer "while" described above (e.g. 480 successful frames for
1804 * legacy), and then repeat the search process.
1805 *
1806 */
/* Body of REPLY_TX_LINK_QUALITY_CMD; see the algorithm description above. */
struct iwl_link_quality_cmd {

	/* Index of destination/recipient station in uCode's station table */
	u8 sta_id;
	u8 reserved1;
	__le16 control;			/* not used */
	struct iwl_link_qual_general_params general_params;
	struct iwl_link_qual_agg_params agg_params;

	/*
	 * Rate info; when using rate-scaling, Tx command's initial_rate_index
	 * specifies 1st Tx rate attempted, via index into this table.
	 * agn devices works its way through table when retrying Tx.
	 */
	struct {
		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
	__le32 reserved2;
} __packed;
1826
1827/*
1828 * BT configuration enable flags:
1829 * bit 0 - 1: BT channel announcement enabled
1830 * 0: disable
1831 * bit 1 - 1: priority of BT device enabled
1832 * 0: disable
1833 * bit 2 - 1: BT 2 wire support enabled
1834 * 0: disable
1835 */
/* BT coexistence enable flags; bit meanings are described in the
 * comment block above.  BT_COEX_DISABLE was previously (identically)
 * defined twice; the redundant duplicate has been removed. */
#define BT_COEX_DISABLE		(0x0)
#define BT_ENABLE_CHANNEL_ANNOUNCE	BIT(0)
#define BT_ENABLE_PRIORITY		BIT(1)
#define BT_ENABLE_2_WIRE		BIT(2)

#define BT_COEX_ENABLE	(BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
1843
/* Min/default/max bounds for struct iwl_bt_cmd fields (lead_time, max_kill)
 * and related BT coexistence tuning parameters. */
#define BT_LEAD_TIME_MIN	(0x0)
#define BT_LEAD_TIME_DEF	(0x1E)
#define BT_LEAD_TIME_MAX	(0xFF)

#define BT_MAX_KILL_MIN		(0x1)
#define BT_MAX_KILL_DEF		(0x5)
#define BT_MAX_KILL_MAX		(0xFF)

#define BT_DURATION_LIMIT_DEF	625
#define BT_DURATION_LIMIT_MAX	1250
#define BT_DURATION_LIMIT_MIN	625

#define BT_ON_THRESHOLD_DEF	4
#define BT_ON_THRESHOLD_MAX	1000
#define BT_ON_THRESHOLD_MIN	1

#define BT_FRAG_THRESHOLD_DEF	0
#define BT_FRAG_THRESHOLD_MAX	0
#define BT_FRAG_THRESHOLD_MIN	0

#define BT_AGG_THRESHOLD_DEF	1200
#define BT_AGG_THRESHOLD_MAX	8000
#define BT_AGG_THRESHOLD_MIN	400
1867
/*
 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
 *
 * agn devices support hardware handshake with Bluetooth device on
 * same platform. Bluetooth device alerts wireless device when it will Tx;
 * wireless device can delay or kill its own Tx to accommodate.
 */
struct iwl_bt_cmd {
	u8 flags;		/* BT_COEX_* / BT_ENABLE_* */
	u8 lead_time;		/* BT_LEAD_TIME_* bounds */
	u8 max_kill;		/* BT_MAX_KILL_* bounds */
	u8 reserved;
	__le32 kill_ack_mask;
	__le32 kill_cts_mask;
} __packed;
1883
/* Flag bits, presumably for struct iwl_basic_bt_cmd "flags" — confirm */
#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION	BIT(0)

/* BT coexistence mode, a 3-bit field within "flags" */
#define IWLAGN_BT_FLAG_COEX_MODE_MASK		(BIT(3)|BIT(4)|BIT(5))
#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT		3
#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED	0
#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W	1
#define IWLAGN_BT_FLAG_COEX_MODE_3W		2
#define IWLAGN_BT_FLAG_COEX_MODE_4W		3

#define IWLAGN_BT_FLAG_UCODE_DEFAULT		BIT(6)
/* Disable Sync PSPoll on SCO/eSCO */
#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE	BIT(7)

#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD	-75 /* dBm */
#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD	-65 /* dBm */

#define IWLAGN_BT_PRIO_BOOST_MAX	0xFF
#define IWLAGN_BT_PRIO_BOOST_MIN	0x00
#define IWLAGN_BT_PRIO_BOOST_DEFAULT	0xF0
#define IWLAGN_BT_PRIO_BOOST_DEFAULT32	0xF0F0F0F0

#define IWLAGN_BT_MAX_KILL_DEFAULT	5

#define IWLAGN_BT3_T7_DEFAULT		1

/* Selects which kill-ack/cts mask variant (defaults below) is in effect */
enum iwl_bt_kill_idx {
	IWL_BT_KILL_DEFAULT = 0,
	IWL_BT_KILL_OVERRIDE = 1,
	IWL_BT_KILL_REDUCE = 2,
};

#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT		cpu_to_le32(0xffff0000)
#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT		cpu_to_le32(0xffff0000)
#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO		cpu_to_le32(0xffffffff)
#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE	cpu_to_le32(0)

#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT	2

#define IWLAGN_BT3_T2_DEFAULT		0xc

/* Bits of the "valid" bitmask in struct iwl_basic_bt_cmd */
#define IWLAGN_BT_VALID_ENABLE_FLAGS	cpu_to_le16(BIT(0))
#define IWLAGN_BT_VALID_BOOST		cpu_to_le16(BIT(1))
#define IWLAGN_BT_VALID_MAX_KILL	cpu_to_le16(BIT(2))
#define IWLAGN_BT_VALID_3W_TIMERS	cpu_to_le16(BIT(3))
#define IWLAGN_BT_VALID_KILL_ACK_MASK	cpu_to_le16(BIT(4))
#define IWLAGN_BT_VALID_KILL_CTS_MASK	cpu_to_le16(BIT(5))
#define IWLAGN_BT_VALID_REDUCED_TX_PWR	cpu_to_le16(BIT(6))
#define IWLAGN_BT_VALID_3W_LUT		cpu_to_le16(BIT(7))

#define IWLAGN_BT_ALL_VALID_MSK		(IWLAGN_BT_VALID_ENABLE_FLAGS | \
					IWLAGN_BT_VALID_BOOST | \
					IWLAGN_BT_VALID_MAX_KILL | \
					IWLAGN_BT_VALID_3W_TIMERS | \
					IWLAGN_BT_VALID_KILL_ACK_MASK | \
					IWLAGN_BT_VALID_KILL_CTS_MASK | \
					IWLAGN_BT_VALID_REDUCED_TX_PWR | \
					IWLAGN_BT_VALID_3W_LUT)

/* Bit 0 of struct iwl_basic_bt_cmd "reduce_txpower" */
#define IWLAGN_BT_REDUCED_TX_PWR	BIT(0)

#define IWLAGN_BT_DECISION_LUT_SIZE	12
1945
/* Common body shared by REPLY_BT_CONFIG v1/v2 (see iwl_bt_cmd_v1/_v2).
 * NOTE(review): unlike struct iwl_bt_cmd above, this struct is not marked
 * __packed; fields appear naturally aligned, but confirm the layout
 * matches the uCode API before relying on it. */
struct iwl_basic_bt_cmd {
	u8 flags;		/* IWLAGN_BT_FLAG_* */
	u8 ledtime;		/* unused */
	u8 max_kill;		/* e.g. IWLAGN_BT_MAX_KILL_DEFAULT */
	u8 bt3_timer_t7_value;	/* e.g. IWLAGN_BT3_T7_DEFAULT */
	__le32 kill_ack_mask;	/* IWLAGN_BT_KILL_ACK_* defaults */
	__le32 kill_cts_mask;	/* IWLAGN_BT_KILL_CTS_* defaults */
	u8 bt3_prio_sample_time;	/* e.g. IWLAGN_BT3_PRIO_SAMPLE_DEFAULT */
	u8 bt3_timer_t2_value;		/* e.g. IWLAGN_BT3_T2_DEFAULT */
	__le16 bt4_reaction_time;	/* unused */
	__le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE];
	/*
	 * bit 0: use reduced tx power for control frame
	 * bit 1 - 7: reserved
	 */
	u8 reduce_txpower;
	u8 reserved;
	__le16 valid;		/* IWLAGN_BT_VALID_* bitmask */
};
1965
/* REPLY_BT_CONFIG body, API version 1 (8-bit prio_boost) */
struct iwl_bt_cmd_v1 {
	struct iwl_basic_bt_cmd basic;
	u8 prio_boost;
	/*
	 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
	 * if configure the following patterns
	 */
	u8 tx_prio_boost;	/* SW boost of WiFi tx priority */
	__le16 rx_prio_boost;	/* SW boost of WiFi rx priority */
};
1976
/* REPLY_BT_CONFIG body, API version 2 (32-bit prio_boost) */
struct iwl_bt_cmd_v2 {
	struct iwl_basic_bt_cmd basic;
	__le32 prio_boost;	/* e.g. IWLAGN_BT_PRIO_BOOST_DEFAULT32 */
	/*
	 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
	 * if configure the following patterns
	 */
	u8 reserved;
	u8 tx_prio_boost;	/* SW boost of WiFi tx priority */
	__le16 rx_prio_boost;	/* SW boost of WiFi rx priority */
};
1988
/* Bit 0 of iwlagn_bt_sco_cmd flags: an SCO (BT voice) link is active */
#define IWLAGN_BT_SCO_ACTIVE	cpu_to_le32(BIT(0))

struct iwlagn_bt_sco_cmd {
	__le32 flags;		/* IWLAGN_BT_SCO_* */
};
1994
1995/******************************************************************************
1996 * (6)
1997 * Spectrum Management (802.11h) Commands, Responses, Notifications:
1998 *
1999 *****************************************************************************/
2000
2001/*
2002 * Spectrum Management
2003 */
/* RXON filter-flag set applied while performing a measurement */
#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK         | \
				 RXON_FILTER_CTL2HOST_MSK        | \
				 RXON_FILTER_ACCEPT_GRP_MSK      | \
				 RXON_FILTER_DIS_DECRYPT_MSK     | \
				 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
				 RXON_FILTER_ASSOC_MSK           | \
				 RXON_FILTER_BCON_AWARE_MSK)

/* One channel entry in the iwl_spectrum_cmd channel table */
struct iwl_measure_channel {
	__le32 duration;	/* measurement duration in extended beacon
				 * format */
	u8 channel;		/* channel to measure */
	u8 type;		/* see enum iwl_measure_type */
	__le16 reserved;
} __packed;
2019
/*
 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
 */
struct iwl_spectrum_cmd {
	__le16 len;		/* number of bytes starting from token */
	u8 token;		/* token id */
	u8 id;			/* measurement id -- 0 or 1 */
	u8 origin;		/* 0 = TGh, 1 = other, 2 = TGk */
	u8 periodic;		/* 1 = periodic */
	__le16 path_loss_timeout;
	__le32 start_time;	/* start time in extended beacon format */
	__le32 reserved2;
	__le32 flags;		/* rxon flags */
	__le32 filter_flags;	/* rxon filter flags */
	__le16 channel_count;	/* minimum 1, maximum 10 */
	__le16 reserved3;
	struct iwl_measure_channel channels[10];
} __packed;
2038
/*
 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
 */
struct iwl_spectrum_resp {
	u8 token;
	u8 id;			/* id of the prior command replaced, or 0xff */
	__le16 status;		/* 0 - command will be handled
				 * 1 - cannot handle (conflicts with another
				 *     measurement) */
} __packed;

/* "state" values reported in iwl_spectrum_notification */
enum iwl_measurement_state {
	IWL_MEASUREMENT_START = 0,
	IWL_MEASUREMENT_STOP = 1,
};

/* "status" values reported in iwl_spectrum_notification */
enum iwl_measurement_status {
	IWL_MEASUREMENT_OK = 0,
	IWL_MEASUREMENT_CONCURRENT = 1,
	IWL_MEASUREMENT_CSA_CONFLICT = 2,
	IWL_MEASUREMENT_TGH_CONFLICT = 3,
	/* 4-5 reserved */
	IWL_MEASUREMENT_STOPPED = 6,
	IWL_MEASUREMENT_TIMEOUT = 7,
	IWL_MEASUREMENT_PERIODIC_FAILED = 8,
};
2065
#define NUM_ELEMENTS_IN_HISTOGRAM 8

/* RPI/noise histogram returned in iwl_spectrum_notification */
struct iwl_measurement_histogram {
	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
} __packed;

/* clear channel availability counters */
struct iwl_measurement_cca_counters {
	__le32 ofdm;
	__le32 cck;
} __packed;

/* Measurement type bitmask for iwl_measure_channel "type" */
enum iwl_measure_type {
	IWL_MEASURE_BASIC = (1 << 0),
	IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
	IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
	IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
	IWL_MEASURE_FRAME = (1 << 4),
	/* bits 5:6 are reserved */
	IWL_MEASURE_IDLE = (1 << 7),
};
2088
/*
 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
 */
struct iwl_spectrum_notification {
	u8 id;			/* measurement id -- 0 or 1 */
	u8 token;
	u8 channel_index;	/* index in measurement channel list */
	u8 state;		/* 0 - start, 1 - stop */
	__le32 start_time;	/* lower 32-bits of TSF */
	u8 band;		/* 0 - 5.2GHz, 1 - 2.4GHz */
	u8 channel;
	u8 type;		/* see enum iwl_measurement_type */
	u8 reserved1;
	/* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
	 * valid if applicable for measurement type requested. */
	__le32 cca_ofdm;	/* cca fraction time in 40Mhz clock periods */
	__le32 cca_cck;		/* cca fraction time in 44Mhz clock periods */
	__le32 cca_time;	/* channel load time in usecs */
	u8 basic_type;		/* 0 - bss, 1 - ofdm preamble, 2 -
				 * unidentified */
	u8 reserved2[3];
	struct iwl_measurement_histogram histogram;
	__le32 stop_time;	/* lower 32-bits of TSF */
	__le32 status;		/* see iwl_measurement_status */
} __packed;
2114
2115/******************************************************************************
2116 * (7)
2117 * Power Management Commands, Responses, Notifications:
2118 *
2119 *****************************************************************************/
2120
2121/**
2122 * struct iwl_powertable_cmd - Power Table Command
2123 * @flags: See below:
2124 *
2125 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
2126 *
2127 * PM allow:
2128 * bit 0 - '0' Driver not allow power management
2129 * '1' Driver allow PM (use rest of parameters)
2130 *
2131 * uCode send sleep notifications:
2132 * bit 1 - '0' Don't send sleep notification
2133 * '1' send sleep notification (SEND_PM_NOTIFICATION)
2134 *
2135 * Sleep over DTIM
 * bit 2 - '0' PM has to wake up every DTIM
 *         '1' PM could sleep over DTIM till listen Interval.
2138 *
2139 * PCI power managed
2140 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2141 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2142 *
2143 * Fast PD
2144 * bit 4 - '1' Put radio to sleep when receiving frame for others
2145 *
2146 * Force sleep Modes
2147 * bit 31/30- '00' use both mac/xtal sleeps
2148 * '01' force Mac sleep
2149 * '10' force xtal sleep
2150 * '11' Illegal set
2151 *
2152 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2153 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2154 * for every DTIM.
2155 */
#define IWL_POWER_VEC_SIZE 5

/* Bits for the __le16 "flags" field of struct iwl_powertable_cmd.
 * NOTE(review): IWL_POWER_DRIVER_ALLOW_SLEEP_MSK and
 * IWL_POWER_POWER_SAVE_ENA_MSK both map to BIT(0); presumably they belong
 * to different uCode API generations -- confirm before changing. */
#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK	cpu_to_le16(BIT(0))
#define IWL_POWER_POWER_SAVE_ENA_MSK		cpu_to_le16(BIT(0))
#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK	cpu_to_le16(BIT(1))
#define IWL_POWER_SLEEP_OVER_DTIM_MSK		cpu_to_le16(BIT(2))
#define IWL_POWER_PCI_PM_MSK			cpu_to_le16(BIT(3))
#define IWL_POWER_FAST_PD			cpu_to_le16(BIT(4))
#define IWL_POWER_BEACON_FILTERING		cpu_to_le16(BIT(5))
#define IWL_POWER_SHADOW_REG_ENA		cpu_to_le16(BIT(6))
#define IWL_POWER_CT_KILL_SET			cpu_to_le16(BIT(7))
#define IWL_POWER_BT_SCO_ENA			cpu_to_le16(BIT(8))
#define IWL_POWER_ADVANCE_PM_ENA_MSK		cpu_to_le16(BIT(9))

/* POWER_TABLE_CMD = 0x77 body; flag semantics documented above */
struct iwl_powertable_cmd {
	__le16 flags;		/* IWL_POWER_* */
	u8 keep_alive_seconds;
	u8 debug_flags;
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
	__le32 keep_alive_beacons;
} __packed;
2179
/*
 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
 * all devices identical.
 */
struct iwl_sleep_notification {
	u8 pm_sleep_mode;	/* one of the sleep-state values below */
	u8 pm_wakeup_src;	/* one of the IWL_PM_WAKEUP_BY_* values below */
	__le16 reserved;
	__le32 sleep_time;
	__le32 tsf_low;
	__le32 bcon_timer;
} __packed;

/* Sleep states.  all devices identical. */
enum {
	IWL_PM_NO_SLEEP = 0,
	IWL_PM_SLP_MAC = 1,
	IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
	IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
	IWL_PM_SLP_PHY = 4,
	IWL_PM_SLP_REPENT = 5,
	IWL_PM_WAKEUP_BY_TIMER = 6,
	IWL_PM_WAKEUP_BY_DRIVER = 7,
	IWL_PM_WAKEUP_BY_RFKILL = 8,
	/* 3 reserved */
	IWL_PM_NUM_OF_MODES = 12,
};
2207
/*
 * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response)
 */
#define CARD_STATE_CMD_DISABLE 0x00	/* Put card to sleep */
#define CARD_STATE_CMD_ENABLE 0x01	/* Wake up card */
#define CARD_STATE_CMD_HALT 0x02	/* Power down permanently */
struct iwl_card_state_cmd {
	__le32 status;		/* CARD_STATE_CMD_* request new power state */
} __packed;

/*
 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
 */
struct iwl_card_state_notif {
	__le32 flags;		/* *_CARD_DISABLED bits below */
} __packed;

/* Bits reported in iwl_card_state_notif "flags" */
#define HW_CARD_DISABLED   0x01
#define SW_CARD_DISABLED   0x02
#define CT_CARD_DISABLED   0x04
#define RXON_CARD_DISABLED 0x10
2229
/* Critical-temperature (CT-kill) configuration.
 * NOTE(review): the _M/_R suffixes presumably refer to measurement
 * calibration coefficients -- confirm against uCode documentation. */
struct iwl_ct_kill_config {
	__le32   reserved;
	__le32   critical_temperature_M;
	__le32   critical_temperature_R;
} __packed;

/* 1000, and 6x00 */
struct iwl_ct_kill_throttling_config {
	__le32   critical_temperature_exit;
	__le32   reserved;
	__le32   critical_temperature_enter;
} __packed;
2242
2243/******************************************************************************
2244 * (8)
2245 * Scan Commands, Responses, Notifications:
2246 *
2247 *****************************************************************************/
2248
/* Values for bit 0 of struct iwl_scan_channel "type" */
#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
#define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)

/**
 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
 *
 * One for each channel in the scan list.
 * Each channel can independently select:
 * 1)  SSID for directed active scans
 * 2)  Txpower setting (for rate specified within Tx command)
 * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
 *     quiet_plcp_th, good_CRC_th)
 *
 * To avoid uCode errors, make sure the following are true (see comments
 * under struct iwl_scan_cmd about max_out_time and quiet_time):
 * 1)  If using passive_dwell (i.e. passive_dwell != 0):
 *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
 * 2)  quiet_time <= active_dwell
 * 3)  If restricting off-channel time (i.e. max_out_time !=0):
 *     passive_dwell < max_out_time
 *     active_dwell < max_out_time
 */

struct iwl_scan_channel {
	/*
	 * type is defined as:
	 * 0:0 1 = active, 0 = passive
	 * 1:20 SSID direct bit map; if a bit is set, then corresponding
	 *      SSID IE is transmitted in probe request.
	 * 21:31 reserved
	 */
	__le32 type;
	__le16 channel;		/* band is selected by iwl_scan_cmd "flags" field */
	u8 tx_gain;		/* gain for analog radio */
	u8 dsp_atten;		/* gain for DSP */
	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
} __packed;
2287
/* set number of direct probes __le32 type: sets bits 1..n of the SSID
 * direct bit map in struct iwl_scan_channel "type" */
#define IWL_SCAN_PROBE_MASK(n)	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))

/**
 * struct iwl_ssid_ie - directed scan network information element
 *
 * Up to 20 of these may appear in REPLY_SCAN_CMD,
 * selected by "type" bit field in struct iwl_scan_channel;
 * each channel may select different ssids from among the 20 entries.
 * SSID IEs get transmitted in reverse order of entry.
 */
struct iwl_ssid_ie {
	u8 id;
	u8 len;
	u8 ssid[32];
} __packed;

#define PROBE_OPTION_MAX		20
#define TX_CMD_LIFE_TIME_INFINITE	cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH_DISABLED	0
#define IWL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
#define IWL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)
#define IWL_MAX_CMD_SIZE 4096
2311
2312/*
2313 * REPLY_SCAN_CMD = 0x80 (command)
2314 *
2315 * The hardware scan command is very powerful; the driver can set it up to
2316 * maintain (relatively) normal network traffic while doing a scan in the
2317 * background. The max_out_time and suspend_time control the ratio of how
2318 * long the device stays on an associated network channel ("service channel")
2319 * vs. how long it's away from the service channel, i.e. tuned to other channels
2320 * for scanning.
2321 *
2322 * max_out_time is the max time off-channel (in usec), and suspend_time
2323 * is how long (in "extended beacon" format) that the scan is "suspended"
2324 * after returning to the service channel. That is, suspend_time is the
2325 * time that we stay on the service channel, doing normal work, between
2326 * scan segments. The driver may set these parameters differently to support
2327 * scanning when associated vs. not associated, and light vs. heavy traffic
2328 * loads when associated.
2329 *
 * After receiving this command, the device's scan engine does the following:
2331 *
2332 * 1) Sends SCAN_START notification to driver
2333 * 2) Checks to see if it has time to do scan for one channel
2334 * 3) Sends NULL packet, with power-save (PS) bit set to 1,
2335 * to tell AP that we're going off-channel
2336 * 4) Tunes to first channel in scan list, does active or passive scan
2337 * 5) Sends SCAN_RESULT notification to driver
2338 * 6) Checks to see if it has time to do scan on *next* channel in list
2339 * 7) Repeats 4-6 until it no longer has time to scan the next channel
2340 * before max_out_time expires
2341 * 8) Returns to service channel
2342 * 9) Sends NULL packet with PS=0 to tell AP that we're back
2343 * 10) Stays on service channel until suspend_time expires
2344 * 11) Repeats entire process 2-10 until list is complete
2345 * 12) Sends SCAN_COMPLETE notification
2346 *
2347 * For fast, efficient scans, the scan command also has support for staying on
2348 * a channel for just a short time, if doing active scanning and getting no
2349 * responses to the transmitted probe request. This time is controlled by
2350 * quiet_time, and the number of received packets below which a channel is
2351 * considered "quiet" is controlled by quiet_plcp_threshold.
2352 *
2353 * For active scanning on channels that have regulatory restrictions against
2354 * blindly transmitting, the scan can listen before transmitting, to make sure
2355 * that there is already legitimate activity on the channel. If enough
2356 * packets are cleanly received on the channel (controlled by good_CRC_th,
2357 * typical value 1), the scan engine starts transmitting probe requests.
2358 *
2359 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2360 *
2361 * To avoid uCode errors, see timing restrictions described under
2362 * struct iwl_scan_channel.
2363 */
2364
/* Values for the scan_flags field of struct iwl_scan_cmd */
enum iwl_scan_flags {
	/* BIT(0) currently unused */
	IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1),
	/* bits 2-7 reserved */
};
2370
2371struct iwl_scan_cmd {
2372 __le16 len;
2373 u8 scan_flags; /* scan flags: see enum iwl_scan_flags */
2374 u8 channel_count; /* # channels in channel list */
2375 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2376 * (only for active scan) */
2377 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2378 __le16 good_CRC_th; /* passive -> active promotion threshold */
2379 __le16 rx_chain; /* RXON_RX_CHAIN_* */
2380 __le32 max_out_time; /* max usec to be away from associated (service)
2381 * channel */
2382 __le32 suspend_time; /* pause scan this long (in "extended beacon
2383 * format") when returning to service chnl:
2384 */
2385 __le32 flags; /* RXON_FLG_* */
2386 __le32 filter_flags; /* RXON_FILTER_* */
2387
2388 /* For active scans (set to all-0s for passive scans).
2389 * Does not include payload. Must specify Tx rate; no rate scaling. */
2390 struct iwl_tx_cmd tx_cmd;
2391
2392 /* For directed active scans (set to all-0s otherwise) */
2393 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
2394
2395 /*
2396 * Probe request frame, followed by channel list.
2397 *
2398 * Size of probe request frame is specified by byte count in tx_cmd.
2399 * Channel list follows immediately after probe request frame.
2400 * Number of channels in list is specified by channel_count.
2401 * Each channel in list is of type:
2402 *
2403 * struct iwl_scan_channel channels[0];
2404 *
2405 * NOTE: Only one band of channels can be scanned per pass. You
2406 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2407 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2408 * before requesting another scan.
2409 */
2410 u8 data[0];
2411} __packed;
2412
2413/* Can abort will notify by complete notification with abort status. */
2414#define CAN_ABORT_STATUS cpu_to_le32(0x1)
2415/* complete notification statuses */
2416#define ABORT_STATUS 0x2
2417
2418/*
2419 * REPLY_SCAN_CMD = 0x80 (response)
2420 */
2421struct iwl_scanreq_notification {
2422 __le32 status; /* 1: okay, 2: cannot fulfill request */
2423} __packed;
2424
2425/*
2426 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
2427 */
2428struct iwl_scanstart_notification {
2429 __le32 tsf_low;
2430 __le32 tsf_high;
2431 __le32 beacon_timer;
2432 u8 channel;
2433 u8 band;
2434 u8 reserved[2];
2435 __le32 status;
2436} __packed;
2437
2438#define SCAN_OWNER_STATUS 0x1
2439#define MEASURE_OWNER_STATUS 0x2
2440
2441#define IWL_PROBE_STATUS_OK 0
2442#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
2443/* error statuses combined with TX_FAILED */
2444#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
2445#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
2446
2447#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
2448/*
2449 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
2450 */
2451struct iwl_scanresults_notification {
2452 u8 channel;
2453 u8 band;
2454 u8 probe_status;
2455 u8 num_probe_not_sent; /* not enough time to send */
2456 __le32 tsf_low;
2457 __le32 tsf_high;
2458 __le32 statistics[NUMBER_OF_STATISTICS];
2459} __packed;
2460
2461/*
2462 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
2463 */
2464struct iwl_scancomplete_notification {
2465 u8 scanned_channels;
2466 u8 status;
2467 u8 bt_status; /* BT On/Off status */
2468 u8 last_channel;
2469 __le32 tsf_low;
2470 __le32 tsf_high;
2471} __packed;
2472
2473
2474/******************************************************************************
2475 * (9)
2476 * IBSS/AP Commands and Notifications:
2477 *
2478 *****************************************************************************/
2479
/* Whether this device is acting as the IBSS (ad-hoc) beacon manager */
enum iwl_ibss_manager {
	IWL_NOT_IBSS_MANAGER = 0,
	IWL_IBSS_MANAGER = 1,
};
2484
2485/*
2486 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2487 */
2488
2489struct iwlagn_beacon_notif {
2490 struct iwlagn_tx_resp beacon_notify_hdr;
2491 __le32 low_tsf;
2492 __le32 high_tsf;
2493 __le32 ibss_mgr_status;
2494} __packed;
2495
2496/*
2497 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2498 */
2499
2500struct iwl_tx_beacon_cmd {
2501 struct iwl_tx_cmd tx;
2502 __le16 tim_idx;
2503 u8 tim_size;
2504 u8 reserved1;
2505 struct ieee80211_hdr frame[0]; /* beacon frame */
2506} __packed;
2507
2508/******************************************************************************
2509 * (10)
2510 * Statistics Commands and Notifications:
2511 *
2512 *****************************************************************************/
2513
2514#define IWL_TEMP_CONVERT 260
2515
2516#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2517#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
2518#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
2519
/* Used for passing to driver number of successes and failures per rate */
struct rate_histogram {
	/* success counts, one union member per band/modulation */
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} success;
	/* failure counts, same layout as success */
	union {
		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
	} failed;
} __packed;
2533
/* statistics command response */

/* uCode debug counters */
struct statistics_dbg {
	__le32 burst_check;
	__le32 burst_count;
	__le32 wait_for_silence_timeout_cnt;
	__le32 reserved[3];
} __packed;

/*
 * Rx PHY-level counters; one instance each for OFDM and CCK
 * (see struct statistics_rx). false_alarm_cnt and plcp_err feed the
 * sensitivity algorithm described at SENSITIVITY_CMD below; both
 * accumulate monotonically beacon to beacon.
 */
struct statistics_rx_phy {
	__le32 ina_cnt;
	__le32 fina_cnt;
	__le32 plcp_err;	/* signal locks abandoned during PHY header */
	__le32 crc32_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 false_alarm_cnt;	/* signal locks abandoned before PHY header */
	__le32 fina_sync_err_cnt;
	__le32 sfd_timeout;
	__le32 fina_timeout;
	__le32 unresponded_rts;
	__le32 rxe_frame_limit_overrun;
	__le32 sent_ack_cnt;
	__le32 sent_cts_cnt;
	__le32 sent_ba_rsp_cnt;
	__le32 dsp_self_kill;
	__le32 mh_format_err;
	__le32 re_acq_main_rssi_sum;
	__le32 reserved3;
} __packed;

/* Rx counters specific to HT (802.11n) PHY, including aggregation */
struct statistics_rx_ht_phy {
	__le32 plcp_err;
	__le32 overrun_err;
	__le32 early_overrun_err;
	__le32 crc32_good;
	__le32 crc32_err;
	__le32 mh_format_err;
	__le32 agg_crc32_good;
	__le32 agg_mpdu_cnt;
	__le32 agg_cnt;
	__le32 unsupport_mcs;
} __packed;
2578
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)

/*
 * General (non-PHY) Rx counters. The beacon_energy_* and
 * beacon_silence_rssi_* fields are the inputs to the sensitivity and
 * chain-balance calibrations documented at SENSITIVITY_CMD and
 * REPLY_PHY_CALIBRATION_CMD below.
 */
struct statistics_rx_non_phy {
	__le32 bogus_cts;	/* CTS received when not expecting CTS */
	__le32 bogus_ack;	/* ACK received when not expecting ACK */
	__le32 non_bssid_frames;	/* number of frames with BSSID that
					 * doesn't belong to the STA BSSID */
	__le32 filtered_frames;	/* count frames that were dumped in the
				 * filtering process */
	__le32 non_channel_beacons;	/* beacons with our bss id but not on
					 * our serving channel */
	__le32 channel_beacons;	/* beacons with our bss id and in our
				 * serving channel */
	__le32 num_missed_bcon;	/* number of missed beacons */
	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
					 * ADC was in saturation */
	__le32 ina_detection_search_time;/* total time (in 0.8us) searched
					  * for INA */
	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
	__le32 interference_data_flag;	/* flag for interference data
					 * availability. 1 when data is
					 * available. */
	__le32 channel_load;		/* counts RX Enable time in uSec */
	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
					 * and CCK) counter */
	__le32 beacon_rssi_a;
	__le32 beacon_rssi_b;
	__le32 beacon_rssi_c;
	__le32 beacon_energy_a;
	__le32 beacon_energy_b;
	__le32 beacon_energy_c;
} __packed;
2613
/* BT-coex variant of the general Rx counters: common block plus BT stats */
struct statistics_rx_non_phy_bt {
	struct statistics_rx_non_phy common;
	/* additional stats for bt */
	__le32 num_bt_kills;
	__le32 reserved[2];
} __packed;

/* Aggregate Rx statistics (non-BT devices) */
struct statistics_rx {
	struct statistics_rx_phy ofdm;
	struct statistics_rx_phy cck;
	struct statistics_rx_non_phy general;
	struct statistics_rx_ht_phy ofdm_ht;
} __packed;

/* Aggregate Rx statistics, BT-coex layout (general block differs) */
struct statistics_rx_bt {
	struct statistics_rx_phy ofdm;
	struct statistics_rx_phy cck;
	struct statistics_rx_non_phy_bt general;
	struct statistics_rx_ht_phy ofdm_ht;
} __packed;
2634
2635/**
2636 * struct statistics_tx_power - current tx power
2637 *
2638 * @ant_a: current tx power on chain a in 1/2 dB step
2639 * @ant_b: current tx power on chain b in 1/2 dB step
2640 * @ant_c: current tx power on chain c in 1/2 dB step
2641 */
2642struct statistics_tx_power {
2643 u8 ant_a;
2644 u8 ant_b;
2645 u8 ant_c;
2646 u8 reserved;
2647} __packed;
2648
2649struct statistics_tx_non_phy_agg {
2650 __le32 ba_timeout;
2651 __le32 ba_reschedule_frames;
2652 __le32 scd_query_agg_frame_cnt;
2653 __le32 scd_query_no_agg;
2654 __le32 scd_query_agg;
2655 __le32 scd_query_mismatch;
2656 __le32 frame_not_ready;
2657 __le32 underrun;
2658 __le32 bt_prio_kill;
2659 __le32 rx_ba_rsp_cnt;
2660} __packed;
2661
/* Tx MAC-level counters */
struct statistics_tx {
	__le32 preamble_cnt;
	__le32 rx_detected_cnt;
	__le32 bt_prio_defer_cnt;	/* Tx deferred due to BT priority */
	__le32 bt_prio_kill_cnt;	/* Tx killed due to BT priority */
	__le32 few_bytes_cnt;
	__le32 cts_timeout;
	__le32 ack_timeout;
	__le32 expected_ack_cnt;
	__le32 actual_ack_cnt;
	__le32 dump_msdu_cnt;
	__le32 burst_abort_next_frame_mismatch_cnt;
	__le32 burst_abort_missing_next_frame_cnt;
	__le32 cts_timeout_collision;
	__le32 ack_or_ba_timeout_collision;
	struct statistics_tx_non_phy_agg agg;
	/*
	 * tx_power is an optional parameter provided by uCode; the 6000
	 * series is the only device family that provides it. On all other
	 * devices these are reserved fields.
	 */
	struct statistics_tx_power tx_power;
	__le32 reserved1;
} __packed;
2686
2687
/* Tx antenna diversity counters */
struct statistics_div {
	__le32 tx_on_a;		/* Tx time on antenna A */
	__le32 tx_on_b;		/* Tx time on antenna B */
	__le32 exec_time;
	__le32 probe_time;
	__le32 reserved1;
	__le32 reserved2;
} __packed;

/* General statistics shared between the BT and non-BT layouts */
struct statistics_general_common {
	__le32 temperature;	/* radio temperature */
	__le32 temperature_m;	/* radio voltage */
	struct statistics_dbg dbg;
	__le32 sleep_time;
	__le32 slots_out;
	__le32 slots_idle;
	__le32 ttl_timestamp;
	struct statistics_div div;
	__le32 rx_enable_counter;
	/*
	 * num_of_sos_states:
	 * count the number of times we have to re-tune
	 * in order to get out of bad PHY status
	 */
	__le32 num_of_sos_states;
} __packed;
2714
/* BT-coex activity counters: Tx/Rx requests granted vs. denied */
struct statistics_bt_activity {
	/* Tx statistics */
	__le32 hi_priority_tx_req_cnt;
	__le32 hi_priority_tx_denied_cnt;
	__le32 lo_priority_tx_req_cnt;
	__le32 lo_priority_tx_denied_cnt;
	/* Rx statistics */
	__le32 hi_priority_rx_req_cnt;
	__le32 hi_priority_rx_denied_cnt;
	__le32 lo_priority_rx_req_cnt;
	__le32 lo_priority_rx_denied_cnt;
} __packed;

/* General statistics, non-BT layout */
struct statistics_general {
	struct statistics_general_common common;
	__le32 reserved2;
	__le32 reserved3;
} __packed;

/* General statistics, BT-coex layout (adds BT activity counters) */
struct statistics_general_bt {
	struct statistics_general_common common;
	struct statistics_bt_activity activity;
	__le32 reserved2;
	__le32 reserved3;
} __packed;
2740
2741#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
2742#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
2743#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
2744
2745/*
2746 * REPLY_STATISTICS_CMD = 0x9c,
2747 * all devices identical.
2748 *
2749 * This command triggers an immediate response containing uCode statistics.
2750 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
2751 *
2752 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2753 * internal copy of the statistics (counters) after issuing the response.
2754 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
2755 *
2756 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2757 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2758 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2759 */
2760#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2761#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
/*
 * Body of REPLY_STATISTICS_CMD = 0x9c; see the CLEAR_STATS /
 * DISABLE_NOTIF flag descriptions above.
 */
struct iwl_statistics_cmd {
	__le32 configuration_flags;	/* IWL_STATS_CONF_* */
} __packed;
2765
2766/*
2767 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
2768 *
2769 * By default, uCode issues this notification after receiving a beacon
2770 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2771 * REPLY_STATISTICS_CMD 0x9c, above.
2772 *
2773 * Statistics counters continue to increment beacon after beacon, but are
2774 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
2775 * 0x9c with CLEAR_STATS bit set (see above).
2776 *
2777 * uCode also issues this notification during scans. uCode clears statistics
2778 * appropriately so that each notification contains statistics for only the
2779 * one channel that has just been scanned.
2780 */
2781#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
2782#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
2783
/* STATISTICS_NOTIFICATION 0x9d payload, non-BT layout */
struct iwl_notif_statistics {
	__le32 flag;		/* STATISTICS_REPLY_FLG_* */
	struct statistics_rx rx;
	struct statistics_tx tx;
	struct statistics_general general;
} __packed;

/* STATISTICS_NOTIFICATION 0x9d payload, BT-coex layout */
struct iwl_bt_notif_statistics {
	__le32 flag;		/* STATISTICS_REPLY_FLG_* */
	struct statistics_rx_bt rx;
	struct statistics_tx tx;
	struct statistics_general_bt general;
} __packed;
2797
2798/*
2799 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
2800 *
 * uCode sends a MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
 * that beacons were missed, regardless of how many. The notification carries
 * the full beacon accounting: the total number of missed beacons, the number
 * of consecutive missed beacons, the number of beacons received, and the
 * number of beacons expected.
2807 *
2808 * If uCode detected consecutive_missed_beacons > 5, it will reset the radio
2809 * in order to bring the radio/PHY back to working state; which has no relation
2810 * to when driver will perform sensitivity calibration.
2811 *
 * The driver should set its own missed_beacon_threshold to decide when to
 * perform sensitivity calibration, based on the number of consecutive missed
 * beacons, in order to improve overall performance, especially in noisy
 * environments.
2815 *
2816 */
2817
2818#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
2819#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
2820#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
2821
/* MISSED_BEACONS_NOTIFICATION 0xa2 payload; see description above */
struct iwl_missed_beacon_notif {
	__le32 consecutive_missed_beacons;	/* missed in a row */
	__le32 total_missed_becons;	/* total missed; NOTE: "becons"
					 * misspelling is part of the ABI
					 * name, do not "fix" it */
	__le32 num_expected_beacons;
	__le32 num_recvd_beacons;
} __packed;
2828
2829
2830/******************************************************************************
2831 * (11)
2832 * Rx Calibration Commands:
2833 *
2834 * With the uCode used for open source drivers, most Tx calibration (except
2835 * for Tx Power) and most Rx calibration is done by uCode during the
2836 * "initialize" phase of uCode boot. Driver must calibrate only:
2837 *
2838 * 1) Tx power (depends on temperature), described elsewhere
2839 * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
2840 * 3) Receiver sensitivity (to optimize signal detection)
2841 *
2842 *****************************************************************************/
2843
2844/**
2845 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
2846 *
2847 * This command sets up the Rx signal detector for a sensitivity level that
2848 * is high enough to lock onto all signals within the associated network,
2849 * but low enough to ignore signals that are below a certain threshold, so as
2850 * not to have too many "false alarms". False alarms are signals that the
2851 * Rx DSP tries to lock onto, but then discards after determining that they
2852 * are noise.
2853 *
2854 * The optimum number of false alarms is between 5 and 50 per 200 TUs
2855 * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
2856 * time listening, not transmitting). Driver must adjust sensitivity so that
2857 * the ratio of actual false alarms to actual Rx time falls within this range.
2858 *
2859 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
2860 * received beacon. These provide information to the driver to analyze the
2861 * sensitivity. Don't analyze statistics that come in from scanning, or any
2862 * other non-associated-network source. Pertinent statistics include:
2863 *
2864 * From "general" statistics (struct statistics_rx_non_phy):
2865 *
2866 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
2867 * Measure of energy of desired signal. Used for establishing a level
2868 * below which the device does not detect signals.
2869 *
2870 * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
2871 * Measure of background noise in silent period after beacon.
2872 *
2873 * channel_load
2874 * uSecs of actual Rx time during beacon period (varies according to
2875 * how much time was spent transmitting).
2876 *
2877 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
2878 *
2879 * false_alarm_cnt
2880 * Signal locks abandoned early (before phy-level header).
2881 *
2882 * plcp_err
2883 * Signal locks abandoned late (during phy-level header).
2884 *
2885 * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
2886 * beacon to beacon, i.e. each value is an accumulation of all errors
2887 * before and including the latest beacon. Values will wrap around to 0
2888 * after counting up to 2^32 - 1. Driver must differentiate vs.
2889 * previous beacon's values to determine # false alarms in the current
2890 * beacon period.
2891 *
2892 * Total number of false alarms = false_alarms + plcp_errs
2893 *
2894 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
2895 * (notice that the start points for OFDM are at or close to settings for
2896 * maximum sensitivity):
2897 *
2898 * START / MIN / MAX
2899 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
2900 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
2901 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
2902 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
2903 *
2904 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
2905 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
2906 * by *adding* 1 to all 4 of the table entries above, up to the max for
2907 * each entry. Conversely, if false alarm rate is too low (less than 5
2908 * for each 204.8 msecs listening), *subtract* 1 from each entry to
2909 * increase sensitivity.
2910 *
2911 * For CCK sensitivity, keep track of the following:
2912 *
2913 * 1). 20-beacon history of maximum background noise, indicated by
2914 * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
2915 * 3 receivers. For any given beacon, the "silence reference" is
2916 * the maximum of last 60 samples (20 beacons * 3 receivers).
2917 *
2918 * 2). 10-beacon history of strongest signal level, as indicated
2919 * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
2920 * i.e. the strength of the signal through the best receiver at the
2921 * moment. These measurements are "upside down", with lower values
2922 * for stronger signals, so max energy will be *minimum* value.
2923 *
2924 * Then for any given beacon, the driver must determine the *weakest*
2925 * of the strongest signals; this is the minimum level that needs to be
2926 * successfully detected, when using the best receiver at the moment.
2927 * "Max cck energy" is the maximum (higher value means lower energy!)
2928 * of the last 10 minima. Once this is determined, driver must add
2929 * a little margin by adding "6" to it.
2930 *
2931 * 3). Number of consecutive beacon periods with too few false alarms.
2932 * Reset this to 0 at the first beacon period that falls within the
2933 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
2934 *
2935 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
2936 * (notice that the start points for CCK are at maximum sensitivity):
2937 *
2938 * START / MIN / MAX
2939 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
2940 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
2941 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
2942 *
2943 * If actual rate of CCK false alarms (+ plcp_errors) is too high
2944 * (greater than 50 for each 204.8 msecs listening), method for reducing
2945 * sensitivity is:
2946 *
2947 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
2948 * up to max 400.
2949 *
2950 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
2951 * sensitivity has been reduced a significant amount; bring it up to
2952 * a moderate 161. Otherwise, *add* 3, up to max 200.
2953 *
2954 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
2955 * sensitivity has been reduced only a moderate or small amount;
2956 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
2957 * down to min 0. Otherwise (if gain has been significantly reduced),
2958 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
2959 *
2960 * b) Save a snapshot of the "silence reference".
2961 *
2962 * If actual rate of CCK false alarms (+ plcp_errors) is too low
2963 * (less than 5 for each 204.8 msecs listening), method for increasing
2964 * sensitivity is used only if:
2965 *
2966 * 1a) Previous beacon did not have too many false alarms
2967 * 1b) AND difference between previous "silence reference" and current
2968 * "silence reference" (prev - current) is 2 or more,
2969 * OR 2) 100 or more consecutive beacon periods have had rate of
2970 * less than 5 false alarms per 204.8 milliseconds rx time.
2971 *
2972 * Method for increasing sensitivity:
2973 *
2974 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
2975 * down to min 125.
2976 *
2977 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
2978 * down to min 200.
2979 *
2980 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
2981 *
2982 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
2983 * (between 5 and 50 for each 204.8 msecs listening):
2984 *
2985 * 1) Save a snapshot of the silence reference.
2986 *
2987 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
2988 * give some extra margin to energy threshold by *subtracting* 8
2989 * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
2990 *
2991 * For all cases (too few, too many, good range), make sure that the CCK
2992 * detection threshold (energy) is below the energy level for robust
2993 * detection over the past 10 beacon periods, the "Max cck energy".
2994 * Lower values mean higher energy; this means making sure that the value
2995 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
2996 *
2997 */
2998
2999/*
3000 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
3001 */
3002#define HD_TABLE_SIZE (11) /* number of entries */
3003#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
3004#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
3005#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
3006#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
3007#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
3008#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
3009#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
3010#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
3011#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
3012#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
3013#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
3014
3015/*
3016 * Additional table entries in enhance SENSITIVITY_CMD
3017 */
3018#define HD_INA_NON_SQUARE_DET_OFDM_INDEX (11)
3019#define HD_INA_NON_SQUARE_DET_CCK_INDEX (12)
3020#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX (13)
3021#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX (14)
3022#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (15)
3023#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX (16)
3024#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX (17)
3025#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX (18)
3026#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX (19)
3027#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX (20)
3028#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX (21)
3029#define HD_RESERVED (22)
3030
3031/* number of entries for enhanced tbl */
3032#define ENHANCE_HD_TABLE_SIZE (23)
3033
3034/* number of additional entries for enhanced tbl */
3035#define ENHANCE_HD_TABLE_ENTRIES (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE)
3036
3037#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V1 cpu_to_le16(0)
3038#define HD_INA_NON_SQUARE_DET_CCK_DATA_V1 cpu_to_le16(0)
3039#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1 cpu_to_le16(0)
3040#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(668)
3041#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4)
3042#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(486)
3043#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(37)
3044#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1 cpu_to_le16(853)
3045#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1 cpu_to_le16(4)
3046#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1 cpu_to_le16(476)
3047#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1 cpu_to_le16(99)
3048
3049#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V2 cpu_to_le16(1)
3050#define HD_INA_NON_SQUARE_DET_CCK_DATA_V2 cpu_to_le16(1)
3051#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2 cpu_to_le16(1)
3052#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(600)
3053#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(40)
3054#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(486)
3055#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(45)
3056#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2 cpu_to_le16(853)
3057#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2 cpu_to_le16(60)
3058#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2 cpu_to_le16(476)
3059#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2 cpu_to_le16(99)
3060
3061
3062/* Control field in struct iwl_sensitivity_cmd */
3063#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
3064#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
3065
3066/**
3067 * struct iwl_sensitivity_cmd
3068 * @control: (1) updates working table, (0) updates default table
3069 * @table: energy threshold values, use HD_* as index into table
3070 *
3071 * Always use "1" in "control" to update uCode's working table and DSP.
3072 */
/* SENSITIVITY_CMD 0xa8 body; see the algorithm description above */
struct iwl_sensitivity_cmd {
	__le16 control;			/* always use "1" */
	__le16 table[HD_TABLE_SIZE];	/* use HD_* as index */
} __packed;
3077
3078/*
3079 *
3080 */
3081struct iwl_enhance_sensitivity_cmd {
3082 __le16 control; /* always use "1" */
3083 __le16 enhance_table[ENHANCE_HD_TABLE_SIZE]; /* use HD_* as index */
3084} __packed;
3085
3086
3087/**
3088 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3089 *
3090 * This command sets the relative gains of agn device's 3 radio receiver chains.
3091 *
3092 * After the first association, driver should accumulate signal and noise
3093 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
3094 * beacons from the associated network (don't collect statistics that come
3095 * in from scanning, or any other non-network source).
3096 *
3097 * DISCONNECTED ANTENNA:
3098 *
3099 * Driver should determine which antennas are actually connected, by comparing
3100 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3101 * following values over 20 beacons, one accumulator for each of the chains
3102 * a/b/c, from struct statistics_rx_non_phy:
3103 *
3104 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3105 *
3106 * Find the strongest signal from among a/b/c. Compare the other two to the
3107 * strongest. If any signal is more than 15 dB (times 20, unless you
3108 * divide the accumulated values by 20) below the strongest, the driver
3109 * considers that antenna to be disconnected, and should not try to use that
3110 * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
3111 * driver should declare the stronger one as connected, and attempt to use it
3112 * (A and B are the only 2 Tx chains!).
3113 *
3114 *
3115 * RX BALANCE:
3116 *
3117 * Driver should balance the 3 receivers (but just the ones that are connected
3118 * to antennas, see above) for gain, by comparing the average signal levels
3119 * detected during the silence after each beacon (background noise).
3120 * Accumulate (add) the following values over 20 beacons, one accumulator for
3121 * each of the chains a/b/c, from struct statistics_rx_non_phy:
3122 *
3123 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3124 *
3125 * Find the weakest background noise level from among a/b/c. This Rx chain
3126 * will be the reference, with 0 gain adjustment. Attenuate other channels by
3127 * finding noise difference:
3128 *
3129 * (accum_noise[i] - accum_noise[reference]) / 30
3130 *
3131 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3132 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
3133 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3134 * and set bit 2 to indicate "reduce gain". The value for the reference
3135 * (weakest) chain should be "0".
3136 *
3137 * diff_gain_[abc] bit fields:
3138 * 2: (1) reduce gain, (0) increase gain
3139 * 1-0: amount of gain, units of 1.5 dB
3140 */
3141
/*
 * PHY calibration command op codes for this device series
 * (presumably carried in struct iwl_calib_hdr.op_code -- confirm
 * against the calibration code in calib.c).
 */
enum {
	IWL_PHY_CALIBRATE_DC_CMD = 8,
	IWL_PHY_CALIBRATE_LO_CMD = 9,
	IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
	IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
	IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
	IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
	IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18,
};
3152
3153/* This enum defines the bitmap of various calibrations to enable in both
3154 * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
3155 */
3156enum iwl_ucode_calib_cfg {
3157 IWL_CALIB_CFG_RX_BB_IDX = BIT(0),
3158 IWL_CALIB_CFG_DC_IDX = BIT(1),
3159 IWL_CALIB_CFG_LO_IDX = BIT(2),
3160 IWL_CALIB_CFG_TX_IQ_IDX = BIT(3),
3161 IWL_CALIB_CFG_RX_IQ_IDX = BIT(4),
3162 IWL_CALIB_CFG_NOISE_IDX = BIT(5),
3163 IWL_CALIB_CFG_CRYSTAL_IDX = BIT(6),
3164 IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(7),
3165 IWL_CALIB_CFG_PAPD_IDX = BIT(8),
3166 IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(9),
3167 IWL_CALIB_CFG_TX_PWR_IDX = BIT(10),
3168};
3169
3170#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
3171 IWL_CALIB_CFG_DC_IDX | \
3172 IWL_CALIB_CFG_LO_IDX | \
3173 IWL_CALIB_CFG_TX_IQ_IDX | \
3174 IWL_CALIB_CFG_RX_IQ_IDX | \
3175 IWL_CALIB_CFG_CRYSTAL_IDX)
3176
3177#define IWL_CALIB_RT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
3178 IWL_CALIB_CFG_DC_IDX | \
3179 IWL_CALIB_CFG_LO_IDX | \
3180 IWL_CALIB_CFG_TX_IQ_IDX | \
3181 IWL_CALIB_CFG_RX_IQ_IDX | \
3182 IWL_CALIB_CFG_TEMPERATURE_IDX | \
3183 IWL_CALIB_CFG_PAPD_IDX | \
3184 IWL_CALIB_CFG_TX_PWR_IDX | \
3185 IWL_CALIB_CFG_CRYSTAL_IDX)
3186
3187#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0))
3188
/* Per-calibration configuration element used inside CALIBRATION_CFG_CMD */
struct iwl_calib_cfg_elmnt_s {
	__le32 is_enable;	/* enable/disable this calibration set */
	__le32 start;		/* presumably triggers the calibration -- TODO confirm */
	__le32 send_res;	/* presumably "send results to host" -- TODO confirm */
	__le32 apply_res;	/* presumably "apply results in ucode" -- TODO confirm */
	__le32 reserved;
} __packed;

/* Calibration configuration: one element each for one-shot ("once") and
 * periodic ("perd") calibrations, plus flags such as
 * IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK.
 */
struct iwl_calib_cfg_status_s {
	struct iwl_calib_cfg_elmnt_s once;
	struct iwl_calib_cfg_elmnt_s perd;
	__le32 flags;
} __packed;

/* CALIBRATION_CFG_CMD payload: separate configuration for ucode-driven
 * and driver-driven calibrations (names suggest this split -- TODO confirm).
 */
struct iwl_calib_cfg_cmd {
	struct iwl_calib_cfg_status_s ucd_calib_cfg;
	struct iwl_calib_cfg_status_s drv_calib_cfg;
	__le32 reserved1;
} __packed;

/* Common header preceding every calibration command / result payload */
struct iwl_calib_hdr {
	u8 op_code;		/* one of IWL_PHY_CALIBRATE_* */
	u8 first_group;
	u8 groups_num;
	u8 data_valid;
} __packed;
3215
3216struct iwl_calib_cmd {
3217 struct iwl_calib_hdr hdr;
3218 u8 data[0];
3219} __packed;
3220
/* Crystal frequency calibration (IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD) */
struct iwl_calib_xtal_freq_cmd {
	struct iwl_calib_hdr hdr;
	u8 cap_pin1;		/* crystal capacitor trim, pin 1 */
	u8 cap_pin2;		/* crystal capacitor trim, pin 2 */
	u8 pad[2];
} __packed;

/* Default radio temperature-sensor offset (ucode units -- TODO confirm) */
#define DEFAULT_RADIO_SENSOR_OFFSET	cpu_to_le16(2700)

/* Temperature offset calibration (IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD) */
struct iwl_calib_temperature_offset_cmd {
	struct iwl_calib_hdr hdr;
	__le16 radio_sensor_offset;
	__le16 reserved;
} __packed;

/* v2 temperature offset calibration: separate high/low offsets plus a
 * burnt-in voltage reference read from NVM (presumably -- TODO confirm).
 * NOTE: camelCase "burntVoltageRef" is kept as-is; it is part of the
 * established firmware API naming.
 */
struct iwl_calib_temperature_offset_v2_cmd {
	struct iwl_calib_hdr hdr;
	__le16 radio_sensor_offset_high;
	__le16 radio_sensor_offset_low;
	__le16 burntVoltageRef;
	__le16 reserved;
} __packed;
3242
3243/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
3244struct iwl_calib_chain_noise_reset_cmd {
3245 struct iwl_calib_hdr hdr;
3246 u8 data[0];
3247};
3248
/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD
 *
 * Delta gain values per Rx chain; see the "RX BALANCE" notes earlier in
 * this file for how diff_gain values are computed (units of 1.5 dB,
 * bit 2 set means "reduce gain").
 */
struct iwl_calib_chain_noise_gain_cmd {
	struct iwl_calib_hdr hdr;
	u8 delta_gain_1;
	u8 delta_gain_2;
	u8 pad[2];
} __packed;
3256
3257/******************************************************************************
3258 * (12)
3259 * Miscellaneous Commands:
3260 *
3261 *****************************************************************************/
3262
/*
 * LEDs Command & Response
 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
 *
 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
 * this command turns it on or off, or sets up a periodic blinking cycle.
 */
struct iwl_led_cmd {
	__le32 interval;	/* "interval" in uSec */
	u8 id;			/* 1: Activity, 2: Link, 3: Tech */
	u8 off;			/* # intervals off while blinking;
				 * "0", with >0 "on" value, turns LED on */
	u8 on;			/* # intervals on while blinking;
				 * "0", regardless of "off", turns LED off */
	u8 reserved;
} __packed;
3279
/*
 * station priority table entries
 * also used as potential "events" value for both
 * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD
 */

/*
 * COEX events entry flag masks
 * RP - Requested Priority
 * WP - Win Medium Priority: priority assigned when the contention has been won
 */
#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG	(0x1)
#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG	(0x2)
#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG	(0x4)

/* Requested priority (RP) per coex event -- indexed by the COEX_* enum
 * below via iwl_wimax_coex_cmd.sta_prio[].request_prio */
#define COEX_CU_UNASSOC_IDLE_RP		4
#define COEX_CU_UNASSOC_MANUAL_SCAN_RP	4
#define COEX_CU_UNASSOC_AUTO_SCAN_RP	4
#define COEX_CU_CALIBRATION_RP		4
#define COEX_CU_PERIODIC_CALIBRATION_RP	4
#define COEX_CU_CONNECTION_ESTAB_RP	4
#define COEX_CU_ASSOCIATED_IDLE_RP	4
#define COEX_CU_ASSOC_MANUAL_SCAN_RP	4
#define COEX_CU_ASSOC_AUTO_SCAN_RP	4
#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP	4
#define COEX_CU_RF_ON_RP		6
#define COEX_CU_RF_OFF_RP		4
#define COEX_CU_STAND_ALONE_DEBUG_RP	6
#define COEX_CU_IPAN_ASSOC_LEVEL_RP	4
#define COEX_CU_RSRVD1_RP		4
#define COEX_CU_RSRVD2_RP		4

/* Win-medium priority (WP) per coex event -- used for
 * iwl_wimax_coex_cmd.sta_prio[].win_medium_prio */
#define COEX_CU_UNASSOC_IDLE_WP		3
#define COEX_CU_UNASSOC_MANUAL_SCAN_WP	3
#define COEX_CU_UNASSOC_AUTO_SCAN_WP	3
#define COEX_CU_CALIBRATION_WP		3
#define COEX_CU_PERIODIC_CALIBRATION_WP	3
#define COEX_CU_CONNECTION_ESTAB_WP	3
#define COEX_CU_ASSOCIATED_IDLE_WP	3
#define COEX_CU_ASSOC_MANUAL_SCAN_WP	3
#define COEX_CU_ASSOC_AUTO_SCAN_WP	3
#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP	3
#define COEX_CU_RF_ON_WP		3
#define COEX_CU_RF_OFF_WP		3
#define COEX_CU_STAND_ALONE_DEBUG_WP	6
#define COEX_CU_IPAN_ASSOC_LEVEL_WP	3
#define COEX_CU_RSRVD1_WP		3
#define COEX_CU_RSRVD2_WP		3

/* Per-event flags (combinations of COEX_EVT_FLAG_*) */
#define COEX_UNASSOC_IDLE_FLAGS		0
#define COEX_UNASSOC_MANUAL_SCAN_FLAGS		\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
#define COEX_UNASSOC_AUTO_SCAN_FLAGS		\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
#define COEX_CALIBRATION_FLAGS			\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
#define COEX_PERIODIC_CALIBRATION_FLAGS		0
/*
 * COEX_CONNECTION_ESTAB:
 * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
 */
#define COEX_CONNECTION_ESTAB_FLAGS		\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
	COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
#define COEX_ASSOCIATED_IDLE_FLAGS		0
#define COEX_ASSOC_MANUAL_SCAN_FLAGS		\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
#define COEX_ASSOC_AUTO_SCAN_FLAGS		\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS		0
#define COEX_RF_ON_FLAGS			0
#define COEX_RF_OFF_FLAGS			0
#define COEX_STAND_ALONE_DEBUG_FLAGS		\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
#define COEX_IPAN_ASSOC_LEVEL_FLAGS		\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
	COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
#define COEX_RSRVD1_FLAGS			0
#define COEX_RSRVD2_FLAGS			0
/*
 * COEX_CU_RF_ON is the event wrapping all radio ownership.
 * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
 */
#define COEX_CU_RF_ON_FLAGS			\
	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
	COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3375
3376
/* Coex event indices; also index iwl_wimax_coex_cmd.sta_prio[] */
enum {
	/* un-association part */
	COEX_UNASSOC_IDLE		= 0,
	COEX_UNASSOC_MANUAL_SCAN	= 1,
	COEX_UNASSOC_AUTO_SCAN		= 2,
	/* calibration */
	COEX_CALIBRATION		= 3,
	COEX_PERIODIC_CALIBRATION	= 4,
	/* connection */
	COEX_CONNECTION_ESTAB		= 5,
	/* association part */
	COEX_ASSOCIATED_IDLE		= 6,
	COEX_ASSOC_MANUAL_SCAN		= 7,
	COEX_ASSOC_AUTO_SCAN		= 8,
	COEX_ASSOC_ACTIVE_LEVEL		= 9,
	/* RF ON/OFF */
	COEX_RF_ON			= 10,
	COEX_RF_OFF			= 11,
	COEX_STAND_ALONE_DEBUG		= 12,
	/* IPAN */
	COEX_IPAN_ASSOC_LEVEL		= 13,
	/* reserved */
	COEX_RSRVD1			= 14,
	COEX_RSRVD2			= 15,
	COEX_NUM_OF_EVENTS		= 16	/* array size; keep last */
};

/*
 * Coexistence WIFI/WIMAX Command
 * COEX_PRIORITY_TABLE_CMD = 0x5a
 *
 * One entry per coex event; priorities come from the COEX_CU_*_RP/WP
 * defines above, flags from the COEX_*_FLAGS defines.
 */
struct iwl_wimax_coex_event_entry {
	u8 request_prio;	/* RP: requested priority */
	u8 win_medium_prio;	/* WP: priority once contention is won */
	u8 reserved;
	u8 flags;		/* COEX_EVT_FLAG_* bits */
} __packed;

/* COEX flag masks */

/* Station table is valid */
#define COEX_FLAGS_STA_TABLE_VALID_MSK	(0x1)
/* UnMask wake up src at unassociated sleep */
#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK	(0x4)
/* UnMask wake up src at associated sleep */
#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK	(0x8)
/* Enable CoEx feature. */
#define COEX_FLAGS_COEX_ENABLE_MSK	(0x80)

/* COEX_PRIORITY_TABLE_CMD payload */
struct iwl_wimax_coex_cmd {
	u8 flags;		/* COEX_FLAGS_* bits */
	u8 reserved[3];
	struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
} __packed;
3432
/*
 * Coexistence MEDIUM NOTIFICATION
 * COEX_MEDIUM_NOTIFICATION = 0x5b
 *
 * notification from uCode to host to indicate medium changes
 *
 */
/*
 * status field
 * bit 0 - 2: medium status
 * bit 3: medium change indication
 * bit 4 - 31: reserved
 */
/* status option values, (0 - 2 bits) */
#define COEX_MEDIUM_BUSY	(0x0) /* radio belongs to WiMAX */
#define COEX_MEDIUM_ACTIVE	(0x1) /* radio belongs to WiFi */
#define COEX_MEDIUM_PRE_RELEASE	(0x2) /* received radio release */
#define COEX_MEDIUM_MSK		(0x7)

/* send notification status (1 bit) */
#define COEX_MEDIUM_CHANGED	(0x8)
#define COEX_MEDIUM_CHANGED_MSK	(0x8)
#define COEX_MEDIUM_SHIFT	(3)

struct iwl_coex_medium_notification {
	__le32 status;		/* see bit layout above */
	__le32 events;		/* presumably COEX_* event bitmap -- TODO confirm */
} __packed;

/*
 * Coexistence EVENT Command
 * COEX_EVENT_CMD = 0x5c
 *
 * send from host to uCode for coex event request.
 */
/* flags options */
#define COEX_EVENT_REQUEST_MSK	(0x1)

struct iwl_coex_event_cmd {
	u8 flags;		/* COEX_EVENT_REQUEST_MSK */
	u8 event;		/* one of the COEX_* event indices */
	__le16 reserved;
} __packed;

struct iwl_coex_event_resp {
	__le32 status;
} __packed;
3480
3481
3482/******************************************************************************
3483 * Bluetooth Coexistence commands
3484 *
3485 *****************************************************************************/
3486
/*
 * BT Status notification
 * REPLY_BT_COEX_PROFILE_NOTIF = 0xce
 */
enum iwl_bt_coex_profile_traffic_load {
	IWL_BT_COEX_TRAFFIC_LOAD_NONE		= 0,
	IWL_BT_COEX_TRAFFIC_LOAD_LOW		= 1,
	IWL_BT_COEX_TRAFFIC_LOAD_HIGH		= 2,
	IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS	= 3,
/*
 * No further values are defined: even though the carrying field is a
 * u8, the indication from the BT device only has two bits.
 */
};

/* UART message type values for the msg-type field of frame1 */
#define BT_SESSION_ACTIVITY_1_UART_MSG	0x1
#define BT_SESSION_ACTIVITY_2_UART_MSG	0x2
3504
/* BT UART message - Share Part (BT -> WiFi)
 *
 * Each BT_UART_MSG_FRAMEn<FIELD>_POS/_MSK pair describes a bit-field
 * within byte "framen" of struct iwl_bt_uart_msg (defined below).
 */
#define BT_UART_MSG_FRAME1MSGTYPE_POS		(0)
#define BT_UART_MSG_FRAME1MSGTYPE_MSK		\
		(0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
#define BT_UART_MSG_FRAME1SSN_POS		(3)
#define BT_UART_MSG_FRAME1SSN_MSK		\
		(0x3 << BT_UART_MSG_FRAME1SSN_POS)
#define BT_UART_MSG_FRAME1UPDATEREQ_POS		(5)
#define BT_UART_MSG_FRAME1UPDATEREQ_MSK		\
		(0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS)
#define BT_UART_MSG_FRAME1RESERVED_POS		(6)
#define BT_UART_MSG_FRAME1RESERVED_MSK		\
		(0x3 << BT_UART_MSG_FRAME1RESERVED_POS)

#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS	(0)
#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK	\
		(0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS)
#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS	(2)
#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK	\
		(0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS)
#define BT_UART_MSG_FRAME2CHLSEQN_POS		(4)
#define BT_UART_MSG_FRAME2CHLSEQN_MSK		\
		(0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS)
#define BT_UART_MSG_FRAME2INBAND_POS		(5)
#define BT_UART_MSG_FRAME2INBAND_MSK		\
		(0x1 << BT_UART_MSG_FRAME2INBAND_POS)
#define BT_UART_MSG_FRAME2RESERVED_POS		(6)
#define BT_UART_MSG_FRAME2RESERVED_MSK		\
		(0x3 << BT_UART_MSG_FRAME2RESERVED_POS)

/* frame3: one flag bit per active BT profile/link type */
#define BT_UART_MSG_FRAME3SCOESCO_POS		(0)
#define BT_UART_MSG_FRAME3SCOESCO_MSK		\
		(0x1 << BT_UART_MSG_FRAME3SCOESCO_POS)
#define BT_UART_MSG_FRAME3SNIFF_POS		(1)
#define BT_UART_MSG_FRAME3SNIFF_MSK		\
		(0x1 << BT_UART_MSG_FRAME3SNIFF_POS)
#define BT_UART_MSG_FRAME3A2DP_POS		(2)
#define BT_UART_MSG_FRAME3A2DP_MSK		\
		(0x1 << BT_UART_MSG_FRAME3A2DP_POS)
#define BT_UART_MSG_FRAME3ACL_POS		(3)
#define BT_UART_MSG_FRAME3ACL_MSK		\
		(0x1 << BT_UART_MSG_FRAME3ACL_POS)
#define BT_UART_MSG_FRAME3MASTER_POS		(4)
#define BT_UART_MSG_FRAME3MASTER_MSK		\
		(0x1 << BT_UART_MSG_FRAME3MASTER_POS)
#define BT_UART_MSG_FRAME3OBEX_POS		(5)
#define BT_UART_MSG_FRAME3OBEX_MSK		\
		(0x1 << BT_UART_MSG_FRAME3OBEX_POS)
#define BT_UART_MSG_FRAME3RESERVED_POS		(6)
#define BT_UART_MSG_FRAME3RESERVED_MSK		\
		(0x3 << BT_UART_MSG_FRAME3RESERVED_POS)

#define BT_UART_MSG_FRAME4IDLEDURATION_POS	(0)
#define BT_UART_MSG_FRAME4IDLEDURATION_MSK	\
		(0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS)
#define BT_UART_MSG_FRAME4RESERVED_POS		(6)
#define BT_UART_MSG_FRAME4RESERVED_MSK		\
		(0x3 << BT_UART_MSG_FRAME4RESERVED_POS)

#define BT_UART_MSG_FRAME5TXACTIVITY_POS	(0)
#define BT_UART_MSG_FRAME5TXACTIVITY_MSK	\
		(0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS)
#define BT_UART_MSG_FRAME5RXACTIVITY_POS	(2)
#define BT_UART_MSG_FRAME5RXACTIVITY_MSK	\
		(0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS)
#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS	(4)
#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK	\
		(0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS)
#define BT_UART_MSG_FRAME5RESERVED_POS		(6)
#define BT_UART_MSG_FRAME5RESERVED_MSK		\
		(0x3 << BT_UART_MSG_FRAME5RESERVED_POS)

#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS	(0)
#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK	\
		(0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS)
#define BT_UART_MSG_FRAME6DISCOVERABLE_POS	(5)
#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK	\
		(0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS)
#define BT_UART_MSG_FRAME6RESERVED_POS		(6)
#define BT_UART_MSG_FRAME6RESERVED_MSK		\
		(0x3 << BT_UART_MSG_FRAME6RESERVED_POS)

#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS	(0)
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK	\
		(0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
#define BT_UART_MSG_FRAME7PAGE_POS		(3)
#define BT_UART_MSG_FRAME7PAGE_MSK		\
		(0x1 << BT_UART_MSG_FRAME7PAGE_POS)
#define BT_UART_MSG_FRAME7INQUIRY_POS		(4)
#define BT_UART_MSG_FRAME7INQUIRY_MSK		\
		(0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
#define BT_UART_MSG_FRAME7CONNECTABLE_POS	(5)
#define BT_UART_MSG_FRAME7CONNECTABLE_MSK	\
		(0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
#define BT_UART_MSG_FRAME7RESERVED_POS		(6)
#define BT_UART_MSG_FRAME7RESERVED_MSK		\
		(0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
3602
/* BT Session Activity 2 UART message (BT -> WiFi)
 *
 * Same frame1..frame7 byte layout as above, but interpreted per the
 * "session activity 2" message type (BT_SESSION_ACTIVITY_2_UART_MSG).
 */
#define BT_UART_MSG_2_FRAME1RESERVED1_POS	(5)
#define BT_UART_MSG_2_FRAME1RESERVED1_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
#define BT_UART_MSG_2_FRAME1RESERVED2_POS	(6)
#define BT_UART_MSG_2_FRAME1RESERVED2_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)

#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS	(0)
#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK	\
		(0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
#define BT_UART_MSG_2_FRAME2RESERVED_POS	(6)
#define BT_UART_MSG_2_FRAME2RESERVED_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)

#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS	(0)
#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK	\
		(0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS	(4)
#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
#define BT_UART_MSG_2_FRAME3LEMASTER_POS	(5)
#define BT_UART_MSG_2_FRAME3LEMASTER_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
#define BT_UART_MSG_2_FRAME3RESERVED_POS	(6)
#define BT_UART_MSG_2_FRAME3RESERVED_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)

#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS	(0)
#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK	\
		(0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
#define BT_UART_MSG_2_FRAME4NUMLECONN_POS	(4)
#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
#define BT_UART_MSG_2_FRAME4RESERVED_POS	(6)
#define BT_UART_MSG_2_FRAME4RESERVED_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)

#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS	(0)
#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK	\
		(0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS	(4)
#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS	(5)
#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
#define BT_UART_MSG_2_FRAME5RESERVED_POS	(6)
#define BT_UART_MSG_2_FRAME5RESERVED_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)

#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS	(0)
#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK	\
		(0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
#define BT_UART_MSG_2_FRAME6RFU_POS		(5)
#define BT_UART_MSG_2_FRAME6RFU_MSK		\
		(0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
#define BT_UART_MSG_2_FRAME6RESERVED_POS	(6)
#define BT_UART_MSG_2_FRAME6RESERVED_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)

#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS	(0)
#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK	\
		(0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS	(3)
#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS	(4)
#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS	(5)
#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK	\
		(0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
#define BT_UART_MSG_2_FRAME7RESERVED_POS	(6)
#define BT_UART_MSG_2_FRAME7RESERVED_MSK	\
		(0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)


/* RSSI thresholds for switching reduced BT tx-power on/off
 * (presumably in dBm -- TODO confirm against the code using them) */
#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD	(-62)
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD	(-65)

/* Raw 8-byte BT -> WiFi UART message; per-byte bit-fields are decoded
 * with the BT_UART_MSG_* masks above. */
struct iwl_bt_uart_msg {
	u8 header;
	u8 frame1;
	u8 frame2;
	u8 frame3;
	u8 frame4;
	u8 frame5;
	u8 frame6;
	u8 frame7;
} __packed;
3694
/* REPLY_BT_COEX_PROFILE_NOTIF (0xce) payload: latest BT activity report */
struct iwl_bt_coex_profile_notif {
	struct iwl_bt_uart_msg last_bt_uart_msg;
	u8 bt_status; /* 0 - off, 1 - on */
	u8 bt_traffic_load; /* 0 .. 3? */
	u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
	u8 reserved;
} __packed;

/* Bit layout of each iwl_bt_coex_prio_table_cmd.prio_tbl[] entry */
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS	0
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK	0x1
#define IWL_BT_COEX_PRIO_TBL_PRIO_POS		1
#define IWL_BT_COEX_PRIO_TBL_PRIO_MASK		0x0e
#define IWL_BT_COEX_PRIO_TBL_RESERVED_POS	4
#define IWL_BT_COEX_PRIO_TBL_RESERVED_MASK	0xf0
#define IWL_BT_COEX_PRIO_TBL_PRIO_SHIFT		1
3710
/*
 * BT Coexistence Priority table
 * REPLY_BT_COEX_PRIO_TABLE = 0xcc
 */
/* Events (indices into iwl_bt_coex_prio_table_cmd.prio_tbl[]) */
enum bt_coex_prio_table_events {
	BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
	BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, /* DC calib */
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
	BT_COEX_PRIO_TBL_EVT_DTIM = 6,
	BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,	/* 5.2 GHz scan */
	BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,	/* 2.4 GHz scan */
	BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9,
	BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10,
	BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11,
	BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12,
	BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13,
	BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14,
	BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15,
	/* BT_COEX_PRIO_TBL_EVT_MAX should always be last */
	BT_COEX_PRIO_TBL_EVT_MAX,
};

/* Priority values assignable to each event above */
enum bt_coex_prio_table_priorities {
	BT_COEX_PRIO_TBL_DISABLED = 0,
	BT_COEX_PRIO_TBL_PRIO_LOW = 1,
	BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
	BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
	BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
	BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
	BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6,
	BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7,
	BT_COEX_PRIO_TBL_MAX,
};

/* REPLY_BT_COEX_PRIO_TABLE payload: one priority byte per event,
 * bit layout per the IWL_BT_COEX_PRIO_TBL_* masks above. */
struct iwl_bt_coex_prio_table_cmd {
	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
} __packed;
3751
/* Values for iwl_bt_coex_prot_env_cmd.action */
#define IWL_BT_COEX_ENV_CLOSE	0
#define IWL_BT_COEX_ENV_OPEN	1
/*
 * BT Protection Envelope
 * REPLY_BT_COEX_PROT_ENV = 0xcd
 */
struct iwl_bt_coex_prot_env_cmd {
	u8 action; /* 0 = closed, 1 = open */
	u8 type; /* 0 .. 15 */
	u8 reserved[2];
} __packed;

/*
 * REPLY_D3_CONFIG
 */
/* Wakeup-cause bitmap for iwlagn_d3_config_cmd.wakeup_flags */
enum iwlagn_d3_wakeup_filters {
	IWLAGN_D3_WAKEUP_RFKILL = BIT(0),
	IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1),
};

struct iwlagn_d3_config_cmd {
	__le32 min_sleep_time;	/* presumably in usec -- TODO confirm against caller */
	__le32 wakeup_flags;	/* enum iwlagn_d3_wakeup_filters bitmap */
} __packed;
3776
/*
 * REPLY_WOWLAN_PATTERNS
 */
#define IWLAGN_WOWLAN_MIN_PATTERN_LEN	16
#define IWLAGN_WOWLAN_MAX_PATTERN_LEN	128

/* One wake-on-wireless packet pattern: a per-byte match bitmask plus the
 * pattern bytes themselves. mask has one bit per pattern byte. */
struct iwlagn_wowlan_pattern {
	u8 mask[IWLAGN_WOWLAN_MAX_PATTERN_LEN / 8];
	u8 pattern[IWLAGN_WOWLAN_MAX_PATTERN_LEN];
	u8 mask_size;		/* valid bytes in mask[] */
	u8 pattern_size;	/* valid bytes in pattern[] */
	__le16 reserved;
} __packed;

#define IWLAGN_WOWLAN_MAX_PATTERNS	20

/* REPLY_WOWLAN_PATTERNS payload: n_patterns entries follow */
struct iwlagn_wowlan_patterns_cmd {
	__le32 n_patterns;
	struct iwlagn_wowlan_pattern patterns[];
} __packed;
3797
/*
 * REPLY_WOWLAN_WAKEUP_FILTER
 */
/* Wakeup-trigger bitmap for iwlagn_wowlan_wakeup_filter_cmd.enabled */
enum iwlagn_wowlan_wakeup_filters {
	IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET	= BIT(0),
	IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH	= BIT(1),
	IWLAGN_WOWLAN_WAKEUP_BEACON_MISS	= BIT(2),
	IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE	= BIT(3),
	IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL	= BIT(4),
	IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ	= BIT(5),
	IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE	= BIT(6),
	IWLAGN_WOWLAN_WAKEUP_ALWAYS		= BIT(7),
	IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT	= BIT(8),
};
3812
3813struct iwlagn_wowlan_wakeup_filter_cmd {
3814 __le32 enabled;
3815 __le16 non_qos_seq;
3816 __le16 reserved;
3817 __le16 qos_seq[8];
3818};
3819
/*
 * REPLY_WOWLAN_TSC_RSC_PARAMS
 */
#define IWLAGN_NUM_RSC	16

/* TKIP sequence counter (TSC for tx, RSC for rx) */
struct tkip_sc {
	__le16 iv16;
	__le16 pad;
	__le32 iv32;
} __packed;

/* TKIP replay counters: per-key-index rx counters plus the tx counter */
struct iwlagn_tkip_rsc_tsc {
	struct tkip_sc unicast_rsc[IWLAGN_NUM_RSC];
	struct tkip_sc multicast_rsc[IWLAGN_NUM_RSC];
	struct tkip_sc tsc;
} __packed;

/* CCMP (AES) packet number */
struct aes_sc {
	__le64 pn;
} __packed;

/* CCMP replay counters, same structure as the TKIP variant */
struct iwlagn_aes_rsc_tsc {
	struct aes_sc unicast_rsc[IWLAGN_NUM_RSC];
	struct aes_sc multicast_rsc[IWLAGN_NUM_RSC];
	struct aes_sc tsc;
} __packed;

/* Either TKIP or AES counters, depending on the pairwise cipher in use */
union iwlagn_all_tsc_rsc {
	struct iwlagn_tkip_rsc_tsc tkip;
	struct iwlagn_aes_rsc_tsc aes;
};

struct iwlagn_wowlan_rsc_tsc_params_cmd {
	union iwlagn_all_tsc_rsc all_tsc_rsc;
} __packed;
3855
/*
 * REPLY_WOWLAN_TKIP_PARAMS
 */
#define IWLAGN_MIC_KEY_SIZE	8
#define IWLAGN_P1K_SIZE		5
/* TKIP MIC (Michael) keys for tx and for unicast/multicast rx */
struct iwlagn_mic_keys {
	u8 tx[IWLAGN_MIC_KEY_SIZE];
	u8 rx_unicast[IWLAGN_MIC_KEY_SIZE];
	u8 rx_mcast[IWLAGN_MIC_KEY_SIZE];
} __packed;

/* Precomputed TKIP phase-1 key (5 x 16-bit words) */
struct iwlagn_p1k_cache {
	__le16 p1k[IWLAGN_P1K_SIZE];
} __packed;

#define IWLAGN_NUM_RX_P1K_CACHE	2

/* REPLY_WOWLAN_TKIP_PARAMS payload: MIC keys plus cached phase-1 keys
 * so the ucode can en/decrypt while the host sleeps */
struct iwlagn_wowlan_tkip_params_cmd {
	struct iwlagn_mic_keys mic_keys;
	struct iwlagn_p1k_cache tx;
	struct iwlagn_p1k_cache rx_uni[IWLAGN_NUM_RX_P1K_CACHE];
	struct iwlagn_p1k_cache rx_multi[IWLAGN_NUM_RX_P1K_CACHE];
} __packed;
3879
/*
 * REPLY_WOWLAN_KEK_KCK_MATERIAL
 *
 * GTK rekeying material: KCK/KEK from the EAPOL handshake plus the
 * current replay counter, handed to the ucode for offloaded rekeys.
 */

#define IWLAGN_KCK_MAX_SIZE	32
#define IWLAGN_KEK_MAX_SIZE	32

struct iwlagn_wowlan_kek_kck_material_cmd {
	u8	kck[IWLAGN_KCK_MAX_SIZE];
	u8	kek[IWLAGN_KEK_MAX_SIZE];
	__le16	kck_len;	/* valid bytes in kck[] */
	__le16	kek_len;	/* valid bytes in kek[] */
	__le64	replay_ctr;
} __packed;

#define RF_KILL_INDICATOR_FOR_WOWLAN	0x87

/*
 * REPLY_WOWLAN_GET_STATUS = 0xe5
 *
 * Status read back after resume: why we woke, which pattern matched,
 * and the counters needed to resync crypto state.
 */
struct iwlagn_wowlan_status {
	__le64 replay_ctr;
	__le32 rekey_status;
	__le32 wakeup_reason;	/* presumably enum iwlagn_wowlan_wakeup_filters bits -- TODO confirm */
	u8 pattern_number;	/* which WoWLAN pattern matched */
	u8 reserved1;
	__le16 qos_seq_ctr[8];
	__le16 non_qos_seq_ctr;
	__le16 reserved2;
	union iwlagn_all_tsc_rsc tsc_rsc;
	__le16 reserved3;
} __packed;
3912
/*
 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
 */

/*
 * Minimum slot time in TU
 */
#define IWL_MIN_SLOT_TIME	20

/**
 * struct iwl_wipan_slot
 * @width: Time in TU
 * @type:
 *		0 - BSS
 *		1 - PAN
 */
struct iwl_wipan_slot {
	__le16 width;
	u8 type;
	u8 reserved;
} __packed;

#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_CTS		BIT(1)	/* reserved */
#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_QUIET	BIT(2)	/* reserved */
#define IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE		BIT(3)	/* reserved */
#define IWL_WIPAN_PARAMS_FLG_FILTER_BEACON_NOTIF	BIT(4)
#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE		BIT(5)

/**
 * struct iwl_wipan_params_cmd
 * @flags:
 *		bit0: reserved
 *		bit1: CP leave channel with CTS
 *		bit2: CP leave channel with Quiet
 *		bit3: slotted mode
 *			1 - work in slotted mode
 *			0 - work in non slotted mode
 *		bit4: filter beacon notification
 *		bit5: full tx slotted mode. if this flag is set,
 *			uCode will perform leaving channel methods in context switch
 *			also when working in same channel mode
 * @num_slots: 1 - 10
 */
struct iwl_wipan_params_cmd {
	__le16 flags;
	u8 reserved;
	u8 num_slots;
	struct iwl_wipan_slot slots[10];
} __packed;
3962
3963/*
3964 * REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9
3965 *
3966 * TODO: Figure out what this is used for,
3967 * it can only switch between 2.4 GHz
3968 * channels!!
3969 */
3970
3971struct iwl_wipan_p2p_channel_switch_cmd {
3972 __le16 channel;
3973 __le16 reserved;
3974};
3975
/*
 * REPLY_WIPAN_NOA_NOTIFICATION = 0xbc
 *
 * This is used by the device to notify us of the
 * NoA schedule it determined so we can forward it
 * to userspace for inclusion in probe responses.
 *
 * In beacons, the NoA schedule is simply appended
 * to the frame we give the device.
 */

/* One P2P Notice-of-Absence descriptor (per Wi-Fi P2P NoA attribute) */
struct iwl_wipan_noa_descriptor {
	u8 count;
	__le32 duration;
	__le32 interval;
	__le32 starttime;
} __packed;

/* P2P NoA attribute as placed into probe responses */
struct iwl_wipan_noa_attribute {
	u8 id;
	__le16 length;
	u8 index;
	u8 ct_window;
	struct iwl_wipan_noa_descriptor descr0, descr1;
	u8 reserved;
} __packed;

struct iwl_wipan_noa_notification {
	/* NOTE(review): declared u32, not __le32, unlike the rest of the
	 * firmware API in this file -- looks like it is used as a simple
	 * boolean flag; confirm endianness handling at the consumer. */
	u32 noa_active;
	struct iwl_wipan_noa_attribute noa_attribute;
} __packed;
4007
4008#endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
new file mode 100644
index 000000000000..b15e44f8d1bd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -0,0 +1,2441 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/slab.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/debugfs.h>
33#include <linux/ieee80211.h>
34#include <net/mac80211.h>
35#include "iwl-debug.h"
36#include "iwl-io.h"
37#include "dev.h"
38#include "agn.h"
39
/*
 * Helpers to create debugfs entries; each bails out to the caller's
 * "err" label on failure, so callers must provide one.  All are
 * do { } while (0) wrapped so they behave as single statements.
 */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, priv,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
				    parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
				   parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do {			\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_u32(#name, mode,				\
				   parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

/*
 * Generators for the file_operations of a debugfs file; each expands
 * to a static const struct wired to the iwl_dbgfs_<name>_read/_write
 * handlers defined below.
 */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};


#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
94
/*
 * Hex-dump a window of device SRAM, as configured by a prior write to
 * this file (offset/len stored in priv->dbgfs_sram_offset/_len).
 * A stored length of -4 is a sentinel meaning "one word, device byte
 * order" (set by iwl_dbgfs_sram_write when only an offset was given).
 */
static ssize_t iwl_dbgfs_sram_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	u32 val = 0;
	char *buf;
	ssize_t ret;
	int i = 0;
	bool device_format = false;
	int offset = 0;
	int len = 0;
	int pos = 0;
	int sram;
	struct iwl_priv *priv = file->private_data;
	const struct fw_img *img;
	size_t bufsz;

	if (!iwl_is_ready_rf(priv))
		return -EAGAIN;

	/* default is to dump the entire data segment */
	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
		priv->dbgfs_sram_offset = 0x800000;
		if (!priv->ucode_loaded)
			return -EINVAL;
		img = &priv->fw->img[priv->cur_ucode];
		priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
	}
	len = priv->dbgfs_sram_len;

	/* -4 sentinel: dump a single word in device byte order */
	if (len == -4) {
		device_format = true;
		len = 4;
	}

	/* 50 bytes headroom for the two header lines, ~4 chars per byte */
	bufsz =  50 + len * 4;
	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
			 len);
	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
			priv->dbgfs_sram_offset);

	/* adjust sram address since reads are only on even u32 boundaries */
	offset = priv->dbgfs_sram_offset & 0x3;
	sram = priv->dbgfs_sram_offset & ~0x3;

	/* read the first u32 from sram */
	val = iwl_trans_read_mem32(priv->trans, sram);

	for (; len; len--) {
		/* put the address at the start of every line */
		if (i == 0)
			pos += scnprintf(buf + pos, bufsz - pos,
				"%08X: ", sram + offset);

		/* device format prints big-endian, host format little-endian */
		if (device_format)
			pos += scnprintf(buf + pos, bufsz - pos,
				"%02x", (val >> (8 * (3 - offset))) & 0xff);
		else
			pos += scnprintf(buf + pos, bufsz - pos,
				"%02x ", (val >> (8 * offset)) & 0xff);

		/* if all bytes processed, read the next u32 from sram */
		if (++offset == 4) {
			sram += 4;
			offset = 0;
			val = iwl_trans_read_mem32(priv->trans, sram);
		}

		/* put in extra spaces and split lines for human readability */
		if (++i == 16) {
			i = 0;
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		} else if (!(i & 7)) {
			pos += scnprintf(buf + pos, bufsz - pos, "   ");
		} else if (!(i & 3)) {
			pos += scnprintf(buf + pos, bufsz - pos, " ");
		}
	}
	/* terminate a partial final line */
	if (i)
		pos += scnprintf(buf + pos, bufsz - pos, "\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
184
185static ssize_t iwl_dbgfs_sram_write(struct file *file,
186 const char __user *user_buf,
187 size_t count, loff_t *ppos)
188{
189 struct iwl_priv *priv = file->private_data;
190 char buf[64];
191 int buf_size;
192 u32 offset, len;
193
194 memset(buf, 0, sizeof(buf));
195 buf_size = min(count, sizeof(buf) - 1);
196 if (copy_from_user(buf, user_buf, buf_size))
197 return -EFAULT;
198
199 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
200 priv->dbgfs_sram_offset = offset;
201 priv->dbgfs_sram_len = len;
202 } else if (sscanf(buf, "%x", &offset) == 1) {
203 priv->dbgfs_sram_offset = offset;
204 priv->dbgfs_sram_len = -4;
205 } else {
206 priv->dbgfs_sram_offset = 0;
207 priv->dbgfs_sram_len = 0;
208 }
209
210 return count;
211}
212
213static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
214 char __user *user_buf,
215 size_t count, loff_t *ppos)
216{
217 struct iwl_priv *priv = file->private_data;
218 const struct fw_img *img = &priv->fw->img[IWL_UCODE_WOWLAN];
219
220 if (!priv->wowlan_sram)
221 return -ENODATA;
222
223 return simple_read_from_buffer(user_buf, count, ppos,
224 priv->wowlan_sram,
225 img->sec[IWL_UCODE_SECTION_DATA].len);
226}
/*
 * Dump the station table: for every station in use, its address and
 * flags, followed by one line of aggregation state per TID.
 */
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_station_entry *station;
	struct iwl_tid_data *tid_data;
	char *buf;
	int i, j, pos = 0;
	ssize_t ret;
	/* Add 30 for initial string */
	const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
			priv->num_stations);

	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
		station = &priv->stations[i];
		if (!station->used)
			continue;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "station %d - addr: %pM, flags: %#x\n",
				 i, station->sta.sta.addr,
				 station->sta.station_flags_msk);
		pos += scnprintf(buf + pos, bufsz - pos,
				"TID seqno  next_rclmd "
				"rate_n_flags state txq\n");

		for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
			tid_data = &priv->tid_data[i][j];
			pos += scnprintf(buf + pos, bufsz - pos,
				"%d:  0x%.4x 0x%.4x     0x%.8x   "
				"%d     %.2d",
				j, tid_data->seq_number,
				tid_data->next_reclaimed,
				tid_data->agg.rate_n_flags,
				tid_data->agg.state,
				tid_data->agg.txq_id);

			if (tid_data->agg.wait_for_ba)
				pos += scnprintf(buf + pos, bufsz - pos,
						 " - waitforba");
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		}

		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
282
/*
 * Hex-dump the cached EEPROM/NVM blob, 16 bytes per line, preceded
 * by the NVM version.
 */
static ssize_t iwl_dbgfs_nvm_read(struct file *file,
				       char __user *user_buf,
				       size_t count,
				       loff_t *ppos)
{
	ssize_t ret;
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0, buf_size = 0;
	const u8 *ptr;
	char *buf;
	u16 nvm_ver;
	size_t eeprom_len = priv->eeprom_blob_size;
	buf_size = 4 * eeprom_len + 256;

	/* the dump loop below assumes whole 16-byte lines */
	if (eeprom_len % 16)
		return -ENODATA;

	/* NOTE(review): -ENOMEM for a missing blob looks odd (-ENODATA
	 * would fit better) but is kept for compatibility */
	ptr = priv->eeprom_blob;
	if (!ptr)
		return -ENOMEM;

	/* 4 characters for byte 0xYY */
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	nvm_ver = priv->nvm_data->nvm_version;
	pos += scnprintf(buf + pos, buf_size - pos,
			 "NVM version: 0x%x\n", nvm_ver);
	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
				 ofs, ptr + ofs);
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
321
322static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
323 size_t count, loff_t *ppos)
324{
325 struct iwl_priv *priv = file->private_data;
326 struct ieee80211_channel *channels = NULL;
327 const struct ieee80211_supported_band *supp_band = NULL;
328 int pos = 0, i, bufsz = PAGE_SIZE;
329 char *buf;
330 ssize_t ret;
331
332 buf = kzalloc(bufsz, GFP_KERNEL);
333 if (!buf)
334 return -ENOMEM;
335
336 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
337 if (supp_band) {
338 channels = supp_band->channels;
339
340 pos += scnprintf(buf + pos, bufsz - pos,
341 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
342 supp_band->n_channels);
343
344 for (i = 0; i < supp_band->n_channels; i++)
345 pos += scnprintf(buf + pos, bufsz - pos,
346 "%d: %ddBm: BSS%s%s, %s.\n",
347 channels[i].hw_value,
348 channels[i].max_power,
349 channels[i].flags & IEEE80211_CHAN_RADAR ?
350 " (IEEE 802.11h required)" : "",
351 ((channels[i].flags & IEEE80211_CHAN_NO_IR)
352 || (channels[i].flags &
353 IEEE80211_CHAN_RADAR)) ? "" :
354 ", IBSS",
355 channels[i].flags &
356 IEEE80211_CHAN_NO_IR ?
357 "passive only" : "active/passive");
358 }
359 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
360 if (supp_band) {
361 channels = supp_band->channels;
362
363 pos += scnprintf(buf + pos, bufsz - pos,
364 "Displaying %d channels in 5.2GHz band (802.11a)\n",
365 supp_band->n_channels);
366
367 for (i = 0; i < supp_band->n_channels; i++)
368 pos += scnprintf(buf + pos, bufsz - pos,
369 "%d: %ddBm: BSS%s%s, %s.\n",
370 channels[i].hw_value,
371 channels[i].max_power,
372 channels[i].flags & IEEE80211_CHAN_RADAR ?
373 " (IEEE 802.11h required)" : "",
374 ((channels[i].flags & IEEE80211_CHAN_NO_IR)
375 || (channels[i].flags &
376 IEEE80211_CHAN_RADAR)) ? "" :
377 ", IBSS",
378 channels[i].flags &
379 IEEE80211_CHAN_NO_IR ?
380 "passive only" : "active/passive");
381 }
382 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
383 kfree(buf);
384 return ret;
385}
386
/* Print one line per driver STATUS_* bit with its current value. */
static ssize_t iwl_dbgfs_status_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[512];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
		test_bit(STATUS_RF_KILL_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
		test_bit(STATUS_CT_KILL, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
		test_bit(STATUS_ALIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
		test_bit(STATUS_READY, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
		test_bit(STATUS_EXIT_PENDING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
		test_bit(STATUS_STATISTICS, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
		test_bit(STATUS_SCANNING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
		test_bit(STATUS_SCAN_ABORTING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
		test_bit(STATUS_SCAN_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
		test_bit(STATUS_POWER_PMI, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
		test_bit(STATUS_FW_ERROR, &priv->status));
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
420
/*
 * Dump the per-command rx handler hit counters, skipping commands
 * never seen.  Counters are reset via the companion write handler.
 */
static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;

	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
		if (priv->rx_handlers_stats[cnt] > 0)
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tRx handler[%36s]:\t\t %u\n",
				iwl_dvm_get_cmd_string(cnt),
				priv->rx_handlers_stats[cnt]);
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
449
450static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
451 const char __user *user_buf,
452 size_t count, loff_t *ppos)
453{
454 struct iwl_priv *priv = file->private_data;
455
456 char buf[8];
457 int buf_size;
458 u32 reset_flag;
459
460 memset(buf, 0, sizeof(buf));
461 buf_size = min(count, sizeof(buf) - 1);
462 if (copy_from_user(buf, user_buf, buf_size))
463 return -EFAULT;
464 if (sscanf(buf, "%x", &reset_flag) != 1)
465 return -EFAULT;
466 if (reset_flag == 0)
467 memset(&priv->rx_handlers_stats[0], 0,
468 sizeof(priv->rx_handlers_stats));
469
470 return count;
471}
472
/* Dump the EDCA (QoS) parameters of every AC for each RXON context. */
static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_rxon_context *ctx;
	int pos = 0, i;
	char buf[256 * NUM_IWL_RXON_CTX];
	const size_t bufsz = sizeof(buf);

	for_each_context(priv, ctx) {
		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
				 ctx->ctxid);
		for (i = 0; i < AC_NUM; i++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tcw_min\tcw_max\taifsn\ttxop\n");
			pos += scnprintf(buf + pos, bufsz - pos,
				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
				ctx->qos_data.def_qos_parm.ac[i].cw_min,
				ctx->qos_data.def_qos_parm.ac[i].cw_max,
				ctx->qos_data.def_qos_parm.ac[i].aifsn,
				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
		}
		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
499
/*
 * Report thermal throttling mode and state; in advanced mode also
 * the tx/rx/HT restrictions of the current state.
 */
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	struct iwl_tt_restriction *restriction;
	char buf[100];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos,
			"Thermal Throttling Mode: %s\n",
			tt->advanced_tt ? "Advance" : "Legacy");
	pos += scnprintf(buf + pos, bufsz - pos,
			"Thermal Throttling State: %d\n",
			tt->state);
	if (tt->advanced_tt) {
		/* per-state restriction table only exists in advanced mode */
		restriction = tt->restriction + tt->state;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx mode: %d\n",
				restriction->tx_stream);
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx mode: %d\n",
				restriction->rx_stream);
		pos += scnprintf(buf + pos, bufsz - pos,
				"HT mode: %d\n",
				restriction->is_ht);
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
531
532static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
533 const char __user *user_buf,
534 size_t count, loff_t *ppos)
535{
536 struct iwl_priv *priv = file->private_data;
537 char buf[8];
538 int buf_size;
539 int ht40;
540
541 memset(buf, 0, sizeof(buf));
542 buf_size = min(count, sizeof(buf) - 1);
543 if (copy_from_user(buf, user_buf, buf_size))
544 return -EFAULT;
545 if (sscanf(buf, "%d", &ht40) != 1)
546 return -EFAULT;
547 if (!iwl_is_any_associated(priv))
548 priv->disable_ht40 = ht40 ? true : false;
549 else
550 return -EINVAL;
551
552 return count;
553}
554
555static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
556 char __user *user_buf,
557 size_t count, loff_t *ppos)
558{
559 struct iwl_priv *priv = file->private_data;
560 char buf[100];
561 int pos = 0;
562 const size_t bufsz = sizeof(buf);
563
564 pos += scnprintf(buf + pos, bufsz - pos,
565 "11n 40MHz Mode: %s\n",
566 priv->disable_ht40 ? "Disabled" : "Enabled");
567 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
568}
569
570static ssize_t iwl_dbgfs_temperature_read(struct file *file,
571 char __user *user_buf,
572 size_t count, loff_t *ppos)
573{
574 struct iwl_priv *priv = file->private_data;
575 char buf[8];
576 int pos = 0;
577 const size_t bufsz = sizeof(buf);
578
579 pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature);
580 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
581}
582
583
/*
 * Override the power-save sleep level.  User-visible numbering starts
 * at 1 (IWL_POWER_INDEX_1); -1 removes the override.  The value is
 * shifted down by one before being stored, and the power mode is
 * re-applied immediately.
 */
static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
						    const char __user *user_buf,
						    size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int value;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;

	/*
	 * Our users expect 0 to be "CAM", but 0 isn't actually
	 * valid here. However, let's not confuse them and present
	 * IWL_POWER_INDEX_1 as "1", not "0".
	 */
	if (value == 0)
		return -EINVAL;
	else if (value > 0)
		value -= 1;

	/* after the shift: -1 = no override, 0..IWL_POWER_NUM-1 valid */
	if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
		return -EINVAL;

	if (!iwl_is_ready_rf(priv))
		return -EAGAIN;

	priv->power_data.debug_sleep_level_override = value;

	/* apply the new level right away */
	mutex_lock(&priv->mutex);
	iwl_power_update_mode(priv, true);
	mutex_unlock(&priv->mutex);

	return count;
}
625
626static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
627 char __user *user_buf,
628 size_t count, loff_t *ppos)
629{
630 struct iwl_priv *priv = file->private_data;
631 char buf[10];
632 int pos, value;
633 const size_t bufsz = sizeof(buf);
634
635 /* see the write function */
636 value = priv->power_data.debug_sleep_level_override;
637 if (value >= 0)
638 value += 1;
639
640 pos = scnprintf(buf, bufsz, "%d\n", value);
641 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
642}
643
/* Dump the most recently built POWER_TABLE_CMD (sleep command). */
static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
						    char __user *user_buf,
						    size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[200];
	int pos = 0, i;
	const size_t bufsz = sizeof(buf);
	struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "flags: %#.2x\n", le16_to_cpu(cmd->flags));
	pos += scnprintf(buf + pos, bufsz - pos,
			 "RX/TX timeout: %d/%d usec\n",
			 le32_to_cpu(cmd->rx_data_timeout),
			 le32_to_cpu(cmd->tx_data_timeout));
	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		pos += scnprintf(buf + pos, bufsz - pos,
				 "sleep_interval[%d]: %d\n", i,
				 le32_to_cpu(cmd->sleep_interval[i]));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
667
/* Instantiate the file_operations structs for the handlers above. */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(wowlan_sram);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_FILE_OPS(thermal_throttling);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
DEBUGFS_READ_FILE_OPS(temperature);
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
DEBUGFS_READ_FILE_OPS(current_sleep_command);
681
/* shared printf formats for the statistics dump functions below */
static const char *fmt_value = "  %-30s %10u\n";
static const char *fmt_hex = "  %-30s       0x%02X\n";
static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
static const char *fmt_header =
	"%-32s    current  cumulative       delta         max\n";
687
/*
 * Decode the uCode statistics flag word into @buf; returns the number
 * of characters written.  Caller must hold priv->statistics.lock.
 */
static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
{
	int p = 0;
	u32 flag;

	lockdep_assert_held(&priv->statistics.lock);

	flag = le32_to_cpu(priv->statistics.flag);

	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
	if (flag & UCODE_STATISTICS_CLEAR_MSK)
		p += scnprintf(buf + p, bufsz - p,
			       "\tStatistics have been cleared\n");
	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
		       (flag & UCODE_STATISTICS_FREQUENCY_MSK)
		       ? "2.4 GHz" : "5.2 GHz");
	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
		       (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
		       ? "enabled" : "disabled");

	return p;
}
710
711static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
712 char __user *user_buf,
713 size_t count, loff_t *ppos)
714{
715 struct iwl_priv *priv = file->private_data;
716 int pos = 0;
717 char *buf;
718 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
719 sizeof(struct statistics_rx_non_phy) * 40 +
720 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
721 ssize_t ret;
722 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
723 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
724 struct statistics_rx_non_phy *general, *accum_general;
725 struct statistics_rx_non_phy *delta_general, *max_general;
726 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
727
728 if (!iwl_is_alive(priv))
729 return -EAGAIN;
730
731 buf = kzalloc(bufsz, GFP_KERNEL);
732 if (!buf)
733 return -ENOMEM;
734
735 /*
736 * the statistic information display here is based on
737 * the last statistics notification from uCode
738 * might not reflect the current uCode activity
739 */
740 spin_lock_bh(&priv->statistics.lock);
741 ofdm = &priv->statistics.rx_ofdm;
742 cck = &priv->statistics.rx_cck;
743 general = &priv->statistics.rx_non_phy;
744 ht = &priv->statistics.rx_ofdm_ht;
745 accum_ofdm = &priv->accum_stats.rx_ofdm;
746 accum_cck = &priv->accum_stats.rx_cck;
747 accum_general = &priv->accum_stats.rx_non_phy;
748 accum_ht = &priv->accum_stats.rx_ofdm_ht;
749 delta_ofdm = &priv->delta_stats.rx_ofdm;
750 delta_cck = &priv->delta_stats.rx_cck;
751 delta_general = &priv->delta_stats.rx_non_phy;
752 delta_ht = &priv->delta_stats.rx_ofdm_ht;
753 max_ofdm = &priv->max_delta_stats.rx_ofdm;
754 max_cck = &priv->max_delta_stats.rx_cck;
755 max_general = &priv->max_delta_stats.rx_non_phy;
756 max_ht = &priv->max_delta_stats.rx_ofdm_ht;
757
758 pos += iwl_statistics_flag(priv, buf, bufsz);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_header, "Statistics_Rx - OFDM:");
761 pos += scnprintf(buf + pos, bufsz - pos,
762 fmt_table, "ina_cnt:",
763 le32_to_cpu(ofdm->ina_cnt),
764 accum_ofdm->ina_cnt,
765 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
766 pos += scnprintf(buf + pos, bufsz - pos,
767 fmt_table, "fina_cnt:",
768 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
769 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
770 pos += scnprintf(buf + pos, bufsz - pos,
771 fmt_table, "plcp_err:",
772 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
773 delta_ofdm->plcp_err, max_ofdm->plcp_err);
774 pos += scnprintf(buf + pos, bufsz - pos,
775 fmt_table, "crc32_err:",
776 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
777 delta_ofdm->crc32_err, max_ofdm->crc32_err);
778 pos += scnprintf(buf + pos, bufsz - pos,
779 fmt_table, "overrun_err:",
780 le32_to_cpu(ofdm->overrun_err),
781 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
782 max_ofdm->overrun_err);
783 pos += scnprintf(buf + pos, bufsz - pos,
784 fmt_table, "early_overrun_err:",
785 le32_to_cpu(ofdm->early_overrun_err),
786 accum_ofdm->early_overrun_err,
787 delta_ofdm->early_overrun_err,
788 max_ofdm->early_overrun_err);
789 pos += scnprintf(buf + pos, bufsz - pos,
790 fmt_table, "crc32_good:",
791 le32_to_cpu(ofdm->crc32_good),
792 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
793 max_ofdm->crc32_good);
794 pos += scnprintf(buf + pos, bufsz - pos,
795 fmt_table, "false_alarm_cnt:",
796 le32_to_cpu(ofdm->false_alarm_cnt),
797 accum_ofdm->false_alarm_cnt,
798 delta_ofdm->false_alarm_cnt,
799 max_ofdm->false_alarm_cnt);
800 pos += scnprintf(buf + pos, bufsz - pos,
801 fmt_table, "fina_sync_err_cnt:",
802 le32_to_cpu(ofdm->fina_sync_err_cnt),
803 accum_ofdm->fina_sync_err_cnt,
804 delta_ofdm->fina_sync_err_cnt,
805 max_ofdm->fina_sync_err_cnt);
806 pos += scnprintf(buf + pos, bufsz - pos,
807 fmt_table, "sfd_timeout:",
808 le32_to_cpu(ofdm->sfd_timeout),
809 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
810 max_ofdm->sfd_timeout);
811 pos += scnprintf(buf + pos, bufsz - pos,
812 fmt_table, "fina_timeout:",
813 le32_to_cpu(ofdm->fina_timeout),
814 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
815 max_ofdm->fina_timeout);
816 pos += scnprintf(buf + pos, bufsz - pos,
817 fmt_table, "unresponded_rts:",
818 le32_to_cpu(ofdm->unresponded_rts),
819 accum_ofdm->unresponded_rts,
820 delta_ofdm->unresponded_rts,
821 max_ofdm->unresponded_rts);
822 pos += scnprintf(buf + pos, bufsz - pos,
823 fmt_table, "rxe_frame_lmt_ovrun:",
824 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
825 accum_ofdm->rxe_frame_limit_overrun,
826 delta_ofdm->rxe_frame_limit_overrun,
827 max_ofdm->rxe_frame_limit_overrun);
828 pos += scnprintf(buf + pos, bufsz - pos,
829 fmt_table, "sent_ack_cnt:",
830 le32_to_cpu(ofdm->sent_ack_cnt),
831 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
832 max_ofdm->sent_ack_cnt);
833 pos += scnprintf(buf + pos, bufsz - pos,
834 fmt_table, "sent_cts_cnt:",
835 le32_to_cpu(ofdm->sent_cts_cnt),
836 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
837 max_ofdm->sent_cts_cnt);
838 pos += scnprintf(buf + pos, bufsz - pos,
839 fmt_table, "sent_ba_rsp_cnt:",
840 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
841 accum_ofdm->sent_ba_rsp_cnt,
842 delta_ofdm->sent_ba_rsp_cnt,
843 max_ofdm->sent_ba_rsp_cnt);
844 pos += scnprintf(buf + pos, bufsz - pos,
845 fmt_table, "dsp_self_kill:",
846 le32_to_cpu(ofdm->dsp_self_kill),
847 accum_ofdm->dsp_self_kill,
848 delta_ofdm->dsp_self_kill,
849 max_ofdm->dsp_self_kill);
850 pos += scnprintf(buf + pos, bufsz - pos,
851 fmt_table, "mh_format_err:",
852 le32_to_cpu(ofdm->mh_format_err),
853 accum_ofdm->mh_format_err,
854 delta_ofdm->mh_format_err,
855 max_ofdm->mh_format_err);
856 pos += scnprintf(buf + pos, bufsz - pos,
857 fmt_table, "re_acq_main_rssi_sum:",
858 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
859 accum_ofdm->re_acq_main_rssi_sum,
860 delta_ofdm->re_acq_main_rssi_sum,
861 max_ofdm->re_acq_main_rssi_sum);
862
863 pos += scnprintf(buf + pos, bufsz - pos,
864 fmt_header, "Statistics_Rx - CCK:");
865 pos += scnprintf(buf + pos, bufsz - pos,
866 fmt_table, "ina_cnt:",
867 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
868 delta_cck->ina_cnt, max_cck->ina_cnt);
869 pos += scnprintf(buf + pos, bufsz - pos,
870 fmt_table, "fina_cnt:",
871 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
872 delta_cck->fina_cnt, max_cck->fina_cnt);
873 pos += scnprintf(buf + pos, bufsz - pos,
874 fmt_table, "plcp_err:",
875 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
876 delta_cck->plcp_err, max_cck->plcp_err);
877 pos += scnprintf(buf + pos, bufsz - pos,
878 fmt_table, "crc32_err:",
879 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
880 delta_cck->crc32_err, max_cck->crc32_err);
881 pos += scnprintf(buf + pos, bufsz - pos,
882 fmt_table, "overrun_err:",
883 le32_to_cpu(cck->overrun_err),
884 accum_cck->overrun_err, delta_cck->overrun_err,
885 max_cck->overrun_err);
886 pos += scnprintf(buf + pos, bufsz - pos,
887 fmt_table, "early_overrun_err:",
888 le32_to_cpu(cck->early_overrun_err),
889 accum_cck->early_overrun_err,
890 delta_cck->early_overrun_err,
891 max_cck->early_overrun_err);
892 pos += scnprintf(buf + pos, bufsz - pos,
893 fmt_table, "crc32_good:",
894 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
895 delta_cck->crc32_good, max_cck->crc32_good);
896 pos += scnprintf(buf + pos, bufsz - pos,
897 fmt_table, "false_alarm_cnt:",
898 le32_to_cpu(cck->false_alarm_cnt),
899 accum_cck->false_alarm_cnt,
900 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
901 pos += scnprintf(buf + pos, bufsz - pos,
902 fmt_table, "fina_sync_err_cnt:",
903 le32_to_cpu(cck->fina_sync_err_cnt),
904 accum_cck->fina_sync_err_cnt,
905 delta_cck->fina_sync_err_cnt,
906 max_cck->fina_sync_err_cnt);
907 pos += scnprintf(buf + pos, bufsz - pos,
908 fmt_table, "sfd_timeout:",
909 le32_to_cpu(cck->sfd_timeout),
910 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
911 max_cck->sfd_timeout);
912 pos += scnprintf(buf + pos, bufsz - pos,
913 fmt_table, "fina_timeout:",
914 le32_to_cpu(cck->fina_timeout),
915 accum_cck->fina_timeout, delta_cck->fina_timeout,
916 max_cck->fina_timeout);
917 pos += scnprintf(buf + pos, bufsz - pos,
918 fmt_table, "unresponded_rts:",
919 le32_to_cpu(cck->unresponded_rts),
920 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
921 max_cck->unresponded_rts);
922 pos += scnprintf(buf + pos, bufsz - pos,
923 fmt_table, "rxe_frame_lmt_ovrun:",
924 le32_to_cpu(cck->rxe_frame_limit_overrun),
925 accum_cck->rxe_frame_limit_overrun,
926 delta_cck->rxe_frame_limit_overrun,
927 max_cck->rxe_frame_limit_overrun);
928 pos += scnprintf(buf + pos, bufsz - pos,
929 fmt_table, "sent_ack_cnt:",
930 le32_to_cpu(cck->sent_ack_cnt),
931 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
932 max_cck->sent_ack_cnt);
933 pos += scnprintf(buf + pos, bufsz - pos,
934 fmt_table, "sent_cts_cnt:",
935 le32_to_cpu(cck->sent_cts_cnt),
936 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
937 max_cck->sent_cts_cnt);
938 pos += scnprintf(buf + pos, bufsz - pos,
939 fmt_table, "sent_ba_rsp_cnt:",
940 le32_to_cpu(cck->sent_ba_rsp_cnt),
941 accum_cck->sent_ba_rsp_cnt,
942 delta_cck->sent_ba_rsp_cnt,
943 max_cck->sent_ba_rsp_cnt);
944 pos += scnprintf(buf + pos, bufsz - pos,
945 fmt_table, "dsp_self_kill:",
946 le32_to_cpu(cck->dsp_self_kill),
947 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
948 max_cck->dsp_self_kill);
949 pos += scnprintf(buf + pos, bufsz - pos,
950 fmt_table, "mh_format_err:",
951 le32_to_cpu(cck->mh_format_err),
952 accum_cck->mh_format_err, delta_cck->mh_format_err,
953 max_cck->mh_format_err);
954 pos += scnprintf(buf + pos, bufsz - pos,
955 fmt_table, "re_acq_main_rssi_sum:",
956 le32_to_cpu(cck->re_acq_main_rssi_sum),
957 accum_cck->re_acq_main_rssi_sum,
958 delta_cck->re_acq_main_rssi_sum,
959 max_cck->re_acq_main_rssi_sum);
960
961 pos += scnprintf(buf + pos, bufsz - pos,
962 fmt_header, "Statistics_Rx - GENERAL:");
963 pos += scnprintf(buf + pos, bufsz - pos,
964 fmt_table, "bogus_cts:",
965 le32_to_cpu(general->bogus_cts),
966 accum_general->bogus_cts, delta_general->bogus_cts,
967 max_general->bogus_cts);
968 pos += scnprintf(buf + pos, bufsz - pos,
969 fmt_table, "bogus_ack:",
970 le32_to_cpu(general->bogus_ack),
971 accum_general->bogus_ack, delta_general->bogus_ack,
972 max_general->bogus_ack);
973 pos += scnprintf(buf + pos, bufsz - pos,
974 fmt_table, "non_bssid_frames:",
975 le32_to_cpu(general->non_bssid_frames),
976 accum_general->non_bssid_frames,
977 delta_general->non_bssid_frames,
978 max_general->non_bssid_frames);
979 pos += scnprintf(buf + pos, bufsz - pos,
980 fmt_table, "filtered_frames:",
981 le32_to_cpu(general->filtered_frames),
982 accum_general->filtered_frames,
983 delta_general->filtered_frames,
984 max_general->filtered_frames);
985 pos += scnprintf(buf + pos, bufsz - pos,
986 fmt_table, "non_channel_beacons:",
987 le32_to_cpu(general->non_channel_beacons),
988 accum_general->non_channel_beacons,
989 delta_general->non_channel_beacons,
990 max_general->non_channel_beacons);
991 pos += scnprintf(buf + pos, bufsz - pos,
992 fmt_table, "channel_beacons:",
993 le32_to_cpu(general->channel_beacons),
994 accum_general->channel_beacons,
995 delta_general->channel_beacons,
996 max_general->channel_beacons);
997 pos += scnprintf(buf + pos, bufsz - pos,
998 fmt_table, "num_missed_bcon:",
999 le32_to_cpu(general->num_missed_bcon),
1000 accum_general->num_missed_bcon,
1001 delta_general->num_missed_bcon,
1002 max_general->num_missed_bcon);
1003 pos += scnprintf(buf + pos, bufsz - pos,
1004 fmt_table, "adc_rx_saturation_time:",
1005 le32_to_cpu(general->adc_rx_saturation_time),
1006 accum_general->adc_rx_saturation_time,
1007 delta_general->adc_rx_saturation_time,
1008 max_general->adc_rx_saturation_time);
1009 pos += scnprintf(buf + pos, bufsz - pos,
1010 fmt_table, "ina_detect_search_tm:",
1011 le32_to_cpu(general->ina_detection_search_time),
1012 accum_general->ina_detection_search_time,
1013 delta_general->ina_detection_search_time,
1014 max_general->ina_detection_search_time);
1015 pos += scnprintf(buf + pos, bufsz - pos,
1016 fmt_table, "beacon_silence_rssi_a:",
1017 le32_to_cpu(general->beacon_silence_rssi_a),
1018 accum_general->beacon_silence_rssi_a,
1019 delta_general->beacon_silence_rssi_a,
1020 max_general->beacon_silence_rssi_a);
1021 pos += scnprintf(buf + pos, bufsz - pos,
1022 fmt_table, "beacon_silence_rssi_b:",
1023 le32_to_cpu(general->beacon_silence_rssi_b),
1024 accum_general->beacon_silence_rssi_b,
1025 delta_general->beacon_silence_rssi_b,
1026 max_general->beacon_silence_rssi_b);
1027 pos += scnprintf(buf + pos, bufsz - pos,
1028 fmt_table, "beacon_silence_rssi_c:",
1029 le32_to_cpu(general->beacon_silence_rssi_c),
1030 accum_general->beacon_silence_rssi_c,
1031 delta_general->beacon_silence_rssi_c,
1032 max_general->beacon_silence_rssi_c);
1033 pos += scnprintf(buf + pos, bufsz - pos,
1034 fmt_table, "interference_data_flag:",
1035 le32_to_cpu(general->interference_data_flag),
1036 accum_general->interference_data_flag,
1037 delta_general->interference_data_flag,
1038 max_general->interference_data_flag);
1039 pos += scnprintf(buf + pos, bufsz - pos,
1040 fmt_table, "channel_load:",
1041 le32_to_cpu(general->channel_load),
1042 accum_general->channel_load,
1043 delta_general->channel_load,
1044 max_general->channel_load);
1045 pos += scnprintf(buf + pos, bufsz - pos,
1046 fmt_table, "dsp_false_alarms:",
1047 le32_to_cpu(general->dsp_false_alarms),
1048 accum_general->dsp_false_alarms,
1049 delta_general->dsp_false_alarms,
1050 max_general->dsp_false_alarms);
1051 pos += scnprintf(buf + pos, bufsz - pos,
1052 fmt_table, "beacon_rssi_a:",
1053 le32_to_cpu(general->beacon_rssi_a),
1054 accum_general->beacon_rssi_a,
1055 delta_general->beacon_rssi_a,
1056 max_general->beacon_rssi_a);
1057 pos += scnprintf(buf + pos, bufsz - pos,
1058 fmt_table, "beacon_rssi_b:",
1059 le32_to_cpu(general->beacon_rssi_b),
1060 accum_general->beacon_rssi_b,
1061 delta_general->beacon_rssi_b,
1062 max_general->beacon_rssi_b);
1063 pos += scnprintf(buf + pos, bufsz - pos,
1064 fmt_table, "beacon_rssi_c:",
1065 le32_to_cpu(general->beacon_rssi_c),
1066 accum_general->beacon_rssi_c,
1067 delta_general->beacon_rssi_c,
1068 max_general->beacon_rssi_c);
1069 pos += scnprintf(buf + pos, bufsz - pos,
1070 fmt_table, "beacon_energy_a:",
1071 le32_to_cpu(general->beacon_energy_a),
1072 accum_general->beacon_energy_a,
1073 delta_general->beacon_energy_a,
1074 max_general->beacon_energy_a);
1075 pos += scnprintf(buf + pos, bufsz - pos,
1076 fmt_table, "beacon_energy_b:",
1077 le32_to_cpu(general->beacon_energy_b),
1078 accum_general->beacon_energy_b,
1079 delta_general->beacon_energy_b,
1080 max_general->beacon_energy_b);
1081 pos += scnprintf(buf + pos, bufsz - pos,
1082 fmt_table, "beacon_energy_c:",
1083 le32_to_cpu(general->beacon_energy_c),
1084 accum_general->beacon_energy_c,
1085 delta_general->beacon_energy_c,
1086 max_general->beacon_energy_c);
1087
1088 pos += scnprintf(buf + pos, bufsz - pos,
1089 fmt_header, "Statistics_Rx - OFDM_HT:");
1090 pos += scnprintf(buf + pos, bufsz - pos,
1091 fmt_table, "plcp_err:",
1092 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
1093 delta_ht->plcp_err, max_ht->plcp_err);
1094 pos += scnprintf(buf + pos, bufsz - pos,
1095 fmt_table, "overrun_err:",
1096 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
1097 delta_ht->overrun_err, max_ht->overrun_err);
1098 pos += scnprintf(buf + pos, bufsz - pos,
1099 fmt_table, "early_overrun_err:",
1100 le32_to_cpu(ht->early_overrun_err),
1101 accum_ht->early_overrun_err,
1102 delta_ht->early_overrun_err,
1103 max_ht->early_overrun_err);
1104 pos += scnprintf(buf + pos, bufsz - pos,
1105 fmt_table, "crc32_good:",
1106 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
1107 delta_ht->crc32_good, max_ht->crc32_good);
1108 pos += scnprintf(buf + pos, bufsz - pos,
1109 fmt_table, "crc32_err:",
1110 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
1111 delta_ht->crc32_err, max_ht->crc32_err);
1112 pos += scnprintf(buf + pos, bufsz - pos,
1113 fmt_table, "mh_format_err:",
1114 le32_to_cpu(ht->mh_format_err),
1115 accum_ht->mh_format_err,
1116 delta_ht->mh_format_err, max_ht->mh_format_err);
1117 pos += scnprintf(buf + pos, bufsz - pos,
1118 fmt_table, "agg_crc32_good:",
1119 le32_to_cpu(ht->agg_crc32_good),
1120 accum_ht->agg_crc32_good,
1121 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
1122 pos += scnprintf(buf + pos, bufsz - pos,
1123 fmt_table, "agg_mpdu_cnt:",
1124 le32_to_cpu(ht->agg_mpdu_cnt),
1125 accum_ht->agg_mpdu_cnt,
1126 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
1127 pos += scnprintf(buf + pos, bufsz - pos,
1128 fmt_table, "agg_cnt:",
1129 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
1130 delta_ht->agg_cnt, max_ht->agg_cnt);
1131 pos += scnprintf(buf + pos, bufsz - pos,
1132 fmt_table, "unsupport_mcs:",
1133 le32_to_cpu(ht->unsupport_mcs),
1134 accum_ht->unsupport_mcs,
1135 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
1136
1137 spin_unlock_bh(&priv->statistics.lock);
1138
1139 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1140 kfree(buf);
1141 return ret;
1142}
1143
/*
 * iwl_dbgfs_ucode_tx_stats_read - debugfs read for uCode TX statistics
 *
 * Formats the TX counters from the last statistics notification — current
 * value, accumulated, delta and max-delta for each field — into a text
 * table and copies it to user space via simple_read_from_buffer().
 *
 * Returns the number of bytes copied, -EAGAIN if the uCode is not alive,
 * or -ENOMEM if the output buffer cannot be allocated.
 */
static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char *buf;
	/* 48 formatted rows per struct field group plus header slack */
	int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
	ssize_t ret;
	struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the statistic information display here is based on
	 * the last statistics notification from uCode
	 * might not reflect the current uCode activity
	 */
	spin_lock_bh(&priv->statistics.lock);

	tx = &priv->statistics.tx;
	accum_tx = &priv->accum_stats.tx;
	delta_tx = &priv->delta_stats.tx;
	max_tx = &priv->max_delta_stats.tx;

	pos += iwl_statistics_flag(priv, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_header, "Statistics_Tx:");
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "preamble:",
			 le32_to_cpu(tx->preamble_cnt),
			 accum_tx->preamble_cnt,
			 delta_tx->preamble_cnt, max_tx->preamble_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "rx_detected_cnt:",
			 le32_to_cpu(tx->rx_detected_cnt),
			 accum_tx->rx_detected_cnt,
			 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "bt_prio_defer_cnt:",
			 le32_to_cpu(tx->bt_prio_defer_cnt),
			 accum_tx->bt_prio_defer_cnt,
			 delta_tx->bt_prio_defer_cnt,
			 max_tx->bt_prio_defer_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "bt_prio_kill_cnt:",
			 le32_to_cpu(tx->bt_prio_kill_cnt),
			 accum_tx->bt_prio_kill_cnt,
			 delta_tx->bt_prio_kill_cnt,
			 max_tx->bt_prio_kill_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "few_bytes_cnt:",
			 le32_to_cpu(tx->few_bytes_cnt),
			 accum_tx->few_bytes_cnt,
			 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "cts_timeout:",
			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
			 delta_tx->cts_timeout, max_tx->cts_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "ack_timeout:",
			 le32_to_cpu(tx->ack_timeout),
			 accum_tx->ack_timeout,
			 delta_tx->ack_timeout, max_tx->ack_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "expected_ack_cnt:",
			 le32_to_cpu(tx->expected_ack_cnt),
			 accum_tx->expected_ack_cnt,
			 delta_tx->expected_ack_cnt,
			 max_tx->expected_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "actual_ack_cnt:",
			 le32_to_cpu(tx->actual_ack_cnt),
			 accum_tx->actual_ack_cnt,
			 delta_tx->actual_ack_cnt,
			 max_tx->actual_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "dump_msdu_cnt:",
			 le32_to_cpu(tx->dump_msdu_cnt),
			 accum_tx->dump_msdu_cnt,
			 delta_tx->dump_msdu_cnt,
			 max_tx->dump_msdu_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "abort_nxt_frame_mismatch:",
			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
			 accum_tx->burst_abort_next_frame_mismatch_cnt,
			 delta_tx->burst_abort_next_frame_mismatch_cnt,
			 max_tx->burst_abort_next_frame_mismatch_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "abort_missing_nxt_frame:",
			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
			 accum_tx->burst_abort_missing_next_frame_cnt,
			 delta_tx->burst_abort_missing_next_frame_cnt,
			 max_tx->burst_abort_missing_next_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "cts_timeout_collision:",
			 le32_to_cpu(tx->cts_timeout_collision),
			 accum_tx->cts_timeout_collision,
			 delta_tx->cts_timeout_collision,
			 max_tx->cts_timeout_collision);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "ack_ba_timeout_collision:",
			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
			 accum_tx->ack_or_ba_timeout_collision,
			 delta_tx->ack_or_ba_timeout_collision,
			 max_tx->ack_or_ba_timeout_collision);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg ba_timeout:",
			 le32_to_cpu(tx->agg.ba_timeout),
			 accum_tx->agg.ba_timeout,
			 delta_tx->agg.ba_timeout,
			 max_tx->agg.ba_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg ba_resched_frames:",
			 le32_to_cpu(tx->agg.ba_reschedule_frames),
			 accum_tx->agg.ba_reschedule_frames,
			 delta_tx->agg.ba_reschedule_frames,
			 max_tx->agg.ba_reschedule_frames);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_agg_frame:",
			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
			 accum_tx->agg.scd_query_agg_frame_cnt,
			 delta_tx->agg.scd_query_agg_frame_cnt,
			 max_tx->agg.scd_query_agg_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_no_agg:",
			 le32_to_cpu(tx->agg.scd_query_no_agg),
			 accum_tx->agg.scd_query_no_agg,
			 delta_tx->agg.scd_query_no_agg,
			 max_tx->agg.scd_query_no_agg);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_agg:",
			 le32_to_cpu(tx->agg.scd_query_agg),
			 accum_tx->agg.scd_query_agg,
			 delta_tx->agg.scd_query_agg,
			 max_tx->agg.scd_query_agg);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg scd_query_mismatch:",
			 le32_to_cpu(tx->agg.scd_query_mismatch),
			 accum_tx->agg.scd_query_mismatch,
			 delta_tx->agg.scd_query_mismatch,
			 max_tx->agg.scd_query_mismatch);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg frame_not_ready:",
			 le32_to_cpu(tx->agg.frame_not_ready),
			 accum_tx->agg.frame_not_ready,
			 delta_tx->agg.frame_not_ready,
			 max_tx->agg.frame_not_ready);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg underrun:",
			 le32_to_cpu(tx->agg.underrun),
			 accum_tx->agg.underrun,
			 delta_tx->agg.underrun, max_tx->agg.underrun);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg bt_prio_kill:",
			 le32_to_cpu(tx->agg.bt_prio_kill),
			 accum_tx->agg.bt_prio_kill,
			 delta_tx->agg.bt_prio_kill,
			 max_tx->agg.bt_prio_kill);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "agg rx_ba_rsp_cnt:",
			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
			 accum_tx->agg.rx_ba_rsp_cnt,
			 delta_tx->agg.rx_ba_rsp_cnt,
			 max_tx->agg.rx_ba_rsp_cnt);

	/* Per-antenna TX power is only printed for antennas that are both
	 * valid in NVM and reporting a non-zero value. */
	if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"tx power: (1/2 dB step)\n");
		if ((priv->nvm_data->valid_tx_ant & ANT_A) &&
		    tx->tx_power.ant_a)
			pos += scnprintf(buf + pos, bufsz - pos,
					 fmt_hex, "antenna A:",
					 tx->tx_power.ant_a);
		if ((priv->nvm_data->valid_tx_ant & ANT_B) &&
		    tx->tx_power.ant_b)
			pos += scnprintf(buf + pos, bufsz - pos,
					 fmt_hex, "antenna B:",
					 tx->tx_power.ant_b);
		if ((priv->nvm_data->valid_tx_ant & ANT_C) &&
		    tx->tx_power.ant_c)
			pos += scnprintf(buf + pos, bufsz - pos,
					 fmt_hex, "antenna C:",
					 tx->tx_power.ant_c);
	}

	spin_unlock_bh(&priv->statistics.lock);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1340
/*
 * iwl_dbgfs_ucode_general_stats_read - debugfs read for uCode general stats
 *
 * Dumps the "general" statistics group (temperature, debug counters and
 * antenna-diversity counters) from the last statistics notification as
 * current/accumulated/delta/max-delta rows, then copies the text to user
 * space.
 *
 * Returns the number of bytes copied, -EAGAIN if the uCode is not alive,
 * or -ENOMEM on buffer allocation failure.
 */
static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char *buf;
	int bufsz = sizeof(struct statistics_general) * 10 + 300;
	ssize_t ret;
	struct statistics_general_common *general, *accum_general;
	struct statistics_general_common *delta_general, *max_general;
	struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct statistics_div *div, *accum_div, *delta_div, *max_div;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the statistic information display here is based on
	 * the last statistics notification from uCode
	 * might not reflect the current uCode activity
	 */

	spin_lock_bh(&priv->statistics.lock);

	/* dbg and div are sub-structures embedded in the common group */
	general = &priv->statistics.common;
	dbg = &priv->statistics.common.dbg;
	div = &priv->statistics.common.div;
	accum_general = &priv->accum_stats.common;
	accum_dbg = &priv->accum_stats.common.dbg;
	accum_div = &priv->accum_stats.common.div;
	delta_general = &priv->delta_stats.common;
	max_general = &priv->max_delta_stats.common;
	delta_dbg = &priv->delta_stats.common.dbg;
	max_dbg = &priv->max_delta_stats.common.dbg;
	delta_div = &priv->delta_stats.common.div;
	max_div = &priv->max_delta_stats.common.div;

	pos += iwl_statistics_flag(priv, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_header, "Statistics_General:");
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_value, "temperature:",
			 le32_to_cpu(general->temperature));
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_value, "temperature_m:",
			 le32_to_cpu(general->temperature_m));
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_value, "ttl_timestamp:",
			 le32_to_cpu(general->ttl_timestamp));
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "burst_check:",
			 le32_to_cpu(dbg->burst_check),
			 accum_dbg->burst_check,
			 delta_dbg->burst_check, max_dbg->burst_check);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "burst_count:",
			 le32_to_cpu(dbg->burst_count),
			 accum_dbg->burst_count,
			 delta_dbg->burst_count, max_dbg->burst_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "wait_for_silence_timeout_count:",
			 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
			 accum_dbg->wait_for_silence_timeout_cnt,
			 delta_dbg->wait_for_silence_timeout_cnt,
			 max_dbg->wait_for_silence_timeout_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "sleep_time:",
			 le32_to_cpu(general->sleep_time),
			 accum_general->sleep_time,
			 delta_general->sleep_time, max_general->sleep_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "slots_out:",
			 le32_to_cpu(general->slots_out),
			 accum_general->slots_out,
			 delta_general->slots_out, max_general->slots_out);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "slots_idle:",
			 le32_to_cpu(general->slots_idle),
			 accum_general->slots_idle,
			 delta_general->slots_idle, max_general->slots_idle);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "tx_on_a:",
			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
			 delta_div->tx_on_a, max_div->tx_on_a);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "tx_on_b:",
			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
			 delta_div->tx_on_b, max_div->tx_on_b);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "exec_time:",
			 le32_to_cpu(div->exec_time), accum_div->exec_time,
			 delta_div->exec_time, max_div->exec_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "probe_time:",
			 le32_to_cpu(div->probe_time), accum_div->probe_time,
			 delta_div->probe_time, max_div->probe_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "rx_enable_counter:",
			 le32_to_cpu(general->rx_enable_counter),
			 accum_general->rx_enable_counter,
			 delta_general->rx_enable_counter,
			 max_general->rx_enable_counter);
	pos += scnprintf(buf + pos, bufsz - pos,
			 fmt_table, "num_of_sos_states:",
			 le32_to_cpu(general->num_of_sos_states),
			 accum_general->num_of_sos_states,
			 delta_general->num_of_sos_states,
			 max_general->num_of_sos_states);

	spin_unlock_bh(&priv->statistics.lock);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1460
/*
 * iwl_dbgfs_ucode_bt_stats_read - debugfs read for uCode BT-coex statistics
 *
 * Requests a fresh statistics notification from the uCode, then dumps
 * the Bluetooth activity counters (current and accumulated columns) to
 * user space.
 *
 * Returns bytes copied, -EAGAIN if the uCode is not alive or the
 * statistics request failed, -EINVAL if BT coexistence is disabled,
 * or -ENOMEM on allocation failure.
 */
static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
	int pos = 0;
	char *buf;
	int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200;
	ssize_t ret;
	struct statistics_bt_activity *bt, *accum_bt;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	/* BT statistics are only meaningful when BT coex is enabled */
	if (!priv->bt_enable_flag)
		return -EINVAL;

	/* make request to uCode to retrieve statistics information */
	mutex_lock(&priv->mutex);
	ret = iwl_send_statistics_request(priv, 0, false);
	mutex_unlock(&priv->mutex);

	if (ret)
		return -EAGAIN;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * the statistic information display here is based on
	 * the last statistics notification from uCode
	 * might not reflect the current uCode activity
	 */

	spin_lock_bh(&priv->statistics.lock);

	bt = &priv->statistics.bt_activity;
	accum_bt = &priv->accum_stats.bt_activity;

	pos += iwl_statistics_flag(priv, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"\t\t\tcurrent\t\t\taccumulative\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			 "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->hi_priority_tx_req_cnt),
			 accum_bt->hi_priority_tx_req_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->hi_priority_tx_denied_cnt),
			 accum_bt->hi_priority_tx_denied_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->lo_priority_tx_req_cnt),
			 accum_bt->lo_priority_tx_req_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
			 accum_bt->lo_priority_tx_denied_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->hi_priority_rx_req_cnt),
			 accum_bt->hi_priority_rx_req_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->hi_priority_rx_denied_cnt),
			 accum_bt->hi_priority_rx_denied_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->lo_priority_rx_req_cnt),
			 accum_bt->lo_priority_rx_req_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
			 le32_to_cpu(bt->lo_priority_rx_denied_cnt),
			 accum_bt->lo_priority_rx_denied_cnt);

	/* num_bt_kills lives outside the bt_activity sub-struct */
	pos += scnprintf(buf + pos, bufsz - pos,
			 "(rx)num_bt_kills:\t\t%u\t\t\t%u\n",
			 le32_to_cpu(priv->statistics.num_bt_kills),
			 priv->statistics.accum_num_bt_kills);

	spin_unlock_bh(&priv->statistics.lock);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1548
1549static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1550 char __user *user_buf,
1551 size_t count, loff_t *ppos)
1552{
1553 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1554 int pos = 0;
1555 char *buf;
1556 int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
1557 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
1558 ssize_t ret;
1559
1560 if (!iwl_is_alive(priv))
1561 return -EAGAIN;
1562
1563 buf = kzalloc(bufsz, GFP_KERNEL);
1564 if (!buf)
1565 return -ENOMEM;
1566
1567 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
1568 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
1569 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
1570 priv->reply_tx_stats.pp_delay);
1571 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1572 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
1573 priv->reply_tx_stats.pp_few_bytes);
1574 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1575 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
1576 priv->reply_tx_stats.pp_bt_prio);
1577 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1578 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
1579 priv->reply_tx_stats.pp_quiet_period);
1580 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1581 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
1582 priv->reply_tx_stats.pp_calc_ttak);
1583 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1584 iwl_get_tx_fail_reason(
1585 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
1586 priv->reply_tx_stats.int_crossed_retry);
1587 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1588 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
1589 priv->reply_tx_stats.short_limit);
1590 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1591 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
1592 priv->reply_tx_stats.long_limit);
1593 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1594 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
1595 priv->reply_tx_stats.fifo_underrun);
1596 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1597 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
1598 priv->reply_tx_stats.drain_flow);
1599 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1600 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
1601 priv->reply_tx_stats.rfkill_flush);
1602 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1603 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
1604 priv->reply_tx_stats.life_expire);
1605 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1606 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
1607 priv->reply_tx_stats.dest_ps);
1608 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1609 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
1610 priv->reply_tx_stats.host_abort);
1611 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1612 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
1613 priv->reply_tx_stats.pp_delay);
1614 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1615 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
1616 priv->reply_tx_stats.sta_invalid);
1617 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1618 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
1619 priv->reply_tx_stats.frag_drop);
1620 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1621 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
1622 priv->reply_tx_stats.tid_disable);
1623 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1624 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
1625 priv->reply_tx_stats.fifo_flush);
1626 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1627 iwl_get_tx_fail_reason(
1628 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
1629 priv->reply_tx_stats.insuff_cf_poll);
1630 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1631 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
1632 priv->reply_tx_stats.fail_hw_drop);
1633 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1634 iwl_get_tx_fail_reason(
1635 TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
1636 priv->reply_tx_stats.sta_color_mismatch);
1637 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1638 priv->reply_tx_stats.unknown);
1639
1640 pos += scnprintf(buf + pos, bufsz - pos,
1641 "\nStatistics_Agg_TX_Error:\n");
1642
1643 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1644 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
1645 priv->reply_agg_tx_stats.underrun);
1646 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1647 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
1648 priv->reply_agg_tx_stats.bt_prio);
1649 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1650 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
1651 priv->reply_agg_tx_stats.few_bytes);
1652 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1653 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
1654 priv->reply_agg_tx_stats.abort);
1655 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1656 iwl_get_agg_tx_fail_reason(
1657 AGG_TX_STATE_LAST_SENT_TTL_MSK),
1658 priv->reply_agg_tx_stats.last_sent_ttl);
1659 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1660 iwl_get_agg_tx_fail_reason(
1661 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
1662 priv->reply_agg_tx_stats.last_sent_try);
1663 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1664 iwl_get_agg_tx_fail_reason(
1665 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
1666 priv->reply_agg_tx_stats.last_sent_bt_kill);
1667 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1668 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
1669 priv->reply_agg_tx_stats.scd_query);
1670 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1671 iwl_get_agg_tx_fail_reason(
1672 AGG_TX_STATE_TEST_BAD_CRC32_MSK),
1673 priv->reply_agg_tx_stats.bad_crc32);
1674 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1675 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
1676 priv->reply_agg_tx_stats.response);
1677 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1678 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
1679 priv->reply_agg_tx_stats.dump_tx);
1680 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1681 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
1682 priv->reply_agg_tx_stats.delay_tx);
1683 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1684 priv->reply_agg_tx_stats.unknown);
1685
1686 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1687 kfree(buf);
1688 return ret;
1689}
1690
/*
 * iwl_dbgfs_sensitivity_read - debugfs read for sensitivity calibration data
 *
 * Dumps the fields of priv->sensitivity_data (auto-correlation thresholds,
 * last false-alarm / bad-PLCP counts, and the NRG energy/silence history
 * arrays) as text to user space.
 *
 * Returns bytes copied, or -ENOMEM on allocation failure.
 */
static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
	ssize_t ret;
	struct iwl_sensitivity_data *data;

	data = &priv->sensitivity_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
			 data->auto_corr_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "auto_corr_ofdm_mrc:\t\t %u\n",
			 data->auto_corr_ofdm_mrc);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
			 data->auto_corr_ofdm_x1);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
			 data->auto_corr_ofdm_mrc_x1);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
			 data->auto_corr_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
			 data->auto_corr_cck_mrc);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
			 data->last_bad_plcp_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
			 data->last_fa_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "last_bad_plcp_cnt_cck:\t\t %u\n",
			 data->last_bad_plcp_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
			 data->last_fa_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
			 data->nrg_curr_state);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
			 data->nrg_prev_state);
	/* energy history: 10 entries on one line */
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
	for (cnt = 0; cnt < 10; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->nrg_value[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	/* silence-RSSI history: NRG_NUM_PREV_STAT_L entries on one line */
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->nrg_silence_rssi[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
			 data->nrg_silence_ref);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
			 data->nrg_energy_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
			 data->nrg_silence_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
			 data->nrg_th_cck);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "nrg_auto_corr_silence_diff:\t %u\n",
			 data->nrg_auto_corr_silence_diff);
	pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
			 data->num_in_cck_no_fa);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
			 data->nrg_th_ofdm);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1768
1769
/*
 * iwl_dbgfs_chain_noise_read - debugfs read for chain-noise calibration data
 *
 * Dumps the fields of priv->chain_noise_data (per-chain noise and signal
 * accumulators, disconnected-antenna array, delta gain codes and the
 * calibration state machine state) as text to user space.
 *
 * Returns bytes copied, or -ENOMEM on allocation failure.
 */
static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct iwl_chain_noise_data *data;

	data = &priv->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
			 data->active_chains);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
			 data->chain_noise_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
			 data->chain_noise_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
			 data->chain_noise_c);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
			 data->chain_signal_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
			 data->chain_signal_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
			 data->chain_signal_c);
	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
			 data->beacon_count);

	/* one entry per RX chain on a single line */
	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
			 data->radio_write);
	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
			 data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1825
1826static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1827 char __user *user_buf,
1828 size_t count, loff_t *ppos)
1829{
1830 struct iwl_priv *priv = file->private_data;
1831 char buf[60];
1832 int pos = 0;
1833 const size_t bufsz = sizeof(buf);
1834 u32 pwrsave_status;
1835
1836 pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) &
1837 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1838
1839 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1840 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1841 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1842 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1843 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1844 "error");
1845
1846 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1847}
1848
/*
 * debugfs write handler: writing any integer triggers a statistics request
 * to the uCode (the third argument to iwl_send_statistics_request() is
 * presumably the "clear" flag — confirm against its definition).
 */
static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int clear;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	/* the parsed value is never used; sscanf() only validates the input */
	if (sscanf(buf, "%d", &clear) != 1)
		return -EFAULT;

	/* make request to uCode to retrieve statistics information */
	mutex_lock(&priv->mutex);
	iwl_send_statistics_request(priv, 0, true);
	mutex_unlock(&priv->mutex);

	return count;
}
1872
1873static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
1874 char __user *user_buf,
1875 size_t count, loff_t *ppos) {
1876
1877 struct iwl_priv *priv = file->private_data;
1878 int pos = 0;
1879 char buf[128];
1880 const size_t bufsz = sizeof(buf);
1881
1882 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
1883 priv->event_log.ucode_trace ? "On" : "Off");
1884 pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
1885 priv->event_log.non_wraps_count);
1886 pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
1887 priv->event_log.wraps_once_count);
1888 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
1889 priv->event_log.wraps_more_count);
1890
1891 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1892}
1893
1894static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
1895 const char __user *user_buf,
1896 size_t count, loff_t *ppos)
1897{
1898 struct iwl_priv *priv = file->private_data;
1899 char buf[8];
1900 int buf_size;
1901 int trace;
1902
1903 memset(buf, 0, sizeof(buf));
1904 buf_size = min(count, sizeof(buf) - 1);
1905 if (copy_from_user(buf, user_buf, buf_size))
1906 return -EFAULT;
1907 if (sscanf(buf, "%d", &trace) != 1)
1908 return -EFAULT;
1909
1910 if (trace) {
1911 priv->event_log.ucode_trace = true;
1912 if (iwl_is_alive(priv)) {
1913 /* start collecting data now */
1914 mod_timer(&priv->ucode_trace, jiffies);
1915 }
1916 } else {
1917 priv->event_log.ucode_trace = false;
1918 del_timer_sync(&priv->ucode_trace);
1919 }
1920
1921 return count;
1922}
1923
1924static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
1925 char __user *user_buf,
1926 size_t count, loff_t *ppos) {
1927
1928 struct iwl_priv *priv = file->private_data;
1929 int len = 0;
1930 char buf[20];
1931
1932 len = sprintf(buf, "0x%04X\n",
1933 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1934 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1935}
1936
1937static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
1938 char __user *user_buf,
1939 size_t count, loff_t *ppos) {
1940
1941 struct iwl_priv *priv = file->private_data;
1942 int len = 0;
1943 char buf[20];
1944
1945 len = sprintf(buf, "0x%04X\n",
1946 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1947 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1948}
1949
1950static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
1951 char __user *user_buf,
1952 size_t count, loff_t *ppos) {
1953
1954 struct iwl_priv *priv = file->private_data;
1955 int pos = 0;
1956 char buf[12];
1957 const size_t bufsz = sizeof(buf);
1958
1959 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1960 priv->missed_beacon_threshold);
1961
1962 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1963}
1964
1965static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
1966 const char __user *user_buf,
1967 size_t count, loff_t *ppos)
1968{
1969 struct iwl_priv *priv = file->private_data;
1970 char buf[8];
1971 int buf_size;
1972 int missed;
1973
1974 memset(buf, 0, sizeof(buf));
1975 buf_size = min(count, sizeof(buf) - 1);
1976 if (copy_from_user(buf, user_buf, buf_size))
1977 return -EFAULT;
1978 if (sscanf(buf, "%d", &missed) != 1)
1979 return -EINVAL;
1980
1981 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1982 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1983 priv->missed_beacon_threshold =
1984 IWL_MISSED_BEACON_THRESHOLD_DEF;
1985 else
1986 priv->missed_beacon_threshold = missed;
1987
1988 return count;
1989}
1990
1991static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
1992 char __user *user_buf,
1993 size_t count, loff_t *ppos) {
1994
1995 struct iwl_priv *priv = file->private_data;
1996 int pos = 0;
1997 char buf[12];
1998 const size_t bufsz = sizeof(buf);
1999
2000 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
2001 priv->plcp_delta_threshold);
2002
2003 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2004}
2005
2006static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
2007 const char __user *user_buf,
2008 size_t count, loff_t *ppos) {
2009
2010 struct iwl_priv *priv = file->private_data;
2011 char buf[8];
2012 int buf_size;
2013 int plcp;
2014
2015 memset(buf, 0, sizeof(buf));
2016 buf_size = min(count, sizeof(buf) - 1);
2017 if (copy_from_user(buf, user_buf, buf_size))
2018 return -EFAULT;
2019 if (sscanf(buf, "%d", &plcp) != 1)
2020 return -EINVAL;
2021 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
2022 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
2023 priv->plcp_delta_threshold =
2024 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
2025 else
2026 priv->plcp_delta_threshold = plcp;
2027 return count;
2028}
2029
2030static ssize_t iwl_dbgfs_rf_reset_read(struct file *file,
2031 char __user *user_buf,
2032 size_t count, loff_t *ppos)
2033{
2034 struct iwl_priv *priv = file->private_data;
2035 int pos = 0;
2036 char buf[300];
2037 const size_t bufsz = sizeof(buf);
2038 struct iwl_rf_reset *rf_reset = &priv->rf_reset;
2039
2040 pos += scnprintf(buf + pos, bufsz - pos,
2041 "RF reset statistics\n");
2042 pos += scnprintf(buf + pos, bufsz - pos,
2043 "\tnumber of reset request: %d\n",
2044 rf_reset->reset_request_count);
2045 pos += scnprintf(buf + pos, bufsz - pos,
2046 "\tnumber of reset request success: %d\n",
2047 rf_reset->reset_success_count);
2048 pos += scnprintf(buf + pos, bufsz - pos,
2049 "\tnumber of reset request reject: %d\n",
2050 rf_reset->reset_reject_count);
2051
2052 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2053}
2054
2055static ssize_t iwl_dbgfs_rf_reset_write(struct file *file,
2056 const char __user *user_buf,
2057 size_t count, loff_t *ppos) {
2058
2059 struct iwl_priv *priv = file->private_data;
2060 int ret;
2061
2062 ret = iwl_force_rf_reset(priv, true);
2063 return ret ? ret : count;
2064}
2065
/*
 * debugfs write handler: flush the device Tx FIFOs. The written value is
 * parsed only to validate the input; its actual value is ignored.
 */
static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int flush;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &flush) != 1)
		return -EINVAL;

	/* cannot talk to the device while RF-kill is asserted
	 * (NOTE(review): -EFAULT is an odd errno for this; -EIO may fit better) */
	if (iwl_is_rfkill(priv))
		return -EFAULT;

	iwlagn_dev_txfifo_flush(priv);

	return count;
}
2089
2090static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
2091 char __user *user_buf,
2092 size_t count, loff_t *ppos) {
2093
2094 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2095 int pos = 0;
2096 char buf[200];
2097 const size_t bufsz = sizeof(buf);
2098
2099 if (!priv->bt_enable_flag) {
2100 pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n");
2101 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2102 }
2103 pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n",
2104 priv->bt_enable_flag);
2105 pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
2106 priv->bt_full_concurrent ? "full concurrency" : "3-wire");
2107 pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
2108 "last traffic notif: %d\n",
2109 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
2110 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
2111 "kill_ack_mask: %x, kill_cts_mask: %x\n",
2112 priv->bt_ch_announce, priv->kill_ack_mask,
2113 priv->kill_cts_mask);
2114
2115 pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
2116 switch (priv->bt_traffic_load) {
2117 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
2118 pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n");
2119 break;
2120 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
2121 pos += scnprintf(buf + pos, bufsz - pos, "High\n");
2122 break;
2123 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
2124 pos += scnprintf(buf + pos, bufsz - pos, "Low\n");
2125 break;
2126 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
2127 default:
2128 pos += scnprintf(buf + pos, bufsz - pos, "None\n");
2129 break;
2130 }
2131
2132 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2133}
2134
2135static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
2136 char __user *user_buf,
2137 size_t count, loff_t *ppos)
2138{
2139 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2140
2141 int pos = 0;
2142 char buf[40];
2143 const size_t bufsz = sizeof(buf);
2144
2145 if (priv->cfg->ht_params)
2146 pos += scnprintf(buf + pos, bufsz - pos,
2147 "use %s for aggregation\n",
2148 (priv->hw_params.use_rts_for_aggregation) ?
2149 "rts/cts" : "cts-to-self");
2150 else
2151 pos += scnprintf(buf + pos, bufsz - pos, "N/A");
2152
2153 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2154}
2155
2156static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2157 const char __user *user_buf,
2158 size_t count, loff_t *ppos) {
2159
2160 struct iwl_priv *priv = file->private_data;
2161 char buf[8];
2162 int buf_size;
2163 int rts;
2164
2165 if (!priv->cfg->ht_params)
2166 return -EINVAL;
2167
2168 memset(buf, 0, sizeof(buf));
2169 buf_size = min(count, sizeof(buf) - 1);
2170 if (copy_from_user(buf, user_buf, buf_size))
2171 return -EFAULT;
2172 if (sscanf(buf, "%d", &rts) != 1)
2173 return -EINVAL;
2174 if (rts)
2175 priv->hw_params.use_rts_for_aggregation = true;
2176 else
2177 priv->hw_params.use_rts_for_aggregation = false;
2178 return count;
2179}
2180
2181static int iwl_cmd_echo_test(struct iwl_priv *priv)
2182{
2183 int ret;
2184 struct iwl_host_cmd cmd = {
2185 .id = REPLY_ECHO,
2186 .len = { 0 },
2187 };
2188
2189 ret = iwl_dvm_send_cmd(priv, &cmd);
2190 if (ret)
2191 IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
2192 else
2193 IWL_DEBUG_INFO(priv, "echo testing pass\n");
2194 return ret;
2195}
2196
2197static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
2198 const char __user *user_buf,
2199 size_t count, loff_t *ppos)
2200{
2201 struct iwl_priv *priv = file->private_data;
2202 char buf[8];
2203 int buf_size;
2204
2205 memset(buf, 0, sizeof(buf));
2206 buf_size = min(count, sizeof(buf) - 1);
2207 if (copy_from_user(buf, user_buf, buf_size))
2208 return -EFAULT;
2209
2210 iwl_cmd_echo_test(priv);
2211 return count;
2212}
2213
2214#ifdef CONFIG_IWLWIFI_DEBUG
2215static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2216 char __user *user_buf,
2217 size_t count, loff_t *ppos)
2218{
2219 struct iwl_priv *priv = file->private_data;
2220 char *buf = NULL;
2221 ssize_t ret;
2222
2223 ret = iwl_dump_nic_event_log(priv, true, &buf);
2224 if (ret > 0)
2225 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2226 kfree(buf);
2227 return ret;
2228}
2229
2230static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2231 const char __user *user_buf,
2232 size_t count, loff_t *ppos)
2233{
2234 struct iwl_priv *priv = file->private_data;
2235 u32 event_log_flag;
2236 char buf[8];
2237 int buf_size;
2238
2239 /* check that the interface is up */
2240 if (!iwl_is_ready(priv))
2241 return -EAGAIN;
2242
2243 memset(buf, 0, sizeof(buf));
2244 buf_size = min(count, sizeof(buf) - 1);
2245 if (copy_from_user(buf, user_buf, buf_size))
2246 return -EFAULT;
2247 if (sscanf(buf, "%d", &event_log_flag) != 1)
2248 return -EFAULT;
2249 if (event_log_flag == 1)
2250 iwl_dump_nic_event_log(priv, true, NULL);
2251
2252 return count;
2253}
2254#endif
2255
2256static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
2257 char __user *user_buf,
2258 size_t count, loff_t *ppos)
2259{
2260 struct iwl_priv *priv = file->private_data;
2261 char buf[120];
2262 int pos = 0;
2263 const size_t bufsz = sizeof(buf);
2264
2265 pos += scnprintf(buf + pos, bufsz - pos,
2266 "Sensitivity calibrations %s\n",
2267 (priv->calib_disabled &
2268 IWL_SENSITIVITY_CALIB_DISABLED) ?
2269 "DISABLED" : "ENABLED");
2270 pos += scnprintf(buf + pos, bufsz - pos,
2271 "Chain noise calibrations %s\n",
2272 (priv->calib_disabled &
2273 IWL_CHAIN_NOISE_CALIB_DISABLED) ?
2274 "DISABLED" : "ENABLED");
2275 pos += scnprintf(buf + pos, bufsz - pos,
2276 "Tx power calibrations %s\n",
2277 (priv->calib_disabled &
2278 IWL_TX_POWER_CALIB_DISABLED) ?
2279 "DISABLED" : "ENABLED");
2280
2281 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2282}
2283
2284static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
2285 const char __user *user_buf,
2286 size_t count, loff_t *ppos)
2287{
2288 struct iwl_priv *priv = file->private_data;
2289 char buf[8];
2290 u32 calib_disabled;
2291 int buf_size;
2292
2293 memset(buf, 0, sizeof(buf));
2294 buf_size = min(count, sizeof(buf) - 1);
2295 if (copy_from_user(buf, user_buf, buf_size))
2296 return -EFAULT;
2297 if (sscanf(buf, "%x", &calib_disabled) != 1)
2298 return -EFAULT;
2299
2300 priv->calib_disabled = calib_disabled;
2301
2302 return count;
2303}
2304
/*
 * debugfs write handler: exercise the firmware-restart path by sending
 * REPLY_ERROR (expected to fail / crash the uCode, per the comment below).
 * restart_fw is forced on for the duration so the error handler restarts
 * the firmware, then restored to its configured value.
 */
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	bool restart_fw = iwlwifi_mod_params.restart_fw;
	int ret;

	iwlwifi_mod_params.restart_fw = true;

	mutex_lock(&priv->mutex);

	/* take the return value to make compiler happy - it will fail anyway */
	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL);

	mutex_unlock(&priv->mutex);

	iwlwifi_mod_params.restart_fw = restart_fw;

	return count;
}
2326
/*
 * Instantiate the file_operations for the handlers above; the
 * DEBUGFS_*_FILE_OPS macros generate the *_ops symbols that are later
 * bound to debugfs entries by DEBUGFS_ADD_FILE() in iwl_dbgfs_register().
 */
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(rf_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
DEBUGFS_WRITE_FILE_OPS(fw_restart);
#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
#endif
DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
2351
2352/*
2353 * Create the debugfs files and directories
2354 *
2355 */
2356int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
2357{
2358 struct dentry *dir_data, *dir_rf, *dir_debug;
2359
2360 priv->debugfs_dir = dbgfs_dir;
2361
2362 dir_data = debugfs_create_dir("data", dbgfs_dir);
2363 if (!dir_data)
2364 goto err;
2365 dir_rf = debugfs_create_dir("rf", dbgfs_dir);
2366 if (!dir_rf)
2367 goto err;
2368 dir_debug = debugfs_create_dir("debug", dbgfs_dir);
2369 if (!dir_debug)
2370 goto err;
2371
2372 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
2373 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
2374 DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
2375 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
2376 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
2377 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
2378 DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR);
2379 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
2380 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
2381 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
2382 DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
2383 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
2384 DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR);
2385
2386 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
2387 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
2388 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2389 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2390 DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR);
2391 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
2392 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
2393 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
2394 DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
2395 DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
2396 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
2397 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
2398 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
2399 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
2400 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
2401 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2402 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2403 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
2404 DEBUGFS_ADD_FILE(fw_restart, dir_debug, S_IWUSR);
2405#ifdef CONFIG_IWLWIFI_DEBUG
2406 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
2407#endif
2408
2409 if (iwl_advanced_bt_coexist(priv))
2410 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
2411
2412 /* Calibrations disabled/enabled status*/
2413 DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
2414
2415 /*
2416 * Create a symlink with mac80211. This is not very robust, as it does
2417 * not remove the symlink created. The implicit assumption is that
2418 * when the opmode exits, mac80211 will also exit, and will remove
2419 * this symlink as part of its cleanup.
2420 */
2421 if (priv->mac80211_registered) {
2422 char buf[100];
2423 struct dentry *mac80211_dir, *dev_dir, *root_dir;
2424
2425 dev_dir = dbgfs_dir->d_parent;
2426 root_dir = dev_dir->d_parent;
2427 mac80211_dir = priv->hw->wiphy->debugfsdir;
2428
2429 snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
2430 dev_dir->d_name.name);
2431
2432 if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
2433 goto err;
2434 }
2435
2436 return 0;
2437
2438err:
2439 IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
2440 return -ENOMEM;
2441}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
new file mode 100644
index 000000000000..0ba3e56d6015
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -0,0 +1,949 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (dev.h) for driver implementation definitions.
28 * Please use commands.h for uCode API definitions.
29 */
30
31#ifndef __iwl_dev_h__
32#define __iwl_dev_h__
33
34#include <linux/interrupt.h>
35#include <linux/kernel.h>
36#include <linux/wait.h>
37#include <linux/leds.h>
38#include <linux/slab.h>
39#include <linux/mutex.h>
40
41#include "iwl-fw.h"
42#include "iwl-eeprom-parse.h"
43#include "iwl-csr.h"
44#include "iwl-debug.h"
45#include "iwl-agn-hw.h"
46#include "iwl-op-mode.h"
47#include "iwl-notif-wait.h"
48#include "iwl-trans.h"
49
50#include "led.h"
51#include "power.h"
52#include "rs.h"
53#include "tt.h"
54
55/* CT-KILL constants */
56#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
57#define CT_KILL_THRESHOLD 114 /* in Celsius */
58#define CT_KILL_EXIT_THRESHOLD 95 /* in Celsius */
59
60/* Default noise level to report when noise measurement is not available.
61 * This may be because we're:
 * 1) Not associated (no beacon statistics being sent to driver)
63 * 2) Scanning (noise measurement does not apply to associated channel)
64 * Use default noise value of -127 ... this is below the range of measurable
65 * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
66 * Also, -127 works better than 0 when averaging frames with/without
67 * noise info (e.g. averaging might be done in app); measured dBm values are
68 * always negative ... using a negative value as the default keeps all
69 * averages within an s8's (used in some apps) range of negative values. */
70#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
71
72/*
73 * RTS threshold here is total size [2347] minus 4 FCS bytes
74 * Per spec:
75 * a value of 0 means RTS on all data/management packets
76 * a value > max MSDU size means no RTS
77 * else RTS for data/management frames where MPDU is larger
78 * than RTS value.
79 */
80#define DEFAULT_RTS_THRESHOLD 2347U
81#define MIN_RTS_THRESHOLD 0U
82#define MAX_RTS_THRESHOLD 2347U
83#define MAX_MSDU_SIZE 2304U
84#define MAX_MPDU_SIZE 2346U
85#define DEFAULT_BEACON_INTERVAL 200U
86#define DEFAULT_SHORT_RETRY_LIMIT 7U
87#define DEFAULT_LONG_RETRY_LIMIT 4U
88
89#define IWL_NUM_SCAN_RATES (2)
90
91
92#define IEEE80211_DATA_LEN 2304
93#define IEEE80211_4ADDR_LEN 30
94#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
95#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
96
97#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
98#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
99#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
100
101#define IWL_SUPPORTED_RATES_IE_LEN 8
102
103#define IWL_INVALID_RATE 0xFF
104#define IWL_INVALID_VALUE -1
105
/*
 * Supported HT rates, accessible either as one 16-bit word or as the
 * SISO / MIMO rate bytes that make it up.
 */
union iwl_ht_rate_supp {
	u16 rates;
	struct {
		u8 siso_rate;
		u8 mimo_rate;
	};
};
113
/* Driver-side HT configuration state */
struct iwl_ht_config {
	/* true when a single Rx chain suffices -- NOTE(review): inferred
	 * from the name; confirm against the users of this field */
	bool single_chain_sufficient;
	enum ieee80211_smps_mode smps; /* current smps mode */
};
118
119/* QoS structures */
/* QoS state: activity flag plus the default QoS parameter command */
struct iwl_qos_info {
	int qos_active;
	struct iwl_qosparam_cmd def_qos_parm;
};
124
/**
 * enum iwl_agg_state - the state machine of the BA agreement
 *	establishment / tear down. These states relate to a specific
 *	RA / TID.
 *
 * @IWL_AGG_OFF: aggregation is not used
 * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
 * @IWL_AGG_ON: aggregation session is up
 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
 *	HW queue to be empty from packets for this RA /TID.
 * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
 *	HW queue to be empty from packets for this RA /TID.
 */
enum iwl_agg_state {
	IWL_AGG_OFF = 0,
	IWL_AGG_STARTING,
	IWL_AGG_ON,
	IWL_EMPTYING_HW_QUEUE_ADDBA,
	IWL_EMPTYING_HW_QUEUE_DELBA,
};
146
/**
 * struct iwl_ht_agg - aggregation state machine
 *
 * This struct holds the states for the BA agreement establishment and tear
 * down. It also holds the state during the BA session itself. This struct is
 * duplicated for each RA / TID.
 *
 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
 *	Tx response (REPLY_TX), and the block ack notification
 *	(REPLY_COMPRESSED_BA).
 * @state: state of the BA agreement establishment / tear down.
 * @txq_id: Tx queue used by the BA session
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
 *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
 *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
 *	we are ready to finish the Tx AGG stop / start flow.
 * @wait_for_ba: Expect block-ack before next Tx reply
 */
struct iwl_ht_agg {
	u32 rate_n_flags;
	enum iwl_agg_state state;
	u16 txq_id;
	u16 ssn;
	bool wait_for_ba;
};
172
/**
 * struct iwl_tid_data - one for each RA / TID
 *
 * This struct holds the states for each RA / TID.
 *
 * @seq_number: the next WiFi sequence number to use
 * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
 *	This is basically (last acked packet++).
 * @agg: aggregation state machine
 */
struct iwl_tid_data {
	u16 seq_number;
	u16 next_reclaimed;
	struct iwl_ht_agg agg;
};
188
/*
 * Structure should be accessed with sta_lock held. When station addition
 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
 * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock
 * held.
 */
struct iwl_station_entry {
	struct iwl_addsta_cmd sta;	/* station-add command for this entry */
	/* NOTE(review): semantics of "used"/"ctxid" defined by the station
	 * management code -- confirm there before relying on them */
	u8 used, ctxid;
	struct iwl_link_quality_cmd *lq;
};
200
/*
 * iwl_station_priv: Driver's private station information
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
 * space.
 */
struct iwl_station_priv {
	struct iwl_rxon_context *ctx;	/* RXON context of this station */
	struct iwl_lq_sta lq_sta;	/* rate-scaling state */
	atomic_t pending_frames;
	bool client;
	bool asleep;
	u8 max_agg_bufsize;
	u8 sta_id;
};
217
/**
 * struct iwl_vif_priv - driver's private per-interface information
 *
 * When mac80211 allocates a virtual interface, it can allocate
 * space for us to put data into.
 */
struct iwl_vif_priv {
	struct iwl_rxon_context *ctx;	/* RXON context of this interface */
	u8 ibss_bssid_sta_id;
};
228
/*
 * Tuning limits used by the Rx sensitivity calibration: energy thresholds
 * and auto-correlation min/max bounds for CCK and OFDM detection.
 */
struct iwl_sensitivity_ranges {
	u16 min_nrg_cck;

	u16 nrg_th_cck;
	u16 nrg_th_ofdm;

	u16 auto_corr_min_ofdm;
	u16 auto_corr_min_ofdm_mrc;
	u16 auto_corr_min_ofdm_x1;
	u16 auto_corr_min_ofdm_mrc_x1;

	u16 auto_corr_max_ofdm;
	u16 auto_corr_max_ofdm_mrc;
	u16 auto_corr_max_ofdm_x1;
	u16 auto_corr_max_ofdm_mrc_x1;

	u16 auto_corr_max_cck;
	u16 auto_corr_max_cck_mrc;
	u16 auto_corr_min_cck;
	u16 auto_corr_min_cck_mrc;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
254
255
/* Integer temperature conversions (the 0.15 degree remainder is dropped) */
#define KELVIN_TO_CELSIUS(x) ((x)-273)
#define CELSIUS_TO_KELVIN(x) ((x)+273)
258
259
260/******************************************************************************
261 *
262 * Functions implemented in core module which are forward declared here
263 * for use by iwl-[4-5].c
264 *
265 * NOTE: The implementation of these functions are not hardware specific
266 * which is why they are in the core module files.
267 *
268 * Naming convention --
269 * iwl_ <-- Is part of iwlwifi
270 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
271 *
272 ****************************************************************************/
273void iwl_update_chain_flags(struct iwl_priv *priv);
274extern const u8 iwl_bcast_addr[ETH_ALEN];
275
276#define IWL_OPERATION_MODE_AUTO 0
277#define IWL_OPERATION_MODE_HT_ONLY 1
278#define IWL_OPERATION_MODE_MIXED 2
279#define IWL_OPERATION_MODE_20MHZ 3
280
281#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
282
283/* Sensitivity and chain noise calibration */
284#define INITIALIZATION_VALUE 0xFFFF
285#define IWL_CAL_NUM_BEACONS 16
286#define MAXIMUM_ALLOWED_PATHLOSS 15
287
288#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
289
290#define MAX_FA_OFDM 50
291#define MIN_FA_OFDM 5
292#define MAX_FA_CCK 50
293#define MIN_FA_CCK 5
294
295#define AUTO_CORR_STEP_OFDM 1
296
297#define AUTO_CORR_STEP_CCK 3
298#define AUTO_CORR_MAX_TH_CCK 160
299
300#define NRG_DIFF 2
301#define NRG_STEP_CCK 2
302#define NRG_MARGIN 8
303#define MAX_NUMBER_CCK_NO_FA 100
304
305#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
306
307#define CHAIN_A 0
308#define CHAIN_B 1
309#define CHAIN_C 2
310#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
311#define ALL_BAND_FILTER 0xFF00
312#define IN_BAND_FILTER 0xFF
313#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
314
315#define NRG_NUM_PREV_STAT_L 20
316#define NUM_RX_CHAINS 3
317
/* Classification of the observed false-alarm rate against the allowed range */
enum iwlagn_false_alarm_state {
	IWL_FA_TOO_MANY = 0,
	IWL_FA_TOO_FEW = 1,
	IWL_FA_GOOD_RANGE = 2,
};
323
/* Progress of the chain-noise (differential Rx gain) calibration */
enum iwlagn_chain_noise_state {
	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
	IWL_CHAIN_NOISE_ACCUMULATE,
	IWL_CHAIN_NOISE_CALIBRATED,
	IWL_CHAIN_NOISE_DONE,
};
330
/* Sensitivity calib data: running state of the Rx sensitivity calibration
 * (dumped verbatim by the "sensitivity" debugfs file) */
struct iwl_sensitivity_data {
	u32 auto_corr_ofdm;
	u32 auto_corr_ofdm_mrc;
	u32 auto_corr_ofdm_x1;
	u32 auto_corr_ofdm_mrc_x1;
	u32 auto_corr_cck;
	u32 auto_corr_cck_mrc;

	/* counters remembered from the previous calibration round,
	 * per the "last_" naming */
	u32 last_bad_plcp_cnt_ofdm;
	u32 last_fa_cnt_ofdm;
	u32 last_bad_plcp_cnt_cck;
	u32 last_fa_cnt_cck;

	u32 nrg_curr_state;
	u32 nrg_prev_state;
	u32 nrg_value[10];
	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
	u32 nrg_silence_ref;
	u32 nrg_energy_idx;
	u32 nrg_silence_idx;
	u32 nrg_th_cck;
	s32 nrg_auto_corr_silence_diff;
	u32 num_in_cck_no_fa;
	u32 nrg_th_ofdm;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
361
362/* Chain noise (differential Rx gain) calib data */
363struct iwl_chain_noise_data {
364 u32 active_chains;
365 u32 chain_noise_a;
366 u32 chain_noise_b;
367 u32 chain_noise_c;
368 u32 chain_signal_a;
369 u32 chain_signal_b;
370 u32 chain_signal_c;
371 u16 beacon_count;
372 u8 disconn_array[NUM_RX_CHAINS];
373 u8 delta_gain_code[NUM_RX_CHAINS];
374 u8 radio_write;
375 u8 state;
376};
377
/* Spectrum measurement status bits (kept in priv->measurement_status) */
enum {
	MEASUREMENT_READY = (1 << 0),
	MEASUREMENT_ACTIVE = (1 << 1),
};

/* reply_tx_statistics (for _agn devices) */
/* Counters of TX failure status codes reported by the device; one field
 * per firmware failure reason, plus a catch-all 'unknown' bucket. */
struct reply_tx_error_statistics {
	u32 pp_delay;
	u32 pp_few_bytes;
	u32 pp_bt_prio;
	u32 pp_quiet_period;
	u32 pp_calc_ttak;
	u32 int_crossed_retry;
	u32 short_limit;
	u32 long_limit;
	u32 fifo_underrun;
	u32 drain_flow;
	u32 rfkill_flush;
	u32 life_expire;
	u32 dest_ps;
	u32 host_abort;
	u32 bt_retry;
	u32 sta_invalid;
	u32 frag_drop;
	u32 tid_disable;
	u32 fifo_flush;
	u32 insuff_cf_poll;
	u32 fail_hw_drop;
	u32 sta_color_mismatch;
	u32 unknown;
};

/* reply_agg_tx_statistics (for _agn devices) */
/* Same idea as above, but for aggregated (A-MPDU) TX status codes */
struct reply_agg_tx_error_statistics {
	u32 underrun;
	u32 bt_prio;
	u32 few_bytes;
	u32 abort;
	u32 last_sent_ttl;
	u32 last_sent_try;
	u32 last_sent_bt_kill;
	u32 scd_query;
	u32 bad_crc32;
	u32 response;
	u32 dump_tx;
	u32 delay_tx;
	u32 unknown;
};
426
/*
 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
 * to perform continuous uCode event logging operation if enabled
 */
#define UCODE_TRACE_PERIOD (10)

/*
 * iwl_event_log: current uCode event log position
 *
 * @ucode_trace: enable/disable ucode continuous trace timer
 * @num_wraps: how many times the event buffer wraps
 * @next_entry: the entry just before the next one that uCode would fill
 * @non_wraps_count: counter for no wrap detected when dump ucode events
 * @wraps_once_count: counter for wrap once detected when dump ucode events
 * @wraps_more_count: counter for wrap more than once detected
 *	when dump ucode events
 */
struct iwl_event_log {
	bool ucode_trace;
	u32 num_wraps;
	u32 next_entry;
	int non_wraps_count;
	int wraps_once_count;
	int wraps_more_count;
};

/* minimum gap between two RF reset requests (jiffies) */
#define IWL_DELAY_NEXT_FORCE_RF_RESET  (HZ*3)

/* BT Antenna Coupling Threshold (dB) */
#define IWL_BT_ANTENNA_COUPLING_THRESHOLD	(35)

/* Firmware reload counter and Timestamp */
#define IWL_MIN_RELOAD_DURATION		1000 /* 1000 ms */
#define IWL_MAX_CONTINUE_RELOAD_CNT	4


/* Bookkeeping for radio-reset attempts and rate limiting */
struct iwl_rf_reset {
	int reset_request_count;
	int reset_success_count;
	int reset_reject_count;
	unsigned long last_reset_jiffies;
};
469
/* Identifies one of the device's RXON contexts: the primary BSS context
 * and the PAN (second virtual interface) context. */
enum iwl_rxon_context_id {
	IWL_RXON_CTX_BSS,
	IWL_RXON_CTX_PAN,

	NUM_IWL_RXON_CTX
};

/* extend beacon time format bit shifting */
/*
 * for _agn devices
 * bits 31:22 - extended (beacon count)
 * bits 21:0  - interval (usec within one beacon interval)
 */
#define IWLAGN_EXT_BEACON_TIME_POS	22
484
/* Per-context (BSS/PAN) RXON state: the active and staging RXON commands,
 * timing, QoS, key and HT configuration for one virtual interface. */
struct iwl_rxon_context {
	struct ieee80211_vif *vif;

	/* queue/FIFO mapping for this context */
	u8 mcast_queue;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	u8 ac_to_fifo[IEEE80211_NUM_ACS];

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	enum iwl_rxon_context_id ctxid;

	/* which nl80211 interface types this context may carry */
	u32 interface_modes, exclusive_interface_modes;
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct iwl_rxon_cmd active;
	struct iwl_rxon_cmd staging;

	struct iwl_rxon_time_cmd timing;

	struct iwl_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	/* per-context firmware command IDs */
	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	int beacon_int;

	/* current HT protection/width state for this context */
	struct {
		bool non_gf_sta_present;
		u8 protection;
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
539
/* Why a scan was started: user/mac80211 request vs. internal radio reset */
enum iwl_scan_type {
	IWL_SCAN_NORMAL,
	IWL_SCAN_RADIO_RESET,
};

/**
 * struct iwl_hw_params
 *
 * Holds the module parameters
 *
 * @tx_chains_num: Number of TX chains
 * @rx_chains_num: Number of RX chains
 * @ct_kill_threshold: temperature threshold - in hw dependent unit
 * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
 *	relevant for 1000, 6000 and up
 * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
 */
struct iwl_hw_params {
	u8  tx_chains_num;
	u8  rx_chains_num;
	bool use_rts_for_aggregation;
	u32 ct_kill_threshold;
	u32 ct_kill_exit_threshold;

	const struct iwl_sensitivity_ranges *sens;
};
567
/**
 * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters
 * @advanced_bt_coexist: support advanced bt coexist
 * @bt_init_traffic_load: specify initial bt traffic load
 * @bt_prio_boost: default bt priority boost value
 * @agg_time_limit: maximum number of uSec in aggregation
 * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
 * @bt_session_2: use the version-2 BT session/coex command format
 *	(NOTE(review): inferred from the field name - confirm against the
 *	coex command code)
 */
struct iwl_dvm_bt_params {
	bool advanced_bt_coexist;
	u8 bt_init_traffic_load;
	u32 bt_prio_boost;
	u16 agg_time_limit;
	bool bt_sco_disable;
	bool bt_session_2;
};

/**
 * struct iwl_dvm_cfg - DVM firmware specific device configuration
 * @set_hw_params: set hardware parameters
 * @set_channel_switch: send channel switch command
 * @nic_config: apply device specific configuration
 * @temperature: read temperature
 * @adv_thermal_throttle: support advance thermal throttle
 * @support_ct_kill_exit: support ct kill exit condition
 * @plcp_delta_threshold: plcp error rate threshold used to trigger
 *	radio tuning when there is a high receiving plcp error rate
 * @chain_noise_scale: default chain noise scale used for gain computation
 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
 * @no_idle_support: do not support idle mode
 * @bt_params: pointer to BT parameters
 * @need_temp_offset_calib: need to perform temperature offset calibration
 * @no_xtal_calib: some devices do not need crystal calibration data,
 *	don't send it to those
 * @temp_offset_v2: support v2 of temperature offset calibration
 * @adv_pm: advanced power management
 */
struct iwl_dvm_cfg {
	void (*set_hw_params)(struct iwl_priv *priv);
	int (*set_channel_switch)(struct iwl_priv *priv,
				  struct ieee80211_channel_switch *ch_switch);
	void (*nic_config)(struct iwl_priv *priv);
	void (*temperature)(struct iwl_priv *priv);

	const struct iwl_dvm_bt_params *bt_params;
	s32 chain_noise_scale;
	u8 plcp_delta_threshold;
	bool adv_thermal_throttle;
	bool support_ct_kill_exit;
	bool hd_v2;
	bool no_idle_support;
	bool need_temp_offset_calib;
	bool no_xtal_calib;
	bool temp_offset_v2;
	bool adv_pm;
};
624
/* P2P/WiPAN Notice-of-Absence attribute, RCU-managed; 'data' is a
 * flexible array of 'length' bytes */
struct iwl_wipan_noa_data {
	struct rcu_head rcu_head;
	u32 length;
	u8 data[];
};

/* Calibration disabling bit mask (priv->calib_disabled) */
enum {
	IWL_CALIB_ENABLE_ALL			= 0,

	IWL_SENSITIVITY_CALIB_DISABLED		= BIT(0),
	IWL_CHAIN_NOISE_CALIB_DISABLED		= BIT(1),
	IWL_TX_POWER_CALIB_DISABLED		= BIT(2),

	IWL_CALIB_DISABLE_ALL			= 0xFFFFFFFF,
};

/* Recover the DVM private data from an op-mode pointer */
#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \
	((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific))

/* Recover the DVM private data from an ieee80211_hw pointer */
#define IWL_MAC80211_GET_DVM(_hw) \
	((struct iwl_priv *) ((struct iwl_op_mode *) \
			      (_hw)->priv)->op_mode_specific)
648
/*
 * struct iwl_priv - per-device runtime state for the DVM op-mode.
 *
 * One instance exists per device; it hangs off the op-mode private area
 * (see IWL_OP_MODE_GET_DVM / IWL_MAC80211_GET_DVM above).
 */
struct iwl_priv {

	/* core handles: transport, firmware and device configuration */
	struct iwl_trans *trans;
	struct device *dev;		/* for debug prints only */
	const struct iwl_cfg *cfg;
	const struct iwl_fw *fw;
	const struct iwl_dvm_cfg *lib;
	unsigned long status;

	spinlock_t sta_lock;
	struct mutex mutex;

	/* mac80211 <-> transport queue mapping and stop bookkeeping */
	unsigned long transport_queue_stop;
	bool passive_no_rx;
#define IWL_INVALID_MAC80211_QUEUE	0xff
	u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
	atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];

	unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;

	struct napi_struct *napi;

	struct list_head calib_results;

	struct workqueue_struct *workqueue;

	struct iwl_hw_params hw_params;

	enum ieee80211_band band;
	u8 valid_contexts;	/* bitmap of BIT(ctxid) for live contexts */

	/* dispatch table for firmware notifications, indexed by cmd id */
	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
				       struct iwl_rx_cmd_buffer *rxb);

	struct iwl_notif_wait_data notif_wait;

	/* spectrum measurement report caching */
	struct iwl_spectrum_notification measure_report;
	u8 measurement_status;

	/* ucode beacon time */
	u32 ucode_beacon_time;
	int missed_beacon_threshold;

	/* track IBSS manager (last beacon) status */
	u32 ibss_manager;

	/* jiffies when last recovery from statistics was performed */
	unsigned long rx_statistics_jiffies;

	/*counters */
	u32 rx_handlers_stats[REPLY_MAX];

	/* rf reset */
	struct iwl_rf_reset rf_reset;

	/* firmware reload counter and timestamp */
	unsigned long reload_jiffies;
	int reload_count;
	bool ucode_loaded;

	u8 plcp_delta_threshold;

	/* thermal calibration */
	s32 temperature;	/* Celsius */
	s32 last_temperature;

	struct iwl_wipan_noa_data __rcu *noa_data;

	/* Scan related variables */
	unsigned long scan_start;
	unsigned long scan_start_tsf;
	void *scan_cmd;
	enum ieee80211_band scan_band;
	struct cfg80211_scan_request *scan_request;
	struct ieee80211_vif *scan_vif;
	enum iwl_scan_type scan_type;
	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
	u8 mgmt_tx_ant;

	/* max number of station keys */
	u8 sta_key_max_num;

	bool new_scan_threshold_behaviour;

	bool wowlan;

	/* EEPROM MAC addresses */
	struct mac_address addresses[2];

	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];

	__le16 switch_channel;

	/* runtime sensitivity / chain-noise calibration state */
	u8 start_calib;
	struct iwl_sensitivity_data sensitivity_data;
	struct iwl_chain_noise_data chain_noise_data;
	__le16 sensitivity_tbl[HD_TABLE_SIZE];
	__le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES];

	struct iwl_ht_config current_ht_config;

	/* Rate scaling data */
	u8 retry_rate;

	int activity_timer_active;

	struct iwl_power_mgr power_data;
	struct iwl_tt_mgmt thermal_throttle;

	/* station table variables */
	int num_stations;
	struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
	unsigned long ucode_key_table;
	struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
	atomic_t num_aux_in_flight;

	u8 mac80211_registered;

	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;

	enum nl80211_iftype iw_mode;

	/* Last Rx'd beacon timestamp */
	u64 timestamp;

	/* latest statistics notification from the firmware, under 'lock' */
	struct {
		__le32 flag;
		struct statistics_general_common common;
		struct statistics_rx_non_phy rx_non_phy;
		struct statistics_rx_phy rx_ofdm;
		struct statistics_rx_ht_phy rx_ofdm_ht;
		struct statistics_rx_phy rx_cck;
		struct statistics_tx tx;
#ifdef CONFIG_IWLWIFI_DEBUGFS
		struct statistics_bt_activity bt_activity;
		__le32 num_bt_kills, accum_num_bt_kills;
#endif
		spinlock_t lock;
	} statistics;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* accumulated/delta/max views of the statistics, debugfs only */
	struct {
		struct statistics_general_common common;
		struct statistics_rx_non_phy rx_non_phy;
		struct statistics_rx_phy rx_ofdm;
		struct statistics_rx_ht_phy rx_ofdm_ht;
		struct statistics_rx_phy rx_cck;
		struct statistics_tx tx;
		struct statistics_bt_activity bt_activity;
	} accum_stats, delta_stats, max_delta_stats;
#endif

	/*
	 * reporting the number of tids has AGG on. 0 means
	 * no AGGREGATION
	 */
	u8 agg_tids_count;

	struct iwl_rx_phy_res last_phy_res;
	u32 ampdu_ref;
	bool last_phy_res_valid;

	/*
	 * chain noise reset and gain commands are the
	 * two extra calibration commands follows the standard
	 * phy calibration commands
	 */
	u8 phy_calib_chain_noise_reset_cmd;
	u8 phy_calib_chain_noise_gain_cmd;

	/* counts reply_tx error */
	struct reply_tx_error_statistics reply_tx_stats;
	struct reply_agg_tx_error_statistics reply_agg_tx_stats;

	/* bt coex */
	u8 bt_enable_flag;
	u8 bt_status;
	u8 bt_traffic_load, last_bt_traffic_load;
	bool bt_ch_announce;
	bool bt_full_concurrent;
	bool bt_ant_couple_ok;
	__le32 kill_ack_mask;
	__le32 kill_cts_mask;
	__le16 bt_valid;
	bool reduced_txpower;
	u16 bt_on_thresh;
	u16 bt_duration;
	u16 dynamic_frag_thresh;
	u8 bt_ci_compliance;
	struct work_struct bt_traffic_change_work;
	bool bt_enable_pspoll;
	struct iwl_rxon_context *cur_rssi_ctx;
	bool bt_is_sco;

	/* deferred work items */
	struct work_struct restart;
	struct work_struct scan_completed;
	struct work_struct abort_scan;

	struct work_struct beacon_update;
	struct iwl_rxon_context *beacon_ctx;
	struct sk_buff *beacon_skb;
	void *beacon_cmd;

	struct work_struct tt_work;
	struct work_struct ct_enter;
	struct work_struct ct_exit;
	struct work_struct start_internal_scan;
	struct work_struct tx_flush;
	struct work_struct bt_full_concurrency;
	struct work_struct bt_runtime_config;

	struct delayed_work scan_check;

	/* TX Power settings */
	s8 tx_power_user_lmt;
	s8 tx_power_next;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* debugfs */
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	bool disable_ht40;
	void *wowlan_sram;
#endif /* CONFIG_IWLWIFI_DEBUGFS */

	struct iwl_nvm_data *nvm_data;
	/* eeprom blob for debugfs */
	u8 *eeprom_blob;
	size_t eeprom_blob_size;

	struct work_struct txpower_work;
	u32 calib_disabled;	/* IWL_*_CALIB_DISABLED bits */
	struct work_struct run_time_calib_work;
	struct timer_list statistics_periodic;
	struct timer_list ucode_trace;

	struct iwl_event_log event_log;

#ifdef CONFIG_IWLWIFI_LEDS
	struct led_classdev led;
	unsigned long blink_on, blink_off;
	bool led_registered;
#endif

	/* WoWLAN GTK rekey data */
	u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
	__le64 replay_ctr;
	__le16 last_seq_ctl;
	bool have_rekey_data;
#ifdef CONFIG_PM_SLEEP
	struct wiphy_wowlan_support wowlan_support;
#endif

	/* device_pointers: pointers to ucode event tables */
	struct {
		u32 error_event_table;
		u32 log_event_table;
	} device_pointers;

	/* indicator of loaded ucode image */
	enum iwl_ucode_type cur_ucode;
}; /*iwl_priv */
915
916static inline struct iwl_rxon_context *
917iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
918{
919 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
920
921 return vif_priv->ctx;
922}
923
/*
 * Iterate 'ctx' over every RXON context whose bit is set in
 * priv->valid_contexts.  Note the expansion ends in a bare 'if': an
 * 'else' written directly after the loop body would bind to it.
 */
#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))
928
929static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
930{
931 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
932}
933
/* As iwl_is_associated_ctx(), but looked up by context id. */
static inline int iwl_is_associated(struct iwl_priv *priv,
				    enum iwl_rxon_context_id ctxid)
{
	return iwl_is_associated_ctx(&priv->contexts[ctxid]);
}

/* True if any valid context (BSS or PAN) is currently associated. */
static inline int iwl_is_any_associated(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	for_each_context(priv, ctx)
		if (iwl_is_associated_ctx(ctx))
			return true;
	return false;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
new file mode 100644
index 000000000000..34b41e5f7cfc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -0,0 +1,690 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27/*
28 * DVM device-specific data & functions
29 */
30#include "iwl-io.h"
31#include "iwl-prph.h"
32#include "iwl-eeprom-parse.h"
33
34#include "agn.h"
35#include "dev.h"
36#include "commands.h"
37
38
39/*
40 * 1000 series
41 * ===========
42 */
43
44/*
45 * For 1000, use advance thermal throttling critical temperature threshold,
46 * but legacy thermal management implementation for now.
47 * This is for the reason of 1000 uCode using advance thermal throttling API
48 * but not implement ct_kill_exit based on ct_kill exit temperature
49 * so the thermal throttling will still based on legacy thermal throttling
50 * management.
51 * The code here need to be modified once 1000 uCode has the advanced thermal
52 * throttling algorithm in place
53 */
54static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
55{
56 /* want Celsius */
57 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
58 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
59}
60
/* NIC configuration for 1000 series */
static void iwl1000_nic_config(struct iwl_priv *priv)
{
	/* Setting digital SVR for 1000 card to 1.32V */
	/* locking is acquired in iwl_set_bits_mask_prph() function */
	iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
			       APMG_SVR_DIGITAL_VOLTAGE_1_32,
			       ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
70
/**
 * iwl_beacon_time_mask_low - mask for the low (interval) part of beacon time
 * @priv: pointer to iwl_priv data structure (unused)
 * @tsf_bits: number of low-order bits occupied by the interval part
 */
static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
					   u16 tsf_bits)
{
	return (1 << tsf_bits) - 1;
}

/**
 * iwl_beacon_time_mask_high - mask for the high (extended) part of beacon time
 * @priv: pointer to iwl_priv data structure (unused)
 * @tsf_bits: number of low-order bits below the extended part
 */
static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
					    u16 tsf_bits)
{
	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
}
92
/*
 * extended beacon time format
 * time in usec will be changed into a 32-bit value in extended:internal format
 * the extended part is the beacon counts
 * the internal part is the time in usec within one beacon interval
 */
static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec,
				u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	/* degenerate input (no interval, or zero time) encodes to 0 */
	if (!interval || !usec)
		return 0;

	/* whole beacon intervals, clipped to the width of the extended field */
	quot = (usec / interval) &
	       (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
		IWLAGN_EXT_BEACON_TIME_POS);
	/* leftover usec, clipped to the width of the internal field */
	rem = (usec % interval) &
	      iwl_beacon_time_mask_low(priv, IWLAGN_EXT_BEACON_TIME_POS);

	return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
}
117
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 *
 * Combine 'base' and 'addon' (both in the extended:internal beacon-time
 * format built by iwl_usecs_to_beacons()) into one little-endian value.
 */
static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
				  u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_beacon_time_mask_low(priv,
						       IWLAGN_EXT_BEACON_TIME_POS);
	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
							 IWLAGN_EXT_BEACON_TIME_POS);
	u32 interval = beacon_interval * TIME_UNIT;
	/* sum of the extended (beacon count) parts of both inputs */
	u32 res = (base & iwl_beacon_time_mask_high(priv,
						    IWLAGN_EXT_BEACON_TIME_POS)) +
		  (addon & iwl_beacon_time_mask_high(priv,
						     IWLAGN_EXT_BEACON_TIME_POS));

	/* combine the internal (down-counting usec) parts; when the
	 * subtraction would underflow, or the parts are equal, wrap
	 * within one interval and carry one into the beacon count */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
	} else
		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);

	return cpu_to_le32(res);
}
144
/* Sensitivity calibration bounds for the 1000 series; values are
 * device-specific correlator/energy thresholds (see struct
 * iwl_sensitivity_ranges). */
static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
	.min_nrg_cck = 95,
	.auto_corr_min_ofdm = 90,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 120,
	.auto_corr_min_ofdm_mrc_x1 = 240,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 155,
	.auto_corr_max_ofdm_mrc_x1 = 290,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 170,
	.auto_corr_max_cck_mrc = 400,
	.nrg_th_cck = 95,
	.nrg_th_ofdm = 95,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
168
169static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
170{
171 iwl1000_set_ct_threshold(priv);
172
173 /* Set initial sensitivity parameters */
174 priv->hw_params.sens = &iwl1000_sensitivity;
175}
176
/* DVM configuration for the 1000 series (legacy thermal handling,
 * shared iwlagn temperature reader). */
const struct iwl_dvm_cfg iwl_dvm_1000_cfg = {
	.set_hw_params = iwl1000_hw_set_hw_params,
	.nic_config = iwl1000_nic_config,
	.temperature = iwlagn_temperature,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
};
185
186
187/*
188 * 2000 series
189 * ===========
190 */
191
192static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
193{
194 /* want Celsius */
195 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
196 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
197}
198
/* NIC configuration for 2000 series */
static void iwl2000_nic_config(struct iwl_priv *priv)
{
	/* set radio IQ inversion (semantics per CSR flag name -
	 * NOTE(review): confirm against the CSR documentation) */
	iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
205
/* Sensitivity calibration bounds for the 2000 series (also used by the
 * 105 and 2030 configs below). */
static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
	.min_nrg_cck = 97,
	.auto_corr_min_ofdm = 80,
	.auto_corr_min_ofdm_mrc = 128,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 192,

	.auto_corr_max_ofdm = 145,
	.auto_corr_max_ofdm_mrc = 232,
	.auto_corr_max_ofdm_x1 = 110,
	.auto_corr_max_ofdm_mrc_x1 = 232,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 175,
	.auto_corr_min_cck_mrc = 160,
	.auto_corr_max_cck_mrc = 310,
	.nrg_th_cck = 97,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
229
230static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
231{
232 iwl2000_set_ct_threshold(priv);
233
234 /* Set initial sensitivity parameters */
235 priv->hw_params.sens = &iwl2000_sensitivity;
236}
237
/* DVM configuration for the 2000 series. */
const struct iwl_dvm_cfg iwl_dvm_2000_cfg = {
	.set_hw_params = iwl2000_hw_set_hw_params,
	.nic_config = iwl2000_nic_config,
	.temperature = iwlagn_temperature,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.hd_v2 = true,
	.need_temp_offset_calib = true,
	.temp_offset_v2 = true,
};

/* 105 series: identical to the 2000 config plus advanced power mgmt. */
const struct iwl_dvm_cfg iwl_dvm_105_cfg = {
	.set_hw_params = iwl2000_hw_set_hw_params,
	.nic_config = iwl2000_nic_config,
	.temperature = iwlagn_temperature,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.hd_v2 = true,
	.need_temp_offset_calib = true,
	.temp_offset_v2 = true,
	.adv_pm = true,
};
264
static const struct iwl_dvm_bt_params iwl2030_bt_params = {
	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
	.advanced_bt_coexist = true,
	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
	.bt_sco_disable = true,
	.bt_session_2 = true,
};

/* 2030 series: the 2000 config plus BT coexistence and advanced PM. */
const struct iwl_dvm_cfg iwl_dvm_2030_cfg = {
	.set_hw_params = iwl2000_hw_set_hw_params,
	.nic_config = iwl2000_nic_config,
	.temperature = iwlagn_temperature,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.hd_v2 = true,
	.bt_params = &iwl2030_bt_params,
	.need_temp_offset_calib = true,
	.temp_offset_v2 = true,
	.adv_pm = true,
};
289
290/*
291 * 5000 series
292 * ===========
293 */
294
295/* NIC configuration for 5000 series */
/* Sensitivity calibration bounds for the 5000 series. */
static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
	.min_nrg_cck = 100,
	.auto_corr_min_ofdm = 90,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 120,
	.auto_corr_max_ofdm_mrc_x1 = 240,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,
	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};

/* Sensitivity bounds for the 5150 variant; differs from 5000 mainly in
 * the pinned OFDM x1 thresholds (see the in-line workaround note). */
static const struct iwl_sensitivity_ranges iwl5150_sensitivity = {
	.min_nrg_cck = 95,
	.auto_corr_min_ofdm = 90,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	/* max = min for performance bug in 5150 DSP */
	.auto_corr_max_ofdm_x1 = 105,
	.auto_corr_max_ofdm_mrc_x1 = 220,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 170,
	.auto_corr_max_cck_mrc = 400,
	.nrg_th_cck = 95,
	.nrg_th_ofdm = 95,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
344
/* 5150 voltage-to-temperature conversion coefficient (negative). */
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF	(-5)

/*
 * Compute the 5150 temperature offset from the NVM calibration values.
 * Both NVM fields are little-endian u16; the division is plain signed
 * integer division (voltage promoted to int, truncates toward zero),
 * so with the negative coefficient this is temperature + voltage/5.
 */
static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
	u16 temperature, voltage;

	temperature = le16_to_cpu(priv->nvm_data->kelvin_temperature);
	voltage = le16_to_cpu(priv->nvm_data->kelvin_voltage);

	/* offset = temp - volt / coeff */
	return (s32)(temperature -
		     voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
358
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
	/* convert the Celsius limit to the device's raw (voltage-based)
	 * unit: Kelvin minus the NVM calibration offset ... */
	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
			iwl_temp_calib_to_offset(priv);

	/* ... scaled by the (negative) volt/temp coefficient.  The stored
	 * value is therefore in the same raw unit the firmware reports. */
	priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
}
367
static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
{
	/* want Celsius; note: no ct_kill_exit_threshold on this family */
	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
373
374static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
375{
376 iwl5000_set_ct_threshold(priv);
377
378 /* Set initial sensitivity parameters */
379 priv->hw_params.sens = &iwl5000_sensitivity;
380}
381
382static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
383{
384 iwl5150_set_ct_threshold(priv);
385
386 /* Set initial sensitivity parameters */
387 priv->hw_params.sens = &iwl5150_sensitivity;
388}
389
/* Read the 5150 temperature from the statistics notification, convert
 * the raw voltage reading to Celsius and kick the thermal throttling. */
static void iwl5150_temperature(struct iwl_priv *priv)
{
	u32 vt = 0;
	s32 offset = iwl_temp_calib_to_offset(priv);

	vt = le32_to_cpu(priv->statistics.common.temperature);
	/* NOTE(review): vt is unsigned while the coefficient is a negative
	 * int, so this division/addition works in modular u32 arithmetic -
	 * it matches the shipped driver, but verify against the expected
	 * raw value range before "simplifying" it to signed math. */
	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
	/* now vt hold the temperature in Kelvin */
	priv->temperature = KELVIN_TO_CELSIUS(vt);
	iwl_tt_handler(priv);
}
401
/*
 * Build and send the REPLY_CHANNEL_SWITCH command for the 5000 family.
 * Converts the mac80211 channel-switch request (count/timestamp) into
 * the uCode's extended beacon-time format for the switch moment.
 */
static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
				     struct ieee80211_channel_switch *ch_switch)
{
	/*
	 * MULTI-FIXME
	 * See iwlagn_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl5000_channel_switch_cmd cmd;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_host_cmd hcmd = {
		.id = REPLY_CHANNEL_SWITCH,
		.len = { sizeof(cmd), },
		.data = { &cmd, },
	};

	/* NOTE(review): cmd is not zero-initialized; all fields used below
	 * are assigned explicitly, but confirm the struct has no other
	 * members/padding that reach the device. */
	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
	ch = ch_switch->chandef.chan->hw_value;
	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
		      ctx->active.channel, ch);
	cmd.channel = cpu_to_le16(ch);
	cmd.rxon_flags = ctx->staging.flags;
	cmd.rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
		/* discount beacons that already elapsed since the TSF
		 * the request was stamped with */
		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
				    beacon_interval)) {
			switch_count -= (priv->ucode_beacon_time -
					 tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		/* switch at the next beacon */
		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
	else {
		/* switch 'switch_count' beacon intervals from now */
		switch_time_in_usec =
			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time = iwl_usecs_to_beacons(priv,
							 switch_time_in_usec,
							 beacon_interval);
		cmd.switch_time = iwl_add_beacon_time(priv,
						      priv->ucode_beacon_time,
						      ucode_switch_time,
						      beacon_interval);
	}
	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
		      cmd.switch_time);
	cmd.expect_beacon =
		ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR;

	return iwl_dvm_send_cmd(priv, &hcmd);
}
464
/* DVM configuration for the 5000 series. */
const struct iwl_dvm_cfg iwl_dvm_5000_cfg = {
	.set_hw_params = iwl5000_hw_set_hw_params,
	.set_channel_switch = iwl5000_hw_channel_switch,
	.temperature = iwlagn_temperature,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.no_idle_support = true,
};

/* 5150: as 5000 but with its own voltage-based temperature reader and
 * no crystal calibration data sent to the firmware. */
const struct iwl_dvm_cfg iwl_dvm_5150_cfg = {
	.set_hw_params = iwl5150_hw_set_hw_params,
	.set_channel_switch = iwl5000_hw_channel_switch,
	.temperature = iwl5150_temperature,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.no_idle_support = true,
	.no_xtal_calib = true,
};
483
484
485
486/*
487 * 6000 series
488 * ===========
489 */
490
491static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
492{
493 /* want Celsius */
494 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
495 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
496}
497
/*
 * NIC configuration for 6000 series.
 *
 * Programs the CSR_GP_DRIVER_REG SKU/calibration bits the uCode reads,
 * depending on the exact 6000-family variant.
 */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
	switch (priv->cfg->device_family) {
	case IWL_DEVICE_FAMILY_6005:
	case IWL_DEVICE_FAMILY_6030:
	case IWL_DEVICE_FAMILY_6000:
		/* no extra GP driver register setup needed */
		break;
	case IWL_DEVICE_FAMILY_6000i:
		/* 2x2 IPA phy type */
		iwl_write32(priv->trans, CSR_GP_DRIVER_REG,
			    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
		break;
	case IWL_DEVICE_FAMILY_6050:
		/* Indicate calibration version to uCode. */
		if (priv->nvm_data->calib_version >= 6)
			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
				    CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
		break;
	case IWL_DEVICE_FAMILY_6150:
		/* Indicate calibration version to uCode. */
		if (priv->nvm_data->calib_version >= 6)
			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
				    CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
		/* additionally mark the 6150 1x2 SKU */
		iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
			    CSR_GP_DRIVER_REG_BIT_6050_1x2);
		break;
	default:
		/* unknown family: nothing to program, but warn loudly */
		WARN_ON(1);
	}
}
529
/*
 * Default sensitivity / auto-correlation calibration ranges for the
 * 6000 series; used as the initial values for the runtime sensitivity
 * algorithm.
 */
static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
	.min_nrg_cck = 110,
	.auto_corr_min_ofdm = 80,
	.auto_corr_min_ofdm_mrc = 128,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 192,

	.auto_corr_max_ofdm = 145,
	.auto_corr_max_ofdm_mrc = 232,
	.auto_corr_max_ofdm_x1 = 110,
	.auto_corr_max_ofdm_mrc_x1 = 232,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 175,
	.auto_corr_min_cck_mrc = 160,
	.auto_corr_max_cck_mrc = 310,
	.nrg_th_cck = 110,
	.nrg_th_ofdm = 110,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 336,
	.nrg_th_cca = 62,
};
553
554static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
555{
556 iwl6000_set_ct_threshold(priv);
557
558 /* Set initial sensitivity parameters */
559 priv->hw_params.sens = &iwl6000_sensitivity;
560
561}
562
/*
 * Build and send a REPLY_CHANNEL_SWITCH command for 6000-series devices.
 *
 * Computes the uCode time at which the switch should happen from the CSA
 * count/timestamp in @ch_switch and the current beacon timing.  Returns
 * 0 on success, -ENOMEM if the command buffer cannot be allocated, or
 * the error from iwl_dvm_send_cmd().
 */
static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
				     struct ieee80211_channel_switch *ch_switch)
{
	/*
	 * MULTI-FIXME
	 * See iwlagn_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl6000_channel_switch_cmd *cmd;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_host_cmd hcmd = {
		.id = REPLY_CHANNEL_SWITCH,
		.len = { sizeof(*cmd), },
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int err;

	/* heap-allocate the command and hand it over as a no-copy buffer */
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	hcmd.data[0] = cmd;

	cmd->band = priv->band == IEEE80211_BAND_2GHZ;
	ch = ch_switch->chandef.chan->hw_value;
	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
		      ctx->active.channel, ch);
	cmd->channel = cpu_to_le16(ch);
	cmd->rxon_flags = ctx->staging.flags;
	cmd->rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	/* only the low 32 bits of the 64-bit TSF are used below */
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
		/* drop the beacons that already elapsed since the CSA TSF */
		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
		    beacon_interval)) {
			switch_count -= (priv->ucode_beacon_time -
				tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
	else {
		switch_time_in_usec =
			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time = iwl_usecs_to_beacons(priv,
							 switch_time_in_usec,
							 beacon_interval);
		cmd->switch_time = iwl_add_beacon_time(priv,
						       priv->ucode_beacon_time,
						       ucode_switch_time,
						       beacon_interval);
	}
	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
		      cmd->switch_time);
	/* expect a beacon on the target channel if it is radar-flagged */
	cmd->expect_beacon =
		ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR;

	err = iwl_dvm_send_cmd(priv, &hcmd);
	kfree(cmd);
	return err;
}
634
/* DVM ops/parameters common to the base 6000 series */
const struct iwl_dvm_cfg iwl_dvm_6000_cfg = {
	.set_hw_params = iwl6000_hw_set_hw_params,
	.set_channel_switch = iwl6000_hw_channel_switch,
	.nic_config = iwl6000_nic_config,
	.temperature = iwlagn_temperature,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
};
645
/* 6005: like 6000, but additionally needs temperature-offset calibration */
const struct iwl_dvm_cfg iwl_dvm_6005_cfg = {
	.set_hw_params = iwl6000_hw_set_hw_params,
	.set_channel_switch = iwl6000_hw_channel_switch,
	.nic_config = iwl6000_nic_config,
	.temperature = iwlagn_temperature,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.need_temp_offset_calib = true,
};
657
/* 6050: like 6000, but with a larger chain-noise scale factor */
const struct iwl_dvm_cfg iwl_dvm_6050_cfg = {
	.set_hw_params = iwl6000_hw_set_hw_params,
	.set_channel_switch = iwl6000_hw_channel_switch,
	.nic_config = iwl6000_nic_config,
	.temperature = iwlagn_temperature,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1500,
};
668
/* Advanced BT-coexistence defaults shared by the 6030 configuration */
static const struct iwl_dvm_bt_params iwl6000_bt_params = {
	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
	.advanced_bt_coexist = true,
	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
	.bt_sco_disable = true,
};
677
/*
 * 6030: like 6005 (temp-offset calibration) plus advanced BT coex
 * parameters and advanced power management.
 */
const struct iwl_dvm_cfg iwl_dvm_6030_cfg = {
	.set_hw_params = iwl6000_hw_set_hw_params,
	.set_channel_switch = iwl6000_hw_channel_switch,
	.nic_config = iwl6000_nic_config,
	.temperature = iwlagn_temperature,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.bt_params = &iwl6000_bt_params,
	.need_temp_offset_calib = true,
	.adv_pm = true,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
new file mode 100644
index 000000000000..ca4d6692cc4e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -0,0 +1,223 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <net/mac80211.h>
34#include <linux/etherdevice.h>
35#include <asm/unaligned.h>
36#include "iwl-io.h"
37#include "iwl-trans.h"
38#include "iwl-modparams.h"
39#include "dev.h"
40#include "agn.h"
41
/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 */
/*
 * mac80211 throughput LED-trigger table; .blink_time is the full on+off
 * cycle (twice the per-phase times shown above).
 * NOTE(review): .throughput is presumably in units of 1024 bytes/s to
 * match the Mbps rows above — confirm against the tpt trigger API.
 */
static const struct ieee80211_tpt_blink iwl_blink[] = {
	{ .throughput = 0, .blink_time = 334 },
	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
67
/* Turn the LED on by writing CSR_LED_REG (stale comment said "off") */
void iwlagn_led_enable(struct iwl_priv *priv)
{
	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
}
73
74/*
75 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
76 * Led blink rate analysis showed an average deviation of 20% on 5000 series
77 * and up.
78 * Need to compensate on the led on/off time per HW according to the deviation
79 * to achieve the desired led frequency
80 * The calculation is: (100-averageDeviation)/100 * blinkTime
81 * For code efficiency the calculation will be:
82 * compensation = (100 - averageDeviation) * 64 / 100
83 * NewBlinkTime = (compensation * BlinkTime) / 64
84 */
85static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
86 u8 time, u16 compensation)
87{
88 if (!compensation) {
89 IWL_ERR(priv, "undefined blink compensation: "
90 "use pre-defined blinking time\n");
91 return time;
92 }
93
94 return (u8)((time * compensation) >> 6);
95}
96
/*
 * Send a REPLY_LEDS_CMD to the firmware (asynchronously).
 * Before sending, clear any CSR_LED_REG bits outside the BSM control
 * mask so the register is in a known state for the firmware.
 */
static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_LEDS_CMD,
		.len = { sizeof(struct iwl_led_cmd), },
		.data = { led_cmd, },
		.flags = CMD_ASYNC,
	};
	u32 reg;

	reg = iwl_read32(priv->trans, CSR_LED_REG);
	/* keep only the BSM-control bits set */
	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
		iwl_write32(priv->trans, CSR_LED_REG,
			    reg & CSR_LED_BSM_CTRL_MSK);

	return iwl_dvm_send_cmd(priv, &cmd);
}
114
/*
 * Set led pattern command.
 *
 * @on/@off: requested on/off times (off == 0 requests solid on).
 * The times are scaled by the per-HW blink compensation before being
 * sent asynchronously to the firmware.  Returns -EBUSY before the
 * device is ready, 0 if the pattern is unchanged or on success.
 */
static int iwl_led_cmd(struct iwl_priv *priv,
		       unsigned long on,
		       unsigned long off)
{
	struct iwl_led_cmd led_cmd = {
		.id = IWL_LED_LINK,
		.interval = IWL_DEF_LED_INTRVL
	};
	int ret;

	/* device not up yet */
	if (!test_bit(STATUS_READY, &priv->status))
		return -EBUSY;

	/* nothing to do if this pattern is already programmed */
	if (priv->blink_on == on && priv->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IWL_LED_SOLID;
	}

	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
		      priv->cfg->base_params->led_compensation);
	led_cmd.on = iwl_blink_compensation(priv, on,
				priv->cfg->base_params->led_compensation);
	led_cmd.off = iwl_blink_compensation(priv, off,
				priv->cfg->base_params->led_compensation);

	ret = iwl_send_led_cmd(priv, &led_cmd);
	if (!ret) {
		/* remember the programmed pattern for the dedup check above */
		priv->blink_on = on;
		priv->blink_off = off;
	}
	return ret;
}
151
152static void iwl_led_brightness_set(struct led_classdev *led_cdev,
153 enum led_brightness brightness)
154{
155 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
156 unsigned long on = 0;
157
158 if (brightness > 0)
159 on = IWL_LED_SOLID;
160
161 iwl_led_cmd(priv, on, 0);
162}
163
164static int iwl_led_blink_set(struct led_classdev *led_cdev,
165 unsigned long *delay_on,
166 unsigned long *delay_off)
167{
168 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
169
170 return iwl_led_cmd(priv, *delay_on, *delay_off);
171}
172
173void iwl_leds_init(struct iwl_priv *priv)
174{
175 int mode = iwlwifi_mod_params.led_mode;
176 int ret;
177
178 if (mode == IWL_LED_DISABLE) {
179 IWL_INFO(priv, "Led disabled\n");
180 return;
181 }
182 if (mode == IWL_LED_DEFAULT)
183 mode = priv->cfg->led_mode;
184
185 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
186 wiphy_name(priv->hw->wiphy));
187 priv->led.brightness_set = iwl_led_brightness_set;
188 priv->led.blink_set = iwl_led_blink_set;
189 priv->led.max_brightness = 1;
190
191 switch (mode) {
192 case IWL_LED_DEFAULT:
193 WARN_ON(1);
194 break;
195 case IWL_LED_BLINK:
196 priv->led.default_trigger =
197 ieee80211_create_tpt_led_trigger(priv->hw,
198 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
199 iwl_blink, ARRAY_SIZE(iwl_blink));
200 break;
201 case IWL_LED_RF_STATE:
202 priv->led.default_trigger =
203 ieee80211_get_radio_led_name(priv->hw);
204 break;
205 }
206
207 ret = led_classdev_register(priv->trans->dev, &priv->led);
208 if (ret) {
209 kfree(priv->led.name);
210 return;
211 }
212
213 priv->led_registered = true;
214}
215
216void iwl_leds_exit(struct iwl_priv *priv)
217{
218 if (!priv->led_registered)
219 return;
220
221 led_classdev_unregister(&priv->led);
222 kfree(priv->led.name);
223}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
new file mode 100644
index 000000000000..1c6b2252d0f2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
@@ -0,0 +1,55 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
#ifndef __iwl_leds_h__
#define __iwl_leds_h__


struct iwl_priv;

/* LED command "on" value requesting solid on (no blinking) */
#define IWL_LED_SOLID 11
/* default LED command interval */
#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)

/* LED command id values */
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)

#ifdef CONFIG_IWLWIFI_LEDS
void iwlagn_led_enable(struct iwl_priv *priv);
void iwl_leds_init(struct iwl_priv *priv);
void iwl_leds_exit(struct iwl_priv *priv);
#else
/* LED support compiled out: all hooks become no-ops */
static inline void iwlagn_led_enable(struct iwl_priv *priv)
{
}
static inline void iwl_leds_init(struct iwl_priv *priv)
{
}
static inline void iwl_leds_exit(struct iwl_priv *priv)
{
}
#endif

#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
new file mode 100644
index 000000000000..e18629a16fb0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -0,0 +1,1300 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/sched.h>
33#include <net/mac80211.h>
34
35#include "iwl-io.h"
36#include "iwl-agn-hw.h"
37#include "iwl-trans.h"
38#include "iwl-modparams.h"
39
40#include "dev.h"
41#include "agn.h"
42
43int iwlagn_hw_valid_rtc_data_addr(u32 addr)
44{
45 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
46 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
47}
48
/*
 * Send the current user TX power limit to the firmware.
 *
 * The command expects half-dBm units, so the user limit (dBm) is
 * doubled and then clamped to the EEPROM/NVM maximum.  Returns the
 * host-command result, or -EAGAIN when a HW scan is in progress.
 *
 * NOTE(review): only global_lmt/flags/srv_chan_lmt are initialized on
 * this stack struct — presumably the remaining bytes are ignored by
 * the uCode; confirm against the command layout.
 */
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	/* half dBm need to multiply */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) {
		/*
		 * For the newer devices which using enhanced/extend tx power
		 * table in EEPROM, the format is in half dBm. driver need to
		 * convert to dBm format before report to mac80211.
		 * By doing so, there is a possibility of 1/2 dBm resolution
		 * lost. driver will perform "round-up" operation before
		 * reporting, but it will cause 1/2 dBm tx power over the
		 * regulatory limit. Perform the checking here, if the
		 * "tx_power_user_lmt" is higher than EEPROM value (in
		 * half-dBm format), lower the tx power based on EEPROM
		 */
		tx_power_cmd.global_lmt =
			priv->nvm_data->max_tx_pwr_half_dbm;
	}
	tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;

	/* API v1 used a different command id for this request */
	if (IWL_UCODE_API(priv->fw->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
			sizeof(tx_power_cmd), &tx_power_cmd);
}
87
/*
 * Cache the temperature from the latest statistics notification and run
 * the thermal-throttling handler.  Caller must hold statistics.lock.
 */
void iwlagn_temperature(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->statistics.lock);

	/* store temperature from correct statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
	iwl_tt_handler(priv);
}
96
97int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
98{
99 int idx = 0;
100 int band_offset = 0;
101
102 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
103 if (rate_n_flags & RATE_MCS_HT_MSK) {
104 idx = (rate_n_flags & 0xff);
105 return idx;
106 /* Legacy rate format, search for match in table */
107 } else {
108 if (band == IEEE80211_BAND_5GHZ)
109 band_offset = IWL_FIRST_OFDM_RATE;
110 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
111 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
112 return idx - band_offset;
113 }
114
115 return -1;
116}
117
118int iwlagn_manage_ibss_station(struct iwl_priv *priv,
119 struct ieee80211_vif *vif, bool add)
120{
121 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
122
123 if (add)
124 return iwlagn_add_bssid_station(priv, vif_priv->ctx,
125 vif->bss_conf.bssid,
126 &vif_priv->ibss_bssid_sta_id);
127 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
128 vif->bss_conf.bssid);
129}
130
/**
 * iwlagn_txfifo_flush - send REPLY_TXFIFO_FLUSH command to uCode
 * @priv: driver private data
 * @scd_q_msk: explicit scheduler-queue mask; 0 selects a default mask
 *	built from the active contexts and HT capability
 *
 * pre-requirements:
 *  1. acquire mutex before calling
 *  2. make sure rf is on and not in exit state
 */
int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
{
	/* uCode API > 2 takes the v3 (32-bit mask) command layout */
	struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = {
		.flush_control = cpu_to_le16(IWL_DROP_ALL),
	};
	struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = {
		.flush_control = cpu_to_le16(IWL_DROP_ALL),
	};

	u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
			    IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK;

	/* also flush PAN queues when a PAN context is active */
	if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
		queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK |
				 IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK |
				 IWL_PAN_SCD_MGMT_MSK |
				 IWL_PAN_SCD_MULTICAST_MSK;

	if (priv->nvm_data->sku_cap_11n_enable)
		queue_control |= IWL_AGG_TX_QUEUE_MSK;

	/* an explicit caller mask overrides the default selection */
	if (scd_q_msk)
		queue_control = scd_q_msk;

	IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control);
	flush_cmd_v3.queue_control = cpu_to_le32(queue_control);
	flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control);

	if (IWL_UCODE_API(priv->fw->ucode_ver) > 2)
		return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
					    sizeof(flush_cmd_v3),
					    &flush_cmd_v3);
	return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
				    sizeof(flush_cmd_v2), &flush_cmd_v2);
}
173
/*
 * Flush all TX FIFOs and wait for the transport queues to drain.
 * mac80211 queues are stopped around the flush so no new frames are
 * queued meanwhile; they are always woken again before returning.
 */
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	ieee80211_stop_queues(priv->hw);
	if (iwlagn_txfifo_flush(priv, 0)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}
188
189/*
190 * BT coex
191 */
/* Normal TDM: default 3-wire coex decision lookup table */
static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};
207
208
/* Loose Coex: decision lookup table for loose coexistence mode */
static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};
224
/* Full concurrency: decision lookup table when BT and WiFi run fully
 * concurrently (separate antennas) */
static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};
240
/*
 * Build and send the advanced BT coex configuration (REPLY_BT_CONFIG).
 *
 * Chooses the v1 or v2 command layout based on bt_session_2, picks the
 * lookup table for full-concurrency vs 3-wire operation, and translates
 * the driver's current coex state (kill masks, pspoll, channel announce)
 * into command flags.  Bails out if no bt_params are configured.
 */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	/* NOTE(review): only prio_boost/tx/rx and .basic are set on these
	 * stack structs before sending — presumably that covers the whole
	 * layout; verify against the v1/v2 struct definitions. */
	struct iwl_bt_cmd_v1 bt_cmd_v1;
	struct iwl_bt_cmd_v2 bt_cmd_v2;
	int ret;

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
			sizeof(basic.bt3_lookup_table));

	if (priv->lib->bt_params) {
		/*
		 * newer generation of devices (2000 series and newer)
		 * use the version 2 of the bt command
		 * we need to make sure sending the host command
		 * with correct data structure to avoid uCode assert
		 */
		if (priv->lib->bt_params->bt_session_2) {
			bt_cmd_v2.prio_boost = cpu_to_le32(
				priv->lib->bt_params->bt_prio_boost);
			bt_cmd_v2.tx_prio_boost = 0;
			bt_cmd_v2.rx_prio_boost = 0;
		} else {
			/* older version only has 8 bits */
			WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF);
			bt_cmd_v1.prio_boost =
				priv->lib->bt_params->bt_prio_boost;
			bt_cmd_v1.tx_prio_boost = 0;
			bt_cmd_v1.rx_prio_boost = 0;
		}
	} else {
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	/*
	 * Possible situations when BT needs to take over for receive,
	 * at the same time where STA needs to response to AP's frame(s),
	 * reduce the tx power of the required response frames, by that,
	 * allow the concurrent BT receive & WiFi transmit
	 * (BT - ANT A, WiFi -ANT B), without interference to one another
	 *
	 * Reduced tx power apply to control frames only (ACK/Back/CTS)
	 * when indicated by the BT config command
	 */
	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	if (priv->reduced_txpower)
		basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!iwlwifi_mod_params.bt_coex_active ||
	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;

		if (!priv->bt_enable_pspoll)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
		else
			basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	priv->bt_enable_flag = basic.flags;
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
			sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
			sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	if (priv->lib->bt_params->bt_session_2) {
		memcpy(&bt_cmd_v2.basic, &basic,
			sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			0, sizeof(bt_cmd_v2), &bt_cmd_v2);
	} else {
		memcpy(&bt_cmd_v1.basic, &basic,
			sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			0, sizeof(bt_cmd_v1), &bt_cmd_v1);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");

}
347
/*
 * Enable or disable RSSI reporting for BT-coex dynamic PS-poll.
 *
 * When @rssi_ena is false, or when an associated AP/GO interface is
 * active, any current RSSI monitoring is torn down.  Otherwise RSSI
 * reports are moved to the (first) associated station interface, with
 * priv->cur_rssi_ctx tracking which context currently has them.
 * Caller must hold priv->mutex.
 */
void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
{
	struct iwl_rxon_context *ctx, *found_ctx = NULL;
	bool found_ap = false;

	lockdep_assert_held(&priv->mutex);

	/* Check whether AP or GO mode is active. */
	if (rssi_ena) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
			    iwl_is_associated_ctx(ctx)) {
				found_ap = true;
				break;
			}
		}
	}

	/*
	 * If disable was received or If GO/AP mode, disable RSSI
	 * measurements.
	 */
	if (!rssi_ena || found_ap) {
		if (priv->cur_rssi_ctx) {
			ctx = priv->cur_rssi_ctx;
			ieee80211_disable_rssi_reports(ctx->vif);
			priv->cur_rssi_ctx = NULL;
		}
		return;
	}

	/*
	 * If rssi measurements need to be enabled, consider all cases now.
	 * Figure out how many contexts are active.
	 */
	for_each_context(priv, ctx) {
		if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
		    iwl_is_associated_ctx(ctx)) {
			found_ctx = ctx;
			break;
		}
	}

	/*
	 * rssi monitor already enabled for the correct interface...nothing
	 * to do.
	 */
	if (found_ctx == priv->cur_rssi_ctx)
		return;

	/*
	 * Figure out if rssi monitor is currently enabled, and needs
	 * to be changed. If rssi monitor is already enabled, disable
	 * it first else just enable rssi measurements on the
	 * interface found above.
	 */
	if (priv->cur_rssi_ctx) {
		ctx = priv->cur_rssi_ctx;
		if (ctx->vif)
			ieee80211_disable_rssi_reports(ctx->vif);
	}

	priv->cur_rssi_ctx = found_ctx;

	if (!found_ctx)
		return;

	ieee80211_enable_rssi_reports(found_ctx->vif,
			IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
			IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
}
419
420static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
421{
422 return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
423 BT_UART_MSG_FRAME3SCOESCO_POS;
424}
425
/*
 * Worker: react to a BT traffic-load change by picking an SMPS state,
 * refreshing chain flags, and adjusting RSSI monitoring for dynamic
 * PS-poll.  Does nothing while coex is disabled, and defers the
 * firmware update while a HW scan is active (the scan-complete path
 * reschedules this work).
 */
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that since only bad consequence
	 * can be not matching debug print with actual state.
	 */
	IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	/* map BT traffic load to the SMPS mode to request (-1 = none) */
	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	/*
	 * We can not send command to firmware while scanning. When the scan
	 * complete we will schedule this work again. We do check with mutex
	 * locked to prevent new scan request to arrive. We do not check
	 * STATUS_SCANNING to avoid race when queue_work two times from
	 * different notifications, but quit and not perform any work at all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->status))
		goto out;

	iwl_update_chain_flags(priv);

	if (smps_request != -1) {
		priv->current_ht_config.smps = smps_request;
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}

	/*
	 * Dynamic PS poll related functionality. Adjust RSSI measurements if
	 * necessary.
	 */
	iwlagn_bt_coex_rssi_monitor(priv);
out:
	mutex_unlock(&priv->mutex);
}
496
497/*
498 * If BT sco traffic, and RSSI monitor is enabled, move measurements to the
499 * correct interface or disable it if this is the last interface to be
500 * removed.
501 */
502void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
503{
504 if (priv->bt_is_sco &&
505 priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
506 iwlagn_bt_adjust_rssi_monitor(priv, true);
507 else
508 iwlagn_bt_adjust_rssi_monitor(priv, false);
509}
510
/*
 * Dump the last BT UART message (frames 1-7) to the coex debug log.
 * Each field is isolated with its _MSK and shifted down by its _POS so
 * only the decoded value is printed.
 */
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				 struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X\n",
			(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
				BT_UART_MSG_FRAME1MSGTYPE_POS,
			(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
				BT_UART_MSG_FRAME1SSN_POS,
			(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
				BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X\n",
			(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
				BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
			(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
				BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
			(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
				BT_UART_MSG_FRAME2CHLSEQN_POS,
			(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
				BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
			(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
				BT_UART_MSG_FRAME3SCOESCO_POS,
			(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
				BT_UART_MSG_FRAME3SNIFF_POS,
			(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
				BT_UART_MSG_FRAME3A2DP_POS,
			(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
				BT_UART_MSG_FRAME3ACL_POS,
			(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
				BT_UART_MSG_FRAME3MASTER_POS,
			(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
				BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
			(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
				BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X\n",
			(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
				BT_UART_MSG_FRAME5TXACTIVITY_POS,
			(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
				BT_UART_MSG_FRAME5RXACTIVITY_POS,
			(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
				BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
			(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
				BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
			(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
				BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
			(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
				BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
			(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
				BT_UART_MSG_FRAME7PAGE_POS,
			(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
				BT_UART_MSG_FRAME7INQUIRY_POS,
			(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
				BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
579
/*
 * Select the ACK/CTS kill masks to send to the uCode based on the BT
 * activity reported in the UART message.  Returns true when the masks
 * changed and a new runtime BT config command must be scheduled.
 */
static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	bool need_update = false;
	/* while reduced tx power is active, keep the REDUCE masks */
	u8 kill_msk = IWL_BT_KILL_REDUCE;
	/* mask tables indexed by the IWL_BT_KILL_* selector values */
	static const __le32 bt_kill_ack_msg[3] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
	static const __le32 bt_kill_cts_msg[3] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};

	/* otherwise pick OVERRIDE when SCO/eSCO traffic is present */
	if (!priv->reduced_txpower)
		kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
			? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		/* flag both masks valid so the next bt config uploads them */
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
		need_update = true;
	}
	return need_update;
}
607
/*
 * Upon RSSI changes, send a bt config command with the following changes:
 *  1. enable/disable "reduced control frames tx power"
 *  2. update the "kill_ack_mask" and "kill_cts_mask"
 *
 * If "reduced tx power" is enabled, the uCode shall:
 *  1. reduce the ACK/Back/CTS rate to 6Mbps
 *  2. not use duplicate 20/40MHz mode
 */
static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	bool need_update = false;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int ave_rssi;

	/* only the BSS context in station mode participates */
	if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
		IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
		return false;
	}

	ave_rssi = ieee80211_ave_rssi(ctx->vif);
	if (!ave_rssi) {
		/* no rssi data, no changes to reduce tx power */
		IWL_DEBUG_COEX(priv, "no rssi data available\n");
		return need_update;
	}
	/*
	 * Enable reduced tx power only when: not already reduced, the PAN
	 * context is not associated, RSSI is above the enable threshold,
	 * and BT carries only ACL/OBEX traffic (no SCO/eSCO/sniff/A2DP).
	 */
	if (!priv->reduced_txpower &&
	    !iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
	    (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) &&
	    (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
	     BT_UART_MSG_FRAME3OBEX_MSK)) &&
	    !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
	     BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) {
		/* enabling reduced tx power */
		priv->reduced_txpower = true;
		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
		need_update = true;
	} else if (priv->reduced_txpower &&
	    (iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
	    (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) ||
	    (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
	    BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
	    !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
	     BT_UART_MSG_FRAME3OBEX_MSK)))) {
		/* disable reduced tx power */
		priv->reduced_txpower = false;
		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
		need_update = true;
	}

	return need_update;
}
661
/*
 * Handler for the REPLY_BT_COEX_PROFILE_NOTIF notification from the
 * uCode: records BT status and traffic load, then schedules the
 * traffic-change and runtime-config work items as needed.
 */
static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
	IWL_DEBUG_COEX(priv, "    status: %d\n", coex->bt_status);
	IWL_DEBUG_COEX(priv, "    traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_COEX(priv, "    CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	/* snapshot the current load before it may be overwritten below */
	priv->last_bt_traffic_load = priv->bt_traffic_load;
	priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);

	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					/* no channel announcement: assume worst case */
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	/* schedule to send runtime bt_config */
	/* check reduce power before change ack/cts kill mask */
	if (iwlagn_fill_txpower_mode(priv, uart_msg) ||
	    iwlagn_set_kill_msk(priv, uart_msg))
		queue_work(priv->workqueue, &priv->bt_runtime_config);


	/* FIXME: based on notification, adjust the prio_boost */

	priv->bt_ci_compliance = coex->bt_ci_compliance;
}
717
/* Register the BT coex profile notification handler in the rx dispatch table */
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}
723
/* Initialize the deferred BT traffic-change work item */
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}
729
/* Cancel the deferred BT traffic-change work and wait for it to finish */
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}
734
735static bool is_single_rx_stream(struct iwl_priv *priv)
736{
737 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
738 priv->current_ht_config.single_chain_sufficient;
739}
740
/* Rx chain counts used by iwl_get_{active,idle}_rx_chain_count() */
#define IWL_NUM_RX_CHAINS_MULTIPLE 3
#define IWL_NUM_RX_CHAINS_SINGLE 2
#define IWL_NUM_IDLE_CHAINS_DUAL 2
#define IWL_NUM_IDLE_CHAINS_SINGLE 1
745
746/*
747 * Determine how many receiver/antenna chains to use.
748 *
749 * More provides better reception via diversity. Fewer saves power
750 * at the expense of throughput, but only when not in powersave to
751 * start with.
752 *
753 * MIMO (dual stream) requires at least 2, but works better with 3.
754 * This does not determine *which* chains to use, just how many.
755 */
756static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
757{
758 if (priv->lib->bt_params &&
759 priv->lib->bt_params->advanced_bt_coexist &&
760 (priv->bt_full_concurrent ||
761 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
762 /*
763 * only use chain 'A' in bt high traffic load or
764 * full concurrency mode
765 */
766 return IWL_NUM_RX_CHAINS_SINGLE;
767 }
768 /* # of Rx chains to use when expecting MIMO. */
769 if (is_single_rx_stream(priv))
770 return IWL_NUM_RX_CHAINS_SINGLE;
771 else
772 return IWL_NUM_RX_CHAINS_MULTIPLE;
773}
774
775/*
776 * When we are in power saving mode, unless device support spatial
777 * multiplexing power save, use the active count for rx chain count.
778 */
779static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
780{
781 /* # Rx chains when idling, depending on SMPS mode */
782 switch (priv->current_ht_config.smps) {
783 case IEEE80211_SMPS_STATIC:
784 case IEEE80211_SMPS_DYNAMIC:
785 return IWL_NUM_IDLE_CHAINS_SINGLE;
786 case IEEE80211_SMPS_AUTOMATIC:
787 case IEEE80211_SMPS_OFF:
788 return active_cnt;
789 default:
790 WARN(1, "invalid SMPS mode %d",
791 priv->current_ht_config.smps);
792 return active_cnt;
793 }
794}
795
796/* up to 4 chains */
797static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
798{
799 u8 res;
800 res = (chain_bitmap & BIT(0)) >> 0;
801 res += (chain_bitmap & BIT(1)) >> 1;
802 res += (chain_bitmap & BIT(2)) >> 2;
803 res += (chain_bitmap & BIT(3)) >> 3;
804 return res;
805}
806
/**
 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 * @priv: driver private data
 * @ctx: RXON context whose staging image is updated
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->nvm_data->valid_rx_ant;

	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	/* force MIMO only with multiple usable chains and not in power-save */
	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
875
876u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
877{
878 int i;
879 u8 ind = ant;
880
881 if (priv->band == IEEE80211_BAND_2GHZ &&
882 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
883 return 0;
884
885 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
886 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
887 if (valid & BIT(ind))
888 return ind;
889 }
890 return ant;
891}
892
893#ifdef CONFIG_PM_SLEEP
/* Convert a TKIP phase-1 key to the little-endian layout the uCode expects */
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWLAGN_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}
901
/* Context passed to iwlagn_wowlan_program_keys() via ieee80211_iter_keys() */
struct wowlan_key_data {
	struct iwl_rxon_context *ctx;	/* RXON context the keys belong to */
	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;	/* collected RSC/TSC counters */
	struct iwlagn_wowlan_tkip_params_cmd *tkip;	/* collected TKIP key material */
	const u8 *bssid;	/* BSSID for TKIP phase-1 key derivation */
	bool error, use_rsc_tsc, use_tkip;	/* iteration results */
};
909
910
/*
 * Key iterator callback (for ieee80211_iter_keys) used on suspend:
 * programs each key into the uCode and collects the TSC/RSC counters
 * and TKIP key material the WoWLAN firmware needs.
 *
 * _data points to a struct wowlan_key_data; on a programming failure
 * data->error is set but iteration continues for the remaining keys.
 */
static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta,
				struct ieee80211_key_conf *key,
				void *_data)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct wowlan_key_data *data = _data;
	struct iwl_rxon_context *ctx = data->ctx;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwlagn_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWLAGN_P1K_SIZE];
	int ret, i;

	mutex_lock(&priv->mutex);

	/* WEP group keys use the default key slots, all others are dynamic */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	     !sta && !ctx->key_mapping_keys)
		ret = iwl_set_default_wep_key(priv, ctx, key);
	else
		ret = iwl_set_dynamic_key(priv, ctx, key, sta);

	if (ret) {
		IWL_ERR(priv, "Error setting key during suspend!\n");
		data->error = true;
	}

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			/* pairwise key: collect TX counters too */
			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			ieee80211_get_key_tx_seq(key, &seq);
			tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);

			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
			iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWLAGN_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		/* pre-compute phase-1 keys for the current and next iv32 */
		ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, data->bssid,
					  cur_rx_iv32 + 1, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWLAGN_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			/* pn aliases seq.ccmp.pn, filled by the call below */
			u8 *pn = seq.ccmp.pn;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			ieee80211_get_key_tx_seq(key, &seq);
			aes_tx_sc->pn = cpu_to_le64(
					(u64)pn[5] |
					((u64)pn[4] << 8) |
					((u64)pn[3] << 16) |
					((u64)pn[2] << 24) |
					((u64)pn[1] << 32) |
					((u64)pn[0] << 40));
		} else
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			u8 *pn = seq.ccmp.pn;

			ieee80211_get_key_rx_seq(key, i, &seq);
			aes_sc[i].pn = cpu_to_le64(
					(u64)pn[5] |
					((u64)pn[4] << 8) |
					((u64)pn[3] << 16) |
					((u64)pn[2] << 24) |
					((u64)pn[1] << 32) |
					((u64)pn[0] << 40));
		}
		data->use_rsc_tsc = true;
		break;
	}

	mutex_unlock(&priv->mutex);
}
1037
/*
 * Upload the WoWLAN wakeup patterns to the uCode.  Builds a single
 * REPLY_WOWLAN_PATTERNS command holding all cfg80211 patterns and sends
 * it NOCOPY (the allocated buffer is freed once the command completes).
 *
 * Returns 0 on success or when there are no patterns; -ENOMEM or the
 * command's error code otherwise.
 */
int iwlagn_send_patterns(struct iwl_priv *priv,
			struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_WOWLAN_PATTERNS,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int i, err;

	if (!wowlan->n_patterns)
		return 0;

	cmd.len[0] = sizeof(*pattern_cmd) +
		wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);

	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
	if (!pattern_cmd)
		return -ENOMEM;

	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);

	for (i = 0; i < wowlan->n_patterns; i++) {
		/* the mask carries one bit per pattern byte */
		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);

		memcpy(&pattern_cmd->patterns[i].mask,
			wowlan->patterns[i].mask, mask_len);
		memcpy(&pattern_cmd->patterns[i].pattern,
			wowlan->patterns[i].pattern,
			wowlan->patterns[i].pattern_len);
		pattern_cmd->patterns[i].mask_size = mask_len;
		pattern_cmd->patterns[i].pattern_size =
			wowlan->patterns[i].pattern_len;
	}

	cmd.data[0] = pattern_cmd;
	err = iwl_dvm_send_cmd(priv, &cmd);
	kfree(pattern_cmd);
	return err;
}
1078
/*
 * Configure the device for WoWLAN operation on suspend.
 *
 * Saves the active RXON, restarts the device with the WoWLAN ucode,
 * restores the RXON, programs keys and rekey material (unless software
 * crypto is in use), and finally sends the D3 config, wakeup filter and
 * wakeup pattern commands.  Returns 0 on success, negative errno
 * otherwise.  Called with priv->mutex held (temporarily dropped around
 * the key iteration).
 */
int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
	struct iwl_rxon_cmd rxon;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
	struct iwlagn_d3_config_cmd d3_cfg_cmd = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct wowlan_key_data key_data = {
		.ctx = ctx,
		.bssid = ctx->active.bssid_addr,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	int ret, i;
	u16 seq;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));

	/*
	 * We know the last used seqno, and the uCode expects to know that
	 * one, it will increment before TX.
	 */
	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 before using the value.
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		seq = priv->tid_data[IWL_AP_ID][i].seq_number;
		seq -= 0x10;
		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
	}

	/* translate the cfg80211 wakeup triggers into uCode filter flags */
	if (wowlan->disconnect)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		d3_cfg_cmd.wakeup_flags |=
			cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);

	iwl_scan_cancel_timeout(priv, 200);

	/* snapshot the active RXON so it can be restored on the WoWLAN ucode */
	memcpy(&rxon, &ctx->active, sizeof(rxon));

	priv->ucode_loaded = false;
	iwl_trans_stop_device(priv->trans);

	priv->wowlan = true;

	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
	if (ret)
		goto out;

	/* now configure WoWLAN ucode */
	ret = iwl_alive_start(priv);
	if (ret)
		goto out;

	memcpy(&ctx->staging, &rxon, sizeof(rxon));
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		goto out;

	ret = iwl_power_update_mode(priv, true);
	if (ret)
		goto out;

	if (!iwlwifi_mod_params.sw_crypto) {
		/* mark all keys clear */
		priv->ucode_key_table = 0;
		ctx->key_mapping_keys = 0;

		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&priv->mutex);
		ieee80211_iter_keys(priv->hw, ctx->vif,
				    iwlagn_wowlan_program_keys,
				    &key_data);
		mutex_lock(&priv->mutex);
		if (key_data.error) {
			ret = -EIO;
			goto out;
		}

		if (key_data.use_rsc_tsc) {
			struct iwl_host_cmd rsc_tsc_cmd = {
				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
				.data[0] = key_data.rsc_tsc,
				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
				.len[0] = sizeof(*key_data.rsc_tsc),
			};

			ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd);
			if (ret)
				goto out;
		}

		if (key_data.use_tkip) {
			ret = iwl_dvm_send_cmd_pdu(priv,
						   REPLY_WOWLAN_TKIP_PARAMS,
						   0, sizeof(tkip_cmd),
						   &tkip_cmd);
			if (ret)
				goto out;
		}

		/* upload GTK rekey material so the uCode can rekey on its own */
		if (priv->have_rekey_data) {
			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
			kek_kck_cmd.replay_ctr = priv->replay_ctr;

			ret = iwl_dvm_send_cmd_pdu(priv,
						   REPLY_WOWLAN_KEK_KCK_MATERIAL,
						   0, sizeof(kek_kck_cmd),
						   &kek_kck_cmd);
			if (ret)
				goto out;
		}
	}

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
				   sizeof(d3_cfg_cmd), &d3_cfg_cmd);
	if (ret)
		goto out;

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
				   0, sizeof(wakeup_filter_cmd),
				   &wakeup_filter_cmd);
	if (ret)
		goto out;

	ret = iwlagn_send_patterns(priv, wowlan);
 out:
	kfree(key_data.rsc_tsc);
	return ret;
}
1253#endif
1254
/*
 * Send a host command to the uCode via the transport layer.
 *
 * Refuses to send (returning -EIO) when RF-kill or CT-kill is active,
 * after a firmware error, or when no ucode is loaded.  Synchronous
 * commands must be issued with priv->mutex held.
 */
int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			iwl_dvm_get_cmd_string(cmd->id));
		return -EIO;
	}

	/*
	 * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
	 * in iwl_down but cancel the workers only later.
	 */
	if (!priv->ucode_loaded) {
		IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
		return -EIO;
	}

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex, this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_held(&priv->mutex);

	return iwl_trans_send_cmd(priv->trans, cmd);
}
1288
1289int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
1290 u32 flags, u16 len, const void *data)
1291{
1292 struct iwl_host_cmd cmd = {
1293 .id = id,
1294 .len = { len, },
1295 .data = { data, },
1296 .flags = flags,
1297 };
1298
1299 return iwl_dvm_send_cmd(priv, &cmd);
1300}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
new file mode 100644
index 000000000000..b3ad34e8bf5a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -0,0 +1,1655 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/slab.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/if_arp.h>
39
40#include <net/ieee80211_radiotap.h>
41#include <net/mac80211.h>
42
43#include <asm/div64.h>
44
45#include "iwl-io.h"
46#include "iwl-trans.h"
47#include "iwl-op-mode.h"
48#include "iwl-modparams.h"
49
50#include "dev.h"
51#include "calib.h"
52#include "agn.h"
53
54/*****************************************************************************
55 *
56 * mac80211 entry point functions
57 *
58 *****************************************************************************/
59
/* STA + AP concurrency: at most one interface of each type */
static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP),
	},
};
70
/* Dual-station concurrency: up to two station interfaces */
static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
};
77
/*
 * Interface combinations advertised when AP mode is supported: either
 * STA + AP (with matching infrastructure beacon intervals) or two
 * stations, always on a single channel.
 */
static const struct ieee80211_iface_combination
iwlagn_iface_combinations_dualmode[] = {
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .beacon_int_infra_match = true,
	  .limits = iwlagn_sta_ap_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
	},
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .limits = iwlagn_2sta_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
	},
};
92
/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 *
 * Fills in the wiphy/hw capabilities from NVM data and ucode
 * capabilities, then registers the hw with mac80211.  Returns 0 on
 * success or the error from ieee80211_register_hw().
 */
int iwlagn_mac_setup_register(struct iwl_priv *priv,
			      const struct iwl_ucode_capabilities *capa)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-agn-rs";

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, QUEUE_CONTROL);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);

	hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;

	/*
	 * Including the following line will crash some AP's. This
	 * workaround removes the stimulus which causes the crash until
	 * the AP software can be fixed.
	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	 */

	if (priv->nvm_data->sku_cap_11n_enable)
		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
				       NL80211_FEATURE_STATIC_SMPS;

	/*
	 * Enable 11w if advertised by firmware and software crypto
	 * is not enabled (as the firmware will interpret some mgmt
	 * packets, so enabling it with software crypto isn't safe)
	 */
	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
	    !iwlwifi_mod_params.sw_crypto)
		ieee80211_hw_set(hw, MFP_CAPABLE);

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* advertise the union of all contexts' interface modes */
	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
		hw->wiphy->iface_combinations =
			iwlagn_iface_combinations_dualmode;
		hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
	}

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				       REGULATORY_DISABLE_BEACON_HINTS;

#ifdef CONFIG_PM_SLEEP
	/* advertise WoWLAN only when a WoWLAN image and D3 transport exist */
	if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
	    priv->trans->ops->d3_suspend &&
	    priv->trans->ops->d3_resume &&
	    device_can_wakeup(priv->trans->dev)) {
		priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
					     WIPHY_WOWLAN_DISCONNECT |
					     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
					     WIPHY_WOWLAN_RFKILL_RELEASE;
		if (!iwlwifi_mod_params.sw_crypto)
			priv->wowlan_support.flags |=
				WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
				WIPHY_WOWLAN_GTK_REKEY_FAILURE;

		priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
		priv->wowlan_support.pattern_min_len =
					IWLAGN_WOWLAN_MIN_PATTERN_LEN;
		priv->wowlan_support.pattern_max_len =
					IWLAGN_WOWLAN_MAX_PATTERN_LEN;
		hw->wiphy->wowlan = &priv->wowlan_support;
	}
#endif

	if (iwlwifi_mod_params.power_save)
		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	else
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a max-length SSID element */
	hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 34;

	/*
	 * We don't use all queues: 4 and 9 are unused and any
	 * aggregation queue gets mapped down to the AC queue.
	 */
	hw->queues = IWLAGN_FIRST_AMPDU_QUEUE;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
	if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->nvm_data->bands[IEEE80211_BAND_5GHZ];

	hw->wiphy->hw_version = priv->trans->hw_id;

	iwl_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		iwl_leds_exit(priv);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
223
224void iwlagn_mac_unregister(struct iwl_priv *priv)
225{
226 if (!priv->mac80211_registered)
227 return;
228 iwl_leds_exit(priv);
229 ieee80211_unregister_hw(priv->hw);
230 priv->mac80211_registered = 0;
231}
232
/*
 * Bring the NIC up: allocate broadcast stations for every RXON context,
 * start the transport, run the INIT ucode, then load and boot the
 * runtime (RT) ucode.  Must be called with priv->mutex held.
 *
 * Returns 0 on success or a negative errno; any failure after the
 * transport was started tears the device back down via iwl_down().
 */
static int __iwl_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	for_each_context(priv, ctx) {
		ret = iwlagn_alloc_bcast_station(priv, ctx);
		if (ret) {
			/* undo allocations already done for earlier contexts */
			iwl_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	ret = iwl_trans_start_hw(priv->trans);
	if (ret) {
		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
		goto error;
	}

	ret = iwl_run_init_ucode(priv);
	if (ret) {
		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
		goto error;
	}

	/*
	 * NOTE(review): the HW is started a second time here — presumably
	 * the INIT ucode run above stops it; confirm in iwl_run_init_ucode().
	 */
	ret = iwl_trans_start_hw(priv->trans);
	if (ret) {
		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
		goto error;
	}

	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_alive_start(priv);
	if (ret)
		goto error;
	return 0;

 error:
	/* EXIT_PENDING keeps other paths out while we tear down */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	iwl_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	IWL_ERR(priv, "Unable to initialize device.\n");
	return ret;
}
290
/*
 * mac80211 start callback: bring the device up under priv->mutex and
 * enable the LEDs.  Marks the interface open on success.
 */
static int iwlagn_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl_up(priv);
	mutex_unlock(&priv->mutex);
	if (ret)
		return ret;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Now we should be done, and the READY bit should be set. */
	if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
		ret = -EIO;

	/*
	 * NOTE(review): the -EIO assigned above is never returned — the
	 * function unconditionally returns 0 below, so callers see success
	 * even when READY is not set (only the WARN_ON fires).  Confirm
	 * whether this is intentional.
	 */
	iwlagn_led_enable(priv);

	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
317
/*
 * mac80211 stop callback: mark the interface closed, bring the device
 * down and flush all deferred work.  No-op if already closed.
 */
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	/* clear the flag first so a re-entrant stop is a no-op */
	priv->is_open = 0;

	mutex_lock(&priv->mutex);
	iwl_down(priv);
	mutex_unlock(&priv->mutex);

	iwl_cancel_deferred_work(priv);

	/* make sure all queued work has finished before returning */
	flush_workqueue(priv->workqueue);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
339
/*
 * Cache GTK rekeying material (KEK/KCK/replay counter) handed down by
 * mac80211 — presumably consumed by the WoWLAN ucode during suspend
 * (see priv->have_rekey_data).  Ignored when HW crypto is disabled or
 * when the vif is not the BSS context's vif.
 */
static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	if (iwlwifi_mod_params.sw_crypto)
		return;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
		goto out;

	memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
	memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
	/* the replay counter arrives big-endian; store it little-endian */
	priv->replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
	priv->have_rekey_data = true;

 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
365
366#ifdef CONFIG_PM_SLEEP
367
/*
 * mac80211 suspend callback (CONFIG_PM_SLEEP): program the WoWLAN ucode
 * and put the transport into D3.
 *
 * Returns 1 when not associated as a station, telling mac80211 to tear
 * the connection down instead of suspending; on a ucode programming
 * error the whole driver is restarted via ieee80211_restart_hw().
 */
static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
			      struct cfg80211_wowlan *wowlan)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int ret;

	if (WARN_ON(!wowlan))
		return -EINVAL;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	/* Don't attempt WoWLAN when not associated, tear down instead. */
	if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
	    !iwl_is_associated_ctx(ctx)) {
		ret = 1;
		goto out;
	}

	ret = iwlagn_suspend(priv, wowlan);
	if (ret)
		goto error;

	/* let the ucode operate on its own */
	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_trans_d3_suspend(priv->trans, false);

	goto out;

 error:
	priv->wowlan = false;
	iwlagn_prepare_restart(priv);
	ieee80211_restart_hw(priv->hw);
 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
410
/* Context passed to iwl_resume_status_fn() via the notification-wait API. */
struct iwl_resume_data {
	struct iwl_priv *priv;
	struct iwlagn_wowlan_status *cmd;	/* caller-provided reply buffer */
	bool valid;				/* set once *cmd holds a full reply */
};
416
417static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
418 struct iwl_rx_packet *pkt, void *data)
419{
420 struct iwl_resume_data *resume_data = data;
421 struct iwl_priv *priv = resume_data->priv;
422
423 if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) {
424 IWL_ERR(priv, "rx wrong size data\n");
425 return true;
426 }
427 memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
428 resume_data->valid = true;
429
430 return true;
431}
432
/*
 * mac80211 resume callback (CONFIG_PM_SLEEP): bring the transport out
 * of D3, check the firmware error table, query the WoWLAN wakeup status
 * and report any wakeup reasons to mac80211.
 *
 * Always returns 1 so mac80211 reconfigures from scratch — the driver
 * restarts the runtime firmware in all cases (see the comment about the
 * open RBD below).
 */
static int iwlagn_mac_resume(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif;
	u32 base;
	int ret;
	enum iwl_d3_status d3_status;
	struct error_table_start {
		/* cf. struct iwl_error_event_table */
		u32 valid;
		u32 error_id;
	} err_info;
	struct iwl_notification_wait status_wait;
	static const u16 status_cmd[] = {
		REPLY_WOWLAN_GET_STATUS,
	};
	struct iwlagn_wowlan_status status_data = {};
	struct iwl_resume_data resume_data = {
		.priv = priv,
		.cmd = &status_data,
		.valid = false,
	};
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
#ifdef CONFIG_IWLWIFI_DEBUGFS
	const struct fw_img *img;
#endif

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	/* we'll clear ctx->vif during iwlagn_prepare_restart() */
	vif = ctx->vif;

	ret = iwl_trans_d3_resume(priv->trans, &d3_status, false);
	if (ret)
		goto out_unlock;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(priv, "Device was reset during suspend\n");
		goto out_unlock;
	}

	/* uCode is no longer operating by itself */
	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	base = priv->device_pointers.error_event_table;
	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_WARN(priv, "Invalid error table during resume!\n");
		goto out_unlock;
	}

	iwl_trans_read_mem_bytes(priv->trans, base,
				 &err_info, sizeof(err_info));

	if (err_info.valid) {
		IWL_INFO(priv, "error table is valid (%d, 0x%x)\n",
			 err_info.valid, err_info.error_id);
		/* a WoWLAN rfkill error is surfaced as an rfkill wakeup */
		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
			wakeup.rfkill_release = true;
			ieee80211_report_wowlan_wakeup(vif, &wakeup,
						       GFP_KERNEL);
		}
		goto out_unlock;
	}

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* snapshot the WoWLAN ucode data section for debugfs inspection */
	img = &priv->fw->img[IWL_UCODE_WOWLAN];
	if (!priv->wowlan_sram)
		priv->wowlan_sram =
			kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
				GFP_KERNEL);

	if (priv->wowlan_sram)
		iwl_trans_read_mem(priv->trans, 0x800000,
				   priv->wowlan_sram,
				   img->sec[IWL_UCODE_SECTION_DATA].len / 4);
#endif

	/*
	 * This is very strange. The GET_STATUS command is sent but the device
	 * doesn't reply properly, it seems it doesn't close the RBD so one is
	 * always left open ... As a result, we need to send another command
	 * and have to reset the driver afterwards. As we need to switch to
	 * runtime firmware again that'll happen.
	 */

	iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
				   ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
				   &resume_data);

	iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
	iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
	/* an RBD is left open in the firmware now! */

	ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5);
	if (ret)
		goto out_unlock;

	if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) {
		u32 reasons = le32_to_cpu(status_data.wakeup_reason);
		struct cfg80211_wowlan_wakeup *wakeup_report;

		IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons);

		/* translate firmware wakeup-reason bits to cfg80211 fields */
		if (reasons) {
			if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET)
				wakeup.magic_pkt = true;
			if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH)
				wakeup.pattern_idx = status_data.pattern_number;
			if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
				       IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE))
				wakeup.disconnect = true;
			if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL)
				wakeup.gtk_rekey_failure = true;
			if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ)
				wakeup.eap_identity_req = true;
			if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE)
				wakeup.four_way_handshake = true;
			wakeup_report = &wakeup;
		} else {
			wakeup_report = NULL;
		}

		ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
	}

	priv->wowlan = false;

	/* drop the WoWLAN firmware state; runtime fw will be reloaded */
	iwlagn_prepare_restart(priv);

	memset((void *)&ctx->active, 0, sizeof(ctx->active));
	iwl_connection_init_rx_config(priv, ctx);
	iwlagn_set_rxon_chain(priv, ctx);

 out_unlock:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	ieee80211_resume_disconnect(vif);

	return 1;
}
579
/*
 * mac80211 set_wakeup callback: propagate WoWLAN enable/disable to the
 * underlying device's wakeup capability.
 */
static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	device_set_wakeup_enable(priv->trans->dev, enabled);
}
586#endif
587
588static void iwlagn_mac_tx(struct ieee80211_hw *hw,
589 struct ieee80211_tx_control *control,
590 struct sk_buff *skb)
591{
592 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
593
594 if (iwlagn_tx_skb(priv, control->sta, skb))
595 ieee80211_free_txskb(hw, skb);
596}
597
/*
 * mac80211 TKIP phase-1 key update: forward the new IV32 and phase-1
 * key material straight to the station-key code.
 */
static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_key_conf *keyconf,
				       struct ieee80211_sta *sta,
				       u32 iv32, u16 *phase1key)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
}
608
/*
 * mac80211 set_key callback: program pairwise/group keys into the
 * device.  Legacy "default WEP keys" (WEP group key with no key-mapping
 * keys present) use a dedicated host command; IBSS group keys are kept
 * TX-only and never programmed.  Returns -EOPNOTSUPP when HW crypto is
 * disabled by module parameter.
 */
static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta,
			      struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *ctx = vif_priv->ctx;
	int ret;
	bool is_default_wep_key = false;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (iwlwifi_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* ask mac80211 to generate MMIC (TKIP) and IVs (TKIP and CCMP) */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		/* fall through */
	case WLAN_CIPHER_SUITE_CCMP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		break;
	default:
		break;
	}

	/*
	 * We could program these keys into the hardware as well, but we
	 * don't expect much multicast traffic in IBSS and having keys
	 * for more stations is probably more useful.
	 *
	 * Mark key TX-only and return 0.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		key->hw_key_idx = WEP_INVALID_OFFSET;
		return 0;
	}

	/* If they key was TX-only, accept deletion */
	if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
		return 0;

	mutex_lock(&priv->mutex);
	iwl_scan_cancel_timeout(priv, 100);

	/* the sentinel values below must stay distinguishable */
	BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
				key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
	}


	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key) {
			ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
			break;
		}
		ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
		if (ret) {
			/*
			 * can't add key for RX, but we don't need it
			 * in the device for TX so still return 0
			 */
			ret = 0;
			key->hw_key_idx = WEP_INVALID_OFFSET;
		}

		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = iwl_remove_default_wep_key(priv, ctx, key);
		else
			ret = iwl_remove_dynamic_key(priv, ctx, key, sta);

		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
711
712static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
713{
714 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
715 return false;
716 return true;
717}
718
719static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
720{
721 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
722 return false;
723 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
724 return true;
725
726 /* disabled by default */
727 return false;
728}
729
/*
 * mac80211 A-MPDU action callback: start/stop RX and TX aggregation
 * sessions for (sta, tid).  Returns -EACCES when HT is disabled in the
 * NVM; actions blocked by module parameters or missing transport
 * support fall through and return -EINVAL.
 */
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum ieee80211_ampdu_mlme_action action,
				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				   u8 buf_size, bool amsdu)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	int ret = -EINVAL;
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->nvm_data->sku_cap_11n_enable))
		return -EACCES;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!iwl_enable_rx_ampdu(priv->cfg))
			break;
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_START:
		/* TX agg needs a transport that can enable queues */
		if (!priv->trans->ops->txq_enable)
			break;
		if (!iwl_enable_tx_ampdu(priv->cfg))
			break;
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		IWL_DEBUG_HT(priv, "Flush Tx\n");
		ret = iwlagn_tx_agg_flush(priv, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
		if ((ret == 0) && (priv->agg_tids_count > 0)) {
			priv->agg_tids_count--;
			IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
				     priv->agg_tids_count);
		}
		if (!priv->agg_tids_count &&
		    priv->hw_params.use_rts_for_aggregation) {
			/*
			 * switch off RTS/CTS if it was previously enabled
			 */
			sta_priv->lq_sta.lq.general_params.flags &=
				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
			iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
					&sta_priv->lq_sta.lq, CMD_ASYNC, false);
		}
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
		break;
	}
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return ret;
}
800
/*
 * Add a station entry to the device for a newly known peer and cache
 * the assigned station index in the station's driver-private data.
 */
static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	/*
	 * NOTE(review): "is_ap" is set when *our* vif is a station, i.e.
	 * the peer being added is presumably our AP — confirm against
	 * iwl_add_station_common().
	 */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
		       sta->addr);
	/* invalid until the device accepts the station */
	sta_priv->sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);
	if (vif->type == NL80211_IFTYPE_AP)
		sta_priv->client = true;

	ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		return ret;
	}

	sta_priv->sta_id = sta_id;

	return 0;
}
833
/*
 * Remove a station entry when mac80211 forgets the peer.  For a client
 * vif the AP station is only deactivated, not removed (see comment).
 */
static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	int ret;

	IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr);

	if (vif->type == NL80211_IFTYPE_STATION) {
		/*
		 * Station will be removed from device when the RXON
		 * is set to unassociated -- just deactivate it here
		 * to avoid re-programming it.
		 */
		ret = 0;
		iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr);
	} else {
		ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
		if (ret)
			IWL_DEBUG_QUIET_RFKILL(priv,
				"Error removing station %pM\n", sta->addr);
	}
	return ret;
}
860
/*
 * mac80211 station state transition hook.  Maps the generic station
 * state machine onto driver operations: add/remove the device station
 * entry and (re)initialize rate scaling around association.  Failures
 * are suppressed while in rfkill (see comment near the end).
 */
static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta,
				enum ieee80211_sta_state old_state,
				enum ieee80211_sta_state new_state)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	enum {
		NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT,
	} op = NONE;
	int ret;

	IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n",
			   sta->addr, old_state, new_state);

	mutex_lock(&priv->mutex);
	/* station-mode vifs add early; other vif types add at assoc time */
	if (vif->type == NL80211_IFTYPE_STATION) {
		if (old_state == IEEE80211_STA_NOTEXIST &&
		    new_state == IEEE80211_STA_NONE)
			op = ADD;
		else if (old_state == IEEE80211_STA_NONE &&
			 new_state == IEEE80211_STA_NOTEXIST)
			op = REMOVE;
		else if (old_state == IEEE80211_STA_AUTH &&
			 new_state == IEEE80211_STA_ASSOC)
			op = HT_RATE_INIT;
	} else {
		if (old_state == IEEE80211_STA_AUTH &&
		    new_state == IEEE80211_STA_ASSOC)
			op = ADD_RATE_INIT;
		else if (old_state == IEEE80211_STA_ASSOC &&
			 new_state == IEEE80211_STA_AUTH)
			op = REMOVE;
	}

	switch (op) {
	case ADD:
		ret = iwlagn_mac_sta_add(hw, vif, sta);
		if (ret)
			break;
		/*
		 * Clear the in-progress flag, the AP station entry was added
		 * but we'll initialize LQ only when we've associated (which
		 * would also clear the in-progress flag). This is necessary
		 * in case we never initialize LQ because association fails.
		 */
		spin_lock_bh(&priv->sta_lock);
		priv->stations[iwl_sta_id(sta)].used &=
			~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_bh(&priv->sta_lock);
		break;
	case REMOVE:
		ret = iwlagn_mac_sta_remove(hw, vif, sta);
		break;
	case ADD_RATE_INIT:
		ret = iwlagn_mac_sta_add(hw, vif, sta);
		if (ret)
			break;
		/* Initialize rate scaling */
		IWL_DEBUG_INFO(priv,
			       "Initializing rate scaling for station %pM\n",
			       sta->addr);
		iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
		ret = 0;
		break;
	case HT_RATE_INIT:
		/* Initialize rate scaling */
		ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta);
		if (ret)
			break;
		IWL_DEBUG_INFO(priv,
			       "Initializing rate scaling for station %pM\n",
			       sta->addr);
		iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
		ret = 0;
		break;
	default:
		ret = 0;
		break;
	}

	/*
	 * mac80211 might WARN if we fail, but due the way we
	 * (badly) handle hard rfkill, we might fail here
	 */
	if (iwl_is_rfkill(priv))
		ret = 0;

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
955
/*
 * mac80211 channel-switch callback: program the device's channel
 * switch command for the BSS context.  Bails out silently when the
 * device is busy (scanning, switch already pending, rfkill, shutdown),
 * unassociated, or the target channel is already active.
 */
static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->chandef.chan;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	/*
	 * MULTI-FIXME
	 * When we add support for multiple interfaces, we need to
	 * revisit this. The channel switch command in the device
	 * only affects the BSS context, but what does that really
	 * mean? And what if we get a CSA on the second interface?
	 * This needs a lot of work.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_is_rfkill(priv))
		goto out;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status) ||
	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		goto out;

	if (!iwl_is_associated_ctx(ctx))
		goto out;

	if (!priv->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	priv->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		ctx->ht.is_40mhz = false;
		ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	case NL80211_CHAN_HT40MINUS:
		ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		ctx->ht.is_40mhz = true;
		break;
	case NL80211_CHAN_HT40PLUS:
		ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		ctx->ht.is_40mhz = true;
		break;
	}

	/* a real channel change invalidates the staged flags */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	iwl_set_rxon_channel(priv, channel, ctx);
	iwl_set_rxon_ht(priv, ht_conf);
	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);

	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
	priv->switch_channel = cpu_to_le16(ch);
	if (priv->lib->set_channel_switch(priv, ch_switch)) {
		/* command failed — undo and tell mac80211 the switch failed */
		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
		priv->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
1039
1040void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
1041{
1042 /*
1043 * MULTI-FIXME
1044 * See iwlagn_mac_channel_switch.
1045 */
1046 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1047
1048 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1049 return;
1050
1051 if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
1052 return;
1053
1054 if (ctx->vif)
1055 ieee80211_chswitch_done(ctx->vif, is_success);
1056}
1057
/*
 * mac80211 configure_filter callback: translate FIF_* filter flags into
 * RXON filter bits on every context's staging RXON.  The change is not
 * committed to the device immediately (see comment in the loop).
 */
static void iwlagn_configure_filter(struct ieee80211_hw *hw,
				    unsigned int changed_flags,
				    unsigned int *total_flags,
				    u64 multicast)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

/* collect bits to set (filter_or) and to clear (filter_nand) */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			   changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
1107
/*
 * mac80211 flush callback: optionally drop, then wait for all data
 * queues (everything except the command queues) to drain.  Aborts
 * silently on shutdown or rfkill.
 */
static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     u32 queues, bool drop)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	u32 scd_queues;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
		goto done;
	}
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
		goto done;
	}

	/* all scheduler queues except the two command queues */
	scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
	scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
			BIT(IWL_DEFAULT_CMD_QUEUE_NUM));

	if (drop) {
		IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
				    scd_queues);
		if (iwlagn_txfifo_flush(priv, scd_queues)) {
			IWL_ERR(priv, "flush request fail\n");
			goto done;
		}
	}

	IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
	iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
done:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
1145
1146static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
1147 struct ieee80211_vif *vif,
1148 const struct ieee80211_event *event)
1149{
1150 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1151
1152 if (event->type != RSSI_EVENT)
1153 return;
1154
1155 IWL_DEBUG_MAC80211(priv, "enter\n");
1156
1157 if (priv->lib->bt_params &&
1158 priv->lib->bt_params->advanced_bt_coexist) {
1159 if (event->u.rssi.data == RSSI_EVENT_LOW)
1160 priv->bt_enable_pspoll = true;
1161 else if (event->u.rssi.data == RSSI_EVENT_HIGH)
1162 priv->bt_enable_pspoll = false;
1163
1164 queue_work(priv->workqueue, &priv->bt_runtime_config);
1165 } else {
1166 IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
1167 "ignoring RSSI callback\n");
1168 }
1169
1170 IWL_DEBUG_MAC80211(priv, "leave\n");
1171}
1172
/*
 * mac80211 set_tim callback: a TIM change just requires a fresh beacon,
 * so defer the beacon update to the driver workqueue.
 */
static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta, bool set)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

	queue_work(priv->workqueue, &priv->beacon_update);

	return 0;
}
1182
/*
 * mac80211 conf_tx callback: store per-AC EDCA parameters (cw_min,
 * cw_max, AIFSN, TXOP) in the context's QoS table; the table is
 * committed to the device elsewhere.
 */
static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, u16 queue,
			      const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *ctx = vif_priv->ctx;
	int q;

	if (WARN_ON(!ctx))
		return -EINVAL;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/*
	 * Invert the index: the device QoS table is ordered the opposite
	 * way from mac80211's queue numbering (presumably VO..BK vs BK..VO
	 * — confirm against the uCode QoS command layout).
	 */
	q = AC_NUM - 1 - queue;

	mutex_lock(&priv->mutex);

	ctx->qos_data.def_qos_parm.ac[q].cw_min =
		cpu_to_le16(params->cw_min);
	ctx->qos_data.def_qos_parm.ac[q].cw_max =
		cpu_to_le16(params->cw_max);
	ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	/* TXOP arrives in units of 32us for the device */
	ctx->qos_data.def_qos_parm.ac[q].edca_txop =
		cpu_to_le16((params->txop * 32));

	ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
1226
1227static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
1228{
1229 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1230
1231 return priv->ibss_manager == IWL_IBSS_MANAGER;
1232}
1233
/* Rebuild the staging RXON configuration for the context and commit it. */
static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_connection_init_rx_config(priv, ctx);
	iwlagn_set_rxon_chain(priv, ctx);
	return iwlagn_commit_rxon(priv, ctx);
}
1242
/*
 * Bind a freshly added vif to its RXON context: program the device
 * mode, apply the BT-coex IBSS workaround and set up the per-AC queue
 * mapping.  Called with priv->mutex held.
 */
static int iwl_setup_interface(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err, ac;

	lockdep_assert_held(&priv->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	priv->iw_mode = vif->type;

	ctx->is_active = true;

	err = iwl_set_mode(priv, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist &&
	    vif->type == NL80211_IFTYPE_ADHOC) {
		/*
		 * pretend to have high BT traffic as long as we
		 * are operating in IBSS mode, as this will cause
		 * the rate scaling etc. to behave as intended.
		 */
		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
	}

	/* set up queue mappings */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		vif->hw_queue[ac] = ctx->ac_to_queue[ac];

	/* only AP mode has a multicast (content-after-beacon) queue */
	if (vif->type == NL80211_IFTYPE_AP)
		vif->cab_queue = ctx->mcast_queue;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
1288
/*
 * mac80211 add_interface callback: find a free RXON context (or, on a
 * firmware restart, the context the vif was already bound to) that
 * supports the requested interface type, bind the vif to it and
 * program the device.
 */
static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;
	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
	bool reset = false;

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   viftype, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* On reset we need to add the same interface again */
			if (tmp->vif == vif) {
				reset = true;
				ctx = tmp;
				break;
			}

			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
			    BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(viftype)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	/*
	 * In SNIFFER device type, the firmware reports the FCS to
	 * the host, rather than snipping it off. Unfortunately,
	 * mac80211 doesn't (yet) provide a per-packet flag for
	 * this, so that we have to set the hardware flag based
	 * on the interfaces added. As the monitor interface can
	 * only be present by itself, and will be removed before
	 * other interfaces are added, this is safe.
	 */
	if (vif->type == NL80211_IFTYPE_MONITOR)
		ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
	else
		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, priv->hw->flags);

	err = iwl_setup_interface(priv, ctx);
	/* on a restart, keep the context bound even if setup failed */
	if (!err || reset)
		goto out;

	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
1373
/*
 * Undo iwl_setup_interface(): cancel any scan owned by @vif and, unless
 * this is part of an interface-type change (@mode_change true), push
 * the "no interface" configuration to the device and deactivate the
 * context.
 *
 * Caller must hold priv->mutex.
 */
static void iwl_teardown_interface(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	/* Stop any scan this vif started before it goes away. */
	if (priv->scan_vif == vif) {
		iwl_scan_cancel_timeout(priv, 200);
		iwl_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}

	/*
	 * When removing the IBSS interface, overwrite the
	 * BT traffic load with the stored one from the last
	 * notification, if any. If this is a device that
	 * doesn't implement this, this has no effect since
	 * both values are the same and zero.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		priv->bt_traffic_load = priv->last_bt_traffic_load;
}
1403
1404static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
1405 struct ieee80211_vif *vif)
1406{
1407 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1408 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1409
1410 IWL_DEBUG_MAC80211(priv, "enter\n");
1411
1412 mutex_lock(&priv->mutex);
1413
1414 if (WARN_ON(ctx->vif != vif)) {
1415 struct iwl_rxon_context *tmp;
1416 IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
1417 for_each_context(priv, tmp)
1418 IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
1419 tmp->ctxid, tmp, tmp->vif);
1420 }
1421 ctx->vif = NULL;
1422
1423 iwl_teardown_interface(priv, vif, false);
1424
1425 mutex_unlock(&priv->mutex);
1426
1427 IWL_DEBUG_MAC80211(priv, "leave\n");
1428
1429}
1430
/*
 * mac80211 change_interface callback.
 *
 * Supported only on the BSS context; tears the interface down and sets
 * it back up with the new type. Exclusive target modes are refused if
 * any other context is active.
 */
static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx, *tmp;
	enum nl80211_iftype newviftype = newtype;
	u32 interface_modes;
	int err;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* fold the p2p flag into the type for the mode checks below */
	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	ctx = iwl_rxon_ctx_from_vif(vif);

	/*
	 * To simplify this code, only support changes on the
	 * BSS context. The PAN context is usually reassigned
	 * by creating/removing P2P interfaces anyway.
	 */
	if (ctx->ctxid != IWL_RXON_CTX_BSS) {
		err = -EBUSY;
		goto out;
	}

	if (!ctx->vif || !iwl_is_ready_rf(priv)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* Check if the switch is supported in the same context */
	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->is_active)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_teardown_interface(priv, vif, true);
	vif->type = newviftype;
	vif->p2p = newp2p;
	err = iwl_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return err;
}
1513
/*
 * mac80211 hw_scan callback.
 *
 * If an internal scan is already in progress, only record the request
 * (priv->scan_request / priv->scan_vif) to be started later; otherwise
 * kick off the scan immediately on the first requested channel's band.
 */
static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_scan_request *hw_req)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct cfg80211_scan_request *req = &hw_req->req;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (req->n_channels == 0)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	/*
	 * If an internal scan is in progress, just set
	 * up the scan_request as per above.
	 */
	if (priv->scan_type != IWL_SCAN_NORMAL) {
		IWL_DEBUG_SCAN(priv,
			       "SCAN request during internal scan - defer\n");
		priv->scan_request = req;
		priv->scan_vif = vif;
		ret = 0;
	} else {
		priv->scan_request = req;
		priv->scan_vif = vif;
		/*
		 * mac80211 will only ask for one band at a time
		 * so using channels[0] here is ok
		 */
		ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
					req->channels[0]->band);
		if (ret) {
			/* could not start: drop the recorded request */
			priv->scan_request = NULL;
			priv->scan_vif = NULL;
		}
	}

	IWL_DEBUG_MAC80211(priv, "leave\n");

	mutex_unlock(&priv->mutex);

	return ret;
}
1560
1561static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1562{
1563 struct iwl_addsta_cmd cmd = {
1564 .mode = STA_CONTROL_MODIFY_MSK,
1565 .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
1566 .sta.sta_id = sta_id,
1567 };
1568
1569 iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
1570}
1571
1572static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
1573 struct ieee80211_vif *vif,
1574 enum sta_notify_cmd cmd,
1575 struct ieee80211_sta *sta)
1576{
1577 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1578 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1579 int sta_id;
1580
1581 IWL_DEBUG_MAC80211(priv, "enter\n");
1582
1583 switch (cmd) {
1584 case STA_NOTIFY_SLEEP:
1585 WARN_ON(!sta_priv->client);
1586 sta_priv->asleep = true;
1587 if (atomic_read(&sta_priv->pending_frames) > 0)
1588 ieee80211_sta_block_awake(hw, sta, true);
1589 break;
1590 case STA_NOTIFY_AWAKE:
1591 WARN_ON(!sta_priv->client);
1592 if (!sta_priv->asleep)
1593 break;
1594 sta_priv->asleep = false;
1595 sta_id = iwl_sta_id(sta);
1596 if (sta_id != IWL_INVALID_STATION)
1597 iwl_sta_modify_ps_wake(priv, sta_id);
1598 break;
1599 default:
1600 break;
1601 }
1602 IWL_DEBUG_MAC80211(priv, "leave\n");
1603}
1604
/*
 * mac80211 callbacks implemented by the DVM op mode. The suspend/
 * resume/set_wakeup entries are only compiled in with CONFIG_PM_SLEEP.
 */
const struct ieee80211_ops iwlagn_hw_ops = {
	.tx = iwlagn_mac_tx,
	.start = iwlagn_mac_start,
	.stop = iwlagn_mac_stop,
#ifdef CONFIG_PM_SLEEP
	.suspend = iwlagn_mac_suspend,
	.resume = iwlagn_mac_resume,
	.set_wakeup = iwlagn_mac_set_wakeup,
#endif
	.add_interface = iwlagn_mac_add_interface,
	.remove_interface = iwlagn_mac_remove_interface,
	.change_interface = iwlagn_mac_change_interface,
	.config = iwlagn_mac_config,
	.configure_filter = iwlagn_configure_filter,
	.set_key = iwlagn_mac_set_key,
	.update_tkip_key = iwlagn_mac_update_tkip_key,
	.set_rekey_data = iwlagn_mac_set_rekey_data,
	.conf_tx = iwlagn_mac_conf_tx,
	.bss_info_changed = iwlagn_bss_info_changed,
	.ampdu_action = iwlagn_mac_ampdu_action,
	.hw_scan = iwlagn_mac_hw_scan,
	.sta_notify = iwlagn_mac_sta_notify,
	.sta_state = iwlagn_mac_sta_state,
	.channel_switch = iwlagn_mac_channel_switch,
	.flush = iwlagn_mac_flush,
	.tx_last_beacon = iwlagn_mac_tx_last_beacon,
	.event_callback = iwlagn_mac_event_callback,
	.set_tim = iwlagn_mac_set_tim,
};
1634
1635/* This function both allocates and initializes hw and priv. */
1636struct ieee80211_hw *iwl_alloc_all(void)
1637{
1638 struct iwl_priv *priv;
1639 struct iwl_op_mode *op_mode;
1640 /* mac80211 allocates memory for this device instance, including
1641 * space for this driver's private structure */
1642 struct ieee80211_hw *hw;
1643
1644 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv) +
1645 sizeof(struct iwl_op_mode), &iwlagn_hw_ops);
1646 if (!hw)
1647 goto out;
1648
1649 op_mode = hw->priv;
1650 priv = IWL_OP_MODE_GET_DVM(op_mode);
1651 priv->hw = hw;
1652
1653out:
1654 return hw;
1655}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
new file mode 100644
index 000000000000..e7616f0ee6e8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -0,0 +1,2077 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/delay.h>
37#include <linux/sched.h>
38#include <linux/skbuff.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/if_arp.h>
42
43#include <net/mac80211.h>
44
45#include <asm/div64.h>
46
47#include "iwl-eeprom-read.h"
48#include "iwl-eeprom-parse.h"
49#include "iwl-io.h"
50#include "iwl-trans.h"
51#include "iwl-op-mode.h"
52#include "iwl-drv.h"
53#include "iwl-modparams.h"
54#include "iwl-prph.h"
55
56#include "dev.h"
57#include "calib.h"
58#include "agn.h"
59
60
61/******************************************************************************
62 *
63 * module boiler plate
64 *
65 ******************************************************************************/
66
67#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
68MODULE_DESCRIPTION(DRV_DESCRIPTION);
69MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
70MODULE_LICENSE("GPL");
71
72static const struct iwl_op_mode_ops iwl_dvm_ops;
73
74void iwl_update_chain_flags(struct iwl_priv *priv)
75{
76 struct iwl_rxon_context *ctx;
77
78 for_each_context(priv, ctx) {
79 iwlagn_set_rxon_chain(priv, ctx);
80 if (ctx->active.rx_chain != ctx->staging.rx_chain)
81 iwlagn_commit_rxon(priv, ctx);
82 }
83}
84
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void iwl_set_beacon_tim(struct iwl_priv *priv,
			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
			       u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;	/* skip id, len, body */

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}
111
/*
 * Build and send the REPLY_TX_BEACON host command for the current
 * beacon skb (priv->beacon_skb). The command buffer (priv->beacon_cmd)
 * is allocated once and reused; both command and frame are attached
 * as NOCOPY chunks, so they must stay valid until the command is done.
 *
 * Caller must hold priv->mutex. Returns 0 on success or a negative
 * errno from allocation / command submission.
 */
int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_BEACON,
	};
	struct ieee80211_tx_info *info;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	if (WARN_ON(!priv->beacon_skb))
		return -EINVAL;

	/* Allocate beacon command */
	if (!priv->beacon_cmd)
		priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
	tx_beacon_cmd = priv->beacon_cmd;
	if (!tx_beacon_cmd)
		return -ENOMEM;

	frame_size = priv->beacon_skb->len;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
			   frame_size);

	/* Set up packet rate and flags */
	info = IEEE80211_SKB_CB(priv->beacon_skb);

	/*
	 * Let's set up the rate at least somewhat correctly;
	 * it will currently not actually be used by the uCode,
	 * it uses the broadcast station's rate instead.
	 */
	if (info->control.rates[0].idx < 0 ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
		rate = 0;
	else
		rate = info->control.rates[0].idx;

	/* rotate the TX antenna for management frames */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->nvm_data->valid_tx_ant);
	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* In mac80211, rates for 5 GHz start at 0 */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate += IWL_FIRST_OFDM_RATE;
	else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	tx_beacon_cmd->tx.rate_n_flags =
		iwl_hw_set_rate_n_flags(rate, rate_flags);

	/* Submit command: command header and frame as two NOCOPY chunks */
	cmd.len[0] = sizeof(*tx_beacon_cmd);
	cmd.data[0] = tx_beacon_cmd;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	cmd.len[1] = frame_size;
	cmd.data[1] = priv->beacon_skb->data;
	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;

	return iwl_dvm_send_cmd(priv, &cmd);
}
195
/*
 * Worker: fetch a fresh beacon from mac80211 for the beacon context
 * and upload it to the device. Only acts when the beacon context is
 * an AP interface; the previous beacon skb is freed and replaced.
 */
static void iwl_bg_beacon_update(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
	struct sk_buff *beacon;

	mutex_lock(&priv->mutex);
	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "updating beacon w/o beacon context!\n");
		goto out;
	}

	if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
		/*
		 * The ucode will send beacon notifications even in
		 * IBSS mode, but we don't want to process them. But
		 * we need to defer the type check to here due to
		 * requiring locking around the beacon_ctx access.
		 */
		goto out;
	}

	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
	if (!beacon) {
		IWL_ERR(priv, "update beacon failed -- keeping old\n");
		goto out;
	}

	/* new beacon skb is allocated every time; dispose previous.*/
	dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = beacon;

	iwlagn_send_beacon_cmd(priv);
 out:
	mutex_unlock(&priv->mutex);
}
234
235static void iwl_bg_bt_runtime_config(struct work_struct *work)
236{
237 struct iwl_priv *priv =
238 container_of(work, struct iwl_priv, bt_runtime_config);
239
240 mutex_lock(&priv->mutex);
241 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
242 goto out;
243
244 /* dont send host command if rf-kill is on */
245 if (!iwl_is_ready_rf(priv))
246 goto out;
247
248 iwlagn_send_advance_bt_config(priv);
249out:
250 mutex_unlock(&priv->mutex);
251}
252
/*
 * Worker: reprogram all RXON contexts and then send the advanced BT
 * configuration after a switch between full-concurrency and 3-wire
 * BT coexistence modes.
 */
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_full_concurrency);
	struct iwl_rxon_context *ctx;

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		goto out;

	/* dont send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	/*
	 * LQ & RXON updated cmds must be sent before BT Config cmd
	 * to avoid 3-wire collisions
	 */
	for_each_context(priv, ctx) {
		iwlagn_set_rxon_chain(priv, ctx);
		iwlagn_commit_rxon(priv, ctx);
	}

	iwlagn_send_advance_bt_config(priv);
out:
	mutex_unlock(&priv->mutex);
}
285
286int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
287{
288 struct iwl_statistics_cmd statistics_cmd = {
289 .configuration_flags =
290 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
291 };
292
293 if (flags & CMD_ASYNC)
294 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
295 CMD_ASYNC,
296 sizeof(struct iwl_statistics_cmd),
297 &statistics_cmd);
298 else
299 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
300 sizeof(struct iwl_statistics_cmd),
301 &statistics_cmd);
302}
303
304/**
305 * iwl_bg_statistics_periodic - Timer callback to queue statistics
306 *
307 * This callback is provided in order to send a statistics request.
308 *
309 * This timer function is continually reset to execute within
310 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
311 * was received. We need to ensure we receive the statistics in order
312 * to update the temperature used for calibrating the TXPOWER.
313 */
314static void iwl_bg_statistics_periodic(unsigned long data)
315{
316 struct iwl_priv *priv = (struct iwl_priv *)data;
317
318 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
319 return;
320
321 /* dont send host command if rf-kill is on */
322 if (!iwl_is_ready_rf(priv))
323 return;
324
325 iwl_send_statistics_request(priv, CMD_ASYNC, false);
326}
327
328
/*
 * Read @num_events event-log entries from device SRAM, starting at
 * index @start_idx, and emit each one as an ftrace event.
 *
 * @base:     SRAM byte address of the event log header
 * @capacity: total number of entries the log can hold
 * @mode:     0 = two words per entry (no timestamp),
 *            otherwise three words per entry (with timestamp)
 */
static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
				       u32 start_idx, u32 num_events,
				       u32 capacity, u32 mode)
{
	u32 i;
	u32 ptr; /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	/* entries start after a 4-word header, 2 or 3 words per entry */
	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags))
		return;

	/* Set starting address; reads will auto-increment */
	iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);

	/*
	 * Refuse to read more than would have fit into the log from
	 * the current start_idx. This used to happen due to the race
	 * described below, but now WARN because the code below should
	 * prevent it from happening here.
	 */
	if (WARN_ON(num_events > capacity - start_idx))
		num_events = capacity - start_idx;

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
		time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_dev_ucode_cont_event(
					priv->trans->dev, 0, time, ev);
		} else {
			data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_dev_ucode_cont_event(
					priv->trans->dev, time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_trans_release_nic_access(priv->trans, &reg_flags);
}
378
/*
 * Poll the continuous uCode event log and trace all entries written
 * since the last poll, compensating for the uCode updating its write
 * pointer and wrap counter non-atomically.
 */
static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity; /* event log capacity in # entries */
	/* layout of the event log header as stored in device SRAM */
	struct {
		u32 capacity;
		u32 mode;
		u32 wrap_counter;
		u32 write_counter;
	} __packed read;
	u32 base; /* SRAM byte address of event log header */
	u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps; /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	base = priv->device_pointers.log_event_table;
	if (iwlagn_hw_valid_rtc_data_addr(base)) {
		iwl_trans_read_mem_bytes(priv->trans, base,
					 &read, sizeof(read));
		capacity = read.capacity;
		mode = read.mode;
		num_wraps = read.wrap_counter;
		next_entry = read.write_counter;
	} else
		return;

	/*
	 * Unfortunately, the uCode doesn't use temporary variables.
	 * Therefore, it can happen that we read next_entry == capacity,
	 * which really means next_entry == 0.
	 */
	if (unlikely(next_entry == capacity))
		next_entry = 0;
	/*
	 * Additionally, the uCode increases the write pointer before
	 * the wraps counter, so if the write pointer is smaller than
	 * the old write pointer (wrap occurred) but we read that no
	 * wrap occurred, we actually read between the next_entry and
	 * num_wraps update (this does happen in practice!!) -- take
	 * that into account by increasing num_wraps.
	 */
	if (unlikely(next_entry < priv->event_log.next_entry &&
		     num_wraps == priv->event_log.num_wraps))
		num_wraps++;

	if (num_wraps == priv->event_log.num_wraps) {
		/* no wrap: trace the new entries in one contiguous run */
		iwl_print_cont_event_trace(
			priv, base, priv->event_log.next_entry,
			next_entry - priv->event_log.next_entry,
			capacity, mode);

		priv->event_log.non_wraps_count++;
	} else {
		if (num_wraps - priv->event_log.num_wraps > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;

		trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);

		/* wrapped: trace tail of the log, then the head */
		if (next_entry < priv->event_log.next_entry) {
			iwl_print_cont_event_trace(
				priv, base, priv->event_log.next_entry,
				capacity - priv->event_log.next_entry,
				capacity, mode);

			iwl_print_cont_event_trace(
				priv, base, 0, next_entry, capacity, mode);
		} else {
			iwl_print_cont_event_trace(
				priv, base, next_entry,
				capacity - next_entry,
				capacity, mode);

			iwl_print_cont_event_trace(
				priv, base, 0, next_entry, capacity, mode);
		}
	}

	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}
462
463/**
464 * iwl_bg_ucode_trace - Timer callback to log ucode event
465 *
466 * The timer is continually set to execute every
467 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
468 * this function is to perform continuous uCode event logging operation
469 * if enabled
470 */
471static void iwl_bg_ucode_trace(unsigned long data)
472{
473 struct iwl_priv *priv = (struct iwl_priv *)data;
474
475 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
476 return;
477
478 if (priv->event_log.ucode_trace) {
479 iwl_continuous_event_trace(priv);
480 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
481 mod_timer(&priv->ucode_trace,
482 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
483 }
484}
485
486static void iwl_bg_tx_flush(struct work_struct *work)
487{
488 struct iwl_priv *priv =
489 container_of(work, struct iwl_priv, tx_flush);
490
491 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
492 return;
493
494 /* do nothing if rf-kill is on */
495 if (!iwl_is_ready_rf(priv))
496 return;
497
498 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
499 iwlagn_dev_txfifo_flush(priv);
500}
501
502/*
503 * queue/FIFO/AC mapping definitions
504 */
505
/* AC->FIFO map for the BSS context, indexed in VO, VI, BE, BK order */
static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

/* BSS context uses the first four TX queues, one per AC */
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};

/* AC->FIFO map for the PAN (IPAN) context, same AC ordering */
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};

/* PAN context uses queues 4-7, highest-priority AC on the highest queue */
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};
527
/*
 * Initialize the BSS and PAN RXON contexts: which host commands drive
 * each context, its station IDs, supported interface types, device
 * types and AC->queue/FIFO mappings. The PAN context is only marked
 * valid when the uCode advertises IWL_UCODE_TLV_FLAGS_PAN.
 */
static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
	int i;

	/*
	 * The default context is always valid,
	 * the PAN context depends on uCode.
	 */
	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
		priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);

	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
		priv->contexts[i].ctxid = i;

	priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
	/* IBSS and monitor must have the BSS context to themselves */
	priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR);
	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
		BIT(NL80211_IFTYPE_STATION);
	priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
	       iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
	       iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));

	/* PAN context uses the WIPAN variants of the RXON command set */
	priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
	priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
		REPLY_WIPAN_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
		REPLY_WIPAN_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
	priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
	priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
	priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
	priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
		BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);

	priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
	priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
	priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
	       iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
	       iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
	priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
589
590static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
591{
592 struct iwl_ct_kill_config cmd;
593 struct iwl_ct_kill_throttling_config adv_cmd;
594 int ret = 0;
595
596 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
597 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
598
599 priv->thermal_throttle.ct_kill_toggle = false;
600
601 if (priv->lib->support_ct_kill_exit) {
602 adv_cmd.critical_temperature_enter =
603 cpu_to_le32(priv->hw_params.ct_kill_threshold);
604 adv_cmd.critical_temperature_exit =
605 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
606
607 ret = iwl_dvm_send_cmd_pdu(priv,
608 REPLY_CT_KILL_CONFIG_CMD,
609 0, sizeof(adv_cmd), &adv_cmd);
610 if (ret)
611 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
612 else
613 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
614 "succeeded, critical temperature enter is %d,"
615 "exit is %d\n",
616 priv->hw_params.ct_kill_threshold,
617 priv->hw_params.ct_kill_exit_threshold);
618 } else {
619 cmd.critical_temperature_R =
620 cpu_to_le32(priv->hw_params.ct_kill_threshold);
621
622 ret = iwl_dvm_send_cmd_pdu(priv,
623 REPLY_CT_KILL_CONFIG_CMD,
624 0, sizeof(cmd), &cmd);
625 if (ret)
626 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
627 else
628 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
629 "succeeded, "
630 "critical temperature is %d\n",
631 priv->hw_params.ct_kill_threshold);
632 }
633}
634
635static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
636{
637 struct iwl_calib_cfg_cmd calib_cfg_cmd;
638 struct iwl_host_cmd cmd = {
639 .id = CALIBRATION_CFG_CMD,
640 .len = { sizeof(struct iwl_calib_cfg_cmd), },
641 .data = { &calib_cfg_cmd, },
642 };
643
644 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
645 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
646 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
647
648 return iwl_dvm_send_cmd(priv, &cmd);
649}
650
651
652static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
653{
654 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
655 .valid = cpu_to_le32(valid_tx_ant),
656 };
657
658 if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
659 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
660 return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
661 sizeof(struct iwl_tx_ant_config_cmd),
662 &tx_ant_cmd);
663 } else {
664 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
665 return -EOPNOTSUPP;
666 }
667}
668
669static void iwl_send_bt_config(struct iwl_priv *priv)
670{
671 struct iwl_bt_cmd bt_cmd = {
672 .lead_time = BT_LEAD_TIME_DEF,
673 .max_kill = BT_MAX_KILL_DEF,
674 .kill_ack_mask = 0,
675 .kill_cts_mask = 0,
676 };
677
678 if (!iwlwifi_mod_params.bt_coex_active)
679 bt_cmd.flags = BT_COEX_DISABLE;
680 else
681 bt_cmd.flags = BT_COEX_ENABLE;
682
683 priv->bt_enable_flag = bt_cmd.flags;
684 IWL_DEBUG_INFO(priv, "BT coex %s\n",
685 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
686
687 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
688 0, sizeof(struct iwl_bt_cmd), &bt_cmd))
689 IWL_ERR(priv, "failed to send BT Coex Config\n");
690}
691
/**
 * iwl_alive_start - called after REPLY_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by iwl_init_alive_start()).
 *
 * Brings the device into operational state: marks the firmware alive,
 * configures BT coexistence, kicks off runtime calibrations, restores
 * or re-initializes the RXON configuration and commits it, and finally
 * enables power management.  The command ordering below matters (e.g.
 * the BT priority table must be downloaded before any calibration
 * request) — do not reorder.
 *
 * Returns 0 on success or a negative errno (-ERFKILL if the radio is
 * disabled, or a command-send error).
 */
int iwl_alive_start(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	/* no point going further while the radio is kill-switched off */
	if (iwl_is_rfkill(priv))
		return -ERFKILL;

	if (priv->event_log.ucode_trace) {
		/* start collecting data now */
		mod_timer(&priv->ucode_trace, jiffies);
	}

	/* download priority table before any calibration request */
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist) {
		/* Configure Bluetooth device coexistence support */
		if (priv->lib->bt_params->bt_sco_disable)
			priv->bt_enable_pspoll = false;
		else
			priv->bt_enable_pspoll = true;

		/* mark everything valid for this one full config download */
		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
		iwlagn_send_advance_bt_config(priv);
		priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
		priv->cur_rssi_ctx = NULL;

		iwl_send_prio_tbl(priv);

		/* FIXME: w/a to force change uCode BT state machine */
		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
				      BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
				      BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
	} else if (priv->lib->bt_params) {
		/*
		 * default is 2-wire BT coexistence support
		 */
		iwl_send_bt_config(priv);
	}

	/*
	 * Perform runtime calibrations, including DC calibration.
	 */
	iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);

	ieee80211_wake_queues(priv->hw);

	/* Configure Tx antenna selection based on H/W config */
	iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);

	if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
		struct iwl_rxon_cmd *active_rxon =
				(struct iwl_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		struct iwl_rxon_context *tmp;
		/* Initialize our rx_config data */
		for_each_context(priv, tmp)
			iwl_connection_init_rx_config(priv, tmp);

		iwlagn_set_rxon_chain(priv, ctx);
	}

	if (!priv->wowlan) {
		/* WoWLAN ucode will not reply in the same way, skip it */
		iwl_reset_run_time_calib(priv);
	}

	set_bit(STATUS_READY, &priv->status);

	/* Configure the adapter for unassociated operation */
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		return ret;

	/* At this point, the NIC is initialized and operational */
	iwl_rf_kill_ct_config(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");

	return iwl_power_update_mode(priv, true);
}
793
/**
 * iwl_clear_driver_stations - clear knowledge of all stations from driver
 * @priv: iwl priv struct
 *
 * This is called during iwl_down() to make sure that in the case
 * we're coming there from a hardware restart mac80211 will be
 * able to reconfigure stations -- if we're getting there in the
 * normal down flow then the stations will already be cleared.
 *
 * Everything is wiped under sta_lock so concurrent station updates
 * cannot observe a half-cleared table.
 */
static void iwl_clear_driver_stations(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	spin_lock_bh(&priv->sta_lock);
	memset(priv->stations, 0, sizeof(priv->stations));
	priv->num_stations = 0;

	/* no uCode key slots are in use any more */
	priv->ucode_key_table = 0;

	for_each_context(priv, ctx) {
		/*
		 * Remove all key information that is not stored as part
		 * of station information since mac80211 may not have had
		 * a chance to remove all the keys. When device is
		 * reconfigured by mac80211 after an error all keys will
		 * be reconfigured.
		 */
		memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
		ctx->key_mapping_keys = 0;
	}

	spin_unlock_bh(&priv->sta_lock);
}
827
/*
 * Take the device down: cancel scans, clear station/BT state, stop the
 * transport and reset the driver status bits.  Must be called with
 * priv->mutex held.  The teardown order here is deliberate — do not
 * reorder (e.g. num_aux_in_flight must be reset only after the
 * transport has been stopped, see comment below).
 */
void iwl_down(struct iwl_priv *priv)
{
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	lockdep_assert_held(&priv->mutex);

	iwl_scan_cancel_timeout(priv, 200);

	/*
	 * Remember whether a full module exit was already pending so we
	 * can keep the bit set below; otherwise this down is transient
	 * (e.g. restart) and the bit is cleared again.
	 */
	exit_pending =
		test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_clear_ucode_stations(priv, NULL);
	iwl_dealloc_bcast_stations(priv);
	iwl_clear_driver_stations(priv);

	/* reset BT coex data */
	priv->bt_status = 0;
	priv->cur_rssi_ctx = NULL;
	priv->bt_is_sco = 0;
	if (priv->lib->bt_params)
		priv->bt_traffic_load =
			priv->lib->bt_params->bt_init_traffic_load;
	else
		priv->bt_traffic_load = 0;
	priv->bt_full_concurrent = false;
	priv->bt_ci_compliance = 0;

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	priv->ucode_loaded = false;
	iwl_trans_stop_device(priv->trans);

	/* Set num_aux_in_flight must be done after the transport is stopped */
	atomic_set(&priv->num_aux_in_flight, 0);

	/* Clear out all status bits but a few that are stable across reset.
	 * Note: << binds tighter than |, so each term below is
	 * (test_bit(...) << BIT) and the terms are then OR'ed together. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	/* dev_kfree_skb() tolerates NULL, so no need to check */
	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;
}
882
883/*****************************************************************************
884 *
885 * Workqueue callbacks
886 *
887 *****************************************************************************/
888
889static void iwl_bg_run_time_calib_work(struct work_struct *work)
890{
891 struct iwl_priv *priv = container_of(work, struct iwl_priv,
892 run_time_calib_work);
893
894 mutex_lock(&priv->mutex);
895
896 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
897 test_bit(STATUS_SCANNING, &priv->status)) {
898 mutex_unlock(&priv->mutex);
899 return;
900 }
901
902 if (priv->start_calib) {
903 iwl_chain_noise_calibration(priv);
904 iwl_sensitivity_calibration(priv);
905 }
906
907 mutex_unlock(&priv->mutex);
908}
909
/*
 * Prepare for a firmware restart: bring the device down while keeping
 * the BT coexistence state, then reset all aggregation queue mappings.
 * Must be called with priv->mutex held.
 */
void iwlagn_prepare_restart(struct iwl_priv *priv)
{
	bool bt_full_concurrent;
	u8 bt_ci_compliance;
	u8 bt_load;
	u8 bt_status;
	bool bt_is_sco;
	int i;

	lockdep_assert_held(&priv->mutex);

	priv->is_open = 0;

	/*
	 * __iwl_down() will clear the BT status variables,
	 * which is correct, but when we restart we really
	 * want to keep them so restore them afterwards.
	 *
	 * The restart process will later pick them up and
	 * re-configure the hw when we reconfigure the BT
	 * command.
	 */
	bt_full_concurrent = priv->bt_full_concurrent;
	bt_ci_compliance = priv->bt_ci_compliance;
	bt_load = priv->bt_traffic_load;
	bt_status = priv->bt_status;
	bt_is_sco = priv->bt_is_sco;

	iwl_down(priv);

	/* restore the BT state saved above */
	priv->bt_full_concurrent = bt_full_concurrent;
	priv->bt_ci_compliance = bt_ci_compliance;
	priv->bt_traffic_load = bt_load;
	priv->bt_status = bt_status;
	priv->bt_is_sco = bt_is_sco;

	/* reset aggregation queues */
	for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
	/* and stop counts */
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
}
955
956static void iwl_bg_restart(struct work_struct *data)
957{
958 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
959
960 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
961 return;
962
963 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
964 mutex_lock(&priv->mutex);
965 iwlagn_prepare_restart(priv);
966 mutex_unlock(&priv->mutex);
967 iwl_cancel_deferred_work(priv);
968 if (priv->mac80211_registered)
969 ieee80211_restart_hw(priv->hw);
970 else
971 IWL_ERR(priv,
972 "Cannot request restart before registrating with mac80211\n");
973 } else {
974 WARN_ON(1);
975 }
976}
977
978/*****************************************************************************
979 *
980 * driver setup and teardown
981 *
982 *****************************************************************************/
983
/*
 * Allocate the driver workqueue and register all deferred work items
 * and timers.  Counterpart of iwl_cancel_deferred_work().
 *
 * NOTE(review): the create_singlethread_workqueue() return value is not
 * checked here; on allocation failure priv->workqueue stays NULL and a
 * later queue_work() would crash.  Confirm whether the caller handles
 * this, or whether the function should return an error.
 */
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	INIT_WORK(&priv->restart, iwl_bg_restart);
	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
	INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
	INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
	INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
	INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);

	iwl_setup_scan_deferred_work(priv);

	/* BT-specific work items only exist on devices with BT params */
	if (priv->lib->bt_params)
		iwlagn_bt_setup_deferred_work(priv);

	setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
		    (unsigned long)priv);

	setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
		    (unsigned long)priv);
}
1006
/*
 * Cancel all deferred work items and timers registered in
 * iwl_setup_deferred_work().  Blocks until any in-flight work has
 * finished (cancel_work_sync / del_timer_sync).
 */
void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
	/* mirror setup: BT-specific work only exists with BT params */
	if (priv->lib->bt_params)
		iwlagn_bt_cancel_deferred_work(priv);

	cancel_work_sync(&priv->run_time_calib_work);
	cancel_work_sync(&priv->beacon_update);

	iwl_cancel_scan_deferred_work(priv);

	cancel_work_sync(&priv->bt_full_concurrency);
	cancel_work_sync(&priv->bt_runtime_config);

	del_timer_sync(&priv->statistics_periodic);
	del_timer_sync(&priv->ucode_trace);
}
1023
/*
 * Initialize the software state of the driver: locks, defaults, scan
 * parameters and (if applicable) advanced BT coexistence defaults.
 * Returns 0 (kept as int for symmetry with other init paths).
 */
static int iwl_init_drv(struct iwl_priv *priv)
{
	spin_lock_init(&priv->sta_lock);

	mutex_init(&priv->mutex);

	INIT_LIST_HEAD(&priv->calib_results);

	priv->band = IEEE80211_BAND_2GHZ;

	priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
	priv->agg_tids_count = 0;

	priv->rx_statistics_jiffies = jiffies;

	/* Choose which receivers/antennas to use */
	iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_init_scan_params(priv);

	/* init bt coex */
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist) {
		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
		priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
		priv->bt_duration = BT_DURATION_LIMIT_DEF;
		priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
	}

	return 0;
}
1061
/*
 * Release driver-owned memory allocated during operation: scan/beacon
 * commands, NoA data and calibration results.  Counterpart of
 * iwl_init_drv() plus runtime allocations.
 */
static void iwl_uninit_drv(struct iwl_priv *priv)
{
	kfree(priv->scan_cmd);
	kfree(priv->beacon_cmd);
	kfree(rcu_dereference_raw(priv->noa_data));
	iwl_calib_free_results(priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	kfree(priv->wowlan_sram);
#endif
}
1072
/*
 * Fill in hw_params: HT-derived settings from the static config, then
 * let the device family code apply its own device-specific values.
 */
static void iwl_set_hw_params(struct iwl_priv *priv)
{
	if (priv->cfg->ht_params)
		priv->hw_params.use_rts_for_aggregation =
			priv->cfg->ht_params->use_rts_for_aggregation;

	/* Device-specific setup */
	priv->lib->set_hw_params(priv);
}
1082
1083
1084
1085/* show what optional capabilities we have */
1086static void iwl_option_config(struct iwl_priv *priv)
1087{
1088#ifdef CONFIG_IWLWIFI_DEBUG
1089 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
1090#else
1091 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n");
1092#endif
1093
1094#ifdef CONFIG_IWLWIFI_DEBUGFS
1095 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n");
1096#else
1097 IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n");
1098#endif
1099
1100#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
1101 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n");
1102#else
1103 IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
1104#endif
1105}
1106
1107static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1108{
1109 struct iwl_nvm_data *data = priv->nvm_data;
1110
1111 if (data->sku_cap_11n_enable &&
1112 !priv->cfg->ht_params) {
1113 IWL_ERR(priv, "Invalid 11n configuration\n");
1114 return -EINVAL;
1115 }
1116
1117 if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable &&
1118 !data->sku_cap_band_52GHz_enable) {
1119 IWL_ERR(priv, "Invalid device sku\n");
1120 return -EINVAL;
1121 }
1122
1123 IWL_DEBUG_INFO(priv,
1124 "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
1125 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
1126 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
1127 data->sku_cap_11n_enable ? "" : "NOT", "enabled");
1128
1129 priv->hw_params.tx_chains_num =
1130 num_of_ant(data->valid_tx_ant);
1131 if (priv->cfg->rx_with_siso_diversity)
1132 priv->hw_params.rx_chains_num = 1;
1133 else
1134 priv->hw_params.rx_chains_num =
1135 num_of_ant(data->valid_rx_ant);
1136
1137 IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
1138 data->valid_tx_ant,
1139 data->valid_rx_ant);
1140
1141 return 0;
1142}
1143
/*
 * Op-mode entry point (the DVM "probe"): allocates the mac80211 hw and
 * priv structures, selects the per-family library, configures the
 * transport, reads and parses the EEPROM, sets up deferred work and
 * contexts, and registers with mac80211 and debugfs.
 *
 * Returns the new op_mode on success, NULL on any failure; the goto
 * ladder at the end unwinds exactly what was set up before the failure
 * point — keep it in sync with the setup order.
 */
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
						 const struct iwl_cfg *cfg,
						 const struct iwl_fw *fw,
						 struct dentry *dbgfs_dir)
{
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	u16 num_mac;
	u32 ucode_flags;
	struct iwl_trans_config trans_cfg = {};
	/* responses the transport must not treat as TX reclaim triggers */
	static const u8 no_reclaim_cmds[] = {
		REPLY_RX_PHY_CMD,
		REPLY_RX_MPDU_CMD,
		REPLY_COMPRESSED_BA,
		STATISTICS_NOTIFICATION,
		REPLY_TX,
	};
	int i;

	/************************
	 * 1. Allocating HW data
	 ************************/
	hw = iwl_alloc_all();
	if (!hw) {
		pr_err("%s: Cannot allocate network device\n", cfg->name);
		goto out;
	}

	op_mode = hw->priv;
	op_mode->ops = &iwl_dvm_ops;
	priv = IWL_OP_MODE_GET_DVM(op_mode);
	priv->trans = trans;
	priv->dev = trans->dev;
	priv->cfg = cfg;
	priv->fw = fw;

	/* pick the per-device-family library of operations */
	switch (priv->cfg->device_family) {
	case IWL_DEVICE_FAMILY_1000:
	case IWL_DEVICE_FAMILY_100:
		priv->lib = &iwl_dvm_1000_cfg;
		break;
	case IWL_DEVICE_FAMILY_2000:
		priv->lib = &iwl_dvm_2000_cfg;
		break;
	case IWL_DEVICE_FAMILY_105:
		priv->lib = &iwl_dvm_105_cfg;
		break;
	case IWL_DEVICE_FAMILY_2030:
	case IWL_DEVICE_FAMILY_135:
		priv->lib = &iwl_dvm_2030_cfg;
		break;
	case IWL_DEVICE_FAMILY_5000:
		priv->lib = &iwl_dvm_5000_cfg;
		break;
	case IWL_DEVICE_FAMILY_5150:
		priv->lib = &iwl_dvm_5150_cfg;
		break;
	case IWL_DEVICE_FAMILY_6000:
	case IWL_DEVICE_FAMILY_6000i:
		priv->lib = &iwl_dvm_6000_cfg;
		break;
	case IWL_DEVICE_FAMILY_6005:
		priv->lib = &iwl_dvm_6005_cfg;
		break;
	case IWL_DEVICE_FAMILY_6050:
	case IWL_DEVICE_FAMILY_6150:
		priv->lib = &iwl_dvm_6050_cfg;
		break;
	case IWL_DEVICE_FAMILY_6030:
		priv->lib = &iwl_dvm_6030_cfg;
		break;
	default:
		break;
	}

	/* an unknown family leaves priv->lib NULL (zeroed alloc) — bail */
	if (WARN_ON(!priv->lib))
		goto out_free_hw;

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
	trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;

	trans_cfg.command_names = iwl_dvm_cmd_strings;
	trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;

	/* the stop-queue bitmap must be able to hold every HW queue */
	WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
		priv->cfg->base_params->num_of_queues);

	ucode_flags = fw->ucode_capa.flags;

	/* PAN-capable uCode uses a different command queue and more keys */
	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
		trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
	} else {
		priv->sta_key_max_num = STA_KEY_MAX_NUM;
		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
	}

	/* Configure transport layer */
	iwl_trans_configure(priv->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);

	/* At this point both hw and priv are allocated. */

	SET_IEEE80211_DEV(priv->hw, priv->trans->dev);

	iwl_option_config(priv);

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");

	/* is antenna coupling more than 35dB ? */
	priv->bt_ant_couple_ok =
		(iwlwifi_mod_params.ant_coupling >
			IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
			true : false;

	/* bt channel inhibition enabled*/
	priv->bt_ch_announce = true;
	IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
		       (priv->bt_ch_announce) ? "On" : "Off");

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->statistics.lock);

	/***********************
	 * 2. Read REV register
	 ***********************/
	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
		priv->cfg->name, priv->trans->hw_rev);

	if (iwl_trans_start_hw(priv->trans))
		goto out_free_hw;

	/* Read the EEPROM */
	if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
			    &priv->eeprom_blob_size)) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_free_hw;
	}

	/* Reset chip to save power until we load uCode during "up". */
	iwl_trans_stop_device(priv->trans);

	priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
					       priv->eeprom_blob,
					       priv->eeprom_blob_size);
	if (!priv->nvm_data)
		goto out_free_eeprom_blob;

	if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
		goto out_free_eeprom;

	if (iwl_eeprom_init_hw_params(priv))
		goto out_free_eeprom;

	/* extract MAC Address */
	memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN);
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
	priv->hw->wiphy->addresses = priv->addresses;
	priv->hw->wiphy->n_addresses = 1;
	num_mac = priv->nvm_data->n_hw_addrs;
	/* derive a second address by bumping the last octet */
	if (num_mac > 1) {
		memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
		       ETH_ALEN);
		priv->addresses[1].addr[5]++;
		priv->hw->wiphy->n_addresses++;
	}

	/************************
	 * 4. Setup HW constants
	 ************************/
	iwl_set_hw_params(priv);

	if (!(priv->nvm_data->sku_cap_ipan_enable)) {
		IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
		/*
		 * if not PAN, then don't support P2P -- might be a uCode
		 * packaging bug or due to the eeprom check above
		 */
		priv->sta_key_max_num = STA_KEY_MAX_NUM;
		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

		/* Configure transport layer again*/
		iwl_trans_configure(priv->trans, &trans_cfg);
	}

	/*******************
	 * 5. Setup priv
	 *******************/
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
		if (i < IWLAGN_FIRST_AMPDU_QUEUE &&
		    i != IWL_DEFAULT_CMD_QUEUE_NUM &&
		    i != IWL_IPAN_CMD_QUEUE_NUM)
			priv->queue_to_mac80211[i] = i;
		atomic_set(&priv->queue_stop_count[i], 0);
	}

	if (iwl_init_drv(priv))
		goto out_free_eeprom;

	/* At this point both hw and priv are initialized. */

	/********************
	 * 6. Setup services
	 ********************/
	iwl_setup_deferred_work(priv);
	iwl_setup_rx_handlers(priv);

	iwl_power_initialize(priv);
	iwl_tt_initialize(priv);

	snprintf(priv->hw->wiphy->fw_version,
		 sizeof(priv->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	priv->new_scan_threshold_behaviour =
		!!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);

	priv->phy_calib_chain_noise_reset_cmd =
		fw->ucode_capa.standard_phy_calibration_size;
	priv->phy_calib_chain_noise_gain_cmd =
		fw->ucode_capa.standard_phy_calibration_size + 1;

	/* initialize all valid contexts */
	iwl_init_context(priv, ucode_flags);

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 7. Setup and register with mac80211 and debugfs
	 **************************************************/
	if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
		goto out_destroy_workqueue;

	if (iwl_dbgfs_register(priv, dbgfs_dir))
		goto out_mac80211_unregister;

	return op_mode;

out_mac80211_unregister:
	iwlagn_mac_unregister(priv);
out_destroy_workqueue:
	iwl_tt_exit(priv);
	iwl_cancel_deferred_work(priv);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_uninit_drv(priv);
out_free_eeprom_blob:
	kfree(priv->eeprom_blob);
out_free_eeprom:
	iwl_free_nvm_data(priv->nvm_data);
out_free_hw:
	ieee80211_free_hw(priv->hw);
out:
	op_mode = NULL;
	return op_mode;
}
1414
/*
 * Op-mode teardown: unregister from mac80211, free NVM/EEPROM data,
 * drain and destroy the workqueue, release driver state, and hand the
 * transport back.  Mirrors iwl_op_mode_dvm_start().
 */
static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwlagn_mac_unregister(priv);

	iwl_tt_exit(priv);

	kfree(priv->eeprom_blob);
	iwl_free_nvm_data(priv->nvm_data);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	iwl_uninit_drv(priv);

	/* dev_kfree_skb() tolerates NULL */
	dev_kfree_skb(priv->beacon_skb);

	iwl_trans_op_mode_leave(priv->trans);
	ieee80211_free_hw(priv->hw);
}
1444
/*
 * Human-readable names for low-numbered uCode error ids; indexed
 * directly by error id in desc_lookup().
 */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
1475
/*
 * id -> name map for the higher-numbered uCode error ids.  The final
 * "ADVANCED_SYSASSERT" entry acts as the fallback: desc_lookup() only
 * scans the entries before it and returns it when no id matches, so it
 * must remain last.
 */
static struct { char *name; u8 num; } advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
1494
1495static const char *desc_lookup(u32 num)
1496{
1497 int i;
1498 int max = ARRAY_SIZE(desc_lookup_text);
1499
1500 if (num < max)
1501 return desc_lookup_text[num];
1502
1503 max = ARRAY_SIZE(advanced_lookup) - 1;
1504 for (i = 0; i < max; i++) {
1505 if (advanced_lookup[i].num == num)
1506 break;
1507 }
1508 return advanced_lookup[i].name;
1509}
1510
1511#define ERROR_START_OFFSET (1 * sizeof(u32))
1512#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1513
/*
 * Read the firmware error-event table out of device SRAM and dump it
 * to the kernel log (and the iwlwifi tracepoint), translating the
 * error id to a readable name via desc_lookup().
 */
static void iwl_dump_nic_error_log(struct iwl_priv *priv)
{
	struct iwl_trans *trans = priv->trans;
	u32 base;
	struct iwl_error_event_table table;

	/* device-reported pointer, with a firmware-image fallback */
	base = priv->device_pointers.error_event_table;
	if (priv->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = priv->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = priv->fw->inst_errlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(priv->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	/*TODO: Update dbgfs with ISR error stats obtained below */
	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	/* NOTE(review): this compares byte offsets against an entry count
	 * scaled by entry size — presumably "is there at least one valid
	 * entry"; confirm against the firmware table layout. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			priv->status, table.valid);
	}

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.line,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver,
				      table.hw_ver, 0, table.brd_ver);
	IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
	IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(priv, "0x%08X | data1\n", table.data1);
	IWL_ERR(priv, "0x%08X | data2\n", table.data2);
	IWL_ERR(priv, "0x%08X | line\n", table.line);
	IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
}
1588
1589#define EVENT_START_OFFSET (4 * sizeof(u32))
1590
/**
 * iwl_print_event_log - Dump error event log to syslog
 *
 * Reads @num_events entries starting at @start_idx from the uCode event
 * log in device SRAM and prints them either into *@buf (when @bufsz is
 * non-zero) or to the kernel log / tracepoints.  @mode selects the
 * entry layout: 0 = two u32s (data, event id), non-zero = three u32s
 * (timestamp, data, event id).
 *
 * Returns the updated write position @pos.  The device must stay
 * powered for the SRAM reads, hence the grab/release of NIC access
 * around the auto-incrementing HBUS_TARG_MEM reads — do not reorder.
 */
static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
			       u32 num_events, u32 mode,
			       int pos, char **buf, size_t bufsz)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	struct iwl_trans *trans = priv->trans;

	if (num_events == 0)
		return pos;

	/* device-reported pointer, with a firmware-image fallback */
	base = priv->device_pointers.log_event_table;
	if (priv->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = priv->fw->init_evtlog_ptr;
	} else {
		if (!base)
			base = priv->fw->inst_evtlog_ptr;
	}

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* Make sure device is powered up for SRAM reads */
	if (!iwl_trans_grab_nic_access(trans, false, &reg_flags))
		return pos;

	/* Set starting address; reads will auto-increment */
	iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			/* data, ev */
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOG:0x%08x:%04u\n",
						time, ev);
			} else {
				trace_iwlwifi_dev_ucode_event(trans->dev, 0,
					time, ev);
				IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
					time, ev);
			}
		} else {
			data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOGT:%010u:0x%08x:%04u\n",
						 time, data, ev);
			} else {
				IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
					time, data, ev);
				trace_iwlwifi_dev_ucode_event(trans->dev, time,
					data, ev);
			}
		}
	}

	/* Allow device to power down */
	iwl_trans_release_nic_access(trans, &reg_flags);
	return pos;
}
1670
1671/**
1672 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1673 */
1674static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1675 u32 num_wraps, u32 next_entry,
1676 u32 size, u32 mode,
1677 int pos, char **buf, size_t bufsz)
1678{
1679 /*
1680 * display the newest DEFAULT_LOG_ENTRIES entries
1681 * i.e the entries just before the next ont that uCode would fill.
1682 */
1683 if (num_wraps) {
1684 if (next_entry < size) {
1685 pos = iwl_print_event_log(priv,
1686 capacity - (size - next_entry),
1687 size - next_entry, mode,
1688 pos, buf, bufsz);
1689 pos = iwl_print_event_log(priv, 0,
1690 next_entry, mode,
1691 pos, buf, bufsz);
1692 } else
1693 pos = iwl_print_event_log(priv, next_entry - size,
1694 size, mode, pos, buf, bufsz);
1695 } else {
1696 if (next_entry < size) {
1697 pos = iwl_print_event_log(priv, 0, next_entry,
1698 mode, pos, buf, bufsz);
1699 } else {
1700 pos = iwl_print_event_log(priv, next_entry - size,
1701 size, mode, pos, buf, bufsz);
1702 }
1703 }
1704 return pos;
1705}
1706
1707#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1708
/*
 * Dump the uCode event log.  Reads the log header (capacity, mode,
 * wrap count, write index) from SRAM, sanity-limits the values against
 * the firmware-declared log size, then prints either the full log or
 * just the newest entries, into *buf (allocated here when buf != NULL,
 * debug builds only) or to the kernel log.
 *
 * Returns the number of characters written (0 if the log is empty),
 * -EINVAL for an invalid log pointer, or -ENOMEM if the dump buffer
 * allocation fails.  Caller frees *buf.
 */
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
			    char **buf)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	u32 logsize;
	int pos = 0;
	size_t bufsz = 0;
	struct iwl_trans *trans = priv->trans;

	/* device-reported pointer, with a firmware-image fallback */
	base = priv->device_pointers.log_event_table;
	if (priv->cur_ucode == IWL_UCODE_INIT) {
		logsize = priv->fw->init_evtlog_size;
		if (!base)
			base = priv->fw->init_evtlog_ptr;
	} else {
		logsize = priv->fw->inst_evtlog_size;
		if (!base)
			base = priv->fw->inst_evtlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base,
			(priv->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_trans_read_mem32(trans, base);
	mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32)));
	num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32)));
	next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32)));

	/* clamp device-reported values — the hardware may be confused */
	if (capacity > logsize) {
		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
			"entries\n", capacity, logsize);
		capacity = logsize;
	}

	if (next_entry > logsize) {
		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
			next_entry, logsize);
		next_entry = logsize;
	}

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

	/* without verbose debugging or full_log, cap the dump size */
	if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (buf) {
		/* 48 bytes per formatted entry line */
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl_print_event_log(priv, next_entry,
						capacity - next_entry, mode,
						pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl_print_event_log(priv, 0,
					  next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
						next_entry, size, mode,
						pos, buf, bufsz);
#else
	pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}
1809
/*
 * iwlagn_fw_error - common handling for a dead/erroring firmware
 * @priv: driver private data
 * @ondemand: true when the error was deliberately triggered by the driver;
 *	skips the reload rate-limiting bookkeeping below
 *
 * Marks the uCode as unloaded, sets STATUS_FW_ERROR, aborts pending
 * notification waits, and (unless restarts come too fast or are disabled
 * via the restart_fw module parameter) queues the restart worker.
 */
static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
	unsigned int reload_msec;
	unsigned long reload_jiffies;

	if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
		iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);

	/* uCode is no longer loaded. */
	priv->ucode_loaded = false;

	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	iwl_abort_notification_waits(&priv->notif_wait);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!ondemand) {
		/*
		 * If firmware keep reloading, then it indicate something
		 * serious wrong and firmware having problem to recover
		 * from it. Instead of keep trying which will fill the syslog
		 * and hang the system, let's just stop it
		 */
		reload_jiffies = jiffies;
		/* signed subtraction handles jiffies wraparound */
		reload_msec = jiffies_to_msecs((long) reload_jiffies -
					(long) priv->reload_jiffies);
		priv->reload_jiffies = reload_jiffies;
		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
			priv->reload_count++;
			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
				IWL_ERR(priv, "BUG_ON, Stop restarting\n");
				return;
			}
		} else
			priv->reload_count = 0;
	}

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		if (iwlwifi_mod_params.restart_fw) {
			IWL_DEBUG_FW_ERRORS(priv,
				  "Restarting adapter due to uCode error.\n");
			queue_work(priv->workqueue, &priv->restart);
		} else
			IWL_DEBUG_FW_ERRORS(priv,
				  "Detected FW error, but not restarting\n");
	}
}
1861
1862static void iwl_nic_error(struct iwl_op_mode *op_mode)
1863{
1864 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1865
1866 IWL_ERR(priv, "Loaded firmware version: %s\n",
1867 priv->fw->fw_version);
1868
1869 iwl_dump_nic_error_log(priv);
1870 iwl_dump_nic_event_log(priv, false, NULL);
1871
1872 iwlagn_fw_error(priv, false);
1873}
1874
1875static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
1876{
1877 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1878
1879 if (!iwl_check_for_ct_kill(priv)) {
1880 IWL_ERR(priv, "Restarting adapter queue is full\n");
1881 iwlagn_fw_error(priv, false);
1882 }
1883}
1884
#define EEPROM_RF_CONFIG_TYPE_MAX 0x3 /* highest valid radio_cfg_type value */
1886
/*
 * iwl_nic_config - op_mode callback to apply NIC configuration registers
 * @op_mode: generic op_mode handle, converted to our iwl_priv
 *
 * Writes the MAC step/dash from the HW revision and the radio type/step/dash
 * from NVM data into CSR_HW_IF_CONFIG_REG, enables the radio/MAC "SI" bits
 * for uCode use, applies a PCIe early-power-off workaround, and finally
 * invokes the per-device nic_config hook, if any. Register write order is
 * kept as-is; do not reorder.
 */
static void iwl_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

	/* SKU Control */
	iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
				(CSR_HW_REV_STEP(priv->trans->hw_rev) <<
					CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
				(CSR_HW_REV_DASH(priv->trans->hw_rev) <<
					CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));

	/* write radio config values to register */
	if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
		u32 reg_val =
			priv->nvm_data->radio_cfg_type <<
				CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
			priv->nvm_data->radio_cfg_step <<
				CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
			priv->nvm_data->radio_cfg_dash <<
				CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

		iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
					CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
					CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
					CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH,
					reg_val);

		IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
			 priv->nvm_data->radio_cfg_type,
			 priv->nvm_data->radio_cfg_step,
			 priv->nvm_data->radio_cfg_dash);
	} else {
		/* out-of-range radio config in NVM — leave PHY bits alone */
		WARN_ON(1);
	}

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted),
	 * causing ME FW to lose ownership and not being able to obtain it back.
	 */
	iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
			       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);

	if (priv->lib->nic_config)
		priv->lib->nic_config(priv);
}
1940
1941static void iwl_wimax_active(struct iwl_op_mode *op_mode)
1942{
1943 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1944
1945 clear_bit(STATUS_READY, &priv->status);
1946 IWL_ERR(priv, "RF is used by WiMAX\n");
1947}
1948
1949static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
1950{
1951 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1952 int mq = priv->queue_to_mac80211[queue];
1953
1954 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
1955 return;
1956
1957 if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
1958 IWL_DEBUG_TX_QUEUES(priv,
1959 "queue %d (mac80211 %d) already stopped\n",
1960 queue, mq);
1961 return;
1962 }
1963
1964 set_bit(mq, &priv->transport_queue_stop);
1965 ieee80211_stop_queue(priv->hw, mq);
1966}
1967
1968static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
1969{
1970 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1971 int mq = priv->queue_to_mac80211[queue];
1972
1973 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
1974 return;
1975
1976 if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
1977 IWL_DEBUG_TX_QUEUES(priv,
1978 "queue %d (mac80211 %d) already awake\n",
1979 queue, mq);
1980 return;
1981 }
1982
1983 clear_bit(mq, &priv->transport_queue_stop);
1984
1985 if (!priv->passive_no_rx)
1986 ieee80211_wake_queue(priv->hw, mq);
1987}
1988
1989void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
1990{
1991 int mq;
1992
1993 if (!priv->passive_no_rx)
1994 return;
1995
1996 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
1997 if (!test_bit(mq, &priv->transport_queue_stop)) {
1998 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
1999 ieee80211_wake_queue(priv->hw, mq);
2000 } else {
2001 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
2002 }
2003 }
2004
2005 priv->passive_no_rx = false;
2006}
2007
2008static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2009{
2010 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2011 struct ieee80211_tx_info *info;
2012
2013 info = IEEE80211_SKB_CB(skb);
2014 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2015 ieee80211_free_txskb(priv->hw, skb);
2016}
2017
2018static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2019{
2020 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2021
2022 if (state)
2023 set_bit(STATUS_RF_KILL_HW, &priv->status);
2024 else
2025 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2026
2027 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
2028
2029 return false;
2030}
2031
/* op_mode hooks the iwlwifi core/transport calls into this (DVM) driver */
static const struct iwl_op_mode_ops iwl_dvm_ops = {
	.start = iwl_op_mode_dvm_start,
	.stop = iwl_op_mode_dvm_stop,
	.rx = iwl_rx_dispatch,
	.queue_full = iwl_stop_sw_queue,
	.queue_not_full = iwl_wake_sw_queue,
	.hw_rf_kill = iwl_set_hw_rfkill_state,
	.free_skb = iwl_free_skb,
	.nic_error = iwl_nic_error,
	.cmd_queue_full = iwl_cmd_queue_full,
	.nic_config = iwl_nic_config,
	.wimax_active = iwl_wimax_active,
};
2045
2046/*****************************************************************************
2047 *
2048 * driver and module entry point
2049 *
2050 *****************************************************************************/
2051static int __init iwl_init(void)
2052{
2053
2054 int ret;
2055
2056 ret = iwlagn_rate_control_register();
2057 if (ret) {
2058 pr_err("Unable to register rate control algorithm: %d\n", ret);
2059 return ret;
2060 }
2061
2062 ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
2063 if (ret) {
2064 pr_err("Unable to register op_mode: %d\n", ret);
2065 iwlagn_rate_control_unregister();
2066 }
2067
2068 return ret;
2069}
2070module_init(iwl_init);
2071
2072static void __exit iwl_exit(void)
2073{
2074 iwl_opmode_deregister("iwldvm");
2075 iwlagn_rate_control_unregister();
2076}
2077module_exit(iwl_exit);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
new file mode 100644
index 000000000000..1513dbc79c14
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -0,0 +1,395 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-io.h"
35#include "iwl-debug.h"
36#include "iwl-trans.h"
37#include "iwl-modparams.h"
38#include "dev.h"
39#include "agn.h"
40#include "commands.h"
41#include "power.h"
42
/* default true: keep the NIC continuously awake unless explicitly enabled */
static bool force_cam = true;
module_param(force_cam, bool, 0644);
MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");
46
47/*
48 * Setting power level allows the card to go to sleep when not busy.
49 *
50 * We calculate a sleep command based on the required latency, which
51 * we get from mac80211. In order to handle thermal throttling, we can
52 * also use pre-defined power levels.
53 */
54
55/*
56 * This defines the old power levels. They are still used by default
57 * (level 1) and for thermal throttle (levels 3 through 5)
58 */
59
/* one row of a power-level table: the sleep command to send plus how many
 * DTIM beacons may be skipped at that level */
struct iwl_power_vec_entry {
	struct iwl_powertable_cmd cmd;
	u8 no_dtim;	/* number of skip dtim */
};
64
/* DTIM-period boundaries selecting range_0/range_1/range_2 tables below */
#define IWL_DTIM_RANGE_0_MAX	2
#define IWL_DTIM_RANGE_1_MAX	10

/* shorthand initializers for the leading iwl_powertable_cmd fields */
#define NOSLP cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK |	\
		IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
		IWL_POWER_ADVANCE_PM_ENA_MSK)
#define ASLP_TOUT(T) cpu_to_le32(T)
#define TU_TO_USEC 1024
/* timeout given in TU (1024 us), converted to microseconds */
#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
/* five-entry sleep interval vector, little-endian on the wire */
#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
				cpu_to_le32(X1), \
				cpu_to_le32(X2), \
				cpu_to_le32(X3), \
				cpu_to_le32(X4)}
81/* default power management (not Tx power) table values */
82/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
83/* DTIM 0 - 2 */
/* rows are power levels 1..5; trailing integer is no_dtim (DTIMs to skip) */
static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
};
91
92
93/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
94/* DTIM 3 - 10 */
/* rows are power levels 1..5; trailing integer is no_dtim (DTIMs to skip) */
static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
};
102
103/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
104/* DTIM 11 - */
/* rows are power levels 1..5; trailing integer is no_dtim (DTIMs to skip) */
static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};
112
113/* advance power management */
114/* DTIM 0 - 2 */
/* advanced-PM variant of range_0; rows are power levels 1..5 */
static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
};
127
128
129/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
130/* DTIM 3 - 10 */
/* advanced-PM variant of range_1; rows are power levels 1..5 */
static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
};
143
144/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
145/* DTIM 11 - */
/* advanced-PM variant of range_2; rows are power levels 1..5 */
static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
};
158
/*
 * iwl_static_sleep_cmd - build a sleep command from the pre-defined tables
 * @priv: driver private data
 * @cmd: command to fill in
 * @lvl: power level index (0..IWL_POWER_NUM-1)
 * @period: DTIM period in beacons (0 means unknown/not yet associated)
 *
 * Picks the table matching the DTIM period (and advanced-PM capability),
 * copies the level's entry, then clamps the sleep-interval vector so it
 * never exceeds the allowed multiple of the DTIM period, stays monotonic,
 * and respects IWL_CONN_MAX_LISTEN_INTERVAL.
 */
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
				 struct iwl_powertable_cmd *cmd,
				 enum iwl_power_level lvl, int period)
{
	const struct iwl_power_vec_entry *table;
	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
	int i;
	u8 skip;
	u32 slp_itrvl;

	/* select the table by DTIM range; apm_* tables when the device
	 * supports advanced power management */
	if (priv->lib->adv_pm) {
		table = apm_range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = apm_range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = apm_range_0;
	} else {
		table = range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = range_0;
	}

	/* out-of-range level: fall back to an all-zero (CAM) command */
	if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
		memset(cmd, 0, sizeof(*cmd));
	else
		*cmd = table[lvl].cmd;

	if (period == 0) {
		/* unknown DTIM period: no skipping, cap every interval at 1 */
		skip = 0;
		period = 1;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = 1;

	} else {
		skip = table[lvl].no_dtim;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
	}

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	/* figure out the listen interval based on dtim period and skip */
	if (slp_itrvl == 0xFF)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(period * (skip + 1));

	/* round the last interval down to a whole multiple of the period */
	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > period)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32((slp_itrvl / period) * period);

	if (skip)
		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
	else
		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;

	if (priv->cfg->base_params->shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;

	if (iwl_advanced_bt_coexist(priv)) {
		if (!priv->lib->bt_params->bt_sco_disable)
			cmd->flags |= IWL_POWER_BT_SCO_ENA;
		else
			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
	}


	/* never sleep past the maximum listen interval */
	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);

	/* enforce max sleep interval */
	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
		if (le32_to_cpu(cmd->sleep_interval[i]) >
		    (max_sleep[i] * period))
			cmd->sleep_interval[i] =
				cpu_to_le32(max_sleep[i] * period);
		/* keep the vector non-decreasing from entry to entry */
		if (i != (IWL_POWER_VEC_SIZE - 1)) {
			if (le32_to_cpu(cmd->sleep_interval[i]) >
			    le32_to_cpu(cmd->sleep_interval[i+1]))
				cmd->sleep_interval[i] =
					cmd->sleep_interval[i+1];
		}
	}

	if (priv->power_data.bus_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;
	else
		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
			skip, period);
	/* The power level here is 0-4 (used as array index), but user expects
	to see 1-5 (according to spec). */
	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}
260
261static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
262 struct iwl_powertable_cmd *cmd)
263{
264 memset(cmd, 0, sizeof(*cmd));
265
266 if (priv->power_data.bus_pm)
267 cmd->flags |= IWL_POWER_PCI_PM_MSK;
268
269 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
270}
271
272static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
273{
274 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
275 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
276 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
277 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
278 IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
279 le32_to_cpu(cmd->sleep_interval[0]),
280 le32_to_cpu(cmd->sleep_interval[1]),
281 le32_to_cpu(cmd->sleep_interval[2]),
282 le32_to_cpu(cmd->sleep_interval[3]),
283 le32_to_cpu(cmd->sleep_interval[4]));
284
285 return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0,
286 sizeof(struct iwl_powertable_cmd), cmd);
287}
288
/*
 * iwl_power_build_cmd - choose the sleep command to send
 * @priv: driver private data
 * @cmd: command to fill in
 *
 * Decision order: force_cam module param -> wowlan -> idle -> thermal
 * throttling -> PS disabled -> debugfs override -> power_level module
 * parameter (or level 1 by default). Later cases are only reached when
 * the earlier ones do not apply.
 */
static void iwl_power_build_cmd(struct iwl_priv *priv,
				struct iwl_powertable_cmd *cmd)
{
	bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
	int dtimper;

	if (force_cam) {
		iwl_power_sleep_cam_cmd(priv, cmd);
		return;
	}

	/* GNU ?: — use the DTIM period from mac80211, or 1 if unknown */
	dtimper = priv->hw->conf.ps_dtim_period ?: 1;

	if (priv->wowlan)
		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
	else if (!priv->lib->no_idle_support &&
		 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
		/* idle: deepest sleep with a fixed 20-beacon period */
		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
	else if (iwl_tt_is_low_power_state(priv)) {
		/* in thermal throttling low power state */
		iwl_static_sleep_cmd(priv, cmd,
		    iwl_tt_current_power_mode(priv), dtimper);
	} else if (!enabled)
		iwl_power_sleep_cam_cmd(priv, cmd);
	else if (priv->power_data.debug_sleep_level_override >= 0)
		iwl_static_sleep_cmd(priv, cmd,
				     priv->power_data.debug_sleep_level_override,
				     dtimper);
	else {
		/* Note that the user parameter is 1-5 (according to spec),
		but we pass 0-4 because it acts as an array index. */
		if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
		    iwlwifi_mod_params.power_level <= IWL_POWER_NUM)
			iwl_static_sleep_cmd(priv, cmd,
				iwlwifi_mod_params.power_level - 1, dtimper);
		else
			iwl_static_sleep_cmd(priv, cmd,
				IWL_POWER_INDEX_1, dtimper);
	}
}
329
/*
 * iwl_power_set_mode - send a power table command to the firmware
 * @priv: driver private data
 * @cmd: fully built sleep command
 * @force: send even if identical to the last command, and even while
 *	a scan is in progress
 *
 * Requires priv->mutex. Returns 0 on success (including the cases where
 * the command was deduplicated or deferred past a scan), -EIO when the
 * radio is not ready, or the firmware command error.
 */
int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
		       bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&priv->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
			priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

	/* no-op if nothing changed, unless forced */
	if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
		IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
		return 0;
	}

	/* raise PMI before the device is allowed to sleep */
	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		iwl_dvm_set_pmi(priv, true);

	ret = iwl_set_power(priv, cmd);
	if (!ret) {
		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			iwl_dvm_set_pmi(priv, false);

		if (update_chains)
			iwl_update_chain_flags(priv);
		else
			IWL_DEBUG_POWER(priv,
					"Cannot update the power, chain noise "
					"calibration running: %d\n",
					priv->chain_noise_data.state);

		/* remember the command actually accepted by the firmware */
		memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IWL_ERR(priv, "set power fail, ret = %d\n", ret);

	return ret;
}
377
378int iwl_power_update_mode(struct iwl_priv *priv, bool force)
379{
380 struct iwl_powertable_cmd cmd;
381
382 iwl_power_build_cmd(priv, &cmd);
383 return iwl_power_set_mode(priv, &cmd, force);
384}
385
386/* initialize to default */
387void iwl_power_initialize(struct iwl_priv *priv)
388{
389 priv->power_data.bus_pm = priv->trans->pm_support;
390
391 priv->power_data.debug_sleep_level_override = -1;
392
393 memset(&priv->power_data.sleep_cmd, 0,
394 sizeof(priv->power_data.sleep_cmd));
395}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
new file mode 100644
index 000000000000..570d3a5e4670
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
@@ -0,0 +1,47 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__
30
31#include "commands.h"
32
/* per-device power-management state */
struct iwl_power_mgr {
	struct iwl_powertable_cmd sleep_cmd;	/* last command sent to FW */
	struct iwl_powertable_cmd sleep_cmd_next; /* deferred (e.g. during scan) */
	int debug_sleep_level_override;	/* debugfs override; -1 = none */
	bool bus_pm;	/* PCIe power management supported */
};
39
40int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
41 bool force);
42int iwl_power_update_mode(struct iwl_priv *priv, bool force);
43void iwl_power_initialize(struct iwl_priv *priv);
44
45extern bool no_sleep_autoadjust;
46
47#endif /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
new file mode 100644
index 000000000000..cef921c1a623
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -0,0 +1,3338 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/skbuff.h>
28#include <linux/slab.h>
29#include <net/mac80211.h>
30
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/delay.h>
34
35#include <linux/workqueue.h>
36
37#include "dev.h"
38#include "agn.h"
39
40#define RS_NAME "iwl-agn-rs"
41
42#define NUM_TRY_BEFORE_ANT_TOGGLE 1
43#define IWL_NUMBER_TRY 1
44#define IWL_HT_NUMBER_TRY 3
45
46#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
47#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
48#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
49
50/* max allowed rate miss before sync LQ cmd */
51#define IWL_MISSED_RATE_MAX 15
/* max time to accum history, 3 seconds */
53#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
54
/*
 * For each HT rate index, the legacy (CCK/OFDM) rate index to fall back to
 * when leaving an HT mode; used by rs_get_lower_rate().
 */
static u8 rs_ht_to_legacy[] = {
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
};
64
/*
 * Next antenna configuration to try, indexed by the current ANT_* bitmask;
 * walked by rs_toggle_antenna() until a valid configuration is found.
 */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
75
/*
 * Expand to one iwl_rates[] initializer: the PLCP codes for the legacy,
 * SISO, MIMO2 and MIMO3 forms of the rate, the IEEE rate value, and the
 * previous/next rate indices (index-walk, rate-scale walk, and antenna
 * toggle variants) used when stepping up or down the rate table.
 */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_MIMO3_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
88
/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 * Note: the CCK entries (1/2/5.5/11) have no HT (SISO/MIMO) form, and the
 * 60 Mbps entry exists only as an HT rate (see FIXME on its legacy links).
 */
const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
	/* FIXME:RS:          ^^    should be INV (legacy) */
};
113
114static inline u8 rs_extract_rate(u32 rate_n_flags)
115{
116 return (u8)(rate_n_flags & RATE_MCS_RATE_MSK);
117}
118
/*
 * Translate a uCode rate_n_flags word into an index into iwl_rates[].
 * HT rates have their MCS folded onto the legacy OFDM index range;
 * legacy rates are looked up by PLCP code. Returns -1 if no match.
 */
static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = rs_extract_rate(rate_n_flags);

		/* Fold the MIMO3/MIMO2 PLCP ranges down to the base range */
		if (idx >= IWL_RATE_MIMO3_6M_PLCP)
			idx = idx - IWL_RATE_MIMO3_6M_PLCP;
		else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
			if (iwl_rates[idx].plcp ==
					rs_extract_rate(rate_n_flags))
				return idx;
	}

	/* No table entry matched this rate word */
	return -1;
}
149
150static void rs_rate_scale_perform(struct iwl_priv *priv,
151 struct sk_buff *skb,
152 struct ieee80211_sta *sta,
153 struct iwl_lq_sta *lq_sta);
154static void rs_fill_link_cmd(struct iwl_priv *priv,
155 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
156static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
157
158
159#ifdef CONFIG_MAC80211_DEBUGFS
160static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
161 u32 *rate_n_flags, int index);
162#else
163static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
164 u32 *rate_n_flags, int index)
165{}
166#endif
167
/**
 * The following tables contain the expected throughput metrics for all rates
 *
 *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
 *
 * where invalid entries are zeros.
 *
 * CCK rates are only valid in legacy table and will only be used in G
 * (2.4 GHz) band.
 *
 * Each HT table below has four rows: normal guard interval, short guard
 * interval (SGI), aggregation (AGG), and aggregation with SGI.
 */

static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};

static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
	{0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
	{0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
	{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};

static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
	{0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
	{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};

static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
	{0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
	{0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
	{0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};

static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
	{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
	{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};

static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
	{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
	{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
	{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};

static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
	{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
	{0, 0, 0, 0, 254, 0, 443, 584, 695, 868,  984, 1030, 1070}, /* AGG */
	{0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
};
224
/*
 * Human-readable rate/modulation strings, indexed like iwl_rates[]
 * (mbps, mcs) -- presumably for debugfs output; confirm against the
 * CONFIG_MAC80211_DEBUGFS code in this file.
 */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
	{  "1", "BPSK DSSS"},
	{  "2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{ "11", "QPSK CCK"},
	{  "6", "BPSK 1/2"},
	{  "9", "BPSK 1/2"},
	{ "12", "QPSK 1/2"},
	{ "18", "QPSK 3/4"},
	{ "24", "16QAM 1/2"},
	{ "36", "16QAM 3/4"},
	{ "48", "64QAM 2/3"},
	{ "54", "64QAM 3/4"},
	{ "60", "64QAM 5/6"},
};
241
242#define MCS_INDEX_PER_STREAM (8)
243
244static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
245{
246 window->data = 0;
247 window->success_counter = 0;
248 window->success_ratio = IWL_INVALID_VALUE;
249 window->counter = 0;
250 window->average_tpt = IWL_INVALID_VALUE;
251 window->stamp = 0;
252}
253
254static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
255{
256 return (ant_type & valid_antenna) == ant_type;
257}
258
/*
 * removes the old data from the statistics. All data that is older than
 * TID_MAX_TIME_DIFF, will be deleted.
 *
 * The traffic load is a ring buffer of per-cell packet counts, with
 * tl->head the oldest cell and tl->time_stamp its timestamp; cells are
 * popped from the head until the remaining history is young enough.
 */
static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	while (tl->queue_count &&
	       (tl->time_stamp < oldest_time)) {
		/* Drop the head cell's contribution and advance the ring */
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;
	}
}
279
/*
 * increment traffic load value for tid and also remove
 * any old values if passed the certain time period
 *
 * Returns the packet's TID on success, or IWL_MAX_TID_COUNT when the
 * frame is not QoS data, the TID is out of range, or this was the first
 * packet (history freshly initialized).
 */
static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
			   struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;
	u8 tid;

	/* Only QoS data frames carry a TID */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return IWL_MAX_TID_COUNT;

	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return IWL_MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* Round down to the cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return IWL_MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		rs_tl_rm_old_stats(tl, curr_time);

	/* Account this packet in its time cell of the ring buffer */
	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[index] = tl->packet_count[index] + 1;
	tl->total = tl->total + 1;

	if ((index + 1) > tl->queue_count)
		tl->queue_count = index + 1;

	return tid;
}
333
#ifdef CONFIG_MAC80211_DEBUGFS
/**
 * Program the device to use a fixed rate for frame transmission.
 * This is for debugging/testing only.
 * Once the device starts using a fixed rate, the module must be reloaded
 * to bring back normal operation.
 */
static void rs_program_fix_rate(struct iwl_priv *priv,
				struct iwl_lq_sta *lq_sta)
{
	struct iwl_station_priv *sta_priv =
		container_of(lq_sta, struct iwl_station_priv, lq_sta);
	struct iwl_rxon_context *ctx = sta_priv->ctx;

	/* Re-enable the full rate sets so any fixed rate can be selected */
	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo3_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */

	IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
		lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);

	if (lq_sta->dbg_fixed_rate) {
		/* NOTE(review): priv is deliberately NULL here - confirm
		 * rs_fill_link_cmd tolerates it on this debug path */
		rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
		iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
				false);
	}
}
#endif
363
/*
 * get the traffic load value for tid
 *
 * Prunes expired history cells first (via rs_tl_rm_old_stats), then
 * returns the total packet count over the remaining window; 0 when
 * the tid is invalid or no history has accumulated.
 */
static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;

	if (tid >= IWL_MAX_TID_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* Round down to the cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		rs_tl_rm_old_stats(tl, curr_time);

	return tl->total;
}
394
/*
 * Try to open a TX BlockAck (aggregation) session for @tid.
 * Returns 0 on success, -EAGAIN when blocked by BT traffic or when
 * mac80211 and the driver are out of sync, or another negative errno
 * from mac80211.
 */
static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
				     struct iwl_lq_sta *lq_data, u8 tid,
				     struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	/*
	 * Don't create TX aggregation sessions when in high
	 * BT traffic, as they would just be disrupted by BT.
	 */
	if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
		IWL_DEBUG_COEX(priv,
			       "BT traffic (%d), no aggregation allowed\n",
			       priv->bt_traffic_load);
		return ret;
	}

	/*
	 * The result is unused here, but the call prunes expired traffic
	 * history for this tid as a side effect (see rs_tl_get_load).
	 */
	load = rs_tl_get_load(lq_data, tid);

	IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
			sta->addr, tid);
	ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
	if (ret == -EAGAIN) {
		/*
		 * driver and mac80211 is out of sync
		 * this might be cause by reloading firmware
		 * stop the tx ba session here
		 */
		IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
			tid);
		ieee80211_stop_tx_ba_session(sta, tid);
	}
	return ret;
}
430
431static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
432 struct iwl_lq_sta *lq_data,
433 struct ieee80211_sta *sta)
434{
435 if (tid < IWL_MAX_TID_COUNT)
436 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
437 else
438 IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
439 tid, IWL_MAX_TID_COUNT);
440}
441
442static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
443{
444 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
445 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
446 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
447}
448
449/*
450 * Static function to get the expected throughput from an iwl_scale_tbl_info
451 * that wraps a NULL pointer check
452 */
453static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
454{
455 if (tbl->expected_tpt)
456 return tbl->expected_tpt[rs_index];
457 return 0;
458}
459
/**
 * rs_collect_tx_data - Update the success/failure sliding window
 *
 * We keep a sliding window of the last 62 packets transmitted
 * at this rate.  window->data contains the bitmask of successful
 * packets.
 *
 * @scale_index: rate index whose window to update (rejected if out of range)
 * @attempts:    number of tx attempts to record
 * @successes:   how many of the most recent attempts succeeded
 *
 * Returns 0, or -EINVAL for a bad scale_index.
 */
static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
			      int scale_index, int attempts, int successes)
{
	struct iwl_rate_scale_data *window = NULL;
	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
		return -EINVAL;

	/* Select window for current tx bit rate */
	window = &(tbl->win[scale_index]);

	/* Get expected throughput */
	tpt = get_expected_tpt(tbl, scale_index);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history window; anything older isn't really relevant any more.
	 * If we have filled up the sliding window, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (window->counter >= IWL_RATE_MAX_WINDOW) {

			/* remove earliest */
			window->counter = IWL_RATE_MAX_WINDOW - 1;

			if (window->data & mask) {
				window->data &= ~mask;
				window->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		window->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		window->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			window->success_counter++;
			window->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0! */
	if (window->counter > 0)
		window->success_ratio = 128 * (100 * window->success_counter)
					/ window->counter;
	else
		window->success_ratio = IWL_INVALID_VALUE;

	fail_count = window->counter - window->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
	else
		window->average_tpt = IWL_INVALID_VALUE;

	/* Tag this window as having been updated */
	window->stamp = jiffies;

	return 0;
}
540
/*
 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
 *
 * Combines the rate's PLCP code (legacy, SISO, MIMO2 or MIMO3 depending on
 * tbl->lq_type) with the antenna, bandwidth, SGI and green-field flag bits
 * described by @tbl.
 */
/* FIXME:RS:remove this function and put the flags statically in the table */
static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
				 struct iwl_scale_tbl_info *tbl,
				 int index, u8 use_green)
{
	u32 rate_n_flags = 0;

	if (is_legacy(tbl->lq_type)) {
		rate_n_flags = iwl_rates[index].plcp;
		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
			rate_n_flags |= RATE_MCS_CCK_MSK;

	} else if (is_Ht(tbl->lq_type)) {
		if (index > IWL_LAST_OFDM_RATE) {
			IWL_ERR(priv, "Invalid HT rate index %d\n", index);
			index = IWL_LAST_OFDM_RATE;
		}
		rate_n_flags = RATE_MCS_HT_MSK;

		if (is_siso(tbl->lq_type))
			rate_n_flags |= iwl_rates[index].plcp_siso;
		else if (is_mimo2(tbl->lq_type))
			rate_n_flags |= iwl_rates[index].plcp_mimo2;
		else
			rate_n_flags |= iwl_rates[index].plcp_mimo3;
	} else {
		IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
	}

	/* Encode the table's antenna selection */
	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
						     RATE_MCS_ANT_ABC_MSK);

	if (is_Ht(tbl->lq_type)) {
		if (tbl->is_ht40) {
			if (tbl->is_dup)
				rate_n_flags |= RATE_MCS_DUP_MSK;
			else
				rate_n_flags |= RATE_MCS_HT40_MSK;
		}
		if (tbl->is_SGI)
			rate_n_flags |= RATE_MCS_SGI_MSK;

		if (use_green) {
			rate_n_flags |= RATE_MCS_GF_MSK;
			/* GF+SGI is not a legal SISO combination; drop SGI */
			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
				rate_n_flags &= ~RATE_MCS_SGI_MSK;
				IWL_ERR(priv, "GF was set with SGI:SISO\n");
			}
		}
	}
	return rate_n_flags;
}
596
/*
 * Interpret uCode API's rate_n_flags format,
 * fill "search" or "active" tx mode table.
 *
 * Decodes @rate_n_flags into @tbl (lq_type, antenna, SGI/HT40/DUP flags)
 * and @rate_idx. Returns 0 on success, -EINVAL (with *rate_idx = -1) when
 * the rate word doesn't map to a valid table index.
 */
static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
				    enum ieee80211_band band,
				    struct iwl_scale_tbl_info *tbl,
				    int *rate_idx)
{
	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
	u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
	u8 mcs;

	memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
	*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);

	if (*rate_idx == IWL_RATE_INVALID) {
		*rate_idx = -1;
		return -EINVAL;
	}
	tbl->is_SGI = 0;	/* default legacy setup */
	tbl->is_ht40 = 0;
	tbl->is_dup = 0;
	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
	tbl->lq_type = LQ_NONE;
	tbl->max_search = IWL_MAX_SEARCH;

	/* legacy rate format */
	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
		if (num_of_ant == 1) {
			if (band == IEEE80211_BAND_5GHZ)
				tbl->lq_type = LQ_A;
			else
				tbl->lq_type = LQ_G;
		}
	/* HT rate format */
	} else {
		if (rate_n_flags & RATE_MCS_SGI_MSK)
			tbl->is_SGI = 1;

		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
		    (rate_n_flags & RATE_MCS_DUP_MSK))
			tbl->is_ht40 = 1;

		if (rate_n_flags & RATE_MCS_DUP_MSK)
			tbl->is_dup = 1;

		mcs = rs_extract_rate(rate_n_flags);

		/* Classify by the PLCP range; lq_type stays LQ_NONE when the
		 * antenna count doesn't match the stream count */
		/* SISO */
		if (mcs <= IWL_RATE_SISO_60M_PLCP) {
			if (num_of_ant == 1)
				tbl->lq_type = LQ_SISO; /*else NONE*/
		/* MIMO2 */
		} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
			if (num_of_ant == 2)
				tbl->lq_type = LQ_MIMO2;
		/* MIMO3 */
		} else {
			if (num_of_ant == 3) {
				tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
				tbl->lq_type = LQ_MIMO3;
			}
		}
	}
	return 0;
}
664
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
/*
 * Walks ant_toggle_lookup[] from the table's current antenna until a
 * configuration supported by @valid_ant is found, then updates both the
 * table and the antenna bits of @rate_n_flags.
 */
static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
			     struct iwl_scale_tbl_info *tbl)
{
	u8 new_ant_type;

	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
		return 0;

	if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
		return 0;

	new_ant_type = ant_toggle_lookup[tbl->ant_type];

	/* Keep stepping through the lookup chain until we either find a
	 * valid configuration or come back to where we started */
	while ((new_ant_type != tbl->ant_type) &&
	       !rs_is_valid_ant(valid_ant, new_ant_type))
		new_ant_type = ant_toggle_lookup[new_ant_type];

	if (new_ant_type == tbl->ant_type)
		return 0;

	tbl->ant_type = new_ant_type;
	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
	return 1;
}
692
/**
 * Green-field mode is valid if the station supports it and
 * there are no non-GF stations present in the BSS.
 */
static bool rs_use_green(struct ieee80211_sta *sta)
{
	/*
	 * Deliberately always disabled: a bug somewhere in the scaling
	 * code gets stuck because GF+SGI cannot be combined in SISO
	 * rates. Until that bug is found, GF stays off - it brings only
	 * a limited benefit, and we still interoperate with GF APs
	 * since we can always receive GF transmissions.
	 */
	return false;
}
708
/**
 * rs_get_supported_rates - get the available rates
 *
 * if management frame or broadcast frame only return
 * basic available rates.
 *
 * NOTE(review): @hdr is currently unused in the body; presumably kept for
 * the management/broadcast filtering described above - confirm intent.
 */
static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
				  struct ieee80211_hdr *hdr,
				  enum iwl_table_type rate_type)
{
	if (is_legacy(rate_type)) {
		return lq_sta->active_legacy_rate;
	} else {
		if (is_siso(rate_type))
			return lq_sta->active_siso_rate;
		else if (is_mimo2(rate_type))
			return lq_sta->active_mimo2_rate;
		else
			return lq_sta->active_mimo3_rate;
	}
}
731
732static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
733 int rate_type)
734{
735 u8 high = IWL_RATE_INVALID;
736 u8 low = IWL_RATE_INVALID;
737
738 /* 802.11A or ht walks to the next literal adjacent rate in
739 * the rate table */
740 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
741 int i;
742 u32 mask;
743
744 /* Find the previous rate that is in the rate mask */
745 i = index - 1;
746 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
747 if (rate_mask & mask) {
748 low = i;
749 break;
750 }
751 }
752
753 /* Find the next rate that is in the rate mask */
754 i = index + 1;
755 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
756 if (rate_mask & mask) {
757 high = i;
758 break;
759 }
760 }
761
762 return (high << 8) | low;
763 }
764
765 low = index;
766 while (low != IWL_RATE_INVALID) {
767 low = iwl_rates[low].prev_rs;
768 if (low == IWL_RATE_INVALID)
769 break;
770 if (rate_mask & (1 << low))
771 break;
772 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
773 }
774
775 high = index;
776 while (high != IWL_RATE_INVALID) {
777 high = iwl_rates[high].next_rs;
778 if (high == IWL_RATE_INVALID)
779 break;
780 if (rate_mask & (1 << high))
781 break;
782 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
783 }
784
785 return (high << 8) | low;
786}
787
/*
 * Build the rate_n_flags word for the next-lower rate below @scale_index,
 * switching the table from HT to legacy first when HT is no longer
 * possible (or the lowest HT index was reached).
 */
static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
			     struct iwl_scale_tbl_info *tbl,
			     u8 scale_index, u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct iwl_priv *priv = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
		switch_to_legacy = 1;
		scale_index = rs_ht_to_legacy[scale_index];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* Legacy transmits on a single antenna */
		if (num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    first_antenna(priv->nvm_data->valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IWL_MAX_SEARCH;
	}

	rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask  = (u16)(rate_mask &
			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
		low = scale_index;
		goto out;
	}

	high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
					tbl->lq_type);
	low = high_low & 0xff;

	/* No lower rate available: keep the current one */
	if (low == IWL_RATE_INVALID)
		low = scale_index;

out:
	return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
847
848/*
849 * Simple function to compare two rate scale table types
850 */
851static bool table_type_matches(struct iwl_scale_tbl_info *a,
852 struct iwl_scale_tbl_info *b)
853{
854 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
855 (a->is_SGI == b->is_SGI);
856}
857
/*
 * React to a change in BT coexistence state: decide between full
 * concurrency and 3-wire mode, and when either the BT traffic load or the
 * concurrency mode changed, resend the rate table to the uCode and queue
 * the bt_full_concurrency work.
 */
static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			    struct iwl_lq_sta *lq_sta)
{
	struct iwl_scale_tbl_info *tbl;
	bool full_concurrent = priv->bt_full_concurrent;

	if (priv->bt_ant_couple_ok) {
		/*
		 * Is there a need to switch between
		 * full concurrency and 3-wire?
		 *
		 * NOTE(review): the bt_ant_couple_ok re-test below is
		 * redundant inside this branch; condition reduces to
		 * bt_ci_compliance.
		 */
		if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
			full_concurrent = true;
		else
			full_concurrent = false;
	}
	if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
	    (priv->bt_full_concurrent != full_concurrent)) {
		priv->bt_full_concurrent = full_concurrent;
		priv->last_bt_traffic_load = priv->bt_traffic_load;

		/* Update uCode's rate table. */
		tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
		iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);

		queue_work(priv->workqueue, &priv->bt_full_concurrency);
	}
}
887
888/*
889 * mac80211 sends us Tx status
890 */
891static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
892 struct ieee80211_sta *sta, void *priv_sta,
893 struct sk_buff *skb)
894{
895 int legacy_success;
896 int retries;
897 int rs_index, mac_index, i;
898 struct iwl_lq_sta *lq_sta = priv_sta;
899 struct iwl_link_quality_cmd *table;
900 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
901 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)priv_r;
902 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
903 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
904 enum mac80211_rate_control_flags mac_flags;
905 u32 tx_rate;
906 struct iwl_scale_tbl_info tbl_type;
907 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
908 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
909 struct iwl_rxon_context *ctx = sta_priv->ctx;
910
911 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
912
913 /* Treat uninitialized rate scaling data same as non-existing. */
914 if (!lq_sta) {
915 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
916 return;
917 } else if (!lq_sta->drv) {
918 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
919 return;
920 }
921
922 if (!ieee80211_is_data(hdr->frame_control) ||
923 info->flags & IEEE80211_TX_CTL_NO_ACK)
924 return;
925
926 /* This packet was aggregated but doesn't carry status info */
927 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
928 !(info->flags & IEEE80211_TX_STAT_AMPDU))
929 return;
930
931 /*
932 * Ignore this Tx frame response if its initial rate doesn't match
933 * that of latest Link Quality command. There may be stragglers
934 * from a previous Link Quality command, but we're no longer interested
935 * in those; they're either from the "active" mode while we're trying
936 * to check "search" mode, or a prior "search" mode after we've moved
937 * to a new "search" mode (which might become the new "active" mode).
938 */
939 table = &lq_sta->lq;
940 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
941 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
942 if (priv->band == IEEE80211_BAND_5GHZ)
943 rs_index -= IWL_FIRST_OFDM_RATE;
944 mac_flags = info->status.rates[0].flags;
945 mac_index = info->status.rates[0].idx;
946 /* For HT packets, map MCS to PLCP */
947 if (mac_flags & IEEE80211_TX_RC_MCS) {
948 mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
949 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
950 mac_index++;
951 /*
952 * mac80211 HT index is always zero-indexed; we need to move
953 * HT OFDM rates after CCK rates in 2.4 GHz band
954 */
955 if (priv->band == IEEE80211_BAND_2GHZ)
956 mac_index += IWL_FIRST_OFDM_RATE;
957 }
958 /* Here we actually compare this rate to the latest LQ command */
959 if ((mac_index < 0) ||
960 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
961 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
962 (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
963 (tbl_type.ant_type != info->status.antenna) ||
964 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
965 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
966 (rs_index != mac_index)) {
967 IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate);
968 /*
969 * Since rates mis-match, the last LQ command may have failed.
970 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
971 * ... driver.
972 */
973 lq_sta->missed_rate_counter++;
974 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
975 lq_sta->missed_rate_counter = 0;
976 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
977 }
978 /* Regardless, ignore this status info for outdated rate */
979 return;
980 } else
981 /* Rate did match, so reset the missed_rate_counter */
982 lq_sta->missed_rate_counter = 0;
983
984 /* Figure out if rate scale algorithm is in active or search table */
985 if (table_type_matches(&tbl_type,
986 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
987 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
988 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
989 } else if (table_type_matches(&tbl_type,
990 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
991 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
992 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
993 } else {
994 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
995 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
996 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
997 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
998 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
999 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
1000 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
1001 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
1002 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
1003 /*
1004 * no matching table found, let's by-pass the data collection
1005 * and continue to perform rate scale to find the rate table
1006 */
1007 rs_stay_in_table(lq_sta, true);
1008 goto done;
1009 }
1010
1011 /*
1012 * Updating the frame history depends on whether packets were
1013 * aggregated.
1014 *
1015 * For aggregation, all packets were transmitted at the same rate, the
1016 * first index into rate scale table.
1017 */
1018 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1019 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
1020 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
1021 &rs_index);
1022 rs_collect_tx_data(curr_tbl, rs_index,
1023 info->status.ampdu_len,
1024 info->status.ampdu_ack_len);
1025
1026 /* Update success/fail counts if not searching for new mode */
1027 if (lq_sta->stay_in_tbl) {
1028 lq_sta->total_success += info->status.ampdu_ack_len;
1029 lq_sta->total_failed += (info->status.ampdu_len -
1030 info->status.ampdu_ack_len);
1031 }
1032 } else {
1033 /*
1034 * For legacy, update frame history with for each Tx retry.
1035 */
1036 retries = info->status.rates[0].count - 1;
1037 /* HW doesn't send more than 15 retries */
1038 retries = min(retries, 15);
1039
1040 /* The last transmission may have been successful */
1041 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1042 /* Collect data for each rate used during failed TX attempts */
1043 for (i = 0; i <= retries; ++i) {
1044 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
1045 rs_get_tbl_info_from_mcs(tx_rate, priv->band,
1046 &tbl_type, &rs_index);
1047 /*
1048 * Only collect stats if retried rate is in the same RS
1049 * table as active/search.
1050 */
1051 if (table_type_matches(&tbl_type, curr_tbl))
1052 tmp_tbl = curr_tbl;
1053 else if (table_type_matches(&tbl_type, other_tbl))
1054 tmp_tbl = other_tbl;
1055 else
1056 continue;
1057 rs_collect_tx_data(tmp_tbl, rs_index, 1,
1058 i < retries ? 0 : legacy_success);
1059 }
1060
1061 /* Update success/fail counts if not searching for new mode */
1062 if (lq_sta->stay_in_tbl) {
1063 lq_sta->total_success += legacy_success;
1064 lq_sta->total_failed += retries + (1 - legacy_success);
1065 }
1066 }
1067 /* The last TX rate is cached in lq_sta; it's set in if/else above */
1068 lq_sta->last_rate_n_flags = tx_rate;
1069done:
1070 /* See if there's a better rate or modulation mode to try. */
1071 if (sta && sta->supp_rates[sband->band])
1072 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1073
1074 if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
1075 rs_bt_update_lq(priv, ctx, lq_sta);
1076}
1077
1078/*
1079 * Begin a period of staying with a selected modulation mode.
1080 * Set "stay_in_tbl" flag to prevent any mode switches.
1081 * Set frame tx success limits according to legacy vs. high-throughput,
1082 * and reset overall (spanning all rates) tx success history statistics.
1083 * These control how long we stay using same modulation mode before
1084 * searching for a new mode.
1085 */
1086static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1087 struct iwl_lq_sta *lq_sta)
1088{
1089 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1090 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1091 if (is_legacy) {
1092 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1093 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1094 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1095 } else {
1096 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1097 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1098 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1099 }
1100 lq_sta->table_count = 0;
1101 lq_sta->total_failed = 0;
1102 lq_sta->total_success = 0;
1103 lq_sta->flush_timer = jiffies;
1104 lq_sta->action_counter = 0;
1105}
1106
1107/*
1108 * Find correct throughput table for given mode of modulation
1109 */
1110static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1111 struct iwl_scale_tbl_info *tbl)
1112{
1113 /* Used to choose among HT tables */
1114 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1115
1116 /* Check for invalid LQ type */
1117 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1118 tbl->expected_tpt = expected_tpt_legacy;
1119 return;
1120 }
1121
1122 /* Legacy rates have only one table */
1123 if (is_legacy(tbl->lq_type)) {
1124 tbl->expected_tpt = expected_tpt_legacy;
1125 return;
1126 }
1127
1128 /* Choose among many HT tables depending on number of streams
1129 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
1130 * status */
1131 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1132 ht_tbl_pointer = expected_tpt_siso20MHz;
1133 else if (is_siso(tbl->lq_type))
1134 ht_tbl_pointer = expected_tpt_siso40MHz;
1135 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1136 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1137 else if (is_mimo2(tbl->lq_type))
1138 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1139 else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1140 ht_tbl_pointer = expected_tpt_mimo3_20MHz;
1141 else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
1142 ht_tbl_pointer = expected_tpt_mimo3_40MHz;
1143
1144 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1145 tbl->expected_tpt = ht_tbl_pointer[0];
1146 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1147 tbl->expected_tpt = ht_tbl_pointer[1];
1148 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1149 tbl->expected_tpt = ht_tbl_pointer[2];
1150 else /* AGG+SGI */
1151 tbl->expected_tpt = ht_tbl_pointer[3];
1152}
1153
1154/*
1155 * Find starting rate for new "search" high-throughput mode of modulation.
1156 * Goal is to find lowest expected rate (under perfect conditions) that is
1157 * above the current measured throughput of "active" mode, to give new mode
1158 * a fair chance to prove itself without too many challenges.
1159 *
1160 * This gets called when transitioning to more aggressive modulation
1161 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1162 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1163 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1164 * bit rate will typically need to increase, but not if performance was bad.
1165 */
1166static s32 rs_get_best_rate(struct iwl_priv *priv,
1167 struct iwl_lq_sta *lq_sta,
1168 struct iwl_scale_tbl_info *tbl, /* "search" */
1169 u16 rate_mask, s8 index)
1170{
1171 /* "active" values */
1172 struct iwl_scale_tbl_info *active_tbl =
1173 &(lq_sta->lq_info[lq_sta->active_tbl]);
1174 s32 active_sr = active_tbl->win[index].success_ratio;
1175 s32 active_tpt = active_tbl->expected_tpt[index];
1176 /* expected "search" throughput */
1177 const u16 *tpt_tbl = tbl->expected_tpt;
1178
1179 s32 new_rate, high, low, start_hi;
1180 u16 high_low;
1181 s8 rate = index;
1182
1183 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1184
1185 for (; ;) {
1186 high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
1187 tbl->lq_type);
1188
1189 low = high_low & 0xff;
1190 high = (high_low >> 8) & 0xff;
1191
1192 /*
1193 * Lower the "search" bit rate, to give new "search" mode
1194 * approximately the same throughput as "active" if:
1195 *
1196 * 1) "Active" mode has been working modestly well (but not
1197 * great), and expected "search" throughput (under perfect
1198 * conditions) at candidate rate is above the actual
1199 * measured "active" throughput (but less than expected
1200 * "active" throughput under perfect conditions).
1201 * OR
1202 * 2) "Active" mode has been working perfectly or very well
1203 * and expected "search" throughput (under perfect
1204 * conditions) at candidate rate is above expected
1205 * "active" throughput (under perfect conditions).
1206 */
1207 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1208 ((active_sr > IWL_RATE_DECREASE_TH) &&
1209 (active_sr <= IWL_RATE_HIGH_TH) &&
1210 (tpt_tbl[rate] <= active_tpt))) ||
1211 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1212 (tpt_tbl[rate] > active_tpt))) {
1213
1214 /* (2nd or later pass)
1215 * If we've already tried to raise the rate, and are
1216 * now trying to lower it, use the higher rate. */
1217 if (start_hi != IWL_RATE_INVALID) {
1218 new_rate = start_hi;
1219 break;
1220 }
1221
1222 new_rate = rate;
1223
1224 /* Loop again with lower rate */
1225 if (low != IWL_RATE_INVALID)
1226 rate = low;
1227
1228 /* Lower rate not available, use the original */
1229 else
1230 break;
1231
1232 /* Else try to raise the "search" rate to match "active" */
1233 } else {
1234 /* (2nd or later pass)
1235 * If we've already tried to lower the rate, and are
1236 * now trying to raise it, use the lower rate. */
1237 if (new_rate != IWL_RATE_INVALID)
1238 break;
1239
1240 /* Loop again with higher rate */
1241 else if (high != IWL_RATE_INVALID) {
1242 start_hi = high;
1243 rate = high;
1244
1245 /* Higher rate not available, use the original */
1246 } else {
1247 new_rate = rate;
1248 break;
1249 }
1250 }
1251 }
1252
1253 return new_rate;
1254}
1255
1256/*
1257 * Set up search table for MIMO2
1258 */
1259static int rs_switch_to_mimo2(struct iwl_priv *priv,
1260 struct iwl_lq_sta *lq_sta,
1261 struct ieee80211_conf *conf,
1262 struct ieee80211_sta *sta,
1263 struct iwl_scale_tbl_info *tbl, int index)
1264{
1265 u16 rate_mask;
1266 s32 rate;
1267 s8 is_green = lq_sta->is_green;
1268 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1269 struct iwl_rxon_context *ctx = sta_priv->ctx;
1270
1271 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1272 return -1;
1273
1274 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
1275 return -1;
1276
1277 /* Need both Tx chains/antennas to support MIMO */
1278 if (priv->hw_params.tx_chains_num < 2)
1279 return -1;
1280
1281 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1282
1283 tbl->lq_type = LQ_MIMO2;
1284 tbl->is_dup = lq_sta->is_dup;
1285 tbl->action = 0;
1286 tbl->max_search = IWL_MAX_SEARCH;
1287 rate_mask = lq_sta->active_mimo2_rate;
1288
1289 if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
1290 tbl->is_ht40 = 1;
1291 else
1292 tbl->is_ht40 = 0;
1293
1294 rs_set_expected_tpt_table(lq_sta, tbl);
1295
1296 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1297
1298 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1299 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1300 IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
1301 rate, rate_mask);
1302 return -1;
1303 }
1304 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1305
1306 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1307 tbl->current_rate, is_green);
1308 return 0;
1309}
1310
1311/*
1312 * Set up search table for MIMO3
1313 */
1314static int rs_switch_to_mimo3(struct iwl_priv *priv,
1315 struct iwl_lq_sta *lq_sta,
1316 struct ieee80211_conf *conf,
1317 struct ieee80211_sta *sta,
1318 struct iwl_scale_tbl_info *tbl, int index)
1319{
1320 u16 rate_mask;
1321 s32 rate;
1322 s8 is_green = lq_sta->is_green;
1323 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1324 struct iwl_rxon_context *ctx = sta_priv->ctx;
1325
1326 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1327 return -1;
1328
1329 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
1330 return -1;
1331
1332 /* Need both Tx chains/antennas to support MIMO */
1333 if (priv->hw_params.tx_chains_num < 3)
1334 return -1;
1335
1336 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
1337
1338 tbl->lq_type = LQ_MIMO3;
1339 tbl->is_dup = lq_sta->is_dup;
1340 tbl->action = 0;
1341 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
1342 rate_mask = lq_sta->active_mimo3_rate;
1343
1344 if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
1345 tbl->is_ht40 = 1;
1346 else
1347 tbl->is_ht40 = 0;
1348
1349 rs_set_expected_tpt_table(lq_sta, tbl);
1350
1351 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1352
1353 IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n",
1354 rate, rate_mask);
1355 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1356 IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
1357 rate, rate_mask);
1358 return -1;
1359 }
1360 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1361
1362 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1363 tbl->current_rate, is_green);
1364 return 0;
1365}
1366
1367/*
1368 * Set up search table for SISO
1369 */
1370static int rs_switch_to_siso(struct iwl_priv *priv,
1371 struct iwl_lq_sta *lq_sta,
1372 struct ieee80211_conf *conf,
1373 struct ieee80211_sta *sta,
1374 struct iwl_scale_tbl_info *tbl, int index)
1375{
1376 u16 rate_mask;
1377 u8 is_green = lq_sta->is_green;
1378 s32 rate;
1379 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1380 struct iwl_rxon_context *ctx = sta_priv->ctx;
1381
1382 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1383 return -1;
1384
1385 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1386
1387 tbl->is_dup = lq_sta->is_dup;
1388 tbl->lq_type = LQ_SISO;
1389 tbl->action = 0;
1390 tbl->max_search = IWL_MAX_SEARCH;
1391 rate_mask = lq_sta->active_siso_rate;
1392
1393 if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
1394 tbl->is_ht40 = 1;
1395 else
1396 tbl->is_ht40 = 0;
1397
1398 if (is_green)
1399 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1400
1401 rs_set_expected_tpt_table(lq_sta, tbl);
1402 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1403
1404 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1405 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1406 IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n",
1407 rate, rate_mask);
1408 return -1;
1409 }
1410 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
1411 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1412 tbl->current_rate, is_green);
1413 return 0;
1414}
1415
1416/*
1417 * Try to switch to new modulation mode from legacy
1418 */
1419static void rs_move_legacy_other(struct iwl_priv *priv,
1420 struct iwl_lq_sta *lq_sta,
1421 struct ieee80211_conf *conf,
1422 struct ieee80211_sta *sta,
1423 int index)
1424{
1425 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1426 struct iwl_scale_tbl_info *search_tbl =
1427 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1428 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1429 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1430 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1431 u8 start_action;
1432 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1433 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1434 int ret = 0;
1435 u8 update_search_tbl_counter = 0;
1436
1437 switch (priv->bt_traffic_load) {
1438 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1439 /* nothing */
1440 break;
1441 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1442 /* avoid antenna B unless MIMO */
1443 if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
1444 tbl->action = IWL_LEGACY_SWITCH_SISO;
1445 break;
1446 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1447 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1448 /* avoid antenna B and MIMO */
1449 valid_tx_ant =
1450 first_antenna(priv->nvm_data->valid_tx_ant);
1451 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1452 tbl->action != IWL_LEGACY_SWITCH_SISO)
1453 tbl->action = IWL_LEGACY_SWITCH_SISO;
1454 break;
1455 default:
1456 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1457 break;
1458 }
1459
1460 if (!iwl_ht_enabled(priv))
1461 /* stay in Legacy */
1462 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1463 else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1464 tbl->action > IWL_LEGACY_SWITCH_SISO)
1465 tbl->action = IWL_LEGACY_SWITCH_SISO;
1466
1467 /* configure as 1x1 if bt full concurrency */
1468 if (priv->bt_full_concurrent) {
1469 if (!iwl_ht_enabled(priv))
1470 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1471 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1472 tbl->action = IWL_LEGACY_SWITCH_SISO;
1473 valid_tx_ant =
1474 first_antenna(priv->nvm_data->valid_tx_ant);
1475 }
1476
1477 start_action = tbl->action;
1478 for (; ;) {
1479 lq_sta->action_counter++;
1480 switch (tbl->action) {
1481 case IWL_LEGACY_SWITCH_ANTENNA1:
1482 case IWL_LEGACY_SWITCH_ANTENNA2:
1483 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1484
1485 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1486 tx_chains_num <= 1) ||
1487 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1488 tx_chains_num <= 2))
1489 break;
1490
1491 /* Don't change antenna if success has been great */
1492 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1493 !priv->bt_full_concurrent &&
1494 priv->bt_traffic_load ==
1495 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1496 break;
1497
1498 /* Set up search table to try other antenna */
1499 memcpy(search_tbl, tbl, sz);
1500
1501 if (rs_toggle_antenna(valid_tx_ant,
1502 &search_tbl->current_rate, search_tbl)) {
1503 update_search_tbl_counter = 1;
1504 rs_set_expected_tpt_table(lq_sta, search_tbl);
1505 goto out;
1506 }
1507 break;
1508 case IWL_LEGACY_SWITCH_SISO:
1509 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1510
1511 /* Set up search table to try SISO */
1512 memcpy(search_tbl, tbl, sz);
1513 search_tbl->is_SGI = 0;
1514 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1515 search_tbl, index);
1516 if (!ret) {
1517 lq_sta->action_counter = 0;
1518 goto out;
1519 }
1520
1521 break;
1522 case IWL_LEGACY_SWITCH_MIMO2_AB:
1523 case IWL_LEGACY_SWITCH_MIMO2_AC:
1524 case IWL_LEGACY_SWITCH_MIMO2_BC:
1525 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1526
1527 /* Set up search table to try MIMO */
1528 memcpy(search_tbl, tbl, sz);
1529 search_tbl->is_SGI = 0;
1530
1531 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1532 search_tbl->ant_type = ANT_AB;
1533 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1534 search_tbl->ant_type = ANT_AC;
1535 else
1536 search_tbl->ant_type = ANT_BC;
1537
1538 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1539 break;
1540
1541 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1542 search_tbl, index);
1543 if (!ret) {
1544 lq_sta->action_counter = 0;
1545 goto out;
1546 }
1547 break;
1548
1549 case IWL_LEGACY_SWITCH_MIMO3_ABC:
1550 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n");
1551
1552 /* Set up search table to try MIMO3 */
1553 memcpy(search_tbl, tbl, sz);
1554 search_tbl->is_SGI = 0;
1555
1556 search_tbl->ant_type = ANT_ABC;
1557
1558 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1559 break;
1560
1561 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1562 search_tbl, index);
1563 if (!ret) {
1564 lq_sta->action_counter = 0;
1565 goto out;
1566 }
1567 break;
1568 }
1569 tbl->action++;
1570 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1571 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1572
1573 if (tbl->action == start_action)
1574 break;
1575
1576 }
1577 search_tbl->lq_type = LQ_NONE;
1578 return;
1579
1580out:
1581 lq_sta->search_better_tbl = 1;
1582 tbl->action++;
1583 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1584 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1585 if (update_search_tbl_counter)
1586 search_tbl->action = tbl->action;
1587}
1588
1589/*
1590 * Try to switch to new modulation mode from SISO
1591 */
1592static void rs_move_siso_to_other(struct iwl_priv *priv,
1593 struct iwl_lq_sta *lq_sta,
1594 struct ieee80211_conf *conf,
1595 struct ieee80211_sta *sta, int index)
1596{
1597 u8 is_green = lq_sta->is_green;
1598 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1599 struct iwl_scale_tbl_info *search_tbl =
1600 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1601 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1602 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1603 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1604 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1605 u8 start_action;
1606 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1607 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1608 u8 update_search_tbl_counter = 0;
1609 int ret;
1610
1611 switch (priv->bt_traffic_load) {
1612 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1613 /* nothing */
1614 break;
1615 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1616 /* avoid antenna B unless MIMO */
1617 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1618 tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
1619 break;
1620 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1621 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1622 /* avoid antenna B and MIMO */
1623 valid_tx_ant =
1624 first_antenna(priv->nvm_data->valid_tx_ant);
1625 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1626 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1627 break;
1628 default:
1629 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1630 break;
1631 }
1632
1633 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1634 tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
1635 /* stay in SISO */
1636 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1637 }
1638
1639 /* configure as 1x1 if bt full concurrency */
1640 if (priv->bt_full_concurrent) {
1641 valid_tx_ant =
1642 first_antenna(priv->nvm_data->valid_tx_ant);
1643 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1644 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1645 }
1646
1647 start_action = tbl->action;
1648 for (;;) {
1649 lq_sta->action_counter++;
1650 switch (tbl->action) {
1651 case IWL_SISO_SWITCH_ANTENNA1:
1652 case IWL_SISO_SWITCH_ANTENNA2:
1653 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1654 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1655 tx_chains_num <= 1) ||
1656 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1657 tx_chains_num <= 2))
1658 break;
1659
1660 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1661 !priv->bt_full_concurrent &&
1662 priv->bt_traffic_load ==
1663 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1664 break;
1665
1666 memcpy(search_tbl, tbl, sz);
1667 if (rs_toggle_antenna(valid_tx_ant,
1668 &search_tbl->current_rate, search_tbl)) {
1669 update_search_tbl_counter = 1;
1670 goto out;
1671 }
1672 break;
1673 case IWL_SISO_SWITCH_MIMO2_AB:
1674 case IWL_SISO_SWITCH_MIMO2_AC:
1675 case IWL_SISO_SWITCH_MIMO2_BC:
1676 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1677 memcpy(search_tbl, tbl, sz);
1678 search_tbl->is_SGI = 0;
1679
1680 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1681 search_tbl->ant_type = ANT_AB;
1682 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1683 search_tbl->ant_type = ANT_AC;
1684 else
1685 search_tbl->ant_type = ANT_BC;
1686
1687 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1688 break;
1689
1690 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1691 search_tbl, index);
1692 if (!ret)
1693 goto out;
1694 break;
1695 case IWL_SISO_SWITCH_GI:
1696 if (!tbl->is_ht40 && !(ht_cap->cap &
1697 IEEE80211_HT_CAP_SGI_20))
1698 break;
1699 if (tbl->is_ht40 && !(ht_cap->cap &
1700 IEEE80211_HT_CAP_SGI_40))
1701 break;
1702
1703 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1704
1705 memcpy(search_tbl, tbl, sz);
1706 if (is_green) {
1707 if (!tbl->is_SGI)
1708 break;
1709 else
1710 IWL_ERR(priv,
1711 "SGI was set in GF+SISO\n");
1712 }
1713 search_tbl->is_SGI = !tbl->is_SGI;
1714 rs_set_expected_tpt_table(lq_sta, search_tbl);
1715 if (tbl->is_SGI) {
1716 s32 tpt = lq_sta->last_tpt / 100;
1717 if (tpt >= search_tbl->expected_tpt[index])
1718 break;
1719 }
1720 search_tbl->current_rate =
1721 rate_n_flags_from_tbl(priv, search_tbl,
1722 index, is_green);
1723 update_search_tbl_counter = 1;
1724 goto out;
1725 case IWL_SISO_SWITCH_MIMO3_ABC:
1726 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n");
1727 memcpy(search_tbl, tbl, sz);
1728 search_tbl->is_SGI = 0;
1729 search_tbl->ant_type = ANT_ABC;
1730
1731 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1732 break;
1733
1734 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1735 search_tbl, index);
1736 if (!ret)
1737 goto out;
1738 break;
1739 }
1740 tbl->action++;
1741 if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
1742 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1743
1744 if (tbl->action == start_action)
1745 break;
1746 }
1747 search_tbl->lq_type = LQ_NONE;
1748 return;
1749
1750 out:
1751 lq_sta->search_better_tbl = 1;
1752 tbl->action++;
1753 if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
1754 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1755 if (update_search_tbl_counter)
1756 search_tbl->action = tbl->action;
1757}
1758
1759/*
1760 * Try to switch to new modulation mode from MIMO2
1761 */
1762static void rs_move_mimo2_to_other(struct iwl_priv *priv,
1763 struct iwl_lq_sta *lq_sta,
1764 struct ieee80211_conf *conf,
1765 struct ieee80211_sta *sta, int index)
1766{
1767 s8 is_green = lq_sta->is_green;
1768 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1769 struct iwl_scale_tbl_info *search_tbl =
1770 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1771 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1772 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1773 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1774 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1775 u8 start_action;
1776 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1777 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1778 u8 update_search_tbl_counter = 0;
1779 int ret;
1780
1781 switch (priv->bt_traffic_load) {
1782 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1783 /* nothing */
1784 break;
1785 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1786 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1787 /* avoid antenna B and MIMO */
1788 if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
1789 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1790 break;
1791 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1792 /* avoid antenna B unless MIMO */
1793 if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
1794 tbl->action == IWL_MIMO2_SWITCH_SISO_C)
1795 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1796 break;
1797 default:
1798 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1799 break;
1800 }
1801
1802 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1803 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1804 tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
1805 /* switch in SISO */
1806 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1807 }
1808
1809 /* configure as 1x1 if bt full concurrency */
1810 if (priv->bt_full_concurrent &&
1811 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1812 tbl->action > IWL_MIMO2_SWITCH_SISO_C))
1813 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1814
1815 start_action = tbl->action;
1816 for (;;) {
1817 lq_sta->action_counter++;
1818 switch (tbl->action) {
1819 case IWL_MIMO2_SWITCH_ANTENNA1:
1820 case IWL_MIMO2_SWITCH_ANTENNA2:
1821 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1822
1823 if (tx_chains_num <= 2)
1824 break;
1825
1826 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1827 break;
1828
1829 memcpy(search_tbl, tbl, sz);
1830 if (rs_toggle_antenna(valid_tx_ant,
1831 &search_tbl->current_rate, search_tbl)) {
1832 update_search_tbl_counter = 1;
1833 goto out;
1834 }
1835 break;
1836 case IWL_MIMO2_SWITCH_SISO_A:
1837 case IWL_MIMO2_SWITCH_SISO_B:
1838 case IWL_MIMO2_SWITCH_SISO_C:
1839 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1840
1841 /* Set up new search table for SISO */
1842 memcpy(search_tbl, tbl, sz);
1843
1844 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1845 search_tbl->ant_type = ANT_A;
1846 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1847 search_tbl->ant_type = ANT_B;
1848 else
1849 search_tbl->ant_type = ANT_C;
1850
1851 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1852 break;
1853
1854 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1855 search_tbl, index);
1856 if (!ret)
1857 goto out;
1858
1859 break;
1860
1861 case IWL_MIMO2_SWITCH_GI:
1862 if (!tbl->is_ht40 && !(ht_cap->cap &
1863 IEEE80211_HT_CAP_SGI_20))
1864 break;
1865 if (tbl->is_ht40 && !(ht_cap->cap &
1866 IEEE80211_HT_CAP_SGI_40))
1867 break;
1868
1869 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1870
1871 /* Set up new search table for MIMO2 */
1872 memcpy(search_tbl, tbl, sz);
1873 search_tbl->is_SGI = !tbl->is_SGI;
1874 rs_set_expected_tpt_table(lq_sta, search_tbl);
1875 /*
1876 * If active table already uses the fastest possible
1877 * modulation (dual stream with short guard interval),
1878 * and it's working well, there's no need to look
1879 * for a better type of modulation!
1880 */
1881 if (tbl->is_SGI) {
1882 s32 tpt = lq_sta->last_tpt / 100;
1883 if (tpt >= search_tbl->expected_tpt[index])
1884 break;
1885 }
1886 search_tbl->current_rate =
1887 rate_n_flags_from_tbl(priv, search_tbl,
1888 index, is_green);
1889 update_search_tbl_counter = 1;
1890 goto out;
1891
1892 case IWL_MIMO2_SWITCH_MIMO3_ABC:
1893 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n");
1894 memcpy(search_tbl, tbl, sz);
1895 search_tbl->is_SGI = 0;
1896 search_tbl->ant_type = ANT_ABC;
1897
1898 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1899 break;
1900
1901 ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
1902 search_tbl, index);
1903 if (!ret)
1904 goto out;
1905
1906 break;
1907 }
1908 tbl->action++;
1909 if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
1910 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1911
1912 if (tbl->action == start_action)
1913 break;
1914 }
1915 search_tbl->lq_type = LQ_NONE;
1916 return;
1917 out:
1918 lq_sta->search_better_tbl = 1;
1919 tbl->action++;
1920 if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
1921 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1922 if (update_search_tbl_counter)
1923 search_tbl->action = tbl->action;
1924
1925}
1926
1927/*
1928 * Try to switch to new modulation mode from MIMO3
1929 */
1930static void rs_move_mimo3_to_other(struct iwl_priv *priv,
1931 struct iwl_lq_sta *lq_sta,
1932 struct ieee80211_conf *conf,
1933 struct ieee80211_sta *sta, int index)
1934{
1935 s8 is_green = lq_sta->is_green;
1936 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1937 struct iwl_scale_tbl_info *search_tbl =
1938 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1939 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1940 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1941 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1942 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1943 u8 start_action;
1944 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1945 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1946 int ret;
1947 u8 update_search_tbl_counter = 0;
1948
1949 switch (priv->bt_traffic_load) {
1950 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1951 /* nothing */
1952 break;
1953 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1954 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1955 /* avoid antenna B and MIMO */
1956 if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
1957 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1958 break;
1959 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1960 /* avoid antenna B unless MIMO */
1961 if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
1962 tbl->action == IWL_MIMO3_SWITCH_SISO_C)
1963 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1964 break;
1965 default:
1966 IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
1967 break;
1968 }
1969
1970 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1971 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1972 tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
1973 /* switch in SISO */
1974 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1975 }
1976
1977 /* configure as 1x1 if bt full concurrency */
1978 if (priv->bt_full_concurrent &&
1979 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1980 tbl->action > IWL_MIMO3_SWITCH_SISO_C))
1981 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1982
1983 start_action = tbl->action;
1984 for (;;) {
1985 lq_sta->action_counter++;
1986 switch (tbl->action) {
1987 case IWL_MIMO3_SWITCH_ANTENNA1:
1988 case IWL_MIMO3_SWITCH_ANTENNA2:
1989 IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n");
1990
1991 if (tx_chains_num <= 3)
1992 break;
1993
1994 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1995 break;
1996
1997 memcpy(search_tbl, tbl, sz);
1998 if (rs_toggle_antenna(valid_tx_ant,
1999 &search_tbl->current_rate, search_tbl))
2000 goto out;
2001 break;
2002 case IWL_MIMO3_SWITCH_SISO_A:
2003 case IWL_MIMO3_SWITCH_SISO_B:
2004 case IWL_MIMO3_SWITCH_SISO_C:
2005 IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n");
2006
2007 /* Set up new search table for SISO */
2008 memcpy(search_tbl, tbl, sz);
2009
2010 if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
2011 search_tbl->ant_type = ANT_A;
2012 else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
2013 search_tbl->ant_type = ANT_B;
2014 else
2015 search_tbl->ant_type = ANT_C;
2016
2017 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
2018 break;
2019
2020 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
2021 search_tbl, index);
2022 if (!ret)
2023 goto out;
2024
2025 break;
2026
2027 case IWL_MIMO3_SWITCH_MIMO2_AB:
2028 case IWL_MIMO3_SWITCH_MIMO2_AC:
2029 case IWL_MIMO3_SWITCH_MIMO2_BC:
2030 IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n");
2031
2032 memcpy(search_tbl, tbl, sz);
2033 search_tbl->is_SGI = 0;
2034 if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
2035 search_tbl->ant_type = ANT_AB;
2036 else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
2037 search_tbl->ant_type = ANT_AC;
2038 else
2039 search_tbl->ant_type = ANT_BC;
2040
2041 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
2042 break;
2043
2044 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
2045 search_tbl, index);
2046 if (!ret)
2047 goto out;
2048
2049 break;
2050
2051 case IWL_MIMO3_SWITCH_GI:
2052 if (!tbl->is_ht40 && !(ht_cap->cap &
2053 IEEE80211_HT_CAP_SGI_20))
2054 break;
2055 if (tbl->is_ht40 && !(ht_cap->cap &
2056 IEEE80211_HT_CAP_SGI_40))
2057 break;
2058
2059 IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n");
2060
2061 /* Set up new search table for MIMO */
2062 memcpy(search_tbl, tbl, sz);
2063 search_tbl->is_SGI = !tbl->is_SGI;
2064 rs_set_expected_tpt_table(lq_sta, search_tbl);
2065 /*
2066 * If active table already uses the fastest possible
2067 * modulation (dual stream with short guard interval),
2068 * and it's working well, there's no need to look
2069 * for a better type of modulation!
2070 */
2071 if (tbl->is_SGI) {
2072 s32 tpt = lq_sta->last_tpt / 100;
2073 if (tpt >= search_tbl->expected_tpt[index])
2074 break;
2075 }
2076 search_tbl->current_rate =
2077 rate_n_flags_from_tbl(priv, search_tbl,
2078 index, is_green);
2079 update_search_tbl_counter = 1;
2080 goto out;
2081 }
2082 tbl->action++;
2083 if (tbl->action > IWL_MIMO3_SWITCH_GI)
2084 tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
2085
2086 if (tbl->action == start_action)
2087 break;
2088 }
2089 search_tbl->lq_type = LQ_NONE;
2090 return;
2091 out:
2092 lq_sta->search_better_tbl = 1;
2093 tbl->action++;
2094 if (tbl->action > IWL_MIMO3_SWITCH_GI)
2095 tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
2096 if (update_search_tbl_counter)
2097 search_tbl->action = tbl->action;
2098}
2099
2100/*
2101 * Check whether we should continue using same modulation mode, or
2102 * begin search for a new mode, based on:
2103 * 1) # tx successes or failures while using this mode
2104 * 2) # times calling this function
2105 * 3) elapsed time in this mode (not used, for now)
2106 */
2107static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
2108{
2109 struct iwl_scale_tbl_info *tbl;
2110 int i;
2111 int active_tbl;
2112 int flush_interval_passed = 0;
2113 struct iwl_priv *priv;
2114
2115 priv = lq_sta->drv;
2116 active_tbl = lq_sta->active_tbl;
2117
2118 tbl = &(lq_sta->lq_info[active_tbl]);
2119
2120 /* If we've been disallowing search, see if we should now allow it */
2121 if (lq_sta->stay_in_tbl) {
2122
2123 /* Elapsed time using current modulation mode */
2124 if (lq_sta->flush_timer)
2125 flush_interval_passed =
2126 time_after(jiffies,
2127 (unsigned long)(lq_sta->flush_timer +
2128 IWL_RATE_SCALE_FLUSH_INTVL));
2129
2130 /*
2131 * Check if we should allow search for new modulation mode.
2132 * If many frames have failed or succeeded, or we've used
2133 * this same modulation for a long time, allow search, and
2134 * reset history stats that keep track of whether we should
2135 * allow a new search. Also (below) reset all bitmaps and
2136 * stats in active history.
2137 */
2138 if (force_search ||
2139 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
2140 (lq_sta->total_success > lq_sta->max_success_limit) ||
2141 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
2142 && (flush_interval_passed))) {
2143 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
2144 lq_sta->total_failed,
2145 lq_sta->total_success,
2146 flush_interval_passed);
2147
2148 /* Allow search for new mode */
2149 lq_sta->stay_in_tbl = 0; /* only place reset */
2150 lq_sta->total_failed = 0;
2151 lq_sta->total_success = 0;
2152 lq_sta->flush_timer = 0;
2153
2154 /*
2155 * Else if we've used this modulation mode enough repetitions
2156 * (regardless of elapsed time or success/failure), reset
2157 * history bitmaps and rate-specific stats for all rates in
2158 * active table.
2159 */
2160 } else {
2161 lq_sta->table_count++;
2162 if (lq_sta->table_count >=
2163 lq_sta->table_count_limit) {
2164 lq_sta->table_count = 0;
2165
2166 IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n");
2167 for (i = 0; i < IWL_RATE_COUNT; i++)
2168 rs_rate_scale_clear_window(
2169 &(tbl->win[i]));
2170 }
2171 }
2172
2173 /* If transitioning to allow "search", reset all history
2174 * bitmaps and stats in active table (this will become the new
2175 * "search" table). */
2176 if (!lq_sta->stay_in_tbl) {
2177 for (i = 0; i < IWL_RATE_COUNT; i++)
2178 rs_rate_scale_clear_window(&(tbl->win[i]));
2179 }
2180 }
2181}
2182
2183/*
2184 * setup rate table in uCode
2185 */
2186static void rs_update_rate_tbl(struct iwl_priv *priv,
2187 struct iwl_rxon_context *ctx,
2188 struct iwl_lq_sta *lq_sta,
2189 struct iwl_scale_tbl_info *tbl,
2190 int index, u8 is_green)
2191{
2192 u32 rate;
2193
2194 /* Update uCode's rate table. */
2195 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2196 rs_fill_link_cmd(priv, lq_sta, rate);
2197 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2198}
2199
2200/*
2201 * Do rate scaling and search for new modulation mode.
2202 */
2203static void rs_rate_scale_perform(struct iwl_priv *priv,
2204 struct sk_buff *skb,
2205 struct ieee80211_sta *sta,
2206 struct iwl_lq_sta *lq_sta)
2207{
2208 struct ieee80211_hw *hw = priv->hw;
2209 struct ieee80211_conf *conf = &hw->conf;
2210 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2211 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2212 int low = IWL_RATE_INVALID;
2213 int high = IWL_RATE_INVALID;
2214 int index;
2215 int i;
2216 struct iwl_rate_scale_data *window = NULL;
2217 int current_tpt = IWL_INVALID_VALUE;
2218 int low_tpt = IWL_INVALID_VALUE;
2219 int high_tpt = IWL_INVALID_VALUE;
2220 u32 fail_count;
2221 s8 scale_action = 0;
2222 u16 rate_mask;
2223 u8 update_lq = 0;
2224 struct iwl_scale_tbl_info *tbl, *tbl1;
2225 u16 rate_scale_index_msk = 0;
2226 u8 is_green = 0;
2227 u8 active_tbl = 0;
2228 u8 done_search = 0;
2229 u16 high_low;
2230 s32 sr;
2231 u8 tid = IWL_MAX_TID_COUNT;
2232 struct iwl_tid_data *tid_data;
2233 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2234 struct iwl_rxon_context *ctx = sta_priv->ctx;
2235
2236 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
2237
2238 /* Send management frames and NO_ACK data using lowest rate. */
2239 /* TODO: this could probably be improved.. */
2240 if (!ieee80211_is_data(hdr->frame_control) ||
2241 info->flags & IEEE80211_TX_CTL_NO_ACK)
2242 return;
2243
2244 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
2245
2246 tid = rs_tl_add_packet(lq_sta, hdr);
2247 if ((tid != IWL_MAX_TID_COUNT) &&
2248 (lq_sta->tx_agg_tid_en & (1 << tid))) {
2249 tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
2250 if (tid_data->agg.state == IWL_AGG_OFF)
2251 lq_sta->is_agg = 0;
2252 else
2253 lq_sta->is_agg = 1;
2254 } else
2255 lq_sta->is_agg = 0;
2256
2257 /*
2258 * Select rate-scale / modulation-mode table to work with in
2259 * the rest of this function: "search" if searching for better
2260 * modulation mode, or "active" if doing rate scaling within a mode.
2261 */
2262 if (!lq_sta->search_better_tbl)
2263 active_tbl = lq_sta->active_tbl;
2264 else
2265 active_tbl = 1 - lq_sta->active_tbl;
2266
2267 tbl = &(lq_sta->lq_info[active_tbl]);
2268 if (is_legacy(tbl->lq_type))
2269 lq_sta->is_green = 0;
2270 else
2271 lq_sta->is_green = rs_use_green(sta);
2272 is_green = lq_sta->is_green;
2273
2274 /* current tx rate */
2275 index = lq_sta->last_txrate_idx;
2276
2277 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
2278 tbl->lq_type);
2279
2280 /* rates available for this association, and for modulation mode */
2281 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
2282
2283 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
2284
2285 /* mask with station rate restriction */
2286 if (is_legacy(tbl->lq_type)) {
2287 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2288 /* supp_rates has no CCK bits in A mode */
2289 rate_scale_index_msk = (u16) (rate_mask &
2290 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
2291 else
2292 rate_scale_index_msk = (u16) (rate_mask &
2293 lq_sta->supp_rates);
2294
2295 } else
2296 rate_scale_index_msk = rate_mask;
2297
2298 if (!rate_scale_index_msk)
2299 rate_scale_index_msk = rate_mask;
2300
2301 if (!((1 << index) & rate_scale_index_msk)) {
2302 IWL_ERR(priv, "Current Rate is not valid\n");
2303 if (lq_sta->search_better_tbl) {
2304 /* revert to active table if search table is not valid*/
2305 tbl->lq_type = LQ_NONE;
2306 lq_sta->search_better_tbl = 0;
2307 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2308 /* get "active" rate info */
2309 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2310 rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
2311 index, is_green);
2312 }
2313 return;
2314 }
2315
2316 /* Get expected throughput table and history window for current rate */
2317 if (!tbl->expected_tpt) {
2318 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
2319 return;
2320 }
2321
2322 /* force user max rate if set by user */
2323 if ((lq_sta->max_rate_idx != -1) &&
2324 (lq_sta->max_rate_idx < index)) {
2325 index = lq_sta->max_rate_idx;
2326 update_lq = 1;
2327 window = &(tbl->win[index]);
2328 goto lq_update;
2329 }
2330
2331 window = &(tbl->win[index]);
2332
2333 /*
2334 * If there is not enough history to calculate actual average
2335 * throughput, keep analyzing results of more tx frames, without
2336 * changing rate or mode (bypass most of the rest of this function).
2337 * Set up new rate table in uCode only if old rate is not supported
2338 * in current association (use new rate found above).
2339 */
2340 fail_count = window->counter - window->success_counter;
2341 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
2342 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
2343 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
2344 "for index %d\n",
2345 window->success_counter, window->counter, index);
2346
2347 /* Can't calculate this yet; not enough history */
2348 window->average_tpt = IWL_INVALID_VALUE;
2349
2350 /* Should we stay with this modulation mode,
2351 * or search for a new one? */
2352 rs_stay_in_table(lq_sta, false);
2353
2354 goto out;
2355 }
2356 /* Else we have enough samples; calculate estimate of
2357 * actual average throughput */
2358 if (window->average_tpt != ((window->success_ratio *
2359 tbl->expected_tpt[index] + 64) / 128)) {
2360 IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
2361 window->average_tpt = ((window->success_ratio *
2362 tbl->expected_tpt[index] + 64) / 128);
2363 }
2364
2365 /* If we are searching for better modulation mode, check success. */
2366 if (lq_sta->search_better_tbl &&
2367 (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) {
2368 /* If good success, continue using the "search" mode;
2369 * no need to send new link quality command, since we're
2370 * continuing to use the setup that we've been trying. */
2371 if (window->average_tpt > lq_sta->last_tpt) {
2372
2373 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
2374 "suc=%d cur-tpt=%d old-tpt=%d\n",
2375 window->success_ratio,
2376 window->average_tpt,
2377 lq_sta->last_tpt);
2378
2379 if (!is_legacy(tbl->lq_type))
2380 lq_sta->enable_counter = 1;
2381
2382 /* Swap tables; "search" becomes "active" */
2383 lq_sta->active_tbl = active_tbl;
2384 current_tpt = window->average_tpt;
2385
2386 /* Else poor success; go back to mode in "active" table */
2387 } else {
2388
2389 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
2390 "suc=%d cur-tpt=%d old-tpt=%d\n",
2391 window->success_ratio,
2392 window->average_tpt,
2393 lq_sta->last_tpt);
2394
2395 /* Nullify "search" table */
2396 tbl->lq_type = LQ_NONE;
2397
2398 /* Revert to "active" table */
2399 active_tbl = lq_sta->active_tbl;
2400 tbl = &(lq_sta->lq_info[active_tbl]);
2401
2402 /* Revert to "active" rate and throughput info */
2403 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2404 current_tpt = lq_sta->last_tpt;
2405
2406 /* Need to set up a new rate table in uCode */
2407 update_lq = 1;
2408 }
2409
2410 /* Either way, we've made a decision; modulation mode
2411 * search is done, allow rate adjustment next time. */
2412 lq_sta->search_better_tbl = 0;
2413 done_search = 1; /* Don't switch modes below! */
2414 goto lq_update;
2415 }
2416
2417 /* (Else) not in search of better modulation mode, try for better
2418 * starting rate, while staying in this mode. */
2419 high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
2420 tbl->lq_type);
2421 low = high_low & 0xff;
2422 high = (high_low >> 8) & 0xff;
2423
2424 /* If user set max rate, dont allow higher than user constrain */
2425 if ((lq_sta->max_rate_idx != -1) &&
2426 (lq_sta->max_rate_idx < high))
2427 high = IWL_RATE_INVALID;
2428
2429 sr = window->success_ratio;
2430
2431 /* Collect measured throughputs for current and adjacent rates */
2432 current_tpt = window->average_tpt;
2433 if (low != IWL_RATE_INVALID)
2434 low_tpt = tbl->win[low].average_tpt;
2435 if (high != IWL_RATE_INVALID)
2436 high_tpt = tbl->win[high].average_tpt;
2437
2438 scale_action = 0;
2439
2440 /* Too many failures, decrease rate */
2441 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
2442 IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
2443 scale_action = -1;
2444
2445 /* No throughput measured yet for adjacent rates; try increase. */
2446 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2447 (high_tpt == IWL_INVALID_VALUE)) {
2448
2449 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2450 scale_action = 1;
2451 else if (low != IWL_RATE_INVALID)
2452 scale_action = 0;
2453 }
2454
2455 /* Both adjacent throughputs are measured, but neither one has better
2456 * throughput; we're using the best rate, don't change it! */
2457 else if ((low_tpt != IWL_INVALID_VALUE) &&
2458 (high_tpt != IWL_INVALID_VALUE) &&
2459 (low_tpt < current_tpt) &&
2460 (high_tpt < current_tpt))
2461 scale_action = 0;
2462
2463 /* At least one adjacent rate's throughput is measured,
2464 * and may have better performance. */
2465 else {
2466 /* Higher adjacent rate's throughput is measured */
2467 if (high_tpt != IWL_INVALID_VALUE) {
2468 /* Higher rate has better throughput */
2469 if (high_tpt > current_tpt &&
2470 sr >= IWL_RATE_INCREASE_TH) {
2471 scale_action = 1;
2472 } else {
2473 scale_action = 0;
2474 }
2475
2476 /* Lower adjacent rate's throughput is measured */
2477 } else if (low_tpt != IWL_INVALID_VALUE) {
2478 /* Lower rate has better throughput */
2479 if (low_tpt > current_tpt) {
2480 IWL_DEBUG_RATE(priv,
2481 "decrease rate because of low tpt\n");
2482 scale_action = -1;
2483 } else if (sr >= IWL_RATE_INCREASE_TH) {
2484 scale_action = 1;
2485 }
2486 }
2487 }
2488
2489 /* Sanity check; asked for decrease, but success rate or throughput
2490 * has been good at old rate. Don't change it. */
2491 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2492 ((sr > IWL_RATE_HIGH_TH) ||
2493 (current_tpt > (100 * tbl->expected_tpt[low]))))
2494 scale_action = 0;
2495 if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type))
2496 scale_action = -1;
2497 if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
2498 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
2499 scale_action = -1;
2500
2501 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2502 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2503 if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
2504 /*
2505 * don't set scale_action, don't want to scale up if
2506 * the rate scale doesn't otherwise think that is a
2507 * good idea.
2508 */
2509 } else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
2510 scale_action = -1;
2511 }
2512 }
2513 lq_sta->last_bt_traffic = priv->bt_traffic_load;
2514
2515 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2516 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2517 /* search for a new modulation */
2518 rs_stay_in_table(lq_sta, true);
2519 goto lq_update;
2520 }
2521
2522 switch (scale_action) {
2523 case -1:
2524 /* Decrease starting rate, update uCode's rate table */
2525 if (low != IWL_RATE_INVALID) {
2526 update_lq = 1;
2527 index = low;
2528 }
2529
2530 break;
2531 case 1:
2532 /* Increase starting rate, update uCode's rate table */
2533 if (high != IWL_RATE_INVALID) {
2534 update_lq = 1;
2535 index = high;
2536 }
2537
2538 break;
2539 case 0:
2540 /* No change */
2541 default:
2542 break;
2543 }
2544
2545 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2546 "high %d type %d\n",
2547 index, scale_action, low, high, tbl->lq_type);
2548
2549lq_update:
2550 /* Replace uCode's rate table for the destination station. */
2551 if (update_lq)
2552 rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
2553
2554 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
2555 /* Should we stay with this modulation mode,
2556 * or search for a new one? */
2557 rs_stay_in_table(lq_sta, false);
2558 }
2559 /*
2560 * Search for new modulation mode if we're:
2561 * 1) Not changing rates right now
2562 * 2) Not just finishing up a search
2563 * 3) Allowing a new search
2564 */
2565 if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) {
2566 /* Save current throughput to compare with "search" throughput*/
2567 lq_sta->last_tpt = current_tpt;
2568
2569 /* Select a new "search" modulation mode to try.
2570 * If one is found, set up the new "search" table. */
2571 if (is_legacy(tbl->lq_type))
2572 rs_move_legacy_other(priv, lq_sta, conf, sta, index);
2573 else if (is_siso(tbl->lq_type))
2574 rs_move_siso_to_other(priv, lq_sta, conf, sta, index);
2575 else if (is_mimo2(tbl->lq_type))
2576 rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index);
2577 else
2578 rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index);
2579
2580 /* If new "search" mode was selected, set up in uCode table */
2581 if (lq_sta->search_better_tbl) {
2582 /* Access the "search" table, clear its history. */
2583 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2584 for (i = 0; i < IWL_RATE_COUNT; i++)
2585 rs_rate_scale_clear_window(&(tbl->win[i]));
2586
2587 /* Use new "search" start rate */
2588 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2589
2590 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
2591 tbl->current_rate, index);
2592 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2593 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2594 } else
2595 done_search = 1;
2596 }
2597
2598 if (done_search && !lq_sta->stay_in_tbl) {
2599 /* If the "active" (non-search) mode was legacy,
2600 * and we've tried switching antennas,
2601 * but we haven't been able to try HT modes (not available),
2602 * stay with best antenna legacy modulation for a while
2603 * before next round of mode comparisons. */
2604 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2605 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2606 lq_sta->action_counter > tbl1->max_search) {
2607 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2608 rs_set_stay_in_table(priv, 1, lq_sta);
2609 }
2610
2611 /* If we're in an HT mode, and all 3 mode switch actions
2612 * have been tried and compared, stay in this best modulation
2613 * mode for a while before next round of mode comparisons. */
2614 if (lq_sta->enable_counter &&
2615 (lq_sta->action_counter >= tbl1->max_search) &&
2616 iwl_ht_enabled(priv)) {
2617 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2618 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2619 (tid != IWL_MAX_TID_COUNT)) {
2620 u8 sta_id = lq_sta->lq.sta_id;
2621 tid_data = &priv->tid_data[sta_id][tid];
2622 if (tid_data->agg.state == IWL_AGG_OFF) {
2623 IWL_DEBUG_RATE(priv,
2624 "try to aggregate tid %d\n",
2625 tid);
2626 rs_tl_turn_on_agg(priv, tid,
2627 lq_sta, sta);
2628 }
2629 }
2630 rs_set_stay_in_table(priv, 0, lq_sta);
2631 }
2632 }
2633
2634out:
2635 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2636 lq_sta->last_txrate_idx = index;
2637}
2638
2639/**
2640 * rs_initialize_lq - Initialize a station's hardware rate table
2641 *
2642 * The uCode's station table contains a table of fallback rates
2643 * for automatic fallback during transmission.
2644 *
2645 * NOTE: This sets up a default set of values. These will be replaced later
2646 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2647 * rc80211_simple.
2648 *
2649 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2650 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2651 * which requires station table entry to exist).
2652 */
2653static void rs_initialize_lq(struct iwl_priv *priv,
2654 struct ieee80211_sta *sta,
2655 struct iwl_lq_sta *lq_sta)
2656{
2657 struct iwl_scale_tbl_info *tbl;
2658 int rate_idx;
2659 int i;
2660 u32 rate;
2661 u8 use_green = rs_use_green(sta);
2662 u8 active_tbl = 0;
2663 u8 valid_tx_ant;
2664 struct iwl_station_priv *sta_priv;
2665 struct iwl_rxon_context *ctx;
2666
2667 if (!sta || !lq_sta)
2668 return;
2669
2670 sta_priv = (void *)sta->drv_priv;
2671 ctx = sta_priv->ctx;
2672
2673 i = lq_sta->last_txrate_idx;
2674
2675 valid_tx_ant = priv->nvm_data->valid_tx_ant;
2676
2677 if (!lq_sta->search_better_tbl)
2678 active_tbl = lq_sta->active_tbl;
2679 else
2680 active_tbl = 1 - lq_sta->active_tbl;
2681
2682 tbl = &(lq_sta->lq_info[active_tbl]);
2683
2684 if ((i < 0) || (i >= IWL_RATE_COUNT))
2685 i = 0;
2686
2687 rate = iwl_rates[i].plcp;
2688 tbl->ant_type = first_antenna(valid_tx_ant);
2689 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2690
2691 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2692 rate |= RATE_MCS_CCK_MSK;
2693
2694 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2695 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2696 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2697
2698 rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2699 tbl->current_rate = rate;
2700 rs_set_expected_tpt_table(lq_sta, tbl);
2701 rs_fill_link_cmd(NULL, lq_sta, rate);
2702 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2703 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true);
2704}
2705
2706static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2707 struct ieee80211_tx_rate_control *txrc)
2708{
2709
2710 struct sk_buff *skb = txrc->skb;
2711 struct ieee80211_supported_band *sband = txrc->sband;
2712 struct iwl_op_mode *op_mode __maybe_unused =
2713 (struct iwl_op_mode *)priv_r;
2714 struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
2715 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2716 struct iwl_lq_sta *lq_sta = priv_sta;
2717 int rate_idx;
2718
2719 IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
2720
2721 /* Get max rate if user set max rate */
2722 if (lq_sta) {
2723 lq_sta->max_rate_idx = txrc->max_rate_idx;
2724 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2725 (lq_sta->max_rate_idx != -1))
2726 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
2727 if ((lq_sta->max_rate_idx < 0) ||
2728 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2729 lq_sta->max_rate_idx = -1;
2730 }
2731
2732 /* Treat uninitialized rate scaling data same as non-existing. */
2733 if (lq_sta && !lq_sta->drv) {
2734 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2735 priv_sta = NULL;
2736 }
2737
2738 /* Send management frames and NO_ACK data using lowest rate. */
2739 if (rate_control_send_low(sta, priv_sta, txrc))
2740 return;
2741
2742 rate_idx = lq_sta->last_txrate_idx;
2743
2744 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2745 rate_idx -= IWL_FIRST_OFDM_RATE;
2746 /* 6M and 9M shared same MCS index */
2747 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2748 if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
2749 IWL_RATE_MIMO3_6M_PLCP)
2750 rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
2751 else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
2752 IWL_RATE_MIMO2_6M_PLCP)
2753 rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
2754 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2755 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2756 info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
2757 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2758 info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA;
2759 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2760 info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2761 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2762 info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
2763 } else {
2764 /* Check for invalid rates */
2765 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
2766 ((sband->band == IEEE80211_BAND_5GHZ) &&
2767 (rate_idx < IWL_FIRST_OFDM_RATE)))
2768 rate_idx = rate_lowest_index(sband, sta);
2769 /* On valid 5 GHz rate, adjust index */
2770 else if (sband->band == IEEE80211_BAND_5GHZ)
2771 rate_idx -= IWL_FIRST_OFDM_RATE;
2772 info->control.rates[0].flags = 0;
2773 }
2774 info->control.rates[0].idx = rate_idx;
2775 info->control.rates[0].count = 1;
2776}
2777
2778static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2779 gfp_t gfp)
2780{
2781 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2782 struct iwl_op_mode *op_mode __maybe_unused =
2783 (struct iwl_op_mode *)priv_rate;
2784 struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
2785
2786 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2787
2788 return &sta_priv->lq_sta;
2789}
2790
2791/*
2792 * Called after adding a new station to initialize rate scaling
2793 */
2794void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
2795{
2796 int i, j;
2797 struct ieee80211_hw *hw = priv->hw;
2798 struct ieee80211_conf *conf = &priv->hw->conf;
2799 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2800 struct iwl_station_priv *sta_priv;
2801 struct iwl_lq_sta *lq_sta;
2802 struct ieee80211_supported_band *sband;
2803 unsigned long supp; /* must be unsigned long for for_each_set_bit */
2804
2805 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2806 lq_sta = &sta_priv->lq_sta;
2807 sband = hw->wiphy->bands[conf->chandef.chan->band];
2808
2809
2810 lq_sta->lq.sta_id = sta_id;
2811
2812 for (j = 0; j < LQ_SIZE; j++)
2813 for (i = 0; i < IWL_RATE_COUNT; i++)
2814 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2815
2816 lq_sta->flush_timer = 0;
2817 lq_sta->supp_rates = sta->supp_rates[sband->band];
2818
2819 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
2820 sta_id);
2821 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2822 * the lowest or the highest rate.. Could consider using RSSI from
2823 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2824 * after assoc.. */
2825
2826 lq_sta->is_dup = 0;
2827 lq_sta->max_rate_idx = -1;
2828 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2829 lq_sta->is_green = rs_use_green(sta);
2830 lq_sta->band = sband->band;
2831 /*
2832 * active legacy rates as per supported rates bitmap
2833 */
2834 supp = sta->supp_rates[sband->band];
2835 lq_sta->active_legacy_rate = 0;
2836 for_each_set_bit(i, &supp, BITS_PER_LONG)
2837 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
2838
2839 /*
2840 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2841 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2842 */
2843 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2844 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2845 lq_sta->active_siso_rate &= ~((u16)0x2);
2846 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2847
2848 /* Same here */
2849 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2850 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2851 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2852 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2853
2854 lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
2855 lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
2856 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2857 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2858
2859 IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
2860 lq_sta->active_siso_rate,
2861 lq_sta->active_mimo2_rate,
2862 lq_sta->active_mimo3_rate);
2863
2864 /* These values will be overridden later */
2865 lq_sta->lq.general_params.single_stream_ant_msk =
2866 first_antenna(priv->nvm_data->valid_tx_ant);
2867 lq_sta->lq.general_params.dual_stream_ant_msk =
2868 priv->nvm_data->valid_tx_ant &
2869 ~first_antenna(priv->nvm_data->valid_tx_ant);
2870 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2871 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2872 } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
2873 lq_sta->lq.general_params.dual_stream_ant_msk =
2874 priv->nvm_data->valid_tx_ant;
2875 }
2876
2877 /* as default allow aggregation for all tids */
2878 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2879 lq_sta->drv = priv;
2880
2881 /* Set last_txrate_idx to lowest rate */
2882 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2883 if (sband->band == IEEE80211_BAND_5GHZ)
2884 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2885 lq_sta->is_agg = 0;
2886#ifdef CONFIG_MAC80211_DEBUGFS
2887 lq_sta->dbg_fixed_rate = 0;
2888#endif
2889
2890 rs_initialize_lq(priv, sta, lq_sta);
2891}
2892
/*
 * rs_fill_link_cmd - build the uCode link-quality (rate-retry) command
 *
 * Fills lq_sta->lq: entry 0 of rs_table is @new_rate; the remaining
 * LINK_QUAL_MAX_RETRY_NUM - 1 entries are progressively lower fallback
 * rates obtained via rs_get_lower_rate().  Legacy rates toggle antennas
 * every NUM_TRY_BEFORE_ANT_TOGGLE tries; HT is disallowed after the first
 * fallback pass.  The aggregation parameters are filled at the end.
 *
 * @priv may be NULL (it is checked before every dereference); in that case
 * BT-coex overrides and antenna toggling are skipped.
 */
static void rs_fill_link_cmd(struct iwl_priv *priv,
			     struct iwl_lq_sta *lq_sta, u32 new_rate)
{
	struct iwl_scale_tbl_info tbl_type;
	int index = 0;			/* next rs_table slot to fill */
	int rate_idx;
	int repeat_rate = 0;		/* times to repeat the current rate */
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;		/* cleared after the first fallback pass */
	u8 valid_tx_ant = 0;
	struct iwl_station_priv *sta_priv =
		container_of(lq_sta, struct iwl_station_priv, lq_sta);
	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (index 0) if needed for debug purposes */
	rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

	/* Interpret new_rate (rate_n_flags) */
	rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
				 &tbl_type, &rate_idx);

	/* Full BT concurrency forces single-stream (1x1) operation */
	if (priv && priv->bt_full_concurrent) {
		/* 1x1 only */
		tbl_type.ant_type =
			first_antenna(priv->nvm_data->valid_tx_ant);
	}

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IWL_NUMBER_TRY;
	} else {
		repeat_rate = min(IWL_HT_NUMBER_TRY,
				  LINK_QUAL_AGG_DISABLE_START_DEF - 1);
	}

	lq_cmd->general_params.mimo_delimiter =
			is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (index 0) */
	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

	if (num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
						tbl_type.ant_type;
	} else if (num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk =
						tbl_type.ant_type;
	} /* otherwise we don't modify the existing value */

	index++;
	repeat_rate--;
	if (priv) {
		if (priv->bt_full_concurrent)
			valid_tx_ant = ANT_A;
		else
			valid_tx_ant = priv->nvm_data->valid_tx_ant;
	}

	/* Fill rest of rate table */
	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
			if (is_legacy(tbl_type.lq_type)) {
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (priv &&
					 rs_toggle_antenna(valid_tx_ant,
							&new_rate, &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

			/* Fill next table entry */
			lq_cmd->rs_table[index].rate_n_flags =
					cpu_to_le32(new_rate);
			repeat_rate--;
			index++;
		}

		rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
					 &rate_idx);

		if (priv && priv->bt_full_concurrent) {
			/* 1x1 only */
			tbl_type.ant_type =
			    first_antenna(priv->nvm_data->valid_tx_ant);
		}

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = index;

		/* Get next rate */
		new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
					     use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (priv &&
				 rs_toggle_antenna(valid_tx_ant,
						   &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IWL_NUMBER_TRY;
		} else {
			repeat_rate = IWL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

		/* NOTE(review): this tail write is not re-checked against
		 * LINK_QUAL_MAX_RETRY_NUM; it stays in bounds only because
		 * the inner loop cannot consume the last slot while
		 * repeat_rate > 0 with the current IWL_*_NUMBER_TRY values —
		 * confirm if those constants ever change. */
		/* Fill next table entry */
		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

		index++;
		repeat_rate--;
	}

	/* use the station's own aggregation buffer size if set, else default */
	lq_cmd->agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
	/*
	 * overwrite if needed, pass aggregation time limit
	 * to uCode in uSec
	 */
	if (priv && priv->lib->bt_params &&
	    priv->lib->bt_params->agg_time_limit &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		lq_cmd->agg_params.agg_time_limit =
			cpu_to_le16(priv->lib->bt_params->agg_time_limit);
}
3040
3041static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
3042{
3043 return hw->priv;
3044}
/*
 * mac80211's rate_control_ops requires a .free hook even though
 * rs_alloc() hands back an existing pointer and owns no memory,
 * so there is nothing to release here.
 */
static void rs_free(void *priv_rate)
{
}
3050
/*
 * mac80211 rate-control .free_sta hook.  The per-station lq_sta lives
 * inside the station's private area, so nothing is freed here; the body
 * only emits enter/leave debug traces.
 */
static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
			void *priv_sta)
{
	struct iwl_op_mode *op_mode __maybe_unused = priv_r;
	struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);

	IWL_DEBUG_RATE(priv, "enter\n");
	IWL_DEBUG_RATE(priv, "leave\n");
}
3060
3061#ifdef CONFIG_MAC80211_DEBUGFS
3062static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3063 u32 *rate_n_flags, int index)
3064{
3065 struct iwl_priv *priv;
3066 u8 valid_tx_ant;
3067 u8 ant_sel_tx;
3068
3069 priv = lq_sta->drv;
3070 valid_tx_ant = priv->nvm_data->valid_tx_ant;
3071 if (lq_sta->dbg_fixed_rate) {
3072 ant_sel_tx =
3073 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
3074 >> RATE_MCS_ANT_POS);
3075 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
3076 *rate_n_flags = lq_sta->dbg_fixed_rate;
3077 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
3078 } else {
3079 lq_sta->dbg_fixed_rate = 0;
3080 IWL_ERR(priv,
3081 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
3082 ant_sel_tx, valid_tx_ant);
3083 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
3084 }
3085 } else {
3086 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
3087 }
3088}
3089
3090static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3091 const char __user *user_buf, size_t count, loff_t *ppos)
3092{
3093 struct iwl_lq_sta *lq_sta = file->private_data;
3094 struct iwl_priv *priv;
3095 char buf[64];
3096 size_t buf_size;
3097 u32 parsed_rate;
3098
3099
3100 priv = lq_sta->drv;
3101 memset(buf, 0, sizeof(buf));
3102 buf_size = min(count, sizeof(buf) - 1);
3103 if (copy_from_user(buf, user_buf, buf_size))
3104 return -EFAULT;
3105
3106 if (sscanf(buf, "%x", &parsed_rate) == 1)
3107 lq_sta->dbg_fixed_rate = parsed_rate;
3108 else
3109 lq_sta->dbg_fixed_rate = 0;
3110
3111 rs_program_fix_rate(priv, lq_sta);
3112
3113 return count;
3114}
3115
/*
 * debugfs read handler for "rate_scale_table": dump the active scale
 * table — station id, success counters, fixed-rate override, valid TX
 * antennas, modulation/bandwidth flags, the link-quality general and
 * aggregation parameters, and the full rate-retry table decoded to Mbps.
 *
 * NOTE(review): output is built with unbounded sprintf() into a 1024-byte
 * kmalloc buffer; sizing appears to rely on the bounded field widths —
 * confirm headroom if fields are ever added.
 */
static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i = 0;
	int index = 0;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;
	struct iwl_priv *priv;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);

	priv = lq_sta->drv;
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
			lq_sta->total_failed, lq_sta->total_success,
			lq_sta->active_legacy_rate);
	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
			lq_sta->dbg_fixed_rate);
	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
	    (priv->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
	    (priv->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
	    (priv->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
	desc += sprintf(buff+desc, "lq type %s\n",
	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
	/* HT-only detail: stream count, bandwidth, guard interval, GF, AGG */
	if (is_Ht(tbl->lq_type)) {
		desc += sprintf(buff + desc, " %s",
		   (is_siso(tbl->lq_type)) ? "SISO" :
		   ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
		desc += sprintf(buff + desc, " %s",
		   (tbl->is_ht40) ? "40MHz" : "20MHz");
		desc += sprintf(buff + desc, " %s %s %s\n",
		   (tbl->is_SGI) ? "SGI" : "",
		   (lq_sta->is_green) ? "GF enabled" : "",
		   (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
		lq_sta->last_rate_n_flags);
	desc += sprintf(buff+desc, "general:"
		"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
		lq_sta->lq.general_params.flags,
		lq_sta->lq.general_params.mimo_delimiter,
		lq_sta->lq.general_params.single_stream_ant_msk,
		lq_sta->lq.general_params.dual_stream_ant_msk);

	desc += sprintf(buff+desc, "agg:"
			"time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
			le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
			lq_sta->lq.agg_params.agg_dis_start_th,
			lq_sta->lq.agg_params.agg_frame_cnt_limit);

	desc += sprintf(buff+desc,
			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
			lq_sta->lq.general_params.start_rate_index[0],
			lq_sta->lq.general_params.start_rate_index[1],
			lq_sta->lq.general_params.start_rate_index[2],
			lq_sta->lq.general_params.start_rate_index[3]);

	/* decode each retry-table entry to human-readable Mbps (and MCS
	 * name for HT tables) */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		index = iwl_hwrate_to_plcp_idx(
			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
		if (is_legacy(tbl->lq_type)) {
			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
				i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
				iwl_rate_mcs[index].mbps);
		} else {
			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n",
				i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
				iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
3197
/* File ops for the read/write "rate_scale_table" debugfs entry. */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = rs_sta_dbgfs_scale_table_write,
	.read = rs_sta_dbgfs_scale_table_read,
	.open = simple_open,
	.llseek = default_llseek,
};
/*
 * debugfs read handler for "rate_stats_table": for both scale tables
 * ("active" marked '*', "search" marked 'x') dump the table flags and
 * the per-rate success-history windows.
 */
static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i, j;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		desc += sprintf(buff+desc,
				"%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
				"rate=0x%X\n",
				lq_sta->active_tbl == i ? "*" : "x",
				lq_sta->lq_info[i].lq_type,
				lq_sta->lq_info[i].is_SGI,
				lq_sta->lq_info[i].is_ht40,
				lq_sta->lq_info[i].is_dup,
				lq_sta->is_green,
				lq_sta->lq_info[i].current_rate);
		/* one history window per legacy/HT rate index */
		for (j = 0; j < IWL_RATE_COUNT; j++) {
			desc += sprintf(buff+desc,
				"counter=%d success=%d %%=%d\n",
				lq_sta->lq_info[i].win[j].counter,
				lq_sta->lq_info[i].win[j].success_counter,
				lq_sta->lq_info[i].win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
3241
/* File ops for the read-only "rate_stats_table" debugfs entry. */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = rs_sta_dbgfs_stats_table_read,
	.open = simple_open,
	.llseek = default_llseek,
};
3247
3248static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
3249 char __user *user_buf, size_t count, loff_t *ppos)
3250{
3251 struct iwl_lq_sta *lq_sta = file->private_data;
3252 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
3253 char buff[120];
3254 int desc = 0;
3255
3256 if (is_Ht(tbl->lq_type))
3257 desc += sprintf(buff+desc,
3258 "Bit Rate= %d Mb/s\n",
3259 tbl->expected_tpt[lq_sta->last_txrate_idx]);
3260 else
3261 desc += sprintf(buff+desc,
3262 "Bit Rate= %d Mb/s\n",
3263 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
3264
3265 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3266}
3267
/* File ops for the read-only "rate_scale_data" debugfs entry. */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = rs_sta_dbgfs_rate_scale_data_read,
	.open = simple_open,
	.llseek = default_llseek,
};
3273
/*
 * mac80211 .add_sta_debugfs hook: create the per-station rate-scaling
 * debugfs entries under @dir and stash the dentries in lq_sta so
 * rs_remove_debugfs() can tear them down.
 */
static void rs_add_debugfs(void *priv, void *priv_sta,
					struct dentry *dir)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	lq_sta->rs_sta_dbgfs_scale_table_file =
		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
	/* simple u8 knob: bitmap of TIDs allowed to aggregate */
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
		&lq_sta->tx_agg_tid_en);

}
3292
/*
 * mac80211 .remove_sta_debugfs hook: remove the four entries created by
 * rs_add_debugfs().  debugfs_remove() tolerates NULL/error dentries.
 */
static void rs_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
3301#endif
3302
3303/*
3304 * Initialization of rate scaling information is done by driver after
3305 * the station is added. Since mac80211 calls this function before a
3306 * station is added we ignore it.
3307 */
static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
			      struct cfg80211_chan_def *chandef,
			      struct ieee80211_sta *sta, void *priv_sta)
{
	/* Intentionally empty: the driver initializes rate scaling via
	 * iwl_rs_rate_init() only after the station has been added. */
}
3313
/* mac80211 rate-control hook table for the AGN rate-scaling algorithm. */
static const struct rate_control_ops rs_ops = {
	.name = RS_NAME,
	.tx_status = rs_tx_status,
	.get_rate = rs_get_rate,
	.rate_init = rs_rate_init_stub,
	.alloc = rs_alloc,
	.free = rs_free,
	.alloc_sta = rs_alloc_sta,
	.free_sta = rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = rs_add_debugfs,
	.remove_sta_debugfs = rs_remove_debugfs,
#endif
};
3328
/* Register the AGN rate-control algorithm with mac80211; returns the
 * status code from ieee80211_rate_control_register(). */
int iwlagn_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}
3333
/* Unregister the AGN rate-control algorithm from mac80211. */
void iwlagn_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
3338
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
new file mode 100644
index 000000000000..f6bd25cad203
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -0,0 +1,426 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_agn_rs_h__
28#define __iwl_agn_rs_h__
29
30#include <net/mac80211.h>
31
32#include "iwl-config.h"
33
34#include "commands.h"
35
/* Per-rate lookup entry tying together the uCode PLCP codes, the IEEE
 * header value, and the neighbouring rates used when stepping up/down. */
struct iwl_rate_info {
	u8 plcp;	/* uCode API: IWL_RATE_6M_PLCP, etc. */
	u8 plcp_siso;	/* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;	/* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
	u8 plcp_mimo3;  /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
	u8 prev_ieee;    /* previous rate in IEEE speeds */
	u8 next_ieee;    /* next rate in IEEE speeds */
	u8 prev_rs;      /* previous rate used in rs algo */
	u8 next_rs;      /* next rate used in rs algo */
	u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
	u8 next_rs_tgg;  /* next rate used in TGG rs algo */
};
49
/*
 * These serve as indexes into
 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
 * CCK rates (1-11M) come first, then OFDM (6-60M).
 */
enum {
	IWL_RATE_1M_INDEX = 0,
	IWL_RATE_2M_INDEX,
	IWL_RATE_5M_INDEX,
	IWL_RATE_11M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX,
	IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX,
	IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX,
	IWL_RATE_54M_INDEX,
	IWL_RATE_60M_INDEX,
	IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
	IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
	IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
	IWL_RATE_INVALID = IWL_RATE_COUNT,
};
73
/* Alternative ordering (OFDM first, then CCK) used by rate tables. */
enum {
	IWL_RATE_6M_INDEX_TABLE = 0,
	IWL_RATE_9M_INDEX_TABLE,
	IWL_RATE_12M_INDEX_TABLE,
	IWL_RATE_18M_INDEX_TABLE,
	IWL_RATE_24M_INDEX_TABLE,
	IWL_RATE_36M_INDEX_TABLE,
	IWL_RATE_48M_INDEX_TABLE,
	IWL_RATE_54M_INDEX_TABLE,
	IWL_RATE_1M_INDEX_TABLE,
	IWL_RATE_2M_INDEX_TABLE,
	IWL_RATE_5M_INDEX_TABLE,
	IWL_RATE_11M_INDEX_TABLE,
	IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
};
89
/* First/last rate-index bounds for the OFDM and CCK rate ranges. */
enum {
	IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
	IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
	IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
	IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
};
96
/* #define vs. enum to keep from defaulting to 'large integer' */
/* Single-bit masks, one per rate index, for building rate bitmaps. */
#define IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
#define IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
#define IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
#define IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
#define IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
#define IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
#define IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
#define IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
#define IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
#define IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
#define IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
#define IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
111
/* uCode API values for legacy bit rates, both OFDM and CCK */
enum {
	IWL_RATE_6M_PLCP  = 13,
	IWL_RATE_9M_PLCP  = 15,
	IWL_RATE_12M_PLCP = 5,
	IWL_RATE_18M_PLCP = 7,
	IWL_RATE_24M_PLCP = 9,
	IWL_RATE_36M_PLCP = 11,
	IWL_RATE_48M_PLCP = 1,
	IWL_RATE_54M_PLCP = 3,
	IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
	/* CCK PLCP values are the rate in units of 0.1 Mbps */
	IWL_RATE_1M_PLCP  = 10,
	IWL_RATE_2M_PLCP  = 20,
	IWL_RATE_5M_PLCP  = 55,
	IWL_RATE_11M_PLCP = 110,
	/*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */
	/*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
};
130
/* uCode API values for OFDM high-throughput (HT) bit rates.
 * SISO occupies 0x0-0x7, MIMO2 0x8-0xf, MIMO3 0x10-0x17. */
enum {
	IWL_RATE_SISO_6M_PLCP = 0,
	IWL_RATE_SISO_12M_PLCP = 1,
	IWL_RATE_SISO_18M_PLCP = 2,
	IWL_RATE_SISO_24M_PLCP = 3,
	IWL_RATE_SISO_36M_PLCP = 4,
	IWL_RATE_SISO_48M_PLCP = 5,
	IWL_RATE_SISO_54M_PLCP = 6,
	IWL_RATE_SISO_60M_PLCP = 7,
	IWL_RATE_MIMO2_6M_PLCP  = 0x8,
	IWL_RATE_MIMO2_12M_PLCP = 0x9,
	IWL_RATE_MIMO2_18M_PLCP = 0xa,
	IWL_RATE_MIMO2_24M_PLCP = 0xb,
	IWL_RATE_MIMO2_36M_PLCP = 0xc,
	IWL_RATE_MIMO2_48M_PLCP = 0xd,
	IWL_RATE_MIMO2_54M_PLCP = 0xe,
	IWL_RATE_MIMO2_60M_PLCP = 0xf,
	IWL_RATE_MIMO3_6M_PLCP  = 0x10,
	IWL_RATE_MIMO3_12M_PLCP = 0x11,
	IWL_RATE_MIMO3_18M_PLCP = 0x12,
	IWL_RATE_MIMO3_24M_PLCP = 0x13,
	IWL_RATE_MIMO3_36M_PLCP = 0x14,
	IWL_RATE_MIMO3_48M_PLCP = 0x15,
	IWL_RATE_MIMO3_54M_PLCP = 0x16,
	IWL_RATE_MIMO3_60M_PLCP = 0x17,
	IWL_RATE_SISO_INVM_PLCP,
	IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
	IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
};
161
/* MAC header values for bit rates (IEEE units of 0.5 Mbps). */
enum {
	IWL_RATE_6M_IEEE  = 12,
	IWL_RATE_9M_IEEE  = 18,
	IWL_RATE_12M_IEEE = 24,
	IWL_RATE_18M_IEEE = 36,
	IWL_RATE_24M_IEEE = 48,
	IWL_RATE_36M_IEEE = 72,
	IWL_RATE_48M_IEEE = 96,
	IWL_RATE_54M_IEEE = 108,
	IWL_RATE_60M_IEEE = 120,
	IWL_RATE_1M_IEEE  = 2,
	IWL_RATE_2M_IEEE  = 4,
	IWL_RATE_5M_IEEE  = 11,
	IWL_RATE_11M_IEEE = 22,
};
178
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)

#define IWL_INVALID_VALUE    -1

#define IWL_MIN_RSSI_VAL                 -100
#define IWL_MAX_RSSI_VAL                    0

/* These values specify how many Tx frame attempts before
 * searching for a new modulation mode */
#define IWL_LEGACY_FAILURE_LIMIT	160
#define IWL_LEGACY_SUCCESS_LIMIT	480
#define IWL_LEGACY_TABLE_COUNT		160

#define IWL_NONE_LEGACY_FAILURE_LIMIT	400
#define IWL_NONE_LEGACY_SUCCESS_LIMIT	4500
#define IWL_NONE_LEGACY_TABLE_COUNT	1500

/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
#define IWL_RS_GOOD_RATIO		12800	/* 100% */
#define IWL_RATE_SCALE_SWITCH		10880	/*  85% */
#define IWL_RATE_HIGH_TH		10880	/*  85% */
#define IWL_RATE_INCREASE_TH		6400	/*  50% */
#define IWL_RATE_DECREASE_TH		1920	/*  15% */

/* possible actions when in legacy mode */
#define IWL_LEGACY_SWITCH_ANTENNA1      0
#define IWL_LEGACY_SWITCH_ANTENNA2      1
#define IWL_LEGACY_SWITCH_SISO          2
#define IWL_LEGACY_SWITCH_MIMO2_AB      3
#define IWL_LEGACY_SWITCH_MIMO2_AC      4
#define IWL_LEGACY_SWITCH_MIMO2_BC      5
#define IWL_LEGACY_SWITCH_MIMO3_ABC     6

/* possible actions when in siso mode */
#define IWL_SISO_SWITCH_ANTENNA1        0
#define IWL_SISO_SWITCH_ANTENNA2        1
#define IWL_SISO_SWITCH_MIMO2_AB        2
#define IWL_SISO_SWITCH_MIMO2_AC        3
#define IWL_SISO_SWITCH_MIMO2_BC        4
#define IWL_SISO_SWITCH_GI              5
#define IWL_SISO_SWITCH_MIMO3_ABC       6


/* possible actions when in mimo mode */
#define IWL_MIMO2_SWITCH_ANTENNA1       0
#define IWL_MIMO2_SWITCH_ANTENNA2       1
#define IWL_MIMO2_SWITCH_SISO_A         2
#define IWL_MIMO2_SWITCH_SISO_B         3
#define IWL_MIMO2_SWITCH_SISO_C         4
#define IWL_MIMO2_SWITCH_GI             5
#define IWL_MIMO2_SWITCH_MIMO3_ABC      6


/* possible actions when in mimo3 mode */
#define IWL_MIMO3_SWITCH_ANTENNA1       0
#define IWL_MIMO3_SWITCH_ANTENNA2       1
#define IWL_MIMO3_SWITCH_SISO_A         2
#define IWL_MIMO3_SWITCH_SISO_B         3
#define IWL_MIMO3_SWITCH_SISO_C         4
#define IWL_MIMO3_SWITCH_MIMO2_AB       5
#define IWL_MIMO3_SWITCH_MIMO2_AC       6
#define IWL_MIMO3_SWITCH_MIMO2_BC       7
#define IWL_MIMO3_SWITCH_GI             8


/* highest switch-action value searchable in each mode */
#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC

/*FIXME:RS:add possible actions for MIMO3*/

#define IWL_ACTION_LIMIT		3	/* # possible actions */

#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */

/* load per tid defines for A-MPDU activation */
#define IWL_AGG_TPT_THREHOLD	0
#define IWL_AGG_LOAD_THRESHOLD	10
#define IWL_AGG_ALL_TID		0xff
#define TID_QUEUE_CELL_SPACING	50	/*mS */
#define TID_QUEUE_MAX_SIZE	20
#define TID_ROUND_VALUE	5	/* mS */

#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
/* elapsed time between two timestamps, tolerating one wrap-around */
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))

/* rate lookup table defined in rs.c */
extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
265
/* Modulation class of a scale table, plus predicate helpers over it. */
enum iwl_table_type {
	LQ_NONE,
	LQ_G,		/* legacy types */
	LQ_A,
	LQ_SISO,	/* high-throughput types */
	LQ_MIMO2,
	LQ_MIMO3,
	LQ_MAX,
};

#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
#define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G)
284
#define IWL_MAX_MCS_DISPLAY_SIZE	12

/* Human-readable rate strings used by the debugfs dumps. */
struct iwl_rate_mcs_info {
	char	mbps[IWL_MAX_MCS_DISPLAY_SIZE];
	char	mcs[IWL_MAX_MCS_DISPLAY_SIZE];
};
291
/**
 * struct iwl_rate_scale_data -- tx success history for one rate
 */
struct iwl_rate_scale_data {
	u64 data;		/* bitmap of successful frames */
	s32 success_counter;	/* number of frames successful */
	s32 success_ratio;	/* per-cent * 128  */
	s32 counter;		/* number of frames attempted */
	s32 average_tpt;	/* success ratio * expected throughput */
	unsigned long stamp;	/* jiffies-style timestamp of last update */
};
303
/**
 * struct iwl_scale_tbl_info -- tx params and success history for all rates
 *
 * There are two of these in struct iwl_lq_sta,
 * one for "active", and one for "search".
 */
struct iwl_scale_tbl_info {
	enum iwl_table_type lq_type;
	u8 ant_type;
	u8 is_SGI;	/* 1 = short guard interval */
	u8 is_ht40;	/* 1 = 40 MHz channel width */
	u8 is_dup;	/* 1 = duplicated data streams */
	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
	u8 max_search;	/* maximum number of tables we can search */
	const u16 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
	u32 current_rate;  /* rate_n_flags, uCode API format */
	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
322
/* Circular per-TID packet-count history used for A-MPDU load tracking. */
struct iwl_traffic_load {
	unsigned long time_stamp;	/* age of the oldest statistics */
	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
						 * slice */
	u32 total;			/* total num of packets during the
					 * last TID_MAX_TIME_DIFF */
	u8 queue_count;			/* number of queues that has
					 * been used since the last cleanup */
	u8 head;			/* start of the circular buffer */
};
333
/**
 * struct iwl_lq_sta -- driver's rate scaling private structure
 *
 * Pointer to this gets passed back and forth between driver and mac80211.
 */
struct iwl_lq_sta {
	u8 active_tbl;		/* index of active table, range 0-1 */
	u8 enable_counter;	/* indicates HT mode */
	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
	u8 search_better_tbl;	/* 1: currently trying alternate mode */
	s32 last_tpt;		/* throughput of the last table searched */

	/* The following determine when to search for a new mode */
	u32 table_count_limit;
	u32 max_failure_limit;	/* # failed frames before new search */
	u32 max_success_limit;	/* # successful frames before new search */
	u32 table_count;
	u32 total_failed;	/* total failed frames, any/all rates */
	u32 total_success;	/* total successful frames, any/all rates */
	u64 flush_timer;	/* time staying in mode before new search */

	u8 action_counter;	/* # mode-switch actions tried */
	u8 is_green;		/* greenfield (GF) preamble in use */
	u8 is_dup;
	enum ieee80211_band band;

	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
	u32 supp_rates;
	u16 active_legacy_rate;
	u16 active_siso_rate;
	u16 active_mimo2_rate;
	u16 active_mimo3_rate;
	s8 max_rate_idx;     /* Max rate set by user */
	u8 missed_rate_counter;

	struct iwl_link_quality_cmd lq;	/* uCode command built by rs_fill_link_cmd() */
	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
	struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
	u8 tx_agg_tid_en;	/* bitmap of TIDs allowed to aggregate */
#ifdef CONFIG_MAC80211_DEBUGFS
	struct dentry *rs_sta_dbgfs_scale_table_file;
	struct dentry *rs_sta_dbgfs_stats_table_file;
	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
	u32 dbg_fixed_rate;	/* debugfs-forced rate_n_flags, 0 = off */
#endif
	struct iwl_priv *drv;	/* back-pointer to the owning driver */

	/* used to be in sta_info */
	int last_txrate_idx;
	/* last tx rate_n_flags */
	u32 last_rate_n_flags;
	/* packets destined for this STA are aggregated */
	u8 is_agg;
	/* BT traffic this sta was last updated in */
	u8 last_bt_traffic;
};
391
392static inline u8 first_antenna(u8 mask)
393{
394 if (mask & ANT_A)
395 return ANT_A;
396 if (mask & ANT_B)
397 return ANT_B;
398 return ANT_C;
399}
400
401
402/* Initialize station's rate scaling information after adding station */
403void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
404 u8 sta_id);
405
406/**
 * iwlagn_rate_control_register - Register the rate control algorithm callbacks
408 *
409 * Since the rate control algorithm is hardware specific, there is no need
410 * or reason to place it as a stand alone module. The driver can call
411 * iwl_rate_control_register in order to register the rate control callbacks
412 * with the mac80211 subsystem. This should be performed prior to calling
413 * ieee80211_register_hw
414 *
415 */
416int iwlagn_rate_control_register(void);
417
418/**
 * iwlagn_rate_control_unregister - Unregister the rate control callbacks
420 *
421 * This should be called after calling ieee80211_unregister_hw, but before
422 * the driver is unloaded.
423 */
424void iwlagn_rate_control_unregister(void);
425
#endif /* __iwl_agn_rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
new file mode 100644
index 000000000000..4a45b0b594c7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -0,0 +1,1101 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <linux/sched.h>
33#include <net/mac80211.h>
34#include <asm/unaligned.h>
35#include "iwl-io.h"
36#include "dev.h"
37#include "calib.h"
38#include "agn.h"
39
#define IWL_CMD_ENTRY(x) [x] = #x

/*
 * Command/notification ID -> name lookup table, indexed by the command
 * byte.  Used for debug and trace output only; IDs without an entry
 * resolve to NULL.
 */
const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = {
	IWL_CMD_ENTRY(REPLY_ALIVE),
	IWL_CMD_ENTRY(REPLY_ERROR),
	IWL_CMD_ENTRY(REPLY_ECHO),
	IWL_CMD_ENTRY(REPLY_RXON),
	IWL_CMD_ENTRY(REPLY_RXON_ASSOC),
	IWL_CMD_ENTRY(REPLY_QOS_PARAM),
	IWL_CMD_ENTRY(REPLY_RXON_TIMING),
	IWL_CMD_ENTRY(REPLY_ADD_STA),
	IWL_CMD_ENTRY(REPLY_REMOVE_STA),
	IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA),
	IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH),
	IWL_CMD_ENTRY(REPLY_WEPKEY),
	IWL_CMD_ENTRY(REPLY_TX),
	IWL_CMD_ENTRY(REPLY_LEDS_CMD),
	IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD),
	IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD),
	IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION),
	IWL_CMD_ENTRY(COEX_EVENT_CMD),
	IWL_CMD_ENTRY(REPLY_QUIET_CMD),
	IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH),
	IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD),
	IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION),
	IWL_CMD_ENTRY(POWER_TABLE_CMD),
	IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION),
	IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC),
	IWL_CMD_ENTRY(REPLY_SCAN_CMD),
	IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD),
	IWL_CMD_ENTRY(SCAN_START_NOTIFICATION),
	IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION),
	IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION),
	IWL_CMD_ENTRY(BEACON_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_TX_BEACON),
	IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION),
	IWL_CMD_ENTRY(QUIET_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD),
	IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_BT_CONFIG),
	IWL_CMD_ENTRY(REPLY_STATISTICS_CMD),
	IWL_CMD_ENTRY(STATISTICS_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD),
	IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION),
	IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD),
	IWL_CMD_ENTRY(SENSITIVITY_CMD),
	IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD),
	IWL_CMD_ENTRY(REPLY_RX_PHY_CMD),
	IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD),
	IWL_CMD_ENTRY(REPLY_COMPRESSED_BA),
	IWL_CMD_ENTRY(CALIBRATION_CFG_CMD),
	IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION),
	IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD),
	IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION),
	IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD),
	IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF),
	IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE),
	IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV),
	IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS),
	IWL_CMD_ENTRY(REPLY_WIPAN_RXON),
	IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING),
	IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC),
	IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM),
	IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY),
	IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH),
	IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION),
	IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE),
	IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS),
	IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER),
	IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS),
	IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS),
	IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL),
	IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS),
	IWL_CMD_ENTRY(REPLY_D3_CONFIG),
};
#undef IWL_CMD_ENTRY
119
120/******************************************************************************
121 *
122 * Generic RX handler implementations
123 *
124 ******************************************************************************/
125
126static void iwlagn_rx_reply_error(struct iwl_priv *priv,
127 struct iwl_rx_cmd_buffer *rxb)
128{
129 struct iwl_rx_packet *pkt = rxb_addr(rxb);
130 struct iwl_error_resp *err_resp = (void *)pkt->data;
131
132 IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) "
133 "seq 0x%04X ser 0x%08X\n",
134 le32_to_cpu(err_resp->error_type),
135 err_resp->cmd_id,
136 le16_to_cpu(err_resp->bad_cmd_seq_num),
137 le32_to_cpu(err_resp->error_info));
138}
139
/*
 * CHANNEL_SWITCH_NOTIFICATION handler: the uCode reports the outcome of a
 * channel-switch announcement (CSA) the driver previously requested, and
 * the active/staging RXON channel is updated on success.
 */
static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = (void *)pkt->data;
	/*
	 * MULTI-FIXME
	 * See iwlagn_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	/* ignore stray notifications when no switch is pending */
	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		return;

	/* status == 0 means success; channel must match the one we asked for
	 * (both sides are __le16, so comparing raw values is fine) */
	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
		rxon->channel = csa->channel;
		ctx->staging.channel = csa->channel;
		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
		iwl_chswitch_done(priv, true);
	} else {
		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			le16_to_cpu(csa->channel));
		iwl_chswitch_done(priv, false);
	}
}
166
167
168static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
169 struct iwl_rx_cmd_buffer *rxb)
170{
171 struct iwl_rx_packet *pkt = rxb_addr(rxb);
172 struct iwl_spectrum_notification *report = (void *)pkt->data;
173
174 if (!report->state) {
175 IWL_DEBUG_11H(priv,
176 "Spectrum Measure Notification: Start\n");
177 return;
178 }
179
180 memcpy(&priv->measure_report, report, sizeof(*report));
181 priv->measurement_status |= MEASUREMENT_READY;
182}
183
/* PM_SLEEP_NOTIFICATION handler: debug-only trace of uCode sleep events. */
static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
				     struct iwl_rx_cmd_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = (void *)pkt->data;
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
194
/*
 * PM_DEBUG_STATISTIC_NOTIFIC handler: the payload is not interpreted,
 * only hex-dumped at the RADIO debug level.
 */
static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
						struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 __maybe_unused len = iwl_rx_packet_len(pkt);
	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
}
204
/*
 * BEACON_NOTIFICATION handler: status for a beacon the uCode transmitted.
 * Records whether we are currently acting as the IBSS manager.
 */
static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		     "tsf:0x%.8x%.8x rate:%d\n",
		     status & TX_STATUS_MSK,
		     beacon->beacon_notify_hdr.failure_frame,
		     le32_to_cpu(beacon->ibss_mgr_status),
		     le32_to_cpu(beacon->high_tsf),
		     le32_to_cpu(beacon->low_tsf), rate);
#endif

	/* remember IBSS-manager state for the beacon logic elsewhere */
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
225
/**
 * iwlagn_good_plcp_health - checks for plcp error.
 *
 * When the plcp error is exceeding the thresholds, reset the radio
 * to improve the throughput.
 *
 * Returns false when the combined OFDM + OFDM-HT PLCP error rate,
 * normalized to errors per 100 ms, exceeds priv->plcp_delta_threshold;
 * true otherwise (including when the check is disabled).
 */
static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
				    struct statistics_rx_phy *cur_ofdm,
				    struct statistics_rx_ht_phy *cur_ofdm_ht,
				    unsigned int msecs)
{
	int delta;
	int threshold = priv->plcp_delta_threshold;

	if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
		return true;
	}

	/* error growth since the previously cached statistics */
	delta = le32_to_cpu(cur_ofdm->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
		le32_to_cpu(cur_ofdm_ht->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);

	/* Can be negative if firmware reset statistics */
	if (delta <= 0)
		return true;

	if ((delta * 100 / msecs) > threshold) {
		IWL_DEBUG_RADIO(priv,
			"plcp health threshold %u delta %d msecs %u\n",
			threshold, delta, msecs);
		return false;
	}

	return true;
}
263
/*
 * iwl_force_rf_reset - force a radio reset via an internal short scan.
 *
 * @external: true when the request comes from outside the automatic
 *	recovery logic; bypasses the back-to-back reset throttle.
 *
 * Returns 0 on success, -EAGAIN when shutting down or throttled,
 * -ENOLINK when not associated.
 */
int iwl_force_rf_reset(struct iwl_priv *priv, bool external)
{
	struct iwl_rf_reset *rf_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EAGAIN;

	if (!iwl_is_any_associated(priv)) {
		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
		return -ENOLINK;
	}

	rf_reset = &priv->rf_reset;
	rf_reset->reset_request_count++;
	/* throttle internal requests: at most one reset per
	 * IWL_DELAY_NEXT_FORCE_RF_RESET */
	if (!external && rf_reset->last_reset_jiffies &&
	    time_after(rf_reset->last_reset_jiffies +
		       IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) {
		IWL_DEBUG_INFO(priv, "RF reset rejected\n");
		rf_reset->reset_reject_count++;
		return -EAGAIN;
	}
	rf_reset->reset_success_count++;
	rf_reset->last_reset_jiffies = jiffies;

	/*
	 * There is no easy and better way to force reset the radio,
	 * the only known method is switching channel which will force to
	 * reset and tune the radio.
	 * Use internal short scan (single channel) operation to should
	 * achieve this objective.
	 * Driver should reset the radio when number of consecutive missed
	 * beacon, or any other uCode error condition detected.
	 */
	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
	iwl_internal_short_hw_scan(priv);
	return 0;
}
301
302
/*
 * Run health checks on the freshly received statistics and trigger an RF
 * reset when the PLCP error rate is too high.  @stamp is the jiffies value
 * taken when the statistics notification arrived.
 */
static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
					   struct statistics_rx_phy *cur_ofdm,
					   struct statistics_rx_ht_phy *cur_ofdm_ht,
					   struct statistics_tx *tx,
					   unsigned long stamp)
{
	unsigned int msecs;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);

	/* nothing to check/recover while not associated; the caller still
	 * stores the statistics and updates the time stamp */
	if (!iwl_is_any_associated(priv))
		return;

	/* Do not check/recover when do not have enough statistics data */
	if (msecs < 99)
		return;

	if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
		iwl_force_rf_reset(priv, false);
}
327
/* Calculate noise level, based on measurements during network silence just
 * before arriving beacon. This measurement can be done only if we know
 * exactly when to expect beacons, therefore only when we're associated. */
static void iwlagn_rx_calc_noise(struct iwl_priv *priv)
{
	struct statistics_rx_non_phy *rx_info;
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
	int last_rx_noise;	/* NOTE(review): only feeds the debug log below */

	rx_info = &priv->statistics.rx_non_phy;

	/* mask off out-of-band bits of each per-chain silence RSSI */
	bcn_silence_a =
		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	bcn_silence_b =
		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	bcn_silence_c =
		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	/* a chain reporting zero silence is considered inactive */
	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas */
	if (num_active_rx)
		last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
			bcn_silence_a, bcn_silence_b, bcn_silence_c,
			last_rx_noise);
}
371
372#ifdef CONFIG_IWLWIFI_DEBUGFS
373/*
374 * based on the assumption of all statistics counter are in DWORD
375 * FIXME: This function is for debugging, do not deal with
376 * the case of counters roll-over.
377 */
378static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
379 __le32 *max_delta, __le32 *accum, int size)
380{
381 int i;
382
383 for (i = 0;
384 i < size / sizeof(__le32);
385 i++, prev++, cur++, delta++, max_delta++, accum++) {
386 if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
387 *delta = cpu_to_le32(
388 le32_to_cpu(*cur) - le32_to_cpu(*prev));
389 le32_add_cpu(accum, le32_to_cpu(*delta));
390 if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
391 *max_delta = *delta;
392 }
393 }
394}
395
/*
 * Fold the freshly received statistics into the debugfs delta/max/accum
 * counters.  The ACCUM() macro diffs each member block against the copy
 * cached in priv->statistics via accum_stats().
 */
static void
iwlagn_accumulative_statistics(struct iwl_priv *priv,
			       struct statistics_general_common *common,
			       struct statistics_rx_non_phy *rx_non_phy,
			       struct statistics_rx_phy *rx_ofdm,
			       struct statistics_rx_ht_phy *rx_ofdm_ht,
			       struct statistics_rx_phy *rx_cck,
			       struct statistics_tx *tx,
			       struct statistics_bt_activity *bt_activity)
{
#define ACCUM(_name)	\
	accum_stats((__le32 *)&priv->statistics._name,		\
		    (__le32 *)_name,				\
		    (__le32 *)&priv->delta_stats._name,		\
		    (__le32 *)&priv->max_delta_stats._name,	\
		    (__le32 *)&priv->accum_stats._name,		\
		    sizeof(*_name));

	ACCUM(common);
	ACCUM(rx_non_phy);
	ACCUM(rx_ofdm);
	ACCUM(rx_ofdm_ht);
	ACCUM(rx_cck);
	ACCUM(tx);
	if (bt_activity)
		ACCUM(bt_activity);
#undef ACCUM
}
424#else
/* No-op stub: the accumulators only exist under CONFIG_IWLWIFI_DEBUGFS. */
static inline void
iwlagn_accumulative_statistics(struct iwl_priv *priv,
			       struct statistics_general_common *common,
			       struct statistics_rx_non_phy *rx_non_phy,
			       struct statistics_rx_phy *rx_ofdm,
			       struct statistics_rx_ht_phy *rx_ofdm_ht,
			       struct statistics_rx_phy *rx_cck,
			       struct statistics_tx *tx,
			       struct statistics_bt_activity *bt_activity)
{
}
436#endif
437
/*
 * Statistics notification/response handler.  The payload length selects
 * between the BT-coex and the classic statistics layout; the counters are
 * accumulated for debugfs, health-checked, and then cached in
 * priv->statistics, all under priv->statistics.lock.
 */
static void iwlagn_rx_statistics(struct iwl_priv *priv,
				 struct iwl_rx_cmd_buffer *rxb)
{
	unsigned long stamp = jiffies;
	const int reg_recalib_period = 60;	/* seconds */
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = iwl_rx_packet_payload_len(pkt);
	__le32 *flag;
	struct statistics_general_common *common;
	struct statistics_rx_non_phy *rx_non_phy;
	struct statistics_rx_phy *rx_ofdm;
	struct statistics_rx_ht_phy *rx_ofdm_ht;
	struct statistics_rx_phy *rx_cck;
	struct statistics_tx *tx;
	struct statistics_bt_activity *bt_activity;

	IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
		     len);

	spin_lock(&priv->statistics.lock);

	if (len == sizeof(struct iwl_bt_notif_statistics)) {
		/* BT-coex capable firmware: extended layout */
		struct iwl_bt_notif_statistics *stats;
		stats = (void *)&pkt->data;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general.common;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = &stats->general.activity;

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* handle this exception directly */
		priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
		le32_add_cpu(&priv->statistics.accum_num_bt_kills,
			     le32_to_cpu(stats->rx.general.num_bt_kills));
#endif
	} else if (len == sizeof(struct iwl_notif_statistics)) {
		/* non-BT firmware: classic layout, no BT activity block */
		struct iwl_notif_statistics *stats;
		stats = (void *)&pkt->data;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = NULL;
	} else {
		WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
			  len, sizeof(struct iwl_bt_notif_statistics),
			  sizeof(struct iwl_notif_statistics));
		spin_unlock(&priv->statistics.lock);
		return;
	}

	/* a temperature or HT40-mode change triggers recalibration below */
	change = common->temperature != priv->statistics.common.temperature ||
		 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);

	iwlagn_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
				       rx_ofdm_ht, rx_cck, tx, bt_activity);

	iwlagn_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);

	/* cache the new counters; they become "prev" for the next delta */
	priv->statistics.flag = *flag;
	memcpy(&priv->statistics.common, common, sizeof(*common));
	memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
	memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
	memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
	memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
	memcpy(&priv->statistics.tx, tx, sizeof(*tx));
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (bt_activity)
		memcpy(&priv->statistics.bt_activity, bt_activity,
		       sizeof(*bt_activity));
#endif

	priv->rx_statistics_jiffies = stamp;

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwlagn_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->lib->temperature && change)
		priv->lib->temperature(priv);

	spin_unlock(&priv->statistics.lock);
}
540
/*
 * REPLY_STATISTICS_CMD response: identical to a statistics notification,
 * except the uCode may additionally signal that its counters were cleared.
 */
static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
				       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_notif_statistics *stats = (void *)pkt->data;

	if (le32_to_cpu(stats->flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* mirror the uCode clear in our debugfs accumulators */
		memset(&priv->accum_stats, 0,
		       sizeof(priv->accum_stats));
		memset(&priv->delta_stats, 0,
		       sizeof(priv->delta_stats));
		memset(&priv->max_delta_stats, 0,
		       sizeof(priv->max_delta_stats));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}

	iwlagn_rx_statistics(priv, rxb);
}
561
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
				       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);
	/* snapshot of the old status bits, to detect an rfkill change */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* block host commands while the card is disabled */
		iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
				   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
					   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* only notify mac80211 when the HW rfkill state actually changed */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
}
613
/*
 * MISSED_BEACONS_NOTIFICATION handler: when the consecutive missed-beacon
 * count exceeds the configured threshold, re-run the sensitivity
 * calibration (unless a scan is in progress).
 */
static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
					  struct iwl_rx_cmd_buffer *rxb)

{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data;

	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    priv->missed_beacon_threshold) {
		IWL_DEBUG_CALIB(priv,
		    "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
		    le32_to_cpu(missed_beacon->total_missed_becons),
		    le32_to_cpu(missed_beacon->num_recvd_beacons),
		    le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
}
633
634/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
635 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
636static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
637 struct iwl_rx_cmd_buffer *rxb)
638{
639 struct iwl_rx_packet *pkt = rxb_addr(rxb);
640
641 priv->last_phy_res_valid = true;
642 priv->ampdu_ref++;
643 memcpy(&priv->last_phy_res, pkt->data,
644 sizeof(struct iwl_rx_phy_res));
645}
646
/*
 * Translate the HW decrypt status into mac80211 RX flags.
 * returns non-zero if packet should be dropped
 */
static int iwlagn_set_decrypted_flag(struct iwl_priv *priv,
				     struct ieee80211_hdr *hdr,
				     u32 decrypt_res,
				     struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
	    RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames carry no decrypt status */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
698
/*
 * Build an skb around a received frame and deliver it to mac80211.
 *
 * Only the 802.11 header (or the whole frame, when it fits in the 128-byte
 * linear head) is copied; the remainder of the RX page is attached as a
 * paged fragment to avoid copying the payload.  The frame is dropped when
 * the interface is closed or when HW decryption failed.
 */
static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
					   struct ieee80211_hdr *hdr,
					   u16 len,
					   u32 ampdu_status,
					   struct iwl_rx_cmd_buffer *rxb,
					   struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	struct iwl_rxon_context *ctx;
	unsigned int hdrlen, fraglen;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!iwlwifi_mod_params.sw_crypto &&
	    iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* Dont use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}
	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
	 * are more efficient.
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);

	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		/* offset of the remaining payload within the stolen RX page */
		int offset = (void *)hdr + hdrlen -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	/*
	 * Wake any queues that were stopped due to a passive channel tx
	 * failure. This can happen because the regulatory enforcement in
	 * the device waits for a beacon before allowing transmission,
	 * sometimes even after already having transmitted frames for the
	 * association because the new RXON may reset the information.
	 */
	if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) {
		for_each_context(priv, ctx) {
			if (!ether_addr_equal(hdr->addr3,
					      ctx->active.bssid_addr))
				continue;
			iwlagn_lift_passive_no_rx(priv);
		}
	}

	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx_napi(priv->hw, skb, priv->napi);
}
768
/*
 * Map the MPDU decrypt status bits reported by the uCode into the legacy
 * RX_RES_STATUS_* layout that iwlagn_set_decrypted_flag() understands.
 */
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	/* carry the security type through unchanged */
	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		/* WEP, or TKIP with a good TTAK: check the ICV */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
		     decrypt_in, decrypt_out);

	return decrypt_out;
}
827
/* Calc max signal level (dBm) among 3 possible receivers */
static int iwlagn_calc_rssi(struct iwl_priv *priv,
			    struct iwl_rx_phy_res *rx_resp)
{
	/* data from PHY/DSP regarding signal strength, etc.,
	 * contents are always there, not configurable by host
	 */
	struct iwlagn_non_cfg_phy *ncphy =
		(struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
	u8 agc;

	/* extract the AGC word first */
	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
	agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;

	/* Find max rssi among 3 possible receivers.
	 * These values are measured by the digital signal processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 * if the radio's automatic gain control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info.
	 */
	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
	rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
		 IWLAGN_OFDM_RSSI_A_BIT_POS;
	rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
		 IWLAGN_OFDM_RSSI_B_BIT_POS;
	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
	rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
		 IWLAGN_OFDM_RSSI_C_BIT_POS;

	max_rssi = max_t(u32, rssi_a, rssi_b);
	max_rssi = max_t(u32, max_rssi, rssi_c);

	IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
			rssi_a, rssi_b, rssi_c, max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
868
/* Called for REPLY_RX_MPDU_CMD */
static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
			       struct iwl_rx_cmd_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/* PHY data must have been cached by a prior REPLY_RX_PHY_CMD */
	if (!priv->last_phy_res_valid) {
		IWL_ERR(priv, "MPDU frame without cached PHY data\n");
		return;
	}
	phy_res = &priv->last_phy_res;
	amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
	header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
	len = le16_to_cpu(amsdu->byte_count);
	/* the 32-bit RX status word follows the frame payload */
	rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
	ampdu_status = iwlagn_translate_rx_status(priv,
						  le32_to_cpu(rx_pkt_status));

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
			       phy_res->cfg_phy_cnt);
		return;
	}

	/* drop frames with CRC errors or RX FIFO overflow */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
			     le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
			 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					       rx_status.band);
	rx_status.rate_idx =
		iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_START;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = iwlagn_calc_rssi(priv, phy_res);

	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
		rx_status.signal, (unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
		/*
		 * We know which subframes of an A-MPDU belong
		 * together since we get a single PHY response
		 * from the firmware for all of them
		 */
		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status.ampdu_reference = priv->ampdu_ref;
	}

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		rx_status.flag |= RX_FLAG_HT_GF;

	iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
				       rxb, &rx_status);
}
978
/*
 * Handle a WiPAN Notice-of-Absence notification from the uCode.
 *
 * Builds a vendor-specific P2P NoA element (EID, length, WFA OUI,
 * P2P subtype, then the NoA attribute as received) and publishes it
 * via RCU in priv->noa_data for later insertion into beacons/probe
 * responses.  When NoA is not active the pointer is cleared.  The
 * previously published buffer, if any, is freed after an RCU grace
 * period.
 */
static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
				       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_wipan_noa_data *new_data, *old_data;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data;

	/* no condition -- we're in softirq */
	old_data = rcu_dereference_protected(priv->noa_data, true);

	if (noa_notif->noa_active) {
		u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
		u32 copylen = len;

		/* EID, len, OUI, subtype */
		len += 1 + 1 + 3 + 1;
		/* P2P id, P2P length */
		len += 1 + 2;
		copylen += 1 + 2;

		/* softirq context: must not sleep, hence GFP_ATOMIC */
		new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
		if (new_data) {
			new_data->length = len;
			new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
			new_data->data[1] = len - 2; /* not counting EID, len */
			new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
			new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
			new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
			new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
			/* copylen covers the attribute plus the 3 extra
			 * header bytes accounted for above */
			memcpy(&new_data->data[6], &noa_notif->noa_attribute,
			       copylen);
		}
	} else
		new_data = NULL;

	/* publish only after the buffer is fully initialized */
	rcu_assign_pointer(priv->noa_data, new_data);

	if (old_data)
		kfree_rcu(old_data, rcu_head);
}
1019
1020/**
1021 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
1022 *
1023 * Setup the RX handlers for each of the reply types sent from the uCode
1024 * to the host.
1025 */
1026void iwl_setup_rx_handlers(struct iwl_priv *priv)
1027{
1028 void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
1029
1030 handlers = priv->rx_handlers;
1031
1032 handlers[REPLY_ERROR] = iwlagn_rx_reply_error;
1033 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwlagn_rx_csa;
1034 handlers[SPECTRUM_MEASURE_NOTIFICATION] =
1035 iwlagn_rx_spectrum_measure_notif;
1036 handlers[PM_SLEEP_NOTIFICATION] = iwlagn_rx_pm_sleep_notif;
1037 handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
1038 iwlagn_rx_pm_debug_statistics_notif;
1039 handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
1040 handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
1041
1042 handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification;
1043
1044 /*
1045 * The same handler is used for both the REPLY to a discrete
1046 * statistics request from the host as well as for the periodic
1047 * statistics notifications (after received beacons) from the uCode.
1048 */
1049 handlers[REPLY_STATISTICS_CMD] = iwlagn_rx_reply_statistics;
1050 handlers[STATISTICS_NOTIFICATION] = iwlagn_rx_statistics;
1051
1052 iwl_setup_rx_scan_handlers(priv);
1053
1054 handlers[CARD_STATE_NOTIFICATION] = iwlagn_rx_card_state_notif;
1055 handlers[MISSED_BEACONS_NOTIFICATION] =
1056 iwlagn_rx_missed_beacon_notif;
1057
1058 /* Rx handlers */
1059 handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
1060 handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
1061
1062 /* block ack */
1063 handlers[REPLY_COMPRESSED_BA] =
1064 iwlagn_rx_reply_compressed_ba;
1065
1066 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
1067
1068 /* set up notification wait support */
1069 iwl_notification_wait_init(&priv->notif_wait);
1070
1071 /* Set up BT Rx handlers */
1072 if (priv->lib->bt_params)
1073 iwlagn_bt_rx_handler_setup(priv);
1074}
1075
1076void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
1077 struct iwl_rx_cmd_buffer *rxb)
1078{
1079 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1080 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1081
1082 /*
1083 * Do the notification wait before RX handlers so
1084 * even if the RX handler consumes the RXB we have
1085 * access to it in the notification wait entry.
1086 */
1087 iwl_notification_wait_notify(&priv->notif_wait, pkt);
1088
1089 /* Based on type of command response or notification,
1090 * handle those that need handling via function in
1091 * rx_handlers table. See iwl_setup_rx_handlers() */
1092 if (priv->rx_handlers[pkt->hdr.cmd]) {
1093 priv->rx_handlers_stats[pkt->hdr.cmd]++;
1094 priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
1095 } else {
1096 /* No handling needed */
1097 IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
1098 iwl_dvm_get_cmd_string(pkt->hdr.cmd),
1099 pkt->hdr.cmd);
1100 }
1101}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
new file mode 100644
index 000000000000..85ceceb34fcc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -0,0 +1,1572 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2015 Intel Deutschland GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 * Contact Information:
23 * Intel Linux Wireless <ilw@linux.intel.com>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 *****************************************************************************/
27
28#include <linux/etherdevice.h>
29#include "iwl-trans.h"
30#include "iwl-modparams.h"
31#include "dev.h"
32#include "agn.h"
33#include "calib.h"
34
/*
 * iwl_connection_init_rx_config - initialize staging RXON with defaults
 *
 * Resets ctx->staging, fills in the device type and filter/preamble
 * flags appropriate for the attached interface type (or the "unused"
 * device type when no vif is attached), then takes the current channel
 * and band from mac80211's channel definition.
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	/* start from a clean slate; fields not set below stay zero */
	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_MONITOR:
		ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* take the current channel/band from mac80211's configuration */
	ctx->staging.channel =
		cpu_to_le16(priv->hw->conf.chandef.chan->hw_value);
	priv->band = priv->hw->conf.chandef.chan->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
				RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	/* default HT basic-rate bitmaps */
	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
98
99static int iwlagn_disable_bss(struct iwl_priv *priv,
100 struct iwl_rxon_context *ctx,
101 struct iwl_rxon_cmd *send)
102{
103 __le32 old_filter = send->filter_flags;
104 int ret;
105
106 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
107 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
108 0, sizeof(*send), send);
109
110 send->filter_flags = old_filter;
111
112 if (ret)
113 IWL_DEBUG_QUIET_RFKILL(priv,
114 "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
115
116 return ret;
117}
118
/*
 * Deactivate the PAN context in the uCode.
 *
 * Sends the PAN RXON with ASSOC cleared and the device type forced to
 * P2P, then waits up to one second for the uCode's
 * REPLY_WIPAN_DEACTIVATION_COMPLETE notification.  The caller's command
 * buffer is restored to its original filter flags and device type
 * before returning.
 */
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;
	static const u16 deactivate_cmd[] = {
		REPLY_WIPAN_DEACTIVATION_COMPLETE
	};

	/*
	 * Register the wait entry before sending the command so the
	 * completion notification cannot be missed.
	 */
	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
				   deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
				   NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				   0, sizeof(*send), send);

	/* restore the caller's command buffer */
	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		/* command failed: remove the wait entry, nothing will fire */
		iwl_remove_notification(&priv->notif_wait, &disable_wait);
	} else {
		ret = iwl_wait_notification(&priv->notif_wait,
					    &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}
155
156static int iwlagn_disconn_pan(struct iwl_priv *priv,
157 struct iwl_rxon_context *ctx,
158 struct iwl_rxon_cmd *send)
159{
160 __le32 old_filter = send->filter_flags;
161 int ret;
162
163 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
164 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
165 sizeof(*send), send);
166
167 send->filter_flags = old_filter;
168
169 return ret;
170}
171
172static void iwlagn_update_qos(struct iwl_priv *priv,
173 struct iwl_rxon_context *ctx)
174{
175 int ret;
176
177 if (!ctx->is_active)
178 return;
179
180 ctx->qos_data.def_qos_parm.qos_flags = 0;
181
182 if (ctx->qos_data.qos_active)
183 ctx->qos_data.def_qos_parm.qos_flags |=
184 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
185
186 if (ctx->ht.enabled)
187 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
188
189 IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
190 ctx->qos_data.qos_active,
191 ctx->qos_data.def_qos_parm.qos_flags);
192
193 ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
194 sizeof(struct iwl_qosparam_cmd),
195 &ctx->qos_data.def_qos_parm);
196 if (ret)
197 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
198}
199
200static int iwlagn_update_beacon(struct iwl_priv *priv,
201 struct ieee80211_vif *vif)
202{
203 lockdep_assert_held(&priv->mutex);
204
205 dev_kfree_skb(priv->beacon_skb);
206 priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
207 if (!priv->beacon_skb)
208 return -ENOMEM;
209 return iwlagn_send_beacon_cmd(priv);
210}
211
212static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
213 struct iwl_rxon_context *ctx)
214{
215 int ret = 0;
216 struct iwl_rxon_assoc_cmd rxon_assoc;
217 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
218 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
219
220 if ((rxon1->flags == rxon2->flags) &&
221 (rxon1->filter_flags == rxon2->filter_flags) &&
222 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
223 (rxon1->ofdm_ht_single_stream_basic_rates ==
224 rxon2->ofdm_ht_single_stream_basic_rates) &&
225 (rxon1->ofdm_ht_dual_stream_basic_rates ==
226 rxon2->ofdm_ht_dual_stream_basic_rates) &&
227 (rxon1->ofdm_ht_triple_stream_basic_rates ==
228 rxon2->ofdm_ht_triple_stream_basic_rates) &&
229 (rxon1->acquisition_data == rxon2->acquisition_data) &&
230 (rxon1->rx_chain == rxon2->rx_chain) &&
231 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
232 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
233 return 0;
234 }
235
236 rxon_assoc.flags = ctx->staging.flags;
237 rxon_assoc.filter_flags = ctx->staging.filter_flags;
238 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
239 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
240 rxon_assoc.reserved1 = 0;
241 rxon_assoc.reserved2 = 0;
242 rxon_assoc.reserved3 = 0;
243 rxon_assoc.ofdm_ht_single_stream_basic_rates =
244 ctx->staging.ofdm_ht_single_stream_basic_rates;
245 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
246 ctx->staging.ofdm_ht_dual_stream_basic_rates;
247 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
248 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
249 ctx->staging.ofdm_ht_triple_stream_basic_rates;
250 rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
251
252 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
253 CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
254 return ret;
255}
256
257static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
258{
259 u16 new_val;
260 u16 beacon_factor;
261
262 /*
263 * If mac80211 hasn't given us a beacon interval, program
264 * the default into the device (not checking this here
265 * would cause the adjustment below to return the maximum
266 * value, which may break PAN.)
267 */
268 if (!beacon_val)
269 return DEFAULT_BEACON_INTERVAL;
270
271 /*
272 * If the beacon interval we obtained from the peer
273 * is too large, we'll have to wake up more often
274 * (and in IBSS case, we'll beacon too much)
275 *
276 * For example, if max_beacon_val is 4096, and the
277 * requested beacon interval is 7000, we'll have to
278 * use 3500 to be able to wake up on the beacons.
279 *
280 * This could badly influence beacon detection stats.
281 */
282
283 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
284 new_val = beacon_val / beacon_factor;
285
286 if (!new_val)
287 new_val = max_beacon_val;
288
289 return new_val;
290}
291
/*
 * iwl_send_rxon_timing - program beacon timing for a context
 *
 * Builds and sends the RXON timing command: TSF timestamp, listen
 * interval, beacon interval and DTIM period.  When both the BSS and
 * PAN contexts are in use, one side may inherit the other's beacon
 * interval so the two stay in sync; otherwise the interval requested
 * by mac80211 is clamped via iwl_adjust_beacon_interval().
 */
static int iwl_send_rxon_timing(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		/* non-station PAN follows an associated BSS's interval */
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		/* BSS without its own interval follows an associated PAN */
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		/* clamp the requested interval to what the uCode allows */
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* remainder to the next beacon-interval boundary */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				    0, sizeof(ctx->timing), &ctx->timing);
}
359
/*
 * iwlagn_rxon_disconn - apply a staging RXON that drops association
 *
 * Sends the un-assoc RXON (for PAN, preceded by the deactivation
 * handshake and the required timing command), then rebuilds the uCode
 * station table and WEP keys which the un-assoc RXON clears, and
 * finally commits staging to active.
 */
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
	} else {
		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
		if (ret)
			return ret;
		if (ctx->vif) {
			/* timing is sent before the disconnect RXON */
			ret = iwl_send_rxon_timing(priv, ctx);
			if (ret) {
				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
				return ret;
			}
			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
		}
	}
	if (ret)
		return ret;

	/*
	 * Un-assoc RXON clears the station table and WEP
	 * keys, so we have to restore those afterwards.
	 */
	iwl_clear_ucode_stations(priv, ctx);
	/* update -- might need P2P now */
	iwl_update_bcast_station(priv, ctx);
	iwl_restore_stations(priv, ctx);
	ret = iwl_restore_default_wep_keys(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
		return ret;
	}

	/* the un-assoc RXON is now what the device is running */
	memcpy(active, &ctx->staging, sizeof(*active));
	return 0;
}
401
/*
 * iwl_set_tx_power - set the user TX power limit
 * @tx_power: requested limit in dBm
 * @force: send even if the value is unchanged or a deferral would apply
 *
 * Validates the request against the firmware minimum and the NVM
 * maximum, records it in tx_power_next for later commit, and defers the
 * actual command while scanning or while an RXON change is pending
 * (staging != active) unless @force is set.  On send failure the
 * previous limit is restored.
 */
static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
		return 0;

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	/* NVM stores the limit in half-dBm units */
	if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = iwlagn_send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
459
/*
 * iwlagn_rxon_connect - apply a staging RXON that (re)establishes a tune
 *
 * Enforces the required command ordering: RXON timing before the
 * associated RXON (for BSS), QoS parameters (possibly cleared by a
 * previous un-assoc RXON), the AP beacon before the RXON, the RXON
 * itself, then sensitivity init and a fresh TXPOWER command (required
 * after any tune).
 */
static int iwlagn_rxon_connect(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	/* RXON timing must be before associated RXON */
	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwl_send_rxon_timing(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
			return ret;
		}
	}
	/* QoS info may be cleared by previous un-assoc RXON */
	iwlagn_update_qos(priv, ctx);

	/*
	 * We'll run into this code path when beaconing is
	 * enabled, but then we also need to send the beacon
	 * to the device.
	 */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
		ret = iwlagn_update_beacon(priv, ctx->vif);
		if (ret) {
			IWL_ERR(priv,
				"Error sending required beacon (%d)!\n",
				ret);
			return ret;
		}
	}

	priv->start_calib = 0;
	/*
	 * Apply the new configuration.
	 *
	 * Associated RXON doesn't clear the station table in uCode,
	 * so we don't need to restore stations etc. after this.
	 */
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
		      sizeof(struct iwl_rxon_cmd), &ctx->staging);
	if (ret) {
		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
		return ret;
	}
	memcpy(active, &ctx->staging, sizeof(*active));

	/* IBSS beacon needs to be sent after setting assoc */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
		if (iwlagn_update_beacon(priv, ctx->vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	iwl_init_sensitivity(priv);

	/*
	 * If we issue a new RXON command which required a tune then
	 * we must send a new TXPOWER command or we won't be able to
	 * Tx any frames.
	 *
	 * It's expected we set power here if channel is changing.
	 */
	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	/* honor a configured SMPS mode for station interfaces */
	if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
	    priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
		ieee80211_request_smps(ctx->vif,
				       priv->cfg->ht_params->smps_mode);

	return 0;
}
533
/*
 * iwlagn_set_pan_params - program time-slicing between BSS and PAN
 *
 * Splits the beacon interval into two time slots (slot 0 = BSS,
 * slot 1 = PAN) and sends REPLY_WIPAN_PARAMS.  The split is biased
 * heavily toward whichever side is scanning or still associating.
 * No-op on single-context devices or while the PAN context is
 * inactive.
 */
int iwlagn_set_pan_params(struct iwl_priv *priv)
{
	struct iwl_wipan_params_cmd cmd;
	struct iwl_rxon_context *ctx_bss, *ctx_pan;
	int slot0 = 300, slot1 = 0;
	int ret;

	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
		return 0;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	lockdep_assert_held(&priv->mutex);

	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];

	/*
	 * If the PAN context is inactive, then we don't need
	 * to update the PAN parameters, the last thing we'll
	 * have done before it goes inactive is making the PAN
	 * parameters be WLAN-only.
	 */
	if (!ctx_pan->is_active)
		return 0;

	memset(&cmd, 0, sizeof(cmd));

	/* only 2 slots are currently allowed */
	cmd.num_slots = 2;

	cmd.slots[0].type = 0; /* BSS */
	cmd.slots[1].type = 1; /* PAN */

	if (ctx_bss->vif && ctx_pan->vif) {
		int bcnint = ctx_pan->beacon_int;
		int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;

		/* should be set, but seems unused?? */
		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);

		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
		    bcnint &&
		    bcnint != ctx_bss->beacon_int) {
			IWL_ERR(priv,
				"beacon intervals don't match (%d, %d)\n",
				ctx_bss->beacon_int, ctx_pan->beacon_int);
		} else
			bcnint = max_t(int, bcnint,
				       ctx_bss->beacon_int);
		if (!bcnint)
			bcnint = DEFAULT_BEACON_INTERVAL;
		/* default: split the beacon interval evenly */
		slot0 = bcnint / 2;
		slot1 = bcnint - slot0;

		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
		    (!ctx_bss->vif->bss_conf.idle &&
		     !ctx_bss->vif->bss_conf.assoc)) {
			/* BSS busy: give it nearly three DTIM intervals */
			slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		} else if (!ctx_pan->vif->bss_conf.idle &&
			   !ctx_pan->vif->bss_conf.assoc) {
			/* PAN busy: mirror of the case above */
			slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot0 = IWL_MIN_SLOT_TIME;
		}
	} else if (ctx_pan->vif) {
		/* PAN only: give (almost) all time to the PAN slot */
		slot0 = 0;
		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
					ctx_pan->beacon_int;
		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);

		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
			slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		}
	}

	cmd.slots[0].width = cpu_to_le16(slot0);
	cmd.slots[1].width = cpu_to_le16(slot1);

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
				   sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);

	return ret;
}
621
/*
 * _iwl_set_rxon_ht - translate HT configuration into staging RXON flags
 *
 * Clears all HT-related RXON flags when HT is disabled; otherwise sets
 * the protection operating mode and the 20/40 MHz channel mode.  Note
 * that the control-channel-location bit is the opposite of the
 * extension channel offset.
 */
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht.protection changed, the "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
				   RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection ==
		    IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/*
			 * Note: control channel is opposite of extension
			 * channel
			 */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/*
			 * Note: control channel is opposite of extension
			 * channel
			 */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/*
				 * channel location only valid if in Mixed
				 * mode
				 */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	iwlagn_set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
703
704void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
705{
706 struct iwl_rxon_context *ctx;
707
708 for_each_context(priv, ctx)
709 _iwl_set_rxon_ht(priv, ht_conf, ctx);
710}
711
/**
 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			  struct iwl_rxon_context *ctx)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	/* nothing to do if we are already on this channel and band */
	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);

}
740
741void iwl_set_flags_for_band(struct iwl_priv *priv,
742 struct iwl_rxon_context *ctx,
743 enum ieee80211_band band,
744 struct ieee80211_vif *vif)
745{
746 if (band == IEEE80211_BAND_5GHZ) {
747 ctx->staging.flags &=
748 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
749 | RXON_FLG_CCK_MSK);
750 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
751 } else {
752 /* Copied from iwl_post_associate() */
753 if (vif && vif->bss_conf.use_short_slot)
754 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
755 else
756 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
757
758 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
759 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
760 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
761 }
762}
763
764static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
765 struct iwl_rxon_context *ctx, int hw_decrypt)
766{
767 struct iwl_rxon_cmd *rxon = &ctx->staging;
768
769 if (hw_decrypt)
770 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
771 else
772 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
773
774}
775
/*
 * iwl_check_rxon_cmd - sanity-check the staging RXON before sending
 *
 * Each violated constraint sets one bit in the error mask and logs a
 * warning; a WARN summarizes the result.  Returns 0 when valid,
 * -EINVAL otherwise.
 */
static int iwl_check_rxon_cmd(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;
	u32 errors = 0;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			errors |= BIT(0);
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			errors |= BIT(1);
		}
	} else {
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			errors |= BIT(2);
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			errors |= BIT(3);
		}
	}
	/* a set low bit in the first address octet means multicast */
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		errors |= BIT(4);
	}

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		errors |= BIT(5);
	}

	/* 2007 is the largest association ID 802.11 allows */
	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		errors |= BIT(6);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		errors |= BIT(7);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect\n");
		errors |= BIT(8);
	}

	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		errors |= BIT(9);
	}

	if (rxon->channel == 0) {
		IWL_WARN(priv, "zero channel is invalid\n");
		errors |= BIT(10);
	}

	WARN(errors, "Invalid RXON (%#x), channel %d",
	     errors, le16_to_cpu(rxon->channel));

	return errors ? -EINVAL : 0;
}
848
/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
static int iwl_full_rxon_required(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

/* Both helpers return 1 (logging why) as soon as a condition holds */
#define CHK(cond) \
	if ((cond)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
		return 1; \
	}

#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " \
			       #c1 " != " #c2 " - %d != %d\n", \
			       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_is_associated_ctx(ctx));
	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
			      active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
911
#ifdef CONFIG_IWLWIFI_DEBUG
/* Dump the staging RXON command of the given context to the debug log. */
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     enum iwl_rxon_context_id ctxid)
{
	struct iwl_rxon_cmd *cmd = &priv->contexts[ctxid].staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) cmd, sizeof(*cmd));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
			le16_to_cpu(cmd->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
			le32_to_cpu(cmd->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(cmd->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", cmd->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			cmd->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
			cmd->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", cmd->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", cmd->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
			le16_to_cpu(cmd->assoc_id));
}
#endif
938
939static void iwl_calc_basic_rates(struct iwl_priv *priv,
940 struct iwl_rxon_context *ctx)
941{
942 int lowest_present_ofdm = 100;
943 int lowest_present_cck = 100;
944 u8 cck = 0;
945 u8 ofdm = 0;
946
947 if (ctx->vif) {
948 struct ieee80211_supported_band *sband;
949 unsigned long basic = ctx->vif->bss_conf.basic_rates;
950 int i;
951
952 sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
953
954 for_each_set_bit(i, &basic, BITS_PER_LONG) {
955 int hw = sband->bitrates[i].hw_value;
956 if (hw >= IWL_FIRST_OFDM_RATE) {
957 ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
958 if (lowest_present_ofdm > hw)
959 lowest_present_ofdm = hw;
960 } else {
961 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
962
963 cck |= BIT(hw);
964 if (lowest_present_cck > hw)
965 lowest_present_cck = hw;
966 }
967 }
968 }
969
970 /*
971 * Now we've got the basic rates as bitmaps in the ofdm and cck
972 * variables. This isn't sufficient though, as there might not
973 * be all the right rates in the bitmap. E.g. if the only basic
974 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
975 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
976 *
977 * [...] a STA responding to a received frame shall transmit
978 * its Control Response frame [...] at the highest rate in the
979 * BSSBasicRateSet parameter that is less than or equal to the
980 * rate of the immediately previous frame in the frame exchange
981 * sequence ([...]) and that is of the same modulation class
982 * ([...]) as the received frame. If no rate contained in the
983 * BSSBasicRateSet parameter meets these conditions, then the
984 * control frame sent in response to a received frame shall be
985 * transmitted at the highest mandatory rate of the PHY that is
986 * less than or equal to the rate of the received frame, and
987 * that is of the same modulation class as the received frame.
988 *
989 * As a consequence, we need to add all mandatory rates that are
990 * lower than all of the basic rates to these bitmaps.
991 */
992
993 if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
994 ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
995 if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
996 ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
997 /* 6M already there or needed so always add */
998 ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
999
1000 /*
1001 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
1002 * Note, however:
1003 * - if no CCK rates are basic, it must be ERP since there must
1004 * be some basic rates at all, so they're OFDM => ERP PHY
1005 * (or we're in 5 GHz, and the cck bitmap will never be used)
1006 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
1007 * - if 5.5M is basic, 1M and 2M are mandatory
1008 * - if 2M is basic, 1M is mandatory
1009 * - if 1M is basic, that's the only valid ACK rate.
1010 * As a consequence, it's not as complicated as it sounds, just add
1011 * any lower rates to the ACK rate bitmap.
1012 */
1013 if (IWL_RATE_11M_INDEX < lowest_present_cck)
1014 cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
1015 if (IWL_RATE_5M_INDEX < lowest_present_cck)
1016 cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
1017 if (IWL_RATE_2M_INDEX < lowest_present_cck)
1018 cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
1019 /* 1M already there or needed so always add */
1020 cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
1021
1022 IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
1023 cck, ofdm);
1024
1025 /* "basic_rates" is a misnomer here -- should be called ACK rates */
1026 ctx->staging.cck_basic_rates = cck;
1027 ctx->staging.ofdm_basic_rates = ofdm;
1028}
1029
1030/**
1031 * iwlagn_commit_rxon - commit staging_rxon to hardware
1032 *
1033 * The RXON command in staging_rxon is committed to the hardware and
1034 * the active_rxon structure is updated with the new data. This
1035 * function correctly transitions out of the RXON_ASSOC_MSK state if
1036 * a HW tune is required based on the RXON structure changes.
1037 *
1038 * The connect/disconnect flow should be as the following:
1039 *
1040 * 1. make sure send RXON command with association bit unset if not connect
1041 * this should include the channel and the band for the candidate
1042 * to be connected to
1043 * 2. Add Station before RXON association with the AP
1044 * 3. RXON_timing has to send before RXON for connection
1045 * 4. full RXON command - associated bit set
1046 * 5. use RXON_ASSOC command to update any flags changes
1047 */
1048int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1049{
1050 /* cast away the const for active_rxon in this function */
1051 struct iwl_rxon_cmd *active = (void *)&ctx->active;
1052 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1053 int ret;
1054
1055 lockdep_assert_held(&priv->mutex);
1056
1057 if (!iwl_is_alive(priv))
1058 return -EBUSY;
1059
1060 /* This function hardcodes a bunch of dual-mode assumptions */
1061 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
1062
1063 if (!ctx->is_active)
1064 return 0;
1065
1066 /* always get timestamp with Rx frame */
1067 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1068
1069 /* recalculate basic rates */
1070 iwl_calc_basic_rates(priv, ctx);
1071
1072 /*
1073 * force CTS-to-self frames protection if RTS-CTS is not preferred
1074 * one aggregation protection method
1075 */
1076 if (!priv->hw_params.use_rts_for_aggregation)
1077 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1078
1079 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1080 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1081 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1082 else
1083 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1084
1085 iwl_print_rx_config_cmd(priv, ctx->ctxid);
1086 ret = iwl_check_rxon_cmd(priv, ctx);
1087 if (ret) {
1088 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1089 return -EINVAL;
1090 }
1091
1092 /*
1093 * receive commit_rxon request
1094 * abort any previous channel switch if still in process
1095 */
1096 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
1097 (priv->switch_channel != ctx->staging.channel)) {
1098 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1099 le16_to_cpu(priv->switch_channel));
1100 iwl_chswitch_done(priv, false);
1101 }
1102
1103 /*
1104 * If we don't need to send a full RXON, we can use
1105 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1106 * and other flags for the current radio configuration.
1107 */
1108 if (!iwl_full_rxon_required(priv, ctx)) {
1109 ret = iwlagn_send_rxon_assoc(priv, ctx);
1110 if (ret) {
1111 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1112 return ret;
1113 }
1114
1115 memcpy(active, &ctx->staging, sizeof(*active));
1116 /*
1117 * We do not commit tx power settings while channel changing,
1118 * do it now if after settings changed.
1119 */
1120 iwl_set_tx_power(priv, priv->tx_power_next, false);
1121
1122 /* make sure we are in the right PS state */
1123 iwl_power_update_mode(priv, true);
1124
1125 return 0;
1126 }
1127
1128 iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto);
1129
1130 IWL_DEBUG_INFO(priv,
1131 "Going to commit RXON\n"
1132 " * with%s RXON_FILTER_ASSOC_MSK\n"
1133 " * channel = %d\n"
1134 " * bssid = %pM\n",
1135 (new_assoc ? "" : "out"),
1136 le16_to_cpu(ctx->staging.channel),
1137 ctx->staging.bssid_addr);
1138
1139 /*
1140 * Always clear associated first, but with the correct config.
1141 * This is required as for example station addition for the
1142 * AP station must be done after the BSSID is set to correctly
1143 * set up filters in the device.
1144 */
1145 ret = iwlagn_rxon_disconn(priv, ctx);
1146 if (ret)
1147 return ret;
1148
1149 ret = iwlagn_set_pan_params(priv);
1150 if (ret)
1151 return ret;
1152
1153 if (new_assoc)
1154 return iwlagn_rxon_connect(priv, ctx);
1155
1156 return 0;
1157}
1158
1159void iwlagn_config_ht40(struct ieee80211_conf *conf,
1160 struct iwl_rxon_context *ctx)
1161{
1162 if (conf_is_ht40_minus(conf)) {
1163 ctx->ht.extension_chan_offset =
1164 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1165 ctx->ht.is_40mhz = true;
1166 } else if (conf_is_ht40_plus(conf)) {
1167 ctx->ht.extension_chan_offset =
1168 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1169 ctx->ht.is_40mhz = true;
1170 } else {
1171 ctx->ht.extension_chan_offset =
1172 IEEE80211_HT_PARAM_CHA_SEC_NONE;
1173 ctx->ht.is_40mhz = false;
1174 }
1175}
1176
/*
 * mac80211 .config callback: apply channel/SMPS/PS/TX-power changes
 * to every RXON context and commit any contexts whose staging
 * configuration changed as a result.
 */
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	int ret = 0;

	IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);

	mutex_lock(&priv->mutex);

	/* defer config changes while scanning or before the device is up */
	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
		goto out;
	}

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		for_each_context(priv, ctx)
			iwlagn_set_rxon_chain(priv, ctx);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf))
				ctx->ht.enabled = conf_is_ht(conf);

			if (ctx->ht.enabled) {
				/* if HT40 is used, it should not change
				 * after associated except channel switch */
				if (!ctx->ht.is_40mhz ||
				    !iwl_is_associated_ctx(ctx))
					iwlagn_config_ht40(conf, ctx);
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if (le16_to_cpu(ctx->staging.channel) !=
			    channel->hw_value)
				ctx->staging.flags = 0;

			iwl_set_rxon_channel(priv, channel, ctx);
			iwl_set_rxon_ht(priv, &priv->current_ht_config);

			iwl_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		iwl_update_bcast_stations(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
		       IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_set_tx_power(priv, conf->power_level, false);
	}

	/* commit every context whose staging RXON now differs */
	for_each_context(priv, ctx) {
		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			continue;
		iwlagn_commit_rxon(priv, ctx);
	}
 out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
1278
/*
 * Decide whether this context's peer(s) may transmit multi-stream
 * (2/3 spatial stream) HT MCS rates, record that on the context,
 * and update single_chain_sufficient across all active contexts.
 */
static void iwlagn_check_needed_chains(struct iwl_priv *priv,
				       struct iwl_rxon_context *ctx,
				       struct ieee80211_bss_conf *bss_conf)
{
	struct ieee80211_vif *vif = ctx->vif;
	struct iwl_rxon_context *tmp;
	struct ieee80211_sta *sta;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta_ht_cap *ht_cap;
	bool need_multiple;

	lockdep_assert_held(&priv->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		/* look up the AP's station entry under RCU */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (!sta) {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			need_multiple = false;
			rcu_read_unlock();
			break;
		}

		ht_cap = &sta->ht_cap;

		/* assume multiple chains needed until proven otherwise */
		need_multiple = true;

		/*
		 * If the peer advertises no support for receiving 2 and 3
		 * stream MCS rates, it can't be transmitting them either.
		 */
		if (ht_cap->mcs.rx_mask[1] == 0 &&
		    ht_cap->mcs.rx_mask[2] == 0) {
			need_multiple = false;
		} else if (!(ht_cap->mcs.tx_params &
			     IEEE80211_HT_MCS_TX_DEFINED)) {
			/* If it can't TX MCS at all ... */
			need_multiple = false;
		} else if (ht_cap->mcs.tx_params &
			   IEEE80211_HT_MCS_TX_RX_DIFF) {
			int maxstreams;

			/*
			 * But if it can receive them, it might still not
			 * be able to transmit them, which is what we need
			 * to check here -- so check the number of streams
			 * it advertises for TX (if different from RX).
			 */

			maxstreams = (ht_cap->mcs.tx_params &
				      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
			maxstreams >>=
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (maxstreams <= 1)
				need_multiple = false;
		}

		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		/* currently */
		need_multiple = false;
		break;
	default:
		/* only AP really */
		need_multiple = true;
		break;
	}

	ctx->ht_need_multiple_chains = need_multiple;

	/* one chain is only sufficient if no context needs more */
	if (!need_multiple) {
		/* check all contexts */
		for_each_context(priv, tmp) {
			if (!tmp->vif)
				continue;
			if (tmp->ht_need_multiple_chains) {
				need_multiple = true;
				break;
			}
		}
	}

	ht_conf->single_chain_sufficient = !need_multiple;
}
1372
1373static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1374{
1375 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1376 int ret;
1377
1378 if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
1379 return;
1380
1381 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
1382 iwl_is_any_associated(priv)) {
1383 struct iwl_calib_chain_noise_reset_cmd cmd;
1384
1385 /* clear data for chain noise calibration algorithm */
1386 data->chain_noise_a = 0;
1387 data->chain_noise_b = 0;
1388 data->chain_noise_c = 0;
1389 data->chain_signal_a = 0;
1390 data->chain_signal_b = 0;
1391 data->chain_signal_c = 0;
1392 data->beacon_count = 0;
1393
1394 memset(&cmd, 0, sizeof(cmd));
1395 iwl_set_calib_hdr(&cmd.hdr,
1396 priv->phy_calib_chain_noise_reset_cmd);
1397 ret = iwl_dvm_send_cmd_pdu(priv,
1398 REPLY_PHY_CALIBRATION_CMD,
1399 0, sizeof(cmd), &cmd);
1400 if (ret)
1401 IWL_ERR(priv,
1402 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
1403 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1404 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
1405 }
1406}
1407
/*
 * mac80211 .bss_info_changed callback: translate BSS configuration
 * changes (association, QoS, HT protection, CTS protection, beacon,
 * IBSS membership) into staging RXON updates and commit them when
 * anything changed (or a change, like beacon interval, forces it).
 */
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;
	bool force = false;	/* commit RXON even if staging == active */

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
		/*
		 * If we go idle, then clearly no "passive-no-rx"
		 * workaround is needed any more, this is a reset.
		 */
		iwlagn_lift_passive_no_rx(priv);
	}

	if (unlikely(!iwl_is_ready(priv))) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (unlikely(!ctx->vif)) {
		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* a beacon interval change always requires a full commit */
	if (changes & BSS_CHANGED_BEACON_INT)
		force = true;

	if (changes & BSS_CHANGED_QOS) {
		ctx->qos_data.qos_active = bss_conf->qos;
		iwlagn_update_qos(priv, ctx);
	}

	/* refresh AID and short-preamble flag unconditionally */
	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->sync_tsf;
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

			/* rekey data was for the old BSS connection */
			if (ctx->ctxid == IWL_RXON_CTX_BSS)
				priv->have_rekey_data = false;
		}

		iwlagn_bt_coex_rssi_monitor(priv);
	}

	if (ctx->ht.enabled) {
		ctx->ht.protection = bss_conf->ht_operation_mode &
				     IEEE80211_HT_OP_MODE_PROTECTION;
		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
		iwlagn_check_needed_chains(priv, ctx, bss_conf);
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
	}

	iwlagn_set_rxon_chain(priv, ctx);

	/* 11g protection only applies on the 2.4 GHz band */
	if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

	if (bss_conf->use_cts_prot)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
	else
		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		if (vif->bss_conf.enable_beacon) {
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = ctx;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = NULL;
		}
	}

	/*
	 * If the ucode decides to do beacon filtering before
	 * association, it will lose beacons that are needed
	 * before sending frames out on passive channels. This
	 * causes association failures on those channels. Enable
	 * receiving beacons in such cases.
	 */

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!bss_conf->assoc)
			ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			ctx->staging.filter_flags &=
				~RXON_FILTER_BCON_AWARE_MSK;
	}

	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwlagn_commit_rxon(priv, ctx);

	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
		/*
		 * The chain noise calibration will enable PM upon
		 * completion. If calibration has already been run
		 * then we need to enable power management here.
		 */
		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
			iwl_power_update_mode(priv, false);

		/* Enable RX differential gain and sensitivity calibrations */
		iwlagn_chain_noise_reset(priv);
		priv->start_calib = 1;
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = iwlagn_manage_ibss_station(priv, vif,
						 bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
		if (iwlagn_update_beacon(priv, vif))
			IWL_ERR(priv, "Error updating beacon\n");
	}

	mutex_unlock(&priv->mutex);
}
1551
1552void iwlagn_post_scan(struct iwl_priv *priv)
1553{
1554 struct iwl_rxon_context *ctx;
1555
1556 /*
1557 * We do not commit power settings while scan is pending,
1558 * do it now if the settings changed.
1559 */
1560 iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
1561 iwl_set_tx_power(priv, priv->tx_power_next, false);
1562
1563 /*
1564 * Since setting the RXON may have been deferred while
1565 * performing the scan, fire one off if needed
1566 */
1567 for_each_context(priv, ctx)
1568 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1569 iwlagn_commit_rxon(priv, ctx);
1570
1571 iwlagn_set_pan_params(priv);
1572}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
new file mode 100644
index 000000000000..648159495bbc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -0,0 +1,1075 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "dev.h"
34#include "agn.h"
35
36/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
37 * sending probe req. This should be set long enough to hear probe responses
38 * from more than one AP. */
39#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
40#define IWL_ACTIVE_DWELL_TIME_52 (20)
41
42#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
43#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
44
45/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
46 * Must be set longer than active dwell time.
47 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
48#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
49#define IWL_PASSIVE_DWELL_TIME_52 (10)
50#define IWL_PASSIVE_DWELL_BASE (100)
51#define IWL_CHANNEL_TUNE_TIME 5
52#define MAX_SCAN_CHANNEL 50
53
54/* For reset radio, need minimal dwell time only */
55#define IWL_RADIO_RESET_DWELL_TIME 5
56
/*
 * Ask the firmware to abort the scan currently in progress.
 * Returns 0 on success, -EIO when the device cannot take the
 * command or the firmware refused the abort, or the error from
 * sending the command.
 */
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
	int ret;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.flags = CMD_WANT_SKB,
	};
	__le32 *status;

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(STATUS_READY, &priv->status) ||
	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
	    test_bit(STATUS_FW_ERROR, &priv->status))
		return -EIO;

	ret = iwl_dvm_send_cmd(priv, &cmd);
	if (ret)
		return ret;

	status = (void *)cmd.resp_pkt->data;
	if (*status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure". A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before
		 * the microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n",
			       le32_to_cpu(*status));
		ret = -EIO;
	}

	/* must free the response obtained via CMD_WANT_SKB */
	iwl_free_resp(&cmd);
	return ret;
}
94
95static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
96{
97 /* check if scan was requested from mac80211 */
98 if (priv->scan_request) {
99 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
100 ieee80211_scan_completed(priv->hw, aborted);
101 }
102
103 priv->scan_type = IWL_SCAN_NORMAL;
104 priv->scan_vif = NULL;
105 priv->scan_request = NULL;
106}
107
/*
 * Run scan-completion handling. This may be invoked both from the
 * scan_completed background work and inline from
 * iwl_scan_cancel_timeout(); the STATUS_SCAN_COMPLETE bit ensures
 * only the first invocation does the work.
 */
static void iwl_process_scan_complete(struct iwl_priv *priv)
{
	bool aborted;

	lockdep_assert_held(&priv->mutex);

	if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status))
		return;

	IWL_DEBUG_SCAN(priv, "Completed scan.\n");

	/* scan finished, so the scan_check work is no longer needed */
	cancel_delayed_work(&priv->scan_check);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	/* an internal (non-mac80211) scan just ended successfully */
	if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
		int err;

		/* Check if mac80211 requested scan during our internal scan */
		if (priv->scan_request == NULL)
			goto out_complete;

		/* If so request a new scan */
		err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
					priv->scan_request->channels[0]->band);
		if (err) {
			IWL_DEBUG_SCAN(priv,
				"failed to initiate pending scan: %d\n", err);
			aborted = true;
			goto out_complete;
		}

		return;
	}

out_complete:
	iwl_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_is_ready_rf(priv))
		return;

	iwlagn_post_scan(priv);
}
160
/*
 * Unconditionally tear down all scan state and complete the scan
 * as aborted. Used when the firmware cannot be asked to abort
 * cleanly (e.g. when sending the scan abort command failed).
 */
void iwl_force_scan_end(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->mutex);

	if (!test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
		return;
	}

	IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
	/* drop every scan-related status bit before completing */
	clear_bit(STATUS_SCANNING, &priv->status);
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	clear_bit(STATUS_SCAN_COMPLETE, &priv->status);
	iwl_complete_scan(priv, true);
}
177
178static void iwl_do_scan_abort(struct iwl_priv *priv)
179{
180 int ret;
181
182 lockdep_assert_held(&priv->mutex);
183
184 if (!test_bit(STATUS_SCANNING, &priv->status)) {
185 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
186 return;
187 }
188
189 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
190 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
191 return;
192 }
193
194 ret = iwl_send_scan_abort(priv);
195 if (ret) {
196 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
197 iwl_force_scan_end(priv);
198 } else
199 IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
200}
201
202/**
203 * iwl_scan_cancel - Cancel any currently executing HW scan
204 */
205int iwl_scan_cancel(struct iwl_priv *priv)
206{
207 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
208 queue_work(priv->workqueue, &priv->abort_scan);
209 return 0;
210}
211
212/**
213 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
214 * @ms: amount of time to wait (in milliseconds) for scan to abort
215 *
216 */
217void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
218{
219 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
220
221 lockdep_assert_held(&priv->mutex);
222
223 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
224
225 iwl_do_scan_abort(priv);
226
227 while (time_before_eq(jiffies, timeout)) {
228 if (!test_bit(STATUS_SCAN_HW, &priv->status))
229 goto finished;
230 msleep(20);
231 }
232
233 return;
234
235 finished:
236 /*
237 * Now STATUS_SCAN_HW is clear. This means that the
238 * device finished, but the background work is going
239 * to execute at best as soon as we release the mutex.
240 * Since we need to be able to issue a new scan right
241 * after this function returns, run the complete here.
242 * The STATUS_SCAN_COMPLETE bit will then be cleared
243 * and prevent the background work from "completing"
244 * a possible new scan.
245 */
246 iwl_process_scan_complete(priv);
247}
248
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_rx_reply_scan(struct iwl_priv *priv,
			      struct iwl_rx_cmd_buffer *rxb)
{
	/* debug builds only: log the firmware's scan request status */
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanreq_notification *notif = (void *)pkt->data;

	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
}
260
/* Service SCAN_START_NOTIFICATION (0x82) */
static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
				    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanstart_notification *notif = (void *)pkt->data;

	/* remember when the scan started, for per-channel elapsed-time
	 * reporting in the scan results notification */
	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	IWL_DEBUG_SCAN(priv, "Scan start: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       notif->status, notif->beacon_timer);
}
278
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
				      struct iwl_rx_cmd_buffer *rxb)
{
	/* debug builds only: log the per-channel scan result summary */
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanresults_notification *notif = (void *)pkt->data;

	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
		       "%d [802.11%s] "
		       "probe status: %u:%u "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       notif->probe_status, notif->num_probe_not_sent,
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       le32_to_cpu(notif->statistics[0]),
		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
}
301
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
				       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;

	IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);

	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
		       jiffies_to_msecs(jiffies - priv->scan_start));

	/*
	 * When aborting, we run the scan completed background work inline
	 * and the background work must then do nothing. The SCAN_COMPLETE
	 * bit helps implement that logic and thus needs to be set before
	 * queueing the work. Also, since the scan abort waits for SCAN_HW
	 * to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW
	 * to avoid a race there.
	 */
	set_bit(STATUS_SCAN_COMPLETE, &priv->status);
	clear_bit(STATUS_SCAN_HW, &priv->status);
	queue_work(priv->workqueue, &priv->scan_completed);

	/*
	 * Propagate the BT activity status reported with the scan complete
	 * notification to the BT coexistence logic, when it changed.
	 * Not done for IBSS, and only when advanced BT coex is in use.
	 */
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
	    iwl_advanced_bt_coexist(priv) &&
	    priv->bt_status != scan_notif->bt_status) {
		if (scan_notif->bt_status) {
			/* BT on */
			if (!priv->bt_ch_announce)
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
			/*
			 * otherwise, no traffic load information provided
			 * no changes made
			 */
		} else {
			/* BT off */
			priv->bt_traffic_load =
				IWL_BT_COEX_TRAFFIC_LOAD_NONE;
		}
		priv->bt_status = scan_notif->bt_status;
		queue_work(priv->workqueue,
			   &priv->bt_traffic_change_work);
	}
}
352
353void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
354{
355 /* scan handlers */
356 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
357 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
358 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
359 iwl_rx_scan_results_notif;
360 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
361 iwl_rx_scan_complete_notif;
362}
363
364static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
365 enum ieee80211_band band, u8 n_probes)
366{
367 if (band == IEEE80211_BAND_5GHZ)
368 return IWL_ACTIVE_DWELL_TIME_52 +
369 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
370 else
371 return IWL_ACTIVE_DWELL_TIME_24 +
372 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
373}
374
/*
 * Clamp a scan dwell time against the beacon timing of the active
 * contexts, so scanning does not starve association traffic.
 * Returns the (possibly reduced) dwell time.
 */
static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
{
	struct iwl_rxon_context *ctx;
	int limits[NUM_IWL_RXON_CTX] = {};
	int n_active = 0;
	u16 limit;

	/* the limits[] indexing below assumes exactly two contexts */
	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	/*
	 * If we're associated, we clamp the dwell time 98%
	 * of the beacon interval (minus 2 * channel tune time)
	 * If both contexts are active, we have to restrict to
	 * 1/2 of the minimum of them, because they might be in
	 * lock-step with the time inbetween only half of what
	 * time we'd have in each of them.
	 */
	for_each_context(priv, ctx) {
		switch (ctx->staging.dev_type) {
		case RXON_DEV_TYPE_P2P:
			/* no timing constraints */
			continue;
		case RXON_DEV_TYPE_ESS:
		default:
			/* timing constraints if associated */
			if (!iwl_is_associated_ctx(ctx))
				continue;
			break;
		case RXON_DEV_TYPE_CP:
		case RXON_DEV_TYPE_2STA:
			/*
			 * These seem to always have timers for TBTT
			 * active in uCode even when not associated yet.
			 */
			break;
		}

		/* fall back to a default interval when beacon_int is 0 */
		limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
	}

	switch (n_active) {
	case 0:
		/* no timing constraints at all */
		return dwell_time;
	case 2:
		/* apply the second context's limit, then fall through
		 * so the first context's limit is applied as well */
		limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
		limit /= 2;
		dwell_time = min(limit, dwell_time);
		/* fall through to limit further */
	case 1:
		limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
		limit /= n_active;
		return min(limit, dwell_time);
	default:
		WARN_ON_ONCE(1);
		return dwell_time;
	}
}
432
433static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
434 enum ieee80211_band band)
435{
436 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
437 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
438 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
439
440 return iwl_limit_dwell(priv, passive);
441}
442
443/* Return valid, unused, channel for a passive scan to reset the RF */
444static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
445 enum ieee80211_band band)
446{
447 struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
448 struct iwl_rxon_context *ctx;
449 int i;
450
451 for (i = 0; i < sband->n_channels; i++) {
452 bool busy = false;
453
454 for_each_context(priv, ctx) {
455 busy = sband->channels[i].hw_value ==
456 le16_to_cpu(ctx->staging.channel);
457 if (busy)
458 break;
459 }
460
461 if (busy)
462 continue;
463
464 if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
465 return sband->channels[i].hw_value;
466 }
467
468 return 0;
469}
470
/*
 * Fill in a single passive scan channel entry for the RF-reset scan.
 * Returns the number of channels written (1), or 0 on failure.
 * Note: @vif is currently unused here.
 */
static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
					  struct ieee80211_vif *vif,
					  enum ieee80211_band band,
					  struct iwl_scan_channel *scan_ch)
{
	const struct ieee80211_supported_band *sband;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband) {
		IWL_ERR(priv, "invalid band\n");
		return 0;
	}

	channel = iwl_get_single_channel_number(priv, band);
	if (channel) {
		scan_ch->channel = cpu_to_le16(channel);
		scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		scan_ch->active_dwell =
			cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
		scan_ch->passive_dwell =
			cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		return 1;
	}

	IWL_ERR(priv, "no valid channel found\n");
	return 0;
}
505
/*
 * Fill the scan command's channel list from the mac80211 scan request,
 * restricted to @band. Returns the number of channel entries written
 * into @scan_ch. Note: @vif is currently unused here.
 */
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_get_passive_dwell_time(priv, band);

	/* passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		/* only take channels from the requested band */
		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		/* no-IR channels must be scanned passively */
		if (!is_active || (chan->flags & IEEE80211_CHAN_NO_IR))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
575
/**
 * iwl_fill_probe_req - fill in all required fields and IE for probe request
 * @frame: caller-provided buffer for the management frame
 * @ta: transmitter address, copied into the SA field
 * @ies: optional extra IEs appended after the SSID IE (may be NULL)
 * @ie_len: length of @ies in bytes
 * @ssid: SSID for the SSID IE; NULL/empty gives a wildcard SSID IE
 * @ssid_len: length of @ssid in bytes
 * @left: space available in @frame, in bytes
 *
 * Returns the total frame length in bytes, or 0 when @left cannot hold
 * the 24-byte header plus the SSID IE.
 */

static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
			      const u8 *ies, int ie_len, const u8 *ssid,
			      u8 ssid_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	/* 24-byte management frame header: broadcast DA/BSSID, TA as SA */
	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) {
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* extra IEs must fit in the remaining space; if not, return the
	 * header + SSID IE only */
	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16)len;
}
627
/*
 * Build and send the REPLY_SCAN_CMD (0x80) to the firmware for the
 * current scan type (normal mac80211 scan or internal radio-reset scan).
 * Must be called with priv->mutex held. Returns 0 on success or a
 * negative errno; on failure STATUS_SCAN_HW is left cleared.
 */
static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = { sizeof(struct iwl_scan_cmd), },
	};
	struct iwl_scan_cmd *scan;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len = 0;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->nvm_data->valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
	int ret;
	/* worst-case buffer: fixed part + all channels + probe template */
	int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
			    MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
			    priv->fw->ucode_capa.max_probe_length;
	const u8 *ssid = NULL;
	u8 ssid_len = 0;

	if (WARN_ON(priv->scan_type == IWL_SCAN_NORMAL &&
		    (!priv->scan_request ||
		     priv->scan_request->n_channels > MAX_SCAN_CHANNEL)))
		return -EINVAL;

	lockdep_assert_held(&priv->mutex);

	if (vif)
		ctx = iwl_rxon_ctx_from_vif(vif);

	/* the scan command buffer is allocated once and reused */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, scan_cmd_size);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	/* while associated, interleave scanning with the BSS schedule */
	if (iwl_is_any_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		switch (priv->scan_type) {
		case IWL_SCAN_RADIO_RESET:
			interval = 0;
			break;
		case IWL_SCAN_NORMAL:
			interval = vif->bss_conf.beacon_int;
			break;
		}

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* encode suspend time as beacons (upper bits) + usec remainder */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	switch (priv->scan_type) {
	case IWL_SCAN_RADIO_RESET:
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
		/*
		 * Override quiet time as firmware checks that active
		 * dwell is >= quiet; since we use passive scan it'll
		 * not actually be used.
		 */
		scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
		break;
	case IWL_SCAN_NORMAL:
		if (priv->scan_request->n_ssids) {
			int i, p = 0;
			IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
			/*
			 * The highest priority SSID is inserted to the
			 * probe request template.
			 */
			ssid_len = priv->scan_request->ssids[0].ssid_len;
			ssid = priv->scan_request->ssids[0].ssid;

			/*
			 * Invert the order of ssids, the firmware will invert
			 * it back.
			 */
			for (i = priv->scan_request->n_ssids - 1; i >= 1; i--) {
				scan->direct_scan[p].id = WLAN_EID_SSID;
				scan->direct_scan[p].len =
					priv->scan_request->ssids[i].ssid_len;
				memcpy(scan->direct_scan[p].ssid,
				       priv->scan_request->ssids[i].ssid,
				       priv->scan_request->ssids[i].ssid_len);
				n_probes++;
				p++;
			}
			is_active = true;
		} else
			IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
		break;
	}

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* pick probe TX rate and flags per band */
	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		if ((priv->scan_request && priv->scan_request->no_cck) ||
		    chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		/*
		 * Internal scans are passive, so we can indiscriminately set
		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
		 */
		if (priv->lib->bt_params &&
		    priv->lib->bt_params->advanced_bt_coexist)
			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 *
	 * This was fixed in later versions along with some other
	 * scan changes, and the threshold behaves as a flag in those
	 * versions.
	 */
	if (priv->new_scan_threshold_behaviour)
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_DISABLED;
	else
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	if (band == IEEE80211_BAND_2GHZ &&
	    priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist) {
		/* transmit 2.4 GHz probes only on first antenna */
		scan_tx_antennas = first_antenna(scan_tx_antennas);
	}

	/* rotate TX antenna between scans to even out antenna use */
	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv,
						    priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);

	/*
	 * In power save mode while associated use one chain,
	 * otherwise use all chains
	 */
	if (test_bit(STATUS_POWER_PMI, &priv->status) &&
	    !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
			       priv->chain_noise_data.active_chains);

		rx_ant = first_antenna(active_chains);
	}
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		rx_ant = first_antenna(rx_ant);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |=
		priv->nvm_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* build the probe request template at the start of scan->data */
	switch (priv->scan_type) {
	case IWL_SCAN_NORMAL:
		cmd_len = iwl_fill_probe_req(
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					ssid, ssid_len,
					scan_cmd_size - sizeof(*scan));
		break;
	case IWL_SCAN_RADIO_RESET:
		/* use bcast addr, will not be transmitted but must be valid */
		cmd_len = iwl_fill_probe_req(
					(struct ieee80211_mgmt *)scan->data,
					iwl_bcast_addr, NULL, 0,
					NULL, 0,
					scan_cmd_size - sizeof(*scan));
		break;
	default:
		BUG();
	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	/* the channel list follows the probe request in scan->data */
	switch (priv->scan_type) {
	case IWL_SCAN_RADIO_RESET:
		scan->channel_count =
			iwl_get_channel_for_reset_scan(priv, vif, band,
				(void *)&scan->data[cmd_len]);
		break;
	case IWL_SCAN_NORMAL:
		scan->channel_count =
			iwl_get_channels_for_scan(priv, vif, band,
						  is_active, n_probes,
						  (void *)&scan->data[cmd_len]);
		break;
	}

	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data[0] = scan;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	scan->len = cpu_to_le16(cmd.len[0]);

	/* set scan bit here for PAN params */
	set_bit(STATUS_SCAN_HW, &priv->status);

	ret = iwlagn_set_pan_params(priv);
	if (ret) {
		clear_bit(STATUS_SCAN_HW, &priv->status);
		return ret;
	}

	ret = iwl_dvm_send_cmd(priv, &cmd);
	if (ret) {
		/* undo the scan state and restore PAN parameters */
		clear_bit(STATUS_SCAN_HW, &priv->status);
		iwlagn_set_pan_params(priv);
	}

	return ret;
}
924
925void iwl_init_scan_params(struct iwl_priv *priv)
926{
927 u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
928 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
929 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
930 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
931 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
932}
933
/*
 * Start a scan (normal or internal radio-reset) on the given band.
 * Must be called with priv->mutex held. On success, STATUS_SCANNING is
 * set and the scan watchdog is armed; on failure the scan state is
 * rolled back and a negative errno is returned.
 */
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   enum iwl_scan_type scan_type,
				   enum ieee80211_band band)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	cancel_delayed_work(&priv->scan_check);

	if (!iwl_is_ready_rf(priv)) {
		IWL_WARN(priv, "Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_SCAN(priv,
			       "Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
		return -EBUSY;
	}

	IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
		       scan_type == IWL_SCAN_NORMAL ? "" :
		       "internal short ");

	set_bit(STATUS_SCANNING, &priv->status);
	priv->scan_type = scan_type;
	priv->scan_start = jiffies;
	priv->scan_band = band;

	ret = iwlagn_request_scan(priv, vif);
	if (ret) {
		/* roll back the scan state set above */
		clear_bit(STATUS_SCANNING, &priv->status);
		priv->scan_type = IWL_SCAN_NORMAL;
		return ret;
	}

	/* watchdog: force scan end if firmware never reports completion */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return 0;
}
982
983
/*
 * internal short scan, this function should only been called while associated.
 * It will reset and tune the radio to prevent possible RF related problem
 */
void iwl_internal_short_hw_scan(struct iwl_priv *priv)
{
	/* defer to process context; the work takes priv->mutex */
	queue_work(priv->workqueue, &priv->start_internal_scan);
}
992
993static void iwl_bg_start_internal_scan(struct work_struct *work)
994{
995 struct iwl_priv *priv =
996 container_of(work, struct iwl_priv, start_internal_scan);
997
998 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
999
1000 mutex_lock(&priv->mutex);
1001
1002 if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
1003 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
1004 goto unlock;
1005 }
1006
1007 if (test_bit(STATUS_SCANNING, &priv->status)) {
1008 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
1009 goto unlock;
1010 }
1011
1012 if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
1013 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
1014 unlock:
1015 mutex_unlock(&priv->mutex);
1016}
1017
/* Watchdog work: runs if the firmware never reported scan completion. */
static void iwl_bg_scan_check(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, scan_check.work);

	IWL_DEBUG_SCAN(priv, "Scan check work\n");

	/* Since we are here firmware does not finish scan and
	 * most likely is in bad shape, so we don't bother to
	 * send abort command, just force scan complete to mac80211 */
	mutex_lock(&priv->mutex);
	iwl_force_scan_end(priv);
	mutex_unlock(&priv->mutex);
}
1032
/* Work handler: abort the current scan, waiting up to 200ms for HW. */
static void iwl_bg_abort_scan(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);

	IWL_DEBUG_SCAN(priv, "Abort scan work\n");

	/* We keep scan_check work queued in case when firmware will not
	 * report back scan completed notification */
	mutex_lock(&priv->mutex);
	iwl_scan_cancel_timeout(priv, 200);
	mutex_unlock(&priv->mutex);
}
1045
/* Work handler: run scan-complete processing under priv->mutex. */
static void iwl_bg_scan_completed(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, scan_completed);

	mutex_lock(&priv->mutex);
	iwl_process_scan_complete(priv);
	mutex_unlock(&priv->mutex);
}
1055
1056void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
1057{
1058 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
1059 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
1060 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
1061 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
1062}
1063
/* Cancel all scan work items; if the watchdog was still pending,
 * force the scan to end so mac80211 is notified. */
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->start_internal_scan);
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->scan_completed);

	if (cancel_delayed_work_sync(&priv->scan_check)) {
		mutex_lock(&priv->mutex);
		iwl_force_scan_end(priv);
		mutex_unlock(&priv->mutex);
	}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
new file mode 100644
index 000000000000..0fa67d3b7235
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -0,0 +1,1442 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <net/mac80211.h>
31#include "iwl-trans.h"
32#include "dev.h"
33#include "agn.h"
34
/* broadcast MAC address, used e.g. as the TA for internal scans */
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
36
/*
 * Mark a station as active in uCode in the driver's bookkeeping,
 * after the firmware confirmed the ADD_STA command.
 * Must be called with sta_lock held. Returns 0 or -EINVAL for a bad id.
 */
static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
	lockdep_assert_held(&priv->sta_lock);

	if (sta_id >= IWLAGN_STATION_COUNT) {
		IWL_ERR(priv, "invalid sta_id %u\n", sta_id);
		return -EINVAL;
	}
	/* unexpected but non-fatal: driver didn't mark this station active */
	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
		IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
			"addr %pM\n",
			sta_id, priv->stations[sta_id].sta.sta.addr);

	if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
		IWL_DEBUG_ASSOC(priv,
				"STA id %u addr %pM already present in uCode "
				"(according to driver)\n",
				sta_id, priv->stations[sta_id].sta.sta.addr);
	} else {
		priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
		IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
				sta_id, priv->stations[sta_id].sta.sta.addr);
	}
	return 0;
}
62
63static void iwl_process_add_sta_resp(struct iwl_priv *priv,
64 struct iwl_rx_packet *pkt)
65{
66 struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
67
68 IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
69
70 spin_lock_bh(&priv->sta_lock);
71
72 switch (add_sta_resp->status) {
73 case ADD_STA_SUCCESS_MSK:
74 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
75 break;
76 case ADD_STA_NO_ROOM_IN_TABLE:
77 IWL_ERR(priv, "Adding station failed, no room in table.\n");
78 break;
79 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
80 IWL_ERR(priv,
81 "Adding station failed, no block ack resource.\n");
82 break;
83 case ADD_STA_MODIFY_NON_EXIST_STA:
84 IWL_ERR(priv, "Attempting to modify non-existing station\n");
85 break;
86 default:
87 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
88 add_sta_resp->status);
89 break;
90 }
91
92 spin_unlock_bh(&priv->sta_lock);
93}
94
/* RX handler for asynchronous REPLY_ADD_STA responses. */
void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
	iwl_process_add_sta_resp(priv, rxb_addr(rxb));
}
101
/*
 * Send a REPLY_ADD_STA command to the firmware.
 * With CMD_ASYNC in @flags the call returns immediately after queueing;
 * otherwise it waits for the response, and on success marks the station
 * as uCode-active. Returns 0 on success, -EIO if the firmware rejected
 * the station, or the send error.
 */
int iwl_send_add_sta(struct iwl_priv *priv,
		     struct iwl_addsta_cmd *sta, u8 flags)
{
	int ret = 0;
	struct iwl_host_cmd cmd = {
		.id = REPLY_ADD_STA,
		.flags = flags,
		.data = { sta, },
		.len = { sizeof(*sta), },
	};
	u8 sta_id __maybe_unused = sta->sta.sta_id;
	struct iwl_rx_packet *pkt;
	struct iwl_add_sta_resp *add_sta_resp;

	IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
		       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");

	if (!(flags & CMD_ASYNC)) {
		/* synchronous: we need the response packet and may block */
		cmd.flags |= CMD_WANT_SKB;
		might_sleep();
	}

	ret = iwl_dvm_send_cmd(priv, &cmd);

	if (ret || (flags & CMD_ASYNC))
		return ret;

	pkt = cmd.resp_pkt;
	add_sta_resp = (void *)pkt->data;

	/* debug messages are printed in the handler */
	if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
		spin_lock_bh(&priv->sta_lock);
		ret = iwl_sta_ucode_activate(priv, sta_id);
		spin_unlock_bh(&priv->sta_lock);
	} else {
		ret = -EIO;
	}

	/* release the response packet obtained via CMD_WANT_SKB */
	iwl_free_resp(&cmd);

	return ret;
}
145
/*
 * Whether HT40 TX is allowed towards @sta in @ctx: the context must have
 * HT40 enabled, debugfs must not disable it, and the peer (when given)
 * must support at least 40 MHz RX bandwidth. A NULL @sta (RXON case)
 * only checks the context.
 */
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta *sta)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (priv->disable_ht40)
		return false;
#endif

	/* special case for RXON */
	if (!sta)
		return true;

	return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
164
/*
 * Compute the HT-related station flags and their mask for an ADD_STA
 * command, based on the peer's HT capabilities and SMPS mode.
 * @flags/@mask are always written; for a NULL or non-HT @sta the mask
 * covers all HT bits with all flags cleared.
 */
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
				  struct ieee80211_sta *sta,
				  struct iwl_rxon_context *ctx,
				  __le32 *flags, __le32 *mask)
{
	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;

	*mask = STA_FLG_RTS_MIMO_PROT_MSK |
		STA_FLG_MIMO_DIS_MSK |
		STA_FLG_HT40_EN_MSK |
		STA_FLG_MAX_AGG_SIZE_MSK |
		STA_FLG_AGG_MPDU_DENSITY_MSK;
	*flags = 0;

	if (!sta || !sta_ht_inf->ht_supported)
		return;

	IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
		       sta->addr,
		       (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
		       "static" :
		       (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
		       "dynamic" : "disabled");

	/* map the peer's SMPS mode to MIMO protection/disable flags */
	switch (sta->smps_mode) {
	case IEEE80211_SMPS_STATIC:
		*flags |= STA_FLG_MIMO_DIS_MSK;
		break;
	case IEEE80211_SMPS_DYNAMIC:
		*flags |= STA_FLG_RTS_MIMO_PROT_MSK;
		break;
	case IEEE80211_SMPS_OFF:
		break;
	default:
		IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
		break;
	}

	/* aggregation limits from the peer's A-MPDU parameters */
	*flags |= cpu_to_le32(
		(u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);

	*flags |= cpu_to_le32(
		(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);

	if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
		*flags |= STA_FLG_HT40_EN_MSK;
}
212
/*
 * Recompute a station's HT flags and push the update to the firmware
 * via a MODIFY ADD_STA command. Returns the send result or -EINVAL for
 * an unknown station.
 */
int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		      struct ieee80211_sta *sta)
{
	u8 sta_id = iwl_sta_id(sta);
	__le32 flags, mask;
	struct iwl_addsta_cmd cmd;

	if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
		return -EINVAL;

	iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);

	/* keep the driver's shadow copy in sync with what we send */
	spin_lock_bh(&priv->sta_lock);
	priv->stations[sta_id].sta.station_flags &= ~mask;
	priv->stations[sta_id].sta.station_flags |= flags;
	spin_unlock_bh(&priv->sta_lock);

	memset(&cmd, 0, sizeof(cmd));
	cmd.mode = STA_CONTROL_MODIFY_MSK;
	cmd.station_flags_msk = mask;
	cmd.station_flags = flags;
	cmd.sta.sta_id = sta_id;

	return iwl_send_add_sta(priv, &cmd, 0);
}
238
/*
 * Apply the computed HT flags to the driver's pending ADD_STA command
 * for station @index. Must be called with sta_lock held; does not send
 * anything to the firmware.
 */
static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
				   struct ieee80211_sta *sta,
				   struct iwl_rxon_context *ctx)
{
	__le32 flags, mask;

	iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);

	lockdep_assert_held(&priv->sta_lock);
	priv->stations[index].sta.station_flags &= ~mask;
	priv->stations[index].sta.station_flags |= flags;
}
251
/**
 * iwl_prep_station - Prepare station information for addition
 *
 * Finds (or allocates) a station table slot for @addr, fills in the
 * driver-side entry and the REPLY_ADD_STA command template, and returns
 * the station id, or IWL_INVALID_STATION when no slot is available.
 * If the station is already added or being added, returns its id
 * without touching the entry.
 *
 * should be called with sta_lock held
 */
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
{
	struct iwl_station_entry *station;
	int i;
	u8 sta_id = IWL_INVALID_STATION;

	/* AP and broadcast stations use fixed, per-context slots; other
	 * stations reuse an existing match or take the first free slot */
	if (is_ap)
		sta_id = ctx->ap_sta_id;
	else if (is_broadcast_ether_addr(addr))
		sta_id = ctx->bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
			if (ether_addr_equal(priv->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			if (!priv->stations[i].used &&
			    sta_id == IWL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IWL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv, "STA %d already in process of being "
			       "added.\n", sta_id);
		return sta_id;
	}

	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) {
		IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
				"adding again.\n", sta_id, addr);
		return sta_id;
	}

	station = &priv->stations[sta_id];
	station->used = IWL_STA_DRIVER_ACTIVE;
	IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
			sta_id, addr);
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = ctx->station_flags;
	station->ctxid = ctx->ctxid;

	if (sta) {
		struct iwl_station_priv *sta_priv;

		/* remember which context this station belongs to */
		sta_priv = (void *)sta->drv_priv;
		sta_priv->ctx = ctx;
	}

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	iwl_set_ht_add_station(priv, sta_id, sta, ctx);

	return sta_id;

}
338
339#define STA_WAIT_TIMEOUT (HZ/2)
340
/**
 * iwl_add_station_common - add a station to driver and device tables
 *
 * Prepares the station entry under sta_lock, then (with the lock dropped,
 * since the command may sleep) sends REPLY_ADD_STA to the device.  On
 * failure the driver-side entry is rolled back.  The assigned station
 * index is returned through @sta_id_r.
 *
 * Returns 0 on success, -EINVAL if no slot is available, -EEXIST if an
 * add is already in flight or the station is already fully added, or the
 * error from sending the command.
 */
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			   const u8 *addr, bool is_ap,
			   struct ieee80211_sta *sta, u8 *sta_id_r)
{
	int ret = 0;
	u8 sta_id;
	struct iwl_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_bh(&priv->sta_lock);
	sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
			addr);
		spin_unlock_bh(&priv->sta_lock);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
		IWL_DEBUG_INFO(priv, "STA %d already in process of being "
			       "added.\n", sta_id);
		spin_unlock_bh(&priv->sta_lock);
		return -EEXIST;
	}

	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
				"adding again.\n", sta_id, addr);
		spin_unlock_bh(&priv->sta_lock);
		return -EEXIST;
	}

	/*
	 * Snapshot the command under the lock; the actual send below may
	 * sleep and therefore must run with the lock released.
	 */
	priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
	       sizeof(struct iwl_addsta_cmd));
	spin_unlock_bh(&priv->sta_lock);

	/* Add station to device's station table */
	ret = iwl_send_add_sta(priv, &sta_cmd, 0);
	if (ret) {
		/* roll back the driver-side entry on failure */
		spin_lock_bh(&priv->sta_lock);
		IWL_ERR(priv, "Adding station %pM failed.\n",
			priv->stations[sta_id].sta.sta.addr);
		priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
		priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_bh(&priv->sta_lock);
	}
	*sta_id_r = sta_id;
	return ret;
}
400
401/**
402 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
403 */
404static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
405{
406 lockdep_assert_held(&priv->sta_lock);
407
408 /* Ucode must be active and driver must be non active */
409 if ((priv->stations[sta_id].used &
410 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
411 IWL_STA_UCODE_ACTIVE)
412 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
413
414 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
415
416 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
417 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
418}
419
420static int iwl_send_remove_station(struct iwl_priv *priv,
421 const u8 *addr, int sta_id,
422 bool temporary)
423{
424 struct iwl_rx_packet *pkt;
425 int ret;
426 struct iwl_rem_sta_cmd rm_sta_cmd;
427 struct iwl_rem_sta_resp *rem_sta_resp;
428
429 struct iwl_host_cmd cmd = {
430 .id = REPLY_REMOVE_STA,
431 .len = { sizeof(struct iwl_rem_sta_cmd), },
432 .data = { &rm_sta_cmd, },
433 };
434
435 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
436 rm_sta_cmd.num_sta = 1;
437 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
438
439 cmd.flags |= CMD_WANT_SKB;
440
441 ret = iwl_dvm_send_cmd(priv, &cmd);
442
443 if (ret)
444 return ret;
445
446 pkt = cmd.resp_pkt;
447 rem_sta_resp = (void *)pkt->data;
448
449 switch (rem_sta_resp->status) {
450 case REM_STA_SUCCESS_MSK:
451 if (!temporary) {
452 spin_lock_bh(&priv->sta_lock);
453 iwl_sta_ucode_deactivate(priv, sta_id);
454 spin_unlock_bh(&priv->sta_lock);
455 }
456 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
457 break;
458 default:
459 ret = -EIO;
460 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
461 break;
462 }
463
464 iwl_free_resp(&cmd);
465
466 return ret;
467}
468
/**
 * iwl_remove_station - Remove driver's knowledge of station.
 *
 * Tears down the driver-side state (LQ command, per-TID data, active flag)
 * for @sta_id under sta_lock, then tells the device to remove the station.
 *
 * Returns 0 on success (including the device-not-ready case), -EINVAL on
 * inconsistent state, or the error from the removal command.
 */
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
		       const u8 *addr)
{
	u8 tid;

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			"Unable to remove station %pM, device not ready.\n",
			addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
			sta_id, addr);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return -EINVAL;

	spin_lock_bh(&priv->sta_lock);

	/* must have been marked active by the driver before removal */
	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
				addr);
		goto out_err;
	}

	/* and known to the ucode as well */
	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
		IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
				addr);
		goto out_err;
	}

	/* local stations own their LQ command allocation; free it here */
	if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
		kfree(priv->stations[sta_id].lq);
		priv->stations[sta_id].lq = NULL;
	}

	/* wipe all per-TID aggregation state for this station */
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
		memset(&priv->tid_data[sta_id][tid], 0,
		       sizeof(priv->tid_data[sta_id][tid]));

	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;

	priv->num_stations--;

	if (WARN_ON(priv->num_stations < 0))
		priv->num_stations = 0;

	spin_unlock_bh(&priv->sta_lock);

	/* sleeps; must be called with sta_lock released */
	return iwl_send_remove_station(priv, addr, sta_id, false);
out_err:
	spin_unlock_bh(&priv->sta_lock);
	return -EINVAL;
}
532
533void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
534 const u8 *addr)
535{
536 u8 tid;
537
538 if (!iwl_is_ready(priv)) {
539 IWL_DEBUG_INFO(priv,
540 "Unable to remove station %pM, device not ready.\n",
541 addr);
542 return;
543 }
544
545 IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id);
546
547 if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
548 return;
549
550 spin_lock_bh(&priv->sta_lock);
551
552 WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE));
553
554 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
555 memset(&priv->tid_data[sta_id][tid], 0,
556 sizeof(priv->tid_data[sta_id][tid]));
557
558 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
559 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
560
561 priv->num_stations--;
562
563 if (WARN_ON_ONCE(priv->num_stations < 0))
564 priv->num_stations = 0;
565
566 spin_unlock_bh(&priv->sta_lock);
567}
568
569static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
570 u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
571{
572 int i, r;
573 u32 rate_flags = 0;
574 __le32 rate_n_flags;
575
576 lockdep_assert_held(&priv->mutex);
577
578 memset(link_cmd, 0, sizeof(*link_cmd));
579
580 /* Set up the rate scaling to start at selected rate, fall back
581 * all the way down to 1M in IEEE order, and then spin on 1M */
582 if (priv->band == IEEE80211_BAND_5GHZ)
583 r = IWL_RATE_6M_INDEX;
584 else if (ctx && ctx->vif && ctx->vif->p2p)
585 r = IWL_RATE_6M_INDEX;
586 else
587 r = IWL_RATE_1M_INDEX;
588
589 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
590 rate_flags |= RATE_MCS_CCK_MSK;
591
592 rate_flags |= first_antenna(priv->nvm_data->valid_tx_ant) <<
593 RATE_MCS_ANT_POS;
594 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
595 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
596 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
597
598 link_cmd->general_params.single_stream_ant_msk =
599 first_antenna(priv->nvm_data->valid_tx_ant);
600
601 link_cmd->general_params.dual_stream_ant_msk =
602 priv->nvm_data->valid_tx_ant &
603 ~first_antenna(priv->nvm_data->valid_tx_ant);
604 if (!link_cmd->general_params.dual_stream_ant_msk) {
605 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
606 } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
607 link_cmd->general_params.dual_stream_ant_msk =
608 priv->nvm_data->valid_tx_ant;
609 }
610
611 link_cmd->agg_params.agg_dis_start_th =
612 LINK_QUAL_AGG_DISABLE_START_DEF;
613 link_cmd->agg_params.agg_time_limit =
614 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
615
616 link_cmd->sta_id = sta_id;
617}
618
619/**
620 * iwl_clear_ucode_stations - clear ucode station table bits
621 *
622 * This function clears all the bits in the driver indicating
623 * which stations are active in the ucode. Call when something
624 * other than explicit station management would cause this in
625 * the ucode, e.g. unassociated RXON.
626 */
627void iwl_clear_ucode_stations(struct iwl_priv *priv,
628 struct iwl_rxon_context *ctx)
629{
630 int i;
631 bool cleared = false;
632
633 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
634
635 spin_lock_bh(&priv->sta_lock);
636 for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
637 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
638 continue;
639
640 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
641 IWL_DEBUG_INFO(priv,
642 "Clearing ucode active for station %d\n", i);
643 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
644 cleared = true;
645 }
646 }
647 spin_unlock_bh(&priv->sta_lock);
648
649 if (!cleared)
650 IWL_DEBUG_INFO(priv,
651 "No active stations found to be cleared\n");
652}
653
/**
 * iwl_restore_stations() - Restore driver known stations to device
 *
 * All stations considered active by driver, but not present in ucode, is
 * restored.
 *
 * Function sleeps.
 */
void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_addsta_cmd sta_cmd;
	static const struct iwl_link_quality_cmd zero_lq = {};
	struct iwl_link_quality_cmd lq;
	int i;
	bool found = false;
	int ret;
	bool send_lq;

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_INFO(priv,
			       "Not ready yet, not restoring any stations.\n");
		return;
	}

	IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
	spin_lock_bh(&priv->sta_lock);
	/*
	 * First pass: mark every driver-active-but-not-ucode-active station
	 * of this context as "add in progress".
	 */
	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
		if (ctx->ctxid != priv->stations[i].ctxid)
			continue;
		if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
		    !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
			IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
					priv->stations[i].sta.sta.addr);
			priv->stations[i].sta.mode = 0;
			priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
			found = true;
		}
	}

	/*
	 * Second pass: send ADD_STA (and optionally LQ) for each marked
	 * station.  The commands sleep, so the lock is dropped around each
	 * send and re-taken afterwards; local snapshots (sta_cmd, lq) are
	 * made under the lock first.
	 */
	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
		if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
			memcpy(&sta_cmd, &priv->stations[i].sta,
			       sizeof(struct iwl_addsta_cmd));
			send_lq = false;
			if (priv->stations[i].lq) {
				/* during wowlan, use a fresh default LQ */
				if (priv->wowlan)
					iwl_sta_fill_lq(priv, ctx, i, &lq);
				else
					memcpy(&lq, priv->stations[i].lq,
					       sizeof(struct iwl_link_quality_cmd));

				/* only send if the LQ is not all-zero */
				if (memcmp(&lq, &zero_lq, sizeof(lq)))
					send_lq = true;
			}
			spin_unlock_bh(&priv->sta_lock);
			ret = iwl_send_add_sta(priv, &sta_cmd, 0);
			if (ret) {
				/* roll back driver state for this station */
				spin_lock_bh(&priv->sta_lock);
				IWL_ERR(priv, "Adding station %pM failed.\n",
					priv->stations[i].sta.sta.addr);
				priv->stations[i].used &=
						~IWL_STA_DRIVER_ACTIVE;
				priv->stations[i].used &=
						~IWL_STA_UCODE_INPROGRESS;
				continue;
			}
			/*
			 * Rate scaling has already been initialized, send
			 * current LQ command
			 */
			if (send_lq)
				iwl_send_lq_cmd(priv, ctx, &lq, 0, true);
			spin_lock_bh(&priv->sta_lock);
			priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
		}
	}

	spin_unlock_bh(&priv->sta_lock);
	if (!found)
		IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
			"no stations to be restored.\n");
	else
		IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
			"complete.\n");
}
739
740int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
741{
742 int i;
743
744 for (i = 0; i < priv->sta_key_max_num; i++)
745 if (!test_and_set_bit(i, &priv->ucode_key_table))
746 return i;
747
748 return WEP_INVALID_OFFSET;
749}
750
751void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
752{
753 int i;
754
755 spin_lock_bh(&priv->sta_lock);
756 for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
757 if (!(priv->stations[i].used & IWL_STA_BCAST))
758 continue;
759
760 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
761 priv->num_stations--;
762 if (WARN_ON(priv->num_stations < 0))
763 priv->num_stations = 0;
764 kfree(priv->stations[i].lq);
765 priv->stations[i].lq = NULL;
766 }
767 spin_unlock_bh(&priv->sta_lock);
768}
769
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_dump_lq_cmd - dump a link quality command to the debug log
 *
 * Debug-only helper: prints the station id, the antenna masks and the
 * full rate-scale retry table of @lq.
 */
static void iwl_dump_lq_cmd(struct iwl_priv *priv,
			   struct iwl_link_quality_cmd *lq)
{
	int i;
	IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
	IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
		       lq->general_params.single_stream_ant_msk,
		       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
			       i, lq->rs_table[i].rate_n_flags);
}
#else
/* No-op stub so callers do not need their own #ifdef. */
static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
				   struct iwl_link_quality_cmd *lq)
{
}
#endif
790
791/**
792 * is_lq_table_valid() - Test one aspect of LQ cmd for validity
793 *
794 * It sometimes happens when a HT rate has been in use and we
795 * loose connectivity with AP then mac80211 will first tell us that the
796 * current channel is not HT anymore before removing the station. In such a
797 * scenario the RXON flags will be updated to indicate we are not
798 * communicating HT anymore, but the LQ command may still contain HT rates.
799 * Test for this to prevent driver from sending LQ command between the time
800 * RXON flags are updated and when LQ command is updated.
801 */
802static bool is_lq_table_valid(struct iwl_priv *priv,
803 struct iwl_rxon_context *ctx,
804 struct iwl_link_quality_cmd *lq)
805{
806 int i;
807
808 if (ctx->ht.enabled)
809 return true;
810
811 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
812 ctx->active.channel);
813 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
814 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
815 RATE_MCS_HT_MSK) {
816 IWL_DEBUG_INFO(priv,
817 "index %d of LQ expects HT channel\n",
818 i);
819 return false;
820 }
821 }
822 return true;
823}
824
/**
 * iwl_send_lq_cmd() - Send link quality command
 * @init: This command is sent as part of station initialization right
 *        after station has been added.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set and we call a callback in
 * this case to clear the state indicating that station creation is in
 * progress.
 *
 * Returns 0 on success (or the async send result), -EINVAL if the station
 * is not driver-active, the LQ table is invalid for the current channel,
 * or @init is combined with CMD_ASYNC.
 */
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		    struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
	int ret = 0;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_LINK_QUALITY_CMD,
		.len = { sizeof(struct iwl_link_quality_cmd), },
		.flags = flags,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
		return -EINVAL;


	/*
	 * NOTE(review): the lock is released again before the command is
	 * actually sent, so the station could in principle be deactivated
	 * in between; this only validates the state at this point in time.
	 */
	spin_lock_bh(&priv->sta_lock);
	if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
		spin_unlock_bh(&priv->sta_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&priv->sta_lock);

	iwl_dump_lq_cmd(priv, lq);
	/* init implies a synchronous send: we clear INPROGRESS below */
	if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
		return -EINVAL;

	if (is_lq_table_valid(priv, ctx, lq))
		ret = iwl_dvm_send_cmd(priv, &cmd);
	else
		ret = -EINVAL;

	if (cmd.flags & CMD_ASYNC)
		return ret;

	if (init) {
		IWL_DEBUG_INFO(priv, "init LQ command complete, "
			       "clearing sta addition status for sta %d\n",
			       lq->sta_id);
		spin_lock_bh(&priv->sta_lock);
		priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
		spin_unlock_bh(&priv->sta_lock);
	}
	return ret;
}
879
880
881static struct iwl_link_quality_cmd *
882iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
883 u8 sta_id)
884{
885 struct iwl_link_quality_cmd *link_cmd;
886
887 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
888 if (!link_cmd) {
889 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
890 return NULL;
891 }
892
893 iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd);
894
895 return link_cmd;
896}
897
898/*
899 * iwlagn_add_bssid_station - Add the special IBSS BSSID station
900 *
901 * Function sleeps.
902 */
903int iwlagn_add_bssid_station(struct iwl_priv *priv,
904 struct iwl_rxon_context *ctx,
905 const u8 *addr, u8 *sta_id_r)
906{
907 int ret;
908 u8 sta_id;
909 struct iwl_link_quality_cmd *link_cmd;
910
911 if (sta_id_r)
912 *sta_id_r = IWL_INVALID_STATION;
913
914 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
915 if (ret) {
916 IWL_ERR(priv, "Unable to add station %pM\n", addr);
917 return ret;
918 }
919
920 if (sta_id_r)
921 *sta_id_r = sta_id;
922
923 spin_lock_bh(&priv->sta_lock);
924 priv->stations[sta_id].used |= IWL_STA_LOCAL;
925 spin_unlock_bh(&priv->sta_lock);
926
927 /* Set up default rate scaling table in device's station table */
928 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
929 if (!link_cmd) {
930 IWL_ERR(priv,
931 "Unable to initialize rate scaling for station %pM.\n",
932 addr);
933 return -ENOMEM;
934 }
935
936 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true);
937 if (ret)
938 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
939
940 spin_lock_bh(&priv->sta_lock);
941 priv->stations[sta_id].lq = link_cmd;
942 spin_unlock_bh(&priv->sta_lock);
943
944 return 0;
945}
946
947/*
948 * static WEP keys
949 *
950 * For each context, the device has a table of 4 static WEP keys
951 * (one for each key index) that is updated with the following
952 * commands.
953 */
954
/*
 * iwl_send_static_wepkey_cmd - upload the static WEP key table
 *
 * Builds the context's wep_key_cmd from all four static key slots and
 * sends it synchronously.  Empty slots are marked WEP_INVALID_OFFSET.
 * If every slot is empty the command is skipped unless @send_if_empty.
 *
 * Function sleeps.  Returns 0 on success or if skipped.
 */
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
				      struct iwl_rxon_context *ctx,
				      bool send_if_empty)
{
	int i, not_empty = 0;
	/* command header plus one iwl_wep_key per slot, built on the stack */
	u8 buff[sizeof(struct iwl_wep_cmd) +
		sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
	struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
	size_t cmd_size  = sizeof(struct iwl_wep_cmd);
	struct iwl_host_cmd cmd = {
		.id = ctx->wep_key_cmd,
		.data = { wep_cmd, },
	};

	might_sleep();

	memset(wep_cmd, 0, cmd_size +
			(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX ; i++) {
		wep_cmd->key[i].key_index = i;
		if (ctx->wep_keys[i].key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = 1;
		} else {
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
		}

		/* key material starts at byte 3 of the key field */
		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
				ctx->wep_keys[i].key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;

	cmd.len[0] = cmd_size;

	if (not_empty || send_if_empty)
		return iwl_dvm_send_cmd(priv, &cmd);
	else
		return 0;
}
1000
/*
 * iwl_restore_default_wep_keys - re-upload the static WEP key table
 *
 * Thin wrapper: sends the context's static WEP keys to the device,
 * skipping the command entirely when all slots are empty.
 * Caller must hold priv->mutex.  Function sleeps.
 */
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	lockdep_assert_held(&priv->mutex);

	return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
1008
1009int iwl_remove_default_wep_key(struct iwl_priv *priv,
1010 struct iwl_rxon_context *ctx,
1011 struct ieee80211_key_conf *keyconf)
1012{
1013 int ret;
1014
1015 lockdep_assert_held(&priv->mutex);
1016
1017 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
1018 keyconf->keyidx);
1019
1020 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
1021 if (iwl_is_rfkill(priv)) {
1022 IWL_DEBUG_WEP(priv,
1023 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
1024 /* but keys in device are clear anyway so return success */
1025 return 0;
1026 }
1027 ret = iwl_send_static_wepkey_cmd(priv, ctx, 1);
1028 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
1029 keyconf->keyidx, ret);
1030
1031 return ret;
1032}
1033
1034int iwl_set_default_wep_key(struct iwl_priv *priv,
1035 struct iwl_rxon_context *ctx,
1036 struct ieee80211_key_conf *keyconf)
1037{
1038 int ret;
1039
1040 lockdep_assert_held(&priv->mutex);
1041
1042 if (keyconf->keylen != WEP_KEY_LEN_128 &&
1043 keyconf->keylen != WEP_KEY_LEN_64) {
1044 IWL_DEBUG_WEP(priv,
1045 "Bad WEP key length %d\n", keyconf->keylen);
1046 return -EINVAL;
1047 }
1048
1049 keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
1050
1051 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
1052 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
1053 keyconf->keylen);
1054
1055 ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
1056 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
1057 keyconf->keylen, keyconf->keyidx, ret);
1058
1059 return ret;
1060}
1061
1062/*
1063 * dynamic (per-station) keys
1064 *
1065 * The dynamic keys are a little more complicated. The device has
1066 * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
1067 * These are linked to stations by a table that contains an index
1068 * into the key table for each station/key index/{mcast,unicast},
1069 * i.e. it's basically an array of pointers like this:
1070 * key_offset_t key_mapping[NUM_STATIONS][4][2];
1071 * (it really works differently, but you can think of it as such)
1072 *
1073 * The key uploading and linking happens in the same command, the
1074 * add station command with STA_MODIFY_KEY_MASK.
1075 */
1076
1077static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
1078 struct ieee80211_vif *vif,
1079 struct ieee80211_sta *sta)
1080{
1081 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1082
1083 if (sta)
1084 return iwl_sta_id(sta);
1085
1086 /*
1087 * The device expects GTKs for station interfaces to be
1088 * installed as GTKs for the AP station. If we have no
1089 * station ID, then use the ap_sta_id in that case.
1090 */
1091 if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx)
1092 return vif_priv->ctx->ap_sta_id;
1093
1094 return IWL_INVALID_STATION;
1095}
1096
/*
 * iwlagn_send_sta_key - upload/link a dynamic key via ADD_STA
 *
 * Snapshots the station's shadow ADD_STA command, fills in the cipher-
 * specific key material and flags, and sends the modified command with
 * STA_MODIFY_KEY_MASK set.  @tkip_iv32/@tkip_p1k are only used for TKIP.
 *
 * Returns 0 on success, -EINVAL for an unsupported cipher, or the error
 * from sending the command.
 */
static int iwlagn_send_sta_key(struct iwl_priv *priv,
			       struct ieee80211_key_conf *keyconf,
			       u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
			       u32 cmd_flags)
{
	__le16 key_flags;
	struct iwl_addsta_cmd sta_cmd;
	int i;

	spin_lock_bh(&priv->sta_lock);
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	spin_unlock_bh(&priv->sta_lock);

	key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags |= STA_KEY_FLG_MAP_KEY_MSK;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= STA_KEY_FLG_CCMP;
		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= STA_KEY_FLG_TKIP;
		/* pre-computed phase-1 key and current IV32 for RX */
		sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= STA_KEY_FLG_WEP;
		/* WEP key material starts at byte 3 of the key field */
		memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* key pointer (offset) */
	sta_cmd.key.key_offset = keyconf->hw_key_idx;

	sta_cmd.key.key_flags = key_flags;
	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;

	return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
}
1149
1150void iwl_update_tkip_key(struct iwl_priv *priv,
1151 struct ieee80211_vif *vif,
1152 struct ieee80211_key_conf *keyconf,
1153 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
1154{
1155 u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);
1156
1157 if (sta_id == IWL_INVALID_STATION)
1158 return;
1159
1160 if (iwl_scan_cancel(priv)) {
1161 /* cancel scan failed, just live w/ bad key and rely
1162 briefly on SW decryption */
1163 return;
1164 }
1165
1166 iwlagn_send_sta_key(priv, keyconf, sta_id,
1167 iv32, phase1key, CMD_ASYNC);
1168}
1169
/*
 * iwl_remove_dynamic_key - remove a per-station (mapping) key
 *
 * Invalidates the key in the device's key cache via a modified ADD_STA
 * command and releases the driver's key-table offset.
 *
 * Caller must hold priv->mutex.  Returns 0 when the station (and hence
 * the key) is already gone from the ucode, -ENOENT when no station
 * exists, or the result of the ADD_STA command.
 */
int iwl_remove_dynamic_key(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta)
{
	struct iwl_addsta_cmd sta_cmd;
	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
	__le16 key_flags;

	/* if station isn't there, neither is the key */
	if (sta_id == IWL_INVALID_STATION)
		return -ENOENT;

	/*
	 * Snapshot the command and re-check the ucode state under the
	 * lock; a station no longer ucode-active has no key to remove.
	 */
	spin_lock_bh(&priv->sta_lock);
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
		sta_id = IWL_INVALID_STATION;
	spin_unlock_bh(&priv->sta_lock);

	if (sta_id == IWL_INVALID_STATION)
		return 0;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys--;

	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	/* release the ucode key-table slot claimed at set time */
	if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
		IWL_ERR(priv, "offset %d not used in uCode key table.\n",
			keyconf->hw_key_idx);

	/* mark the cache entry invalid and unencrypted */
	key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
		     STA_KEY_FLG_INVALID;

	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		key_flags |= STA_KEY_MULTICAST_MSK;

	sta_cmd.key.key_flags = key_flags;
	sta_cmd.key.key_offset = keyconf->hw_key_idx;
	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;

	return iwl_send_add_sta(priv, &sta_cmd, 0);
}
1217
/*
 * iwl_set_dynamic_key - install a per-station (mapping) key
 *
 * Claims a free ucode key-table offset, then uploads the key material via
 * ADD_STA.  For TKIP the phase-1 RX key is pre-computed and uploaded too.
 * On failure the claimed offset and the mapping-key count are rolled back.
 *
 * Caller must hold priv->mutex.  Returns 0 on success, -EINVAL for an
 * unknown station or cipher, -ENOSPC when the key table is full, or the
 * error from the ADD_STA command.
 */
int iwl_set_dynamic_key(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_key_conf *keyconf,
			struct ieee80211_sta *sta)
{
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	int ret;
	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
	const u8 *addr;

	if (sta_id == IWL_INVALID_STATION)
		return -EINVAL;

	lockdep_assert_held(&priv->mutex);

	keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
	if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
		return -ENOSPC;

	ctx->key_mapping_keys++;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta)
			addr = sta->addr;
		else /* station mode case only */
			addr = ctx->active.bssid_addr;

		/* pre-fill phase 1 key into device cache */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
					  seq.tkip.iv32, p1k, 0);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
					  0, NULL, 0);
		break;
	default:
		IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
		ret = -EINVAL;
	}

	if (ret) {
		/* roll back the key count and the claimed table slot */
		ctx->key_mapping_keys--;
		clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
	}

	IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : NULL, ret);

	return ret;
}
1275
1276/**
1277 * iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
1278 *
1279 * This adds the broadcast station into the driver's station table
1280 * and marks it driver active, so that it will be restored to the
1281 * device at the next best time.
1282 */
1283int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
1284 struct iwl_rxon_context *ctx)
1285{
1286 struct iwl_link_quality_cmd *link_cmd;
1287 u8 sta_id;
1288
1289 spin_lock_bh(&priv->sta_lock);
1290 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
1291 if (sta_id == IWL_INVALID_STATION) {
1292 IWL_ERR(priv, "Unable to prepare broadcast station\n");
1293 spin_unlock_bh(&priv->sta_lock);
1294
1295 return -EINVAL;
1296 }
1297
1298 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
1299 priv->stations[sta_id].used |= IWL_STA_BCAST;
1300 spin_unlock_bh(&priv->sta_lock);
1301
1302 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
1303 if (!link_cmd) {
1304 IWL_ERR(priv,
1305 "Unable to initialize rate scaling for bcast station.\n");
1306 return -ENOMEM;
1307 }
1308
1309 spin_lock_bh(&priv->sta_lock);
1310 priv->stations[sta_id].lq = link_cmd;
1311 spin_unlock_bh(&priv->sta_lock);
1312
1313 return 0;
1314}
1315
1316/**
1317 * iwl_update_bcast_station - update broadcast station's LQ command
1318 *
1319 * Only used by iwlagn. Placed here to have all bcast station management
1320 * code together.
1321 */
1322int iwl_update_bcast_station(struct iwl_priv *priv,
1323 struct iwl_rxon_context *ctx)
1324{
1325 struct iwl_link_quality_cmd *link_cmd;
1326 u8 sta_id = ctx->bcast_sta_id;
1327
1328 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
1329 if (!link_cmd) {
1330 IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
1331 return -ENOMEM;
1332 }
1333
1334 spin_lock_bh(&priv->sta_lock);
1335 if (priv->stations[sta_id].lq)
1336 kfree(priv->stations[sta_id].lq);
1337 else
1338 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
1339 priv->stations[sta_id].lq = link_cmd;
1340 spin_unlock_bh(&priv->sta_lock);
1341
1342 return 0;
1343}
1344
1345int iwl_update_bcast_stations(struct iwl_priv *priv)
1346{
1347 struct iwl_rxon_context *ctx;
1348 int ret = 0;
1349
1350 for_each_context(priv, ctx) {
1351 ret = iwl_update_bcast_station(priv, ctx);
1352 if (ret)
1353 break;
1354 }
1355
1356 return ret;
1357}
1358
1359/**
1360 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
1361 */
1362int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1363{
1364 struct iwl_addsta_cmd sta_cmd;
1365
1366 lockdep_assert_held(&priv->mutex);
1367
1368 /* Remove "disable" flag, to enable Tx for this TID */
1369 spin_lock_bh(&priv->sta_lock);
1370 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1371 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
1372 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1373 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1374 spin_unlock_bh(&priv->sta_lock);
1375
1376 return iwl_send_add_sta(priv, &sta_cmd, 0);
1377}
1378
1379int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1380 int tid, u16 ssn)
1381{
1382 int sta_id;
1383 struct iwl_addsta_cmd sta_cmd;
1384
1385 lockdep_assert_held(&priv->mutex);
1386
1387 sta_id = iwl_sta_id(sta);
1388 if (sta_id == IWL_INVALID_STATION)
1389 return -ENXIO;
1390
1391 spin_lock_bh(&priv->sta_lock);
1392 priv->stations[sta_id].sta.station_flags_msk = 0;
1393 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
1394 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
1395 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
1396 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1397 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1398 spin_unlock_bh(&priv->sta_lock);
1399
1400 return iwl_send_add_sta(priv, &sta_cmd, 0);
1401}
1402
1403int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1404 int tid)
1405{
1406 int sta_id;
1407 struct iwl_addsta_cmd sta_cmd;
1408
1409 lockdep_assert_held(&priv->mutex);
1410
1411 sta_id = iwl_sta_id(sta);
1412 if (sta_id == IWL_INVALID_STATION) {
1413 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1414 return -ENXIO;
1415 }
1416
1417 spin_lock_bh(&priv->sta_lock);
1418 priv->stations[sta_id].sta.station_flags_msk = 0;
1419 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
1420 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
1421 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1422 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1423 spin_unlock_bh(&priv->sta_lock);
1424
1425 return iwl_send_add_sta(priv, &sta_cmd, 0);
1426}
1427
1428
1429
1430void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
1431{
1432 struct iwl_addsta_cmd cmd = {
1433 .mode = STA_CONTROL_MODIFY_MSK,
1434 .station_flags = STA_FLG_PWR_SAVE_MSK,
1435 .station_flags_msk = STA_FLG_PWR_SAVE_MSK,
1436 .sta.sta_id = sta_id,
1437 .sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK,
1438 .sleep_tx_count = cpu_to_le16(cnt),
1439 };
1440
1441 iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
1442}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
new file mode 100644
index 000000000000..c4736c8834c5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -0,0 +1,685 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-io.h"
35#include "iwl-modparams.h"
36#include "iwl-debug.h"
37#include "agn.h"
38#include "dev.h"
39#include "commands.h"
40#include "tt.h"
41
/* default Thermal Throttling transaction table
 * Current state   |         Throttling Down               |  Throttling Up
 *=============================================================================
 *                 Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 *
 * Each tt_range_N table lists the (IWL_TI_STATE_MAX - 1) possible
 * transitions out of state IWL_TI_N (tt_range_3 is for IWL_TI_CT_KILL),
 * as {next state, low bound, high bound} with temperature bounds in
 * degrees Celsius.
 */
static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
	{IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
	{IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
	{IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
	{IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
	{IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
	{IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
};
73
/* Advance Thermal Throttling default restriction table:
 * one {tx_stream, rx_stream, is_ht} entry per throttling state,
 * indexed by enum iwl_tt_state (IWL_TI_0 .. IWL_TI_CT_KILL).
 */
static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
	{IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },	/* IWL_TI_0 */
	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },	/* IWL_TI_1 */
	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },	/* IWL_TI_2 */
	{IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }	/* IWL_TI_CT_KILL */
};
81
82bool iwl_tt_is_low_power_state(struct iwl_priv *priv)
83{
84 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
85
86 if (tt->state >= IWL_TI_1)
87 return true;
88 return false;
89}
90
91u8 iwl_tt_current_power_mode(struct iwl_priv *priv)
92{
93 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
94
95 return tt->tt_power_mode;
96}
97
98bool iwl_ht_enabled(struct iwl_priv *priv)
99{
100 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
101 struct iwl_tt_restriction *restriction;
102
103 if (!priv->thermal_throttle.advanced_tt)
104 return true;
105 restriction = tt->restriction + tt->state;
106 return restriction->is_ht;
107}
108
109static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
110{
111 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
112 bool within_margin = false;
113
114 if (!priv->thermal_throttle.advanced_tt)
115 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
116 CT_KILL_THRESHOLD_LEGACY) ? true : false;
117 else
118 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
119 CT_KILL_THRESHOLD) ? true : false;
120 return within_margin;
121}
122
123bool iwl_check_for_ct_kill(struct iwl_priv *priv)
124{
125 bool is_ct_kill = false;
126
127 if (iwl_within_ct_kill_margin(priv)) {
128 iwl_tt_enter_ct_kill(priv);
129 is_ct_kill = true;
130 }
131 return is_ct_kill;
132}
133
134enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
135{
136 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
137 struct iwl_tt_restriction *restriction;
138
139 if (!priv->thermal_throttle.advanced_tt)
140 return IWL_ANT_OK_MULTI;
141 restriction = tt->restriction + tt->state;
142 return restriction->tx_stream;
143}
144
145enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
146{
147 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
148 struct iwl_tt_restriction *restriction;
149
150 if (!priv->thermal_throttle.advanced_tt)
151 return IWL_ANT_OK_MULTI;
152 restriction = tt->restriction + tt->state;
153 return restriction->rx_stream;
154}
155
156#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
157#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
158
/*
 * Timer callback while in IWL_TI_CT_KILL:
 * toggle the bit to wake up uCode and check the temperature.
 * If the temperature is below CT, uCode will stay awake and send a card
 * state notification with the CT_KILL bit clear, informing Thermal
 * Throttling Management to change state.  Otherwise, uCode will go back
 * to sleep without doing anything, and the driver re-arms the 5 second
 * timer to keep waking uCode for temperature checks until the
 * temperature drops below CT.
 */
static void iwl_tt_check_exit_ct_kill(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	unsigned long flags;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (tt->state == IWL_TI_CT_KILL) {
		/* alternate clearing/setting the GP1 CT_KILL_EXIT bit;
		 * the edge is what wakes the uCode */
		if (priv->thermal_throttle.ct_kill_toggle) {
			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
			priv->thermal_throttle.ct_kill_toggle = false;
		} else {
			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
			priv->thermal_throttle.ct_kill_toggle = true;
		}
		/* read back to flush the write, then briefly grab/release
		 * NIC access */
		iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
		if (iwl_trans_grab_nic_access(priv->trans, false, &flags))
			iwl_trans_release_nic_access(priv->trans, &flags);

		/* Reschedule the ct_kill timer to occur in
		 * CT_KILL_EXIT_DURATION seconds to ensure we get a
		 * thermal update */
		IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n");
		mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
			  jiffies + CT_KILL_EXIT_DURATION * HZ);
	}
}
198
199static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
200 bool stop)
201{
202 if (stop) {
203 IWL_DEBUG_TEMP(priv, "Stop all queues\n");
204 if (priv->mac80211_registered)
205 ieee80211_stop_queues(priv->hw);
206 IWL_DEBUG_TEMP(priv,
207 "Schedule 5 seconds CT_KILL Timer\n");
208 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
209 jiffies + CT_KILL_EXIT_DURATION * HZ);
210 } else {
211 IWL_DEBUG_TEMP(priv, "Wake all queues\n");
212 if (priv->mac80211_registered)
213 ieee80211_wake_queues(priv->hw);
214 }
215}
216
217static void iwl_tt_ready_for_ct_kill(unsigned long data)
218{
219 struct iwl_priv *priv = (struct iwl_priv *)data;
220 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
221
222 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
223 return;
224
225 /* temperature timer expired, ready to go into CT_KILL state */
226 if (tt->state != IWL_TI_CT_KILL) {
227 IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
228 "temperature timer expired\n");
229 tt->state = IWL_TI_CT_KILL;
230 set_bit(STATUS_CT_KILL, &priv->status);
231 iwl_perform_ct_kill_task(priv, true);
232 }
233}
234
/* Request a fresh statistics/temperature reading and arm the short
 * wait timer; if the reading does not rescue us in time,
 * iwl_tt_ready_for_ct_kill() commits the CT_KILL state.
 */
static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
	IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
	/* make request to retrieve statistics information */
	iwl_send_statistics_request(priv, 0, false);
	/* Reschedule the ct_kill wait timer */
	mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
		 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
}
244
245#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
246#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
247#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
248
/*
 * Legacy thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *	Chip will identify dangerously high temperatures that can
 *	harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *	Throttle early enough to lower the power consumption before
 *	drastic steps are needed
 *
 * @temp is the current temperature in degrees Celsius; @force makes
 * the transition into IWL_TI_CT_KILL immediate instead of going
 * through the short re-check window (used from the card-state
 * notification path).
 */
static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	enum iwl_tt_state old_state;

#ifdef CONFIG_IWLWIFI_DEBUG
	if ((tt->tt_previous_temp) &&
	    (temp > tt->tt_previous_temp) &&
	    ((temp - tt->tt_previous_temp) >
	    IWL_TT_INCREASE_MARGIN)) {
		IWL_DEBUG_TEMP(priv,
			"Temperature increase %d degree Celsius\n",
			(temp - tt->tt_previous_temp));
	}
#endif
	old_state = tt->state;
	/* in Celsius: map the reading onto the fixed thresholds */
	if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
		tt->state = IWL_TI_CT_KILL;
	else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
		tt->state = IWL_TI_2;
	else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
		tt->state = IWL_TI_1;
	else
		tt->state = IWL_TI_0;

#ifdef CONFIG_IWLWIFI_DEBUG
	tt->tt_previous_temp = temp;
#endif
	/* stop ct_kill_waiting_tm timer */
	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
	if (tt->state != old_state) {
		/* pick the power index matching the new state */
		switch (tt->state) {
		case IWL_TI_0:
			/*
			 * When the system is ready to go back to IWL_TI_0
			 * we only have to call iwl_power_update_mode() to
			 * do so.
			 */
			break;
		case IWL_TI_1:
			tt->tt_power_mode = IWL_POWER_INDEX_3;
			break;
		case IWL_TI_2:
			tt->tt_power_mode = IWL_POWER_INDEX_4;
			break;
		default:
			tt->tt_power_mode = IWL_POWER_INDEX_5;
			break;
		}
		mutex_lock(&priv->mutex);
		if (old_state == IWL_TI_CT_KILL)
			clear_bit(STATUS_CT_KILL, &priv->status);
		if (tt->state != IWL_TI_CT_KILL &&
		    iwl_power_update_mode(priv, true)) {
			/* TT state not updated
			 * try again during next temperature read
			 * (roll back both the CT_KILL bit and the state)
			 */
			if (old_state == IWL_TI_CT_KILL)
				set_bit(STATUS_CT_KILL, &priv->status);
			tt->state = old_state;
			IWL_ERR(priv, "Cannot update power mode, "
					"TT state not updated\n");
		} else {
			if (tt->state == IWL_TI_CT_KILL) {
				if (force) {
					set_bit(STATUS_CT_KILL, &priv->status);
					iwl_perform_ct_kill_task(priv, true);
				} else {
					/* give the temperature a chance to
					 * drop before committing CT_KILL */
					iwl_prepare_ct_kill_task(priv);
					tt->state = old_state;
				}
			} else if (old_state == IWL_TI_CT_KILL &&
				 tt->state != IWL_TI_CT_KILL)
				iwl_perform_ct_kill_task(priv, false);
			IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
					tt->state);
			IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
					tt->tt_power_mode);
		}
		mutex_unlock(&priv->mutex);
	}
}
341
/*
 * Advance thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *	Chip will identify dangerously high temperatures that can
 *	harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *	Throttle early enough to lower the power consumption before
 *	drastic steps are needed
 *	Actions include relaxing the power down sleep thresholds and
 *	decreasing the number of TX streams
 * 3) Avoid throughput performance impact as much as possible
 *
 * @temp is the current temperature in degrees Celsius; @force makes
 * the transition into IWL_TI_CT_KILL immediate instead of going
 * through the short re-check window.
 *
 *=============================================================================
 *                 Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL              T<=100     TI_1
 *    IWL_CT_KILL      N/A      N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 */
static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	int i;
	bool changed = false;
	enum iwl_tt_state old_state;
	struct iwl_tt_trans *transaction;

	old_state = tt->state;
	for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
		/* based on the current TT state,
		 * find the corresponding transaction table
		 * each table has (IWL_TI_STATE_MAX - 1) entries
		 * tt->transaction + ((old_state * (IWL_TI_STATE_MAX - 1))
		 * will advance to the correct table.
		 * then based on the current temperature
		 * find the next state need to transaction to
		 * go through all the possible (IWL_TI_STATE_MAX - 1) entries
		 * in the current table to see if transaction is needed
		 */
		transaction = tt->transaction +
			((old_state * (IWL_TI_STATE_MAX - 1)) + i);
		if (temp >= transaction->tt_low &&
		    temp <= transaction->tt_high) {
#ifdef CONFIG_IWLWIFI_DEBUG
			if ((tt->tt_previous_temp) &&
			    (temp > tt->tt_previous_temp) &&
			    ((temp - tt->tt_previous_temp) >
			    IWL_TT_INCREASE_MARGIN)) {
				IWL_DEBUG_TEMP(priv,
					"Temperature increase %d "
					"degree Celsius\n",
					(temp - tt->tt_previous_temp));
			}
			tt->tt_previous_temp = temp;
#endif
			if (old_state !=
			    transaction->next_state) {
				changed = true;
				tt->state =
					transaction->next_state;
			}
			break;
		}
	}
	/* stop ct_kill_waiting_tm timer */
	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
	if (changed) {
		if (tt->state >= IWL_TI_1) {
			/* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
			tt->tt_power_mode = IWL_POWER_INDEX_5;

			if (!iwl_ht_enabled(priv)) {
				struct iwl_rxon_context *ctx;

				for_each_context(priv, ctx) {
					struct iwl_rxon_cmd *rxon;

					rxon = &ctx->staging;

					/* disable HT */
					rxon->flags &= ~(
						RXON_FLG_CHANNEL_MODE_MSK |
						RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
						RXON_FLG_HT40_PROT_MSK |
						RXON_FLG_HT_PROT_MSK);
				}
			} else {
				/* check HT capability and set
				 * according to the system HT capability
				 * in case get disabled before */
				iwl_set_rxon_ht(priv, &priv->current_ht_config);
			}

		} else {
			/*
			 * restore system power setting -- it will be
			 * recalculated automatically.
			 */

			/* check HT capability and set
			 * according to the system HT capability
			 * in case get disabled before */
			iwl_set_rxon_ht(priv, &priv->current_ht_config);
		}
		mutex_lock(&priv->mutex);
		if (old_state == IWL_TI_CT_KILL)
			clear_bit(STATUS_CT_KILL, &priv->status);
		if (tt->state != IWL_TI_CT_KILL &&
		    iwl_power_update_mode(priv, true)) {
			/* TT state not updated
			 * try again during next temperature read
			 * (roll back both the CT_KILL bit and the state)
			 */
			IWL_ERR(priv, "Cannot update power mode, "
					"TT state not updated\n");
			if (old_state == IWL_TI_CT_KILL)
				set_bit(STATUS_CT_KILL, &priv->status);
			tt->state = old_state;
		} else {
			IWL_DEBUG_TEMP(priv,
					"Thermal Throttling to new state: %u\n",
					tt->state);
			if (old_state != IWL_TI_CT_KILL &&
			    tt->state == IWL_TI_CT_KILL) {
				if (force) {
					IWL_DEBUG_TEMP(priv,
						"Enter IWL_TI_CT_KILL\n");
					set_bit(STATUS_CT_KILL, &priv->status);
					iwl_perform_ct_kill_task(priv, true);
				} else {
					/* give the temperature a chance to
					 * drop before committing CT_KILL */
					tt->state = old_state;
					iwl_prepare_ct_kill_task(priv);
				}
			} else if (old_state == IWL_TI_CT_KILL &&
				tt->state != IWL_TI_CT_KILL) {
				IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n");
				iwl_perform_ct_kill_task(priv, false);
			}
		}
		mutex_unlock(&priv->mutex);
	}
}
485
486/* Card State Notification indicated reach critical temperature
487 * if PSP not enable, no Thermal Throttling function will be performed
488 * just set the GP1 bit to acknowledge the event
489 * otherwise, go into IWL_TI_CT_KILL state
490 * since Card State Notification will not provide any temperature reading
491 * for Legacy mode
492 * so just pass the CT_KILL temperature to iwl_legacy_tt_handler()
493 * for advance mode
494 * pass CT_KILL_THRESHOLD+1 to make sure move into IWL_TI_CT_KILL state
495 */
496static void iwl_bg_ct_enter(struct work_struct *work)
497{
498 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
499 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
500
501 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
502 return;
503
504 if (!iwl_is_ready(priv))
505 return;
506
507 if (tt->state != IWL_TI_CT_KILL) {
508 IWL_ERR(priv, "Device reached critical temperature "
509 "- ucode going to sleep!\n");
510 if (!priv->thermal_throttle.advanced_tt)
511 iwl_legacy_tt_handler(priv,
512 IWL_MINIMAL_POWER_THRESHOLD,
513 true);
514 else
515 iwl_advance_tt_handler(priv,
516 CT_KILL_THRESHOLD + 1, true);
517 }
518}
519
520/* Card State Notification indicated out of critical temperature
521 * since Card State Notification will not provide any temperature reading
522 * so pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature
523 * to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state
524 */
525static void iwl_bg_ct_exit(struct work_struct *work)
526{
527 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
528 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
529
530 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
531 return;
532
533 if (!iwl_is_ready(priv))
534 return;
535
536 /* stop ct_kill_exit_tm timer */
537 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
538
539 if (tt->state == IWL_TI_CT_KILL) {
540 IWL_ERR(priv,
541 "Device temperature below critical"
542 "- ucode awake!\n");
543 /*
544 * exit from CT_KILL state
545 * reset the current temperature reading
546 */
547 priv->temperature = 0;
548 if (!priv->thermal_throttle.advanced_tt)
549 iwl_legacy_tt_handler(priv,
550 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
551 true);
552 else
553 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
554 true);
555 }
556}
557
558void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
559{
560 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
561 return;
562
563 IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
564 queue_work(priv->workqueue, &priv->ct_enter);
565}
566
567void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
568{
569 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
570 return;
571
572 IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
573 queue_work(priv->workqueue, &priv->ct_exit);
574}
575
576static void iwl_bg_tt_work(struct work_struct *work)
577{
578 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
579 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
580
581 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
582 return;
583
584 if (!priv->thermal_throttle.advanced_tt)
585 iwl_legacy_tt_handler(priv, temp, false);
586 else
587 iwl_advance_tt_handler(priv, temp, false);
588}
589
590void iwl_tt_handler(struct iwl_priv *priv)
591{
592 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
593 return;
594
595 IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
596 queue_work(priv->workqueue, &priv->tt_work);
597}
598
/* Thermal throttling initialization
 * For advance thermal throttling:
 *	Initialize Thermal Index and temperature threshold table
 *	Initialize thermal throttling restriction table
 * If either table allocation fails, fall back to legacy throttling.
 */
void iwl_tt_initialize(struct iwl_priv *priv)
{
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
	int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
	struct iwl_tt_trans *transaction;

	IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n");

	memset(tt, 0, sizeof(struct iwl_tt_mgmt));

	tt->state = IWL_TI_0;
	setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
		    iwl_tt_check_exit_ct_kill, (unsigned long)priv);
	setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
		    iwl_tt_ready_for_ct_kill, (unsigned long)priv);
	/* setup deferred ct kill work */
	INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
	INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
	INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);

	if (priv->lib->adv_thermal_throttle) {
		IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
		tt->restriction = kcalloc(IWL_TI_STATE_MAX,
					  sizeof(struct iwl_tt_restriction),
					  GFP_KERNEL);
		tt->transaction = kcalloc(IWL_TI_STATE_MAX *
					  (IWL_TI_STATE_MAX - 1),
					  sizeof(struct iwl_tt_trans),
					  GFP_KERNEL);
		if (!tt->restriction || !tt->transaction) {
			IWL_ERR(priv, "Fallback to Legacy Throttling\n");
			priv->thermal_throttle.advanced_tt = false;
			kfree(tt->restriction);
			tt->restriction = NULL;
			kfree(tt->transaction);
			tt->transaction = NULL;
		} else {
			/* copy each per-state transition table into its
			 * slot of the flattened transaction array */
			transaction = tt->transaction +
				(IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
			memcpy(transaction, &tt_range_0[0], size);
			transaction = tt->transaction +
				(IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
			memcpy(transaction, &tt_range_1[0], size);
			transaction = tt->transaction +
				(IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
			memcpy(transaction, &tt_range_2[0], size);
			transaction = tt->transaction +
				(IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
			memcpy(transaction, &tt_range_3[0], size);
			/* note: 'size' is reused for the restriction copy */
			size = sizeof(struct iwl_tt_restriction) *
				IWL_TI_STATE_MAX;
			memcpy(tt->restriction,
			       &restriction_range[0], size);
			priv->thermal_throttle.advanced_tt = true;
		}
	} else {
		IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n");
		priv->thermal_throttle.advanced_tt = false;
	}
}
664
/* cleanup thermal throttling management related memory and timer:
 * stop both timers, flush the deferred work items, and release the
 * advanced-mode tables (allocated in iwl_tt_initialize()).
 */
void iwl_tt_exit(struct iwl_priv *priv)
{
	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

	/* stop ct_kill_exit_tm timer if activated */
	del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
	/* stop ct_kill_waiting_tm timer if activated */
	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
	/* wait for any queued throttling work to finish */
	cancel_work_sync(&priv->tt_work);
	cancel_work_sync(&priv->ct_enter);
	cancel_work_sync(&priv->ct_exit);

	if (priv->thermal_throttle.advanced_tt) {
		/* free advance thermal throttling memory */
		kfree(tt->restriction);
		tt->restriction = NULL;
		kfree(tt->transaction);
		tt->transaction = NULL;
	}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
new file mode 100644
index 000000000000..507726534b84
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -0,0 +1,128 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_tt_setting_h__
29#define __iwl_tt_setting_h__
30
31#include "commands.h"
32
33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
37
/* Antenna/stream usage permitted by the current thermal throttling
 * state (see restriction tables in tt.c).
 */
enum iwl_antenna_ok {
	IWL_ANT_OK_NONE,	/* no stream allowed */
	IWL_ANT_OK_SINGLE,	/* single stream only */
	IWL_ANT_OK_MULTI,	/* multiple streams allowed */
};
43
/* Thermal Throttling State Machine states, ordered by severity */
enum iwl_tt_state {
	IWL_TI_0,	/* normal temperature, system power state */
	IWL_TI_1,	/* high temperature detect, low power state */
	IWL_TI_2,	/* higher temperature detected, lower power state */
	IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
	IWL_TI_STATE_MAX	/* number of states (not a real state) */
};
52
/**
 * struct iwl_tt_restriction - Thermal Throttling restriction table
 * @tx_stream: number of tx stream allowed
 * @rx_stream: number of rx stream allowed
 * @is_ht: ht enable/disable
 *
 * This table is used by advance thermal throttling management
 * based on the current thermal throttling state, and determines
 * the number of tx/rx streams and the status of HT operation.
 */
struct iwl_tt_restriction {
	enum iwl_antenna_ok tx_stream;
	enum iwl_antenna_ok rx_stream;
	bool is_ht;
};
68
/**
 * struct iwl_tt_trans - Thermal Throttling transaction table
 * @next_state: next thermal throttling mode
 * @tt_low: low temperature threshold to change state
 *	(degrees Celsius, inclusive)
 * @tt_high: high temperature threshold to change state
 *	(degrees Celsius, inclusive)
 *
 * This is used by the advanced thermal throttling algorithm
 * to determine the next thermal state to go based on the
 * current temperature.
 */
struct iwl_tt_trans {
	enum iwl_tt_state next_state;
	u32 tt_low;
	u32 tt_high;
};
84
/**
 * struct iwl_tt_mgmt - Thermal Throttling Management structure
 * @state: current Thermal Throttling state
 * @advanced_tt: advanced thermal throttle required
 * @tt_power_mode: Thermal Throttling power mode index
 *		being used to set power level when
 *		thermal throttling state != IWL_TI_0;
 *		the tt_power_mode should be set to a different
 *		power mode based on the current tt state
 * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature
 * @tt_previous_temp: last measured temperature (CONFIG_IWLWIFI_DEBUG only)
 * @restriction: ptr to restriction tbl, used by advance
 *		thermal throttling to determine how many tx/rx streams
 *		should be used in tt state; and can HT be enabled or not
 * @transaction: ptr to adv trans table, used by advance thermal throttling
 *		state transaction
 * @ct_kill_exit_tm: timer to exit thermal kill
 * @ct_kill_waiting_tm: timer that commits entry into thermal kill if the
 *		temperature has not dropped in the meantime
 */
struct iwl_tt_mgmt {
	enum iwl_tt_state state;
	bool advanced_tt;
	u8 tt_power_mode;
	bool ct_kill_toggle;
#ifdef CONFIG_IWLWIFI_DEBUG
	s32 tt_previous_temp;
#endif
	struct iwl_tt_restriction *restriction;
	struct iwl_tt_trans *transaction;
	struct timer_list ct_kill_exit_tm;
	struct timer_list ct_kill_waiting_tm;
};
116
117u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
118bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
119bool iwl_ht_enabled(struct iwl_priv *priv);
120enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
121enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
122void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
123void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
124void iwl_tt_handler(struct iwl_priv *priv);
125void iwl_tt_initialize(struct iwl_priv *priv);
126void iwl_tt_exit(struct iwl_priv *priv);
127
128#endif /* __iwl_tt_setting_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
new file mode 100644
index 000000000000..bddd19769035
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -0,0 +1,1412 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/sched.h>
33#include <linux/ieee80211.h>
34#include "iwl-io.h"
35#include "iwl-trans.h"
36#include "iwl-agn-hw.h"
37#include "dev.h"
38#include "agn.h"
39
40static const u8 tid_to_ac[] = {
41 IEEE80211_AC_BE,
42 IEEE80211_AC_BK,
43 IEEE80211_AC_BK,
44 IEEE80211_AC_BE,
45 IEEE80211_AC_VI,
46 IEEE80211_AC_VI,
47 IEEE80211_AC_VO,
48 IEEE80211_AC_VO,
49};
50
51static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
52 struct ieee80211_tx_info *info,
53 __le16 fc, __le32 *tx_flags)
54{
55 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
56 info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
57 info->flags & IEEE80211_TX_CTL_AMPDU)
58 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
59}
60
/*
 * handle build REPLY_TX command notification.
 *
 * Fills the frame-dependent parts of the REPLY_TX command: ACK policy,
 * TSF/immediate-BA flags, the BT-coex override for association frames on
 * 2.4 GHz, TID and sequence-control flags, protection (via
 * iwlagn_tx_cmd_protection) and the PM frame timeout.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* request an ACK unless mac80211 marked the frame no-ack */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->lib->bt_params &&
		 priv->lib->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
		/* don't let BT coex interfere with association/EAPOL frames */
		tx_flags |= TX_CMD_FLG_IGNORE_BT;


	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: TID comes from the QoS control field */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc requests get a slightly longer PM frame timeout */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
125
126static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
127 struct iwl_tx_cmd *tx_cmd,
128 struct ieee80211_tx_info *info,
129 struct ieee80211_sta *sta,
130 __le16 fc)
131{
132 u32 rate_flags;
133 int rate_idx;
134 u8 rts_retry_limit;
135 u8 data_retry_limit;
136 u8 rate_plcp;
137
138 if (priv->wowlan) {
139 rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
140 data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
141 } else {
142 /* Set retry limit on RTS packets */
143 rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;
144
145 /* Set retry limit on DATA packets and Probe Responses*/
146 if (ieee80211_is_probe_resp(fc)) {
147 data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
148 rts_retry_limit =
149 min(data_retry_limit, rts_retry_limit);
150 } else if (ieee80211_is_back_req(fc))
151 data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
152 else
153 data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
154 }
155
156 tx_cmd->data_retry_limit = data_retry_limit;
157 tx_cmd->rts_retry_limit = rts_retry_limit;
158
159 /* DATA packets will use the uCode station table for rate/antenna
160 * selection */
161 if (ieee80211_is_data(fc)) {
162 tx_cmd->initial_rate_index = 0;
163 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
164 return;
165 } else if (ieee80211_is_back_req(fc))
166 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
167
168 /**
169 * If the current TX rate stored in mac80211 has the MCS bit set, it's
170 * not really a TX rate. Thus, we use the lowest supported rate for
171 * this band. Also use the lowest supported rate if the stored rate
172 * index is invalid.
173 */
174 rate_idx = info->control.rates[0].idx;
175 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
176 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
177 rate_idx = rate_lowest_index(
178 &priv->nvm_data->bands[info->band], sta);
179 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
180 if (info->band == IEEE80211_BAND_5GHZ)
181 rate_idx += IWL_FIRST_OFDM_RATE;
182 /* Get PLCP rate for tx_cmd->rate_n_flags */
183 rate_plcp = iwl_rates[rate_idx].plcp;
184 /* Zero out flags for this packet */
185 rate_flags = 0;
186
187 /* Set CCK flag as needed */
188 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
189 rate_flags |= RATE_MCS_CCK_MSK;
190
191 /* Set up antennas */
192 if (priv->lib->bt_params &&
193 priv->lib->bt_params->advanced_bt_coexist &&
194 priv->bt_full_concurrent) {
195 /* operated as 1x1 in full concurrency mode */
196 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
197 first_antenna(priv->nvm_data->valid_tx_ant));
198 } else
199 priv->mgmt_tx_ant = iwl_toggle_tx_ant(
200 priv, priv->mgmt_tx_ant,
201 priv->nvm_data->valid_tx_ant);
202 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
203
204 /* Set the rate in the TX cmd */
205 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
206}
207
208static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
209 struct ieee80211_tx_info *info,
210 struct iwl_tx_cmd *tx_cmd,
211 struct sk_buff *skb_frag)
212{
213 struct ieee80211_key_conf *keyconf = info->control.hw_key;
214
215 switch (keyconf->cipher) {
216 case WLAN_CIPHER_SUITE_CCMP:
217 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
218 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
219 if (info->flags & IEEE80211_TX_CTL_AMPDU)
220 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
221 break;
222
223 case WLAN_CIPHER_SUITE_TKIP:
224 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
225 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
226 break;
227
228 case WLAN_CIPHER_SUITE_WEP104:
229 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
230 /* fall through */
231 case WLAN_CIPHER_SUITE_WEP40:
232 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
233 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
234
235 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
236
237 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
238 "with key %d\n", keyconf->keyidx);
239 break;
240
241 default:
242 IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
243 break;
244 }
245}
246
247/**
248 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
249 * @context: the current context
250 * @sta: mac80211 station
251 *
252 * In certain circumstances mac80211 passes a station pointer
253 * that may be %NULL, for example during TX or key setup. In
254 * that case, we need to use the broadcast station, so this
255 * inline wraps that pattern.
256 */
257static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
258 struct ieee80211_sta *sta)
259{
260 int sta_id;
261
262 if (!sta)
263 return context->bcast_sta_id;
264
265 sta_id = iwl_sta_id(sta);
266
267 /*
268 * mac80211 should not be passing a partially
269 * initialised station!
270 */
271 WARN_ON(sta_id == IWL_INVALID_STATION);
272
273 return sta_id;
274}
275
/*
 * start REPLY_TX command process
 *
 * Main TX entry point for one frame: builds the REPLY_TX command (basic
 * flags, rate, hw-crypto), assigns the per-TID QoS sequence number under
 * priv->sta_lock, selects the destination TX queue (the aggregation
 * queue for AMPDU frames) and hands skb + command to the transport.
 * Returns 0 on success, -1 when the frame was dropped.
 */
int iwlagn_tx_skb(struct iwl_priv *priv,
		  struct ieee80211_sta *sta,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false, is_data_qos = false;
	int txq_id;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* append the WiPAN NoA data to probe responses, when present */
	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			/* expanding the head may relocate skb->data */
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);

	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);

	memset(&info->status, 0, sizeof(info->status));

	/* stash context and command for the TX-response path */
	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;
	/* From now on, we cannot access info->control */

	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv,
				"TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
				sta_id, tid,
				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d\n", tid_data->agg.state))
			goto drop_unlock_sta;

		/* driver-assigned sequence number for this TID */
		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;

		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			is_agg = true;
		is_data_qos = true;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	txq_id = info->hw_queue;

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	}

	WARN_ON_ONCE(is_agg &&
		     priv->queue_to_mac80211[txq_id] != info->hw_queue);

	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
		     txq_id, seq_number);

	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	/* only advance the stored sequence number on the final fragment */
	if (is_data_qos && !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}
482
483static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
484{
485 int q;
486
487 for (q = IWLAGN_FIRST_AMPDU_QUEUE;
488 q < priv->cfg->base_params->num_of_queues; q++) {
489 if (!test_and_set_bit(q, priv->agg_q_alloc)) {
490 priv->queue_to_mac80211[q] = mq;
491 return q;
492 }
493 }
494
495 return -ENOSPC;
496}
497
498static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
499{
500 clear_bit(q, priv->agg_q_alloc);
501 priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
502}
503
/*
 * Stop the TX aggregation session for @sta/@tid.
 *
 * Depending on the current aggregation state this either tears the
 * session down immediately, or — when frames for this RA/TID are still
 * queued in the HW — defers the teardown via IWL_EMPTYING_HW_QUEUE_DELBA
 * until iwlagn_check_ratid_empty() sees the queue drain.
 * Returns 0 on success, -ENXIO for an unknown station.
 */
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;
	enum iwl_agg_state agg_state;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_STARTING:
		/*
		 * This can happen when the session is stopped before
		 * we receive ADDBA response
		 */
		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv,
			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			 sta_id, tid, tid_data->agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		/* defer the teardown until the HW queue drains */
		IWL_DEBUG_TX_QUEUES(priv,
				    "Can't proceed: ssn %d, next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);
turn_off:
	agg_state = tid_data->agg.state;
	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * agreggation, don't tell it that we want to stop them.
		 * This can happen when we don't get the addBA response on
		 * time, or we hadn't time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id, true);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
594
595int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
596 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
597{
598 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
599 struct iwl_tid_data *tid_data;
600 int sta_id, txq_id, ret;
601
602 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
603 sta->addr, tid);
604
605 sta_id = iwl_sta_id(sta);
606 if (sta_id == IWL_INVALID_STATION) {
607 IWL_ERR(priv, "Start AGG on invalid station\n");
608 return -ENXIO;
609 }
610 if (unlikely(tid >= IWL_MAX_TID_COUNT))
611 return -EINVAL;
612
613 if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
614 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
615 return -ENXIO;
616 }
617
618 txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
619 if (txq_id < 0) {
620 IWL_DEBUG_TX_QUEUES(priv,
621 "No free aggregation queue for %pM/%d\n",
622 sta->addr, tid);
623 return txq_id;
624 }
625
626 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
627 if (ret)
628 return ret;
629
630 spin_lock_bh(&priv->sta_lock);
631 tid_data = &priv->tid_data[sta_id][tid];
632 tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
633 tid_data->agg.txq_id = txq_id;
634
635 *ssn = tid_data->agg.ssn;
636
637 if (*ssn == tid_data->next_reclaimed) {
638 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
639 tid_data->agg.ssn);
640 tid_data->agg.state = IWL_AGG_STARTING;
641 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
642 } else {
643 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
644 "next_reclaimed = %d\n",
645 tid_data->agg.ssn,
646 tid_data->next_reclaimed);
647 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
648 }
649 spin_unlock_bh(&priv->sta_lock);
650
651 return ret;
652}
653
654int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
655 struct ieee80211_sta *sta, u16 tid)
656{
657 struct iwl_tid_data *tid_data;
658 enum iwl_agg_state agg_state;
659 int sta_id, txq_id;
660 sta_id = iwl_sta_id(sta);
661
662 /*
663 * First set the agg state to OFF to avoid calling
664 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
665 */
666 spin_lock_bh(&priv->sta_lock);
667
668 tid_data = &priv->tid_data[sta_id][tid];
669 txq_id = tid_data->agg.txq_id;
670 agg_state = tid_data->agg.state;
671 IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
672 sta_id, tid, txq_id, tid_data->agg.state);
673
674 tid_data->agg.state = IWL_AGG_OFF;
675
676 spin_unlock_bh(&priv->sta_lock);
677
678 if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
679 IWL_ERR(priv, "Couldn't flush the AGG queue\n");
680
681 if (test_bit(txq_id, priv->agg_q_alloc)) {
682 /*
683 * If the transport didn't know that we wanted to start
684 * agreggation, don't tell it that we want to stop them.
685 * This can happen when we don't get the addBA response on
686 * time, or we hadn't time to drain the AC queues.
687 */
688 if (agg_state == IWL_AGG_ON)
689 iwl_trans_txq_disable(priv->trans, txq_id, true);
690 else
691 IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
692 agg_state);
693 iwlagn_dealloc_agg_txq(priv, txq_id);
694 }
695
696 return 0;
697}
698
/*
 * Make an aggregation session operational after the ADDBA handshake
 * completed: switch the TID to IWL_AGG_ON, enable the HW queue with the
 * negotiated (capped) buffer size, and push an updated link-quality
 * command reflecting the new aggregation frame limit.
 */
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	/* never exceed our own default aggregation frame limit */
	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
			     buf_size, ssn, 0);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (priv->hw_params.use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the prefer protection
		 * method for HT traffic
		 */

		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
762
/*
 * Called with priv->sta_lock held when the HW queue for a RA/TID pair
 * may have drained.  Completes a deferred DELBA (disable and free the
 * aggregation queue, then notify mac80211) or a deferred ADDBA (signal
 * mac80211 that the session can now start) once ssn == next_reclaimed.
 */
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			iwl_trans_txq_disable(priv->trans,
					      tid_data->agg.txq_id, true);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_STARTING;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}
804
805static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
806 struct iwl_rxon_context *ctx,
807 const u8 *addr1)
808{
809 struct ieee80211_sta *sta;
810 struct iwl_station_priv *sta_priv;
811
812 rcu_read_lock();
813 sta = ieee80211_find_sta(ctx->vif, addr1);
814 if (sta) {
815 sta_priv = (void *)sta->drv_priv;
816 /* avoid atomic ops if this isn't a client */
817 if (sta_priv->client &&
818 atomic_dec_return(&sta_priv->pending_frames) == 0)
819 ieee80211_sta_block_awake(priv->hw, sta, false);
820 }
821 rcu_read_unlock();
822}
823
824/**
825 * translate ucode response to mac80211 tx status control values
826 */
827static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
828 struct ieee80211_tx_info *info)
829{
830 struct ieee80211_tx_rate *r = &info->status.rates[0];
831
832 info->status.antenna =
833 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
834 if (rate_n_flags & RATE_MCS_HT_MSK)
835 r->flags |= IEEE80211_TX_RC_MCS;
836 if (rate_n_flags & RATE_MCS_GF_MSK)
837 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
838 if (rate_n_flags & RATE_MCS_HT40_MSK)
839 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
840 if (rate_n_flags & RATE_MCS_DUP_MSK)
841 r->flags |= IEEE80211_TX_RC_DUP_DATA;
842 if (rate_n_flags & RATE_MCS_SGI_MSK)
843 r->flags |= IEEE80211_TX_RC_SHORT_GI;
844 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
845}
846
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * Map a REPLY_TX status value to a human-readable string for debug logs.
 * Returns "SUCCESS" for TX_STATUS_SUCCESS and "UNKNOWN" for values not
 * covered by the TX_STATUS_POSTPONE_*/TX_STATUS_FAIL_* tables.
 */
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */
886
/*
 * Bucket a single aggregation-frame TX status into the matching counter
 * in priv->reply_agg_tx_stats (unrecognised values land in .unknown).
 */
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}
933
934static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
935{
936 return le32_to_cpup((__le32 *)&tx_resp->status +
937 tx_resp->frame_count) & IEEE80211_MAX_SN;
938}
939
940static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
941 struct iwlagn_tx_resp *tx_resp)
942{
943 struct agg_tx_status *frame_status = &tx_resp->status;
944 int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
945 IWLAGN_TX_RES_TID_POS;
946 int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
947 IWLAGN_TX_RES_RA_POS;
948 struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
949 u32 status = le16_to_cpu(tx_resp->status.status);
950 int i;
951
952 WARN_ON(tid == IWL_TID_NON_QOS);
953
954 if (agg->wait_for_ba)
955 IWL_DEBUG_TX_REPLY(priv,
956 "got tx response w/o block-ack\n");
957
958 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
959 agg->wait_for_ba = (tx_resp->frame_count > 1);
960
961 /*
962 * If the BT kill count is non-zero, we'll get this
963 * notification again.
964 */
965 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
966 priv->lib->bt_params &&
967 priv->lib->bt_params->advanced_bt_coexist) {
968 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
969 }
970
971 if (tx_resp->frame_count == 1)
972 return;
973
974 IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
975 agg->txq_id,
976 le32_to_cpu(tx_resp->rate_n_flags),
977 iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);
978
979 /* Construct bit-map of pending frames within Tx window */
980 for (i = 0; i < tx_resp->frame_count; i++) {
981 u16 fstatus = le16_to_cpu(frame_status[i].status);
982 u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;
983
984 if (status & AGG_TX_STATUS_MSK)
985 iwlagn_count_agg_tx_err_status(priv, fstatus);
986
987 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
988 AGG_TX_STATE_ABORT_MSK))
989 continue;
990
991 if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
992 IWL_DEBUG_TX_REPLY(priv,
993 "%d: status %s (0x%04x), try-count (0x%01x)\n",
994 i,
995 iwl_get_agg_tx_fail_reason(fstatus),
996 fstatus & AGG_TX_STATUS_MSK,
997 retry_cnt);
998 }
999}
1000
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

/*
 * Map an aggregation-frame TX status to a human-readable string for
 * debug logs.  Returns "SUCCESS" for AGG_TX_STATE_TRANSMITTED and
 * "UNKNOWN" for values outside the AGG_TX_STATE_* table.
 */
const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
1027
/*
 * Bump the debugfs Tx-error counter that corresponds to a (non-aggregation)
 * frame Tx status reported by the firmware.  Purely statistics gathering;
 * no effect on the Tx path itself.  Statuses not listed here land in the
 * "unknown" bucket.
 */
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	/* only the status code bits are relevant, strip retry/etc. fields */
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}
1104
1105static void iwlagn_set_tx_status(struct iwl_priv *priv,
1106 struct ieee80211_tx_info *info,
1107 struct iwlagn_tx_resp *tx_resp)
1108{
1109 u16 status = le16_to_cpu(tx_resp->status.status);
1110
1111 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1112
1113 info->status.rates[0].count = tx_resp->failure_frame + 1;
1114 info->flags |= iwl_tx_status_to_mac80211(status);
1115 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
1116 info);
1117 if (!iwl_is_tx_success(status))
1118 iwlagn_count_tx_err_status(priv, status);
1119}
1120
1121static void iwl_check_abort_status(struct iwl_priv *priv,
1122 u8 frame_count, u32 status)
1123{
1124 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
1125 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
1126 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
1127 queue_work(priv->workqueue, &priv->tx_flush);
1128 }
1129}
1130
/*
 * iwlagn_rx_reply_tx - handle the REPLY_TX response for a transmitted frame
 *
 * Runs under priv->sta_lock for all bookkeeping: updates aggregation
 * state (for AMPDU queues), reclaims the completed skb(s) from the
 * transport, and translates the firmware status into mac80211 tx_info
 * flags.  The frames themselves are handed back to mac80211 only after
 * the lock is dropped, since ieee80211_tx_status() may re-enter the
 * driver.
 */
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	/* queues at/above IWLAGN_FIRST_AMPDU_QUEUE carry aggregated traffic */
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	/* station index and TID are packed together in ra_tid */
	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_bh(&priv->sta_lock);

	if (is_agg) {
		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
			     tid >= IWL_MAX_TID_COUNT);
		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
				priv->tid_data[sta_id][tid].agg.txq_id);
		iwl_rx_reply_tx_agg(priv, tx_resp);
	}

	__skb_queue_head_init(&skbs);

	/*
	 * frame_count > 1 responses are aggregation bookkeeping only; the
	 * actual frames are reclaimed when the compressed BA arrives.
	 */
	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		/* pull the completed skbs out of the transport's Tx queue */
		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

		iwlagn_check_ratid_empty(priv, sta_id, tid);
		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			/* driver_data[0/1] were stashed on the Tx path:
			 * the rxon context and the duplicated Tx command */
			ctx = info->driver_data[0];
			iwl_trans_free_tx_cmd(priv->trans,
					      info->driver_data[1]);

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv,
					"stop all queues: passive channel\n");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			freed++;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		/* a non-aggregation response should map to exactly one skb */
		if (!is_agg && freed != 1)
			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
				   iwl_get_tx_fail_reason(status), status);

		IWL_DEBUG_TX_REPLY(priv,
				   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame,
				   SEQ_TO_INDEX(sequence), ssn,
				   le16_to_cpu(tx_resp->seq_ctl));
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_bh(&priv->sta_lock);

	/* report to mac80211 only after dropping sta_lock */
	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status(priv->hw, skb);
	}
}
1276
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 * @priv: driver private data
 * @rxb: the received notification buffer
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.  Reclaims all frames up to the reported
 * SSN from the transport and reports them to mac80211 as acked; the rate
 * scaling data is attached to the first reclaimed frame only.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	/* sanity-check the firmware-provided queue index before using it */
	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock_bh(&priv->sta_lock);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_bh(&priv->sta_lock);
		return;
	}

	if (unlikely(scd_flow != agg->txq_id)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now.
		 * Since it is can possibly happen very often and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(priv,
				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
				    scd_flow, sta_id, tid, agg->txq_id);
		spin_unlock_bh(&priv->sta_lock);
		return;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
			   ba_resp->txed_2_done);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
				   "bogus sent(%d) and ack(%d) count\n",
				   ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);
	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		/* only QoS data frames should be on an aggregation queue */
		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		/* free the duplicated Tx command stashed on the Tx path */
		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock_bh(&priv->sta_lock);

	/* report to mac80211 only after dropping sta_lock */
	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(priv->hw, skb);
	}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
new file mode 100644
index 000000000000..931a8e4269ef
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -0,0 +1,452 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
6 * Copyright(c) 2015 Intel Deutschland GmbH
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
20 * USA
21 *
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
24 *
25 * Contact Information:
26 * Intel Linux Wireless <ilw@linux.intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *
29 *****************************************************************************/
30
31#include <linux/kernel.h>
32
33#include "iwl-io.h"
34#include "iwl-agn-hw.h"
35#include "iwl-trans.h"
36#include "iwl-fh.h"
37#include "iwl-op-mode.h"
38
39#include "dev.h"
40#include "agn.h"
41#include "calib.h"
42
43/******************************************************************************
44 *
45 * uCode download functions
46 *
47 ******************************************************************************/
48
49static inline const struct fw_img *
50iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
51{
52 if (ucode_type >= IWL_UCODE_TYPE_MAX)
53 return NULL;
54
55 return &priv->fw->img[ucode_type];
56}
57
58/*
59 * Calibration
60 */
61static int iwl_set_Xtal_calib(struct iwl_priv *priv)
62{
63 struct iwl_calib_xtal_freq_cmd cmd;
64 __le16 *xtal_calib = priv->nvm_data->xtal_calib;
65
66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
68 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
69 return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
70}
71
72static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
73{
74 struct iwl_calib_temperature_offset_cmd cmd;
75
76 memset(&cmd, 0, sizeof(cmd));
77 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
78 cmd.radio_sensor_offset = priv->nvm_data->raw_temperature;
79 if (!(cmd.radio_sensor_offset))
80 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
81
82 IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
83 le16_to_cpu(cmd.radio_sensor_offset));
84 return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
85}
86
87static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
88{
89 struct iwl_calib_temperature_offset_v2_cmd cmd;
90
91 memset(&cmd, 0, sizeof(cmd));
92 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
93 cmd.radio_sensor_offset_high = priv->nvm_data->kelvin_temperature;
94 cmd.radio_sensor_offset_low = priv->nvm_data->raw_temperature;
95 if (!cmd.radio_sensor_offset_low) {
96 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
97 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
98 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
99 }
100 cmd.burntVoltageRef = priv->nvm_data->calib_voltage;
101
102 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
103 le16_to_cpu(cmd.radio_sensor_offset_high));
104 IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
105 le16_to_cpu(cmd.radio_sensor_offset_low));
106 IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
107 le16_to_cpu(cmd.burntVoltageRef));
108
109 return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
110}
111
112static int iwl_send_calib_cfg(struct iwl_priv *priv)
113{
114 struct iwl_calib_cfg_cmd calib_cfg_cmd;
115 struct iwl_host_cmd cmd = {
116 .id = CALIBRATION_CFG_CMD,
117 .len = { sizeof(struct iwl_calib_cfg_cmd), },
118 .data = { &calib_cfg_cmd, },
119 };
120
121 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
122 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
123 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
124 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
125 calib_cfg_cmd.ucd_calib_cfg.flags =
126 IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
127
128 return iwl_dvm_send_cmd(priv, &cmd);
129}
130
131int iwl_init_alive_start(struct iwl_priv *priv)
132{
133 int ret;
134
135 if (priv->lib->bt_params &&
136 priv->lib->bt_params->advanced_bt_coexist) {
137 /*
138 * Tell uCode we are ready to perform calibration
139 * need to perform this before any calibration
140 * no need to close the envlope since we are going
141 * to load the runtime uCode later.
142 */
143 ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
144 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
145 if (ret)
146 return ret;
147
148 }
149
150 ret = iwl_send_calib_cfg(priv);
151 if (ret)
152 return ret;
153
154 /**
155 * temperature offset calibration is only needed for runtime ucode,
156 * so prepare the value now.
157 */
158 if (priv->lib->need_temp_offset_calib) {
159 if (priv->lib->temp_offset_v2)
160 return iwl_set_temperature_offset_calib_v2(priv);
161 else
162 return iwl_set_temperature_offset_calib(priv);
163 }
164
165 return 0;
166}
167
168static int iwl_send_wimax_coex(struct iwl_priv *priv)
169{
170 struct iwl_wimax_coex_cmd coex_cmd;
171
172 /* coexistence is disabled */
173 memset(&coex_cmd, 0, sizeof(coex_cmd));
174
175 return iwl_dvm_send_cmd_pdu(priv,
176 COEX_PRIORITY_TABLE_CMD, 0,
177 sizeof(coex_cmd), &coex_cmd);
178}
179
/*
 * Default BT coexistence priority table, indexed by BT_COEX_PRIO_TBL_EVT_*.
 * Each entry packs a priority class into the IWL_BT_COEX_PRIO_TBL_PRIO
 * field and a shared-antenna flag into the SHARED_ANTENNA bit; trailing
 * slots are unused (zero).
 */
static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	0, 0, 0, 0, 0, 0, 0
};
201
202void iwl_send_prio_tbl(struct iwl_priv *priv)
203{
204 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
205
206 memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
207 sizeof(iwl_bt_prio_tbl));
208 if (iwl_dvm_send_cmd_pdu(priv,
209 REPLY_BT_COEX_PRIO_TABLE, 0,
210 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
211 IWL_ERR(priv, "failed to send BT prio tbl command\n");
212}
213
214int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
215{
216 struct iwl_bt_coex_prot_env_cmd env_cmd;
217 int ret;
218
219 env_cmd.action = action;
220 env_cmd.type = type;
221 ret = iwl_dvm_send_cmd_pdu(priv,
222 REPLY_BT_COEX_PROT_ENV, 0,
223 sizeof(env_cmd), &env_cmd);
224 if (ret)
225 IWL_ERR(priv, "failed to send BT env command\n");
226 return ret;
227}
228
/* Tx queue -> hardware FIFO mapping when PAN/IPAN is not in use */
static const u8 iwlagn_default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

/*
 * Tx queue -> hardware FIFO mapping for PAN/IPAN capable devices; adds
 * the IPAN FIFOs and the AUX FIFO, with one unused slot in between.
 */
static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL_TX_FIFO_BK_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_AUX,
};
249
250static int iwl_alive_notify(struct iwl_priv *priv)
251{
252 const u8 *queue_to_txf;
253 u8 n_queues;
254 int ret;
255 int i;
256
257 iwl_trans_fw_alive(priv->trans, 0);
258
259 if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
260 priv->nvm_data->sku_cap_ipan_enable) {
261 n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
262 queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
263 } else {
264 n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
265 queue_to_txf = iwlagn_default_queue_to_tx_fifo;
266 }
267
268 for (i = 0; i < n_queues; i++)
269 if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
270 iwl_trans_ac_txq_enable(priv->trans, i,
271 queue_to_txf[i], 0);
272
273 priv->passive_no_rx = false;
274 priv->transport_queue_stop = 0;
275
276 ret = iwl_send_wimax_coex(priv);
277 if (ret)
278 return ret;
279
280 if (!priv->lib->no_xtal_calib) {
281 ret = iwl_set_Xtal_calib(priv);
282 if (ret)
283 return ret;
284 }
285
286 return iwl_send_calib_results(priv);
287}
288
/* Result of parsing the REPLY_ALIVE notification (filled by iwl_alive_fn) */
struct iwl_alive_data {
	bool valid;	/* ucode reported UCODE_VALID_OK */
	u8 subtype;	/* ucode version subtype from the alive response */
};
293
294static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
295 struct iwl_rx_packet *pkt, void *data)
296{
297 struct iwl_priv *priv =
298 container_of(notif_wait, struct iwl_priv, notif_wait);
299 struct iwl_alive_data *alive_data = data;
300 struct iwl_alive_resp *palive;
301
302 palive = (void *)pkt->data;
303
304 IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
305 "0x%01X 0x%01X\n",
306 palive->is_valid, palive->ver_type,
307 palive->ver_subtype);
308
309 priv->device_pointers.error_event_table =
310 le32_to_cpu(palive->error_event_table_ptr);
311 priv->device_pointers.log_event_table =
312 le32_to_cpu(palive->log_event_table_ptr);
313
314 alive_data->subtype = palive->ver_subtype;
315 alive_data->valid = palive->is_valid == UCODE_VALID_OK;
316
317 return true;
318}
319
/* max wait for the REPLY_ALIVE notification after starting the firmware */
#define UCODE_ALIVE_TIMEOUT	HZ
/* max wait for the init calibration results / completion notification */
#define UCODE_CALIB_TIMEOUT	(2*HZ)
322
/*
 * iwl_load_ucode_wait_alive - load a ucode image and wait for ALIVE
 * @priv: driver private data
 * @ucode_type: which image from the firmware file to load
 *
 * Starts the requested firmware image on the device and blocks until the
 * REPLY_ALIVE notification arrives (or UCODE_ALIVE_TIMEOUT expires).
 * On any failure priv->cur_ucode is restored to the previously running
 * image type.  Returns 0 on success, -EINVAL for a missing image, -EIO
 * for an invalid ALIVE, or the error from the transport / wait.
 */
int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
			      enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_alive_data alive_data;
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type;
	static const u16 alive_cmd[] = { REPLY_ALIVE };

	fw = iwl_get_ucode_image(priv, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;

	old_type = priv->cur_ucode;
	priv->cur_ucode = ucode_type;
	priv->ucode_loaded = false;

	/* arm the wait before starting the fw so ALIVE cannot be missed */
	iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(priv->trans, fw, false);
	if (ret) {
		priv->cur_ucode = old_type;
		/* the wait was never entered, so remove it explicitly here */
		iwl_remove_notification(&priv->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
				    UCODE_ALIVE_TIMEOUT);
	if (ret) {
		priv->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(priv, "Loaded ucode is not valid!\n");
		priv->cur_ucode = old_type;
		return -EIO;
	}

	priv->ucode_loaded = true;

	if (ucode_type != IWL_UCODE_WOWLAN) {
		/* delay a bit to give rfkill time to run */
		msleep(5);
	}

	ret = iwl_alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			 "Could not complete ALIVE transition: %d\n", ret);
		priv->cur_ucode = old_type;
		return ret;
	}

	return 0;
}
386
387static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
388 struct iwl_rx_packet *pkt, void *data)
389{
390 struct iwl_priv *priv = data;
391 struct iwl_calib_hdr *hdr;
392
393 if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
394 WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
395 return true;
396 }
397
398 hdr = (struct iwl_calib_hdr *)pkt->data;
399
400 if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
401 IWL_ERR(priv, "Failed to record calibration data %d\n",
402 hdr->op_code);
403
404 return false;
405}
406
/*
 * iwl_run_init_ucode - run the init ucode and collect calibration results
 *
 * Loads the INIT image (if the firmware file provides one), starts the
 * calibration flow and waits for completion, then stops the device in
 * all cases — the caller is expected to load the runtime image next.
 * Must be called with priv->mutex held.  Returns 0 on success (or when
 * no init image exists) or a negative error.
 */
int iwl_run_init_ucode(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
	static const u16 calib_complete[] = {
		CALIBRATION_RES_NOTIFICATION,
		CALIBRATION_COMPLETE_NOTIFICATION
	};
	int ret;

	lockdep_assert_held(&priv->mutex);

	/* No init ucode required? Curious, but maybe ok */
	if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
		return 0;

	/* arm the calibration wait before anything can be received */
	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
				   calib_complete, ARRAY_SIZE(calib_complete),
				   iwlagn_wait_calib, priv);

	/* Will also start the device */
	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
	if (ret)
		goto error;

	ret = iwl_init_alive_start(priv);
	if (ret)
		goto error;

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
				    UCODE_CALIB_TIMEOUT);

	goto out;

 error:
	/* on the error paths the wait was never entered; drop it by hand */
	iwl_remove_notification(&priv->notif_wait, &calib_wait);
 out:
	/* Whatever happened, stop the device */
	iwl_trans_stop_device(priv->trans);
	priv->ucode_loaded = false;

	return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
new file mode 100644
index 000000000000..06f6cc08f451
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
@@ -0,0 +1,140 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-csr.h"
31#include "iwl-agn-hw.h"
32
33/* Highest firmware API version supported */
34#define IWL1000_UCODE_API_MAX 5
35#define IWL100_UCODE_API_MAX 5
36
37/* Oldest version we won't warn about */
38#define IWL1000_UCODE_API_OK 5
39#define IWL100_UCODE_API_OK 5
40
41/* Lowest firmware API version supported */
42#define IWL1000_UCODE_API_MIN 1
43#define IWL100_UCODE_API_MIN 5
44
45/* EEPROM version */
46#define EEPROM_1000_TX_POWER_VERSION (4)
47#define EEPROM_1000_EEPROM_VERSION (0x15C)
48
49#define IWL1000_FW_PRE "iwlwifi-1000-"
50#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
51
52#define IWL100_FW_PRE "iwlwifi-100-"
53#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
54
55
/* Hardware/driver parameters shared by all 1000 and 100 series devices */
static const struct iwl_base_params iwl1000_base_params = {
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
	.shadow_ram_support = false,
	.led_compensation = 51,
	.wd_timeout = IWL_WATCHDOG_DISABLED,
	.max_event_log_size = 128,
	.scd_chain_ext_wa = true,
};

/* HT capabilities: greenfield + RTS/CTS protected aggregation, 2.4GHz HT40 */
static const struct iwl_ht_params iwl1000_ht_params = {
	.ht_greenfield_support = true,
	.use_rts_for_aggregation = true, /* use rts/cts protection */
	.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};

/* EEPROM regulatory band layout; HT40 on 5GHz is not supported */
static const struct iwl_eeprom_params iwl1000_eeprom_params = {
	.regulatory_bands = {
		EEPROM_REG_BAND_1_CHANNELS,
		EEPROM_REG_BAND_2_CHANNELS,
		EEPROM_REG_BAND_3_CHANNELS,
		EEPROM_REG_BAND_4_CHANNELS,
		EEPROM_REG_BAND_5_CHANNELS,
		EEPROM_REG_BAND_24_HT40_CHANNELS,
		EEPROM_REGULATORY_BAND_NO_HT40,
	}
};
85
/*
 * Configuration shared by all 1000-series SKUs; the struct iwl_cfg
 * entries below add the marketing name and, for N-capable parts, the
 * HT parameters.
 */
#define IWL_DEVICE_1000 \
	.fw_name_pre = IWL1000_FW_PRE, \
	.ucode_api_max = IWL1000_UCODE_API_MAX, \
	.ucode_api_ok = IWL1000_UCODE_API_OK, \
	.ucode_api_min = IWL1000_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_1000, \
	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
	.max_data_size = IWLAGN_RTC_DATA_SIZE, \
	.nvm_ver = EEPROM_1000_EEPROM_VERSION, \
	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
	.base_params = &iwl1000_base_params, \
	.eeprom_params = &iwl1000_eeprom_params, \
	.led_mode = IWL_LED_BLINK, \
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

/* 1000 BGN SKU: HT-capable */
const struct iwl_cfg iwl1000_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
	IWL_DEVICE_1000,
	.ht_params = &iwl1000_ht_params,
};

/* 1000 BG SKU: no HT */
const struct iwl_cfg iwl1000_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
	IWL_DEVICE_1000,
};
111
/*
 * Configuration shared by all 100-series SKUs; differs from the 1000
 * series in device family, LED mode and SISO Rx diversity.
 */
#define IWL_DEVICE_100 \
	.fw_name_pre = IWL100_FW_PRE, \
	.ucode_api_max = IWL100_UCODE_API_MAX, \
	.ucode_api_ok = IWL100_UCODE_API_OK, \
	.ucode_api_min = IWL100_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_100, \
	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
	.max_data_size = IWLAGN_RTC_DATA_SIZE, \
	.nvm_ver = EEPROM_1000_EEPROM_VERSION, \
	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
	.base_params = &iwl1000_base_params, \
	.eeprom_params = &iwl1000_eeprom_params, \
	.led_mode = IWL_LED_RF_STATE, \
	.rx_with_siso_diversity = true, \
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

/* 100 BGN SKU: HT-capable */
const struct iwl_cfg iwl100_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
	IWL_DEVICE_100,
	.ht_params = &iwl1000_ht_params,
};

/* 100 BG SKU: no HT */
const struct iwl_cfg iwl100_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
	IWL_DEVICE_100,
};

/* firmware files requested at module load (API_OK versions) */
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
new file mode 100644
index 000000000000..890b95f497d6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
@@ -0,0 +1,216 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "dvm/commands.h" /* needed for BT for now */
32
33/* Highest firmware API version supported */
34#define IWL2030_UCODE_API_MAX 6
35#define IWL2000_UCODE_API_MAX 6
36#define IWL105_UCODE_API_MAX 6
37#define IWL135_UCODE_API_MAX 6
38
39/* Oldest version we won't warn about */
40#define IWL2030_UCODE_API_OK 6
41#define IWL2000_UCODE_API_OK 6
42#define IWL105_UCODE_API_OK 6
43#define IWL135_UCODE_API_OK 6
44
45/* Lowest firmware API version supported */
46#define IWL2030_UCODE_API_MIN 5
47#define IWL2000_UCODE_API_MIN 5
48#define IWL105_UCODE_API_MIN 5
49#define IWL135_UCODE_API_MIN 5
50
51/* EEPROM version */
52#define EEPROM_2000_TX_POWER_VERSION (6)
53#define EEPROM_2000_EEPROM_VERSION (0x805)
54
55
56#define IWL2030_FW_PRE "iwlwifi-2030-"
57#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
58
59#define IWL2000_FW_PRE "iwlwifi-2000-"
60#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
61
62#define IWL105_FW_PRE "iwlwifi-105-"
63#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
64
65#define IWL135_FW_PRE "iwlwifi-135-"
66#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
67
68static const struct iwl_base_params iwl2000_base_params = {
69 .eeprom_size = OTP_LOW_IMAGE_SIZE,
70 .num_of_queues = IWLAGN_NUM_QUEUES,
71 .pll_cfg_val = 0,
72 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
73 .shadow_ram_support = true,
74 .led_compensation = 51,
75 .wd_timeout = IWL_DEF_WD_TIMEOUT,
76 .max_event_log_size = 512,
77 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
78 .scd_chain_ext_wa = true,
79};
80
81
82static const struct iwl_base_params iwl2030_base_params = {
83 .eeprom_size = OTP_LOW_IMAGE_SIZE,
84 .num_of_queues = IWLAGN_NUM_QUEUES,
85 .pll_cfg_val = 0,
86 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
87 .shadow_ram_support = true,
88 .led_compensation = 57,
89 .wd_timeout = IWL_LONG_WD_TIMEOUT,
90 .max_event_log_size = 512,
91 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
92 .scd_chain_ext_wa = true,
93};
94
95static const struct iwl_ht_params iwl2000_ht_params = {
96 .ht_greenfield_support = true,
97 .use_rts_for_aggregation = true, /* use rts/cts protection */
98 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
99};
100
101static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
102 .regulatory_bands = {
103 EEPROM_REG_BAND_1_CHANNELS,
104 EEPROM_REG_BAND_2_CHANNELS,
105 EEPROM_REG_BAND_3_CHANNELS,
106 EEPROM_REG_BAND_4_CHANNELS,
107 EEPROM_REG_BAND_5_CHANNELS,
108 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
109 EEPROM_REGULATORY_BAND_NO_HT40,
110 },
111 .enhanced_txpower = true,
112};
113
114#define IWL_DEVICE_2000 \
115 .fw_name_pre = IWL2000_FW_PRE, \
116 .ucode_api_max = IWL2000_UCODE_API_MAX, \
117 .ucode_api_ok = IWL2000_UCODE_API_OK, \
118 .ucode_api_min = IWL2000_UCODE_API_MIN, \
119 .device_family = IWL_DEVICE_FAMILY_2000, \
120 .max_inst_size = IWL60_RTC_INST_SIZE, \
121 .max_data_size = IWL60_RTC_DATA_SIZE, \
122 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
123 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
124 .base_params = &iwl2000_base_params, \
125 .eeprom_params = &iwl20x0_eeprom_params, \
126 .led_mode = IWL_LED_RF_STATE, \
127 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
128
129
130const struct iwl_cfg iwl2000_2bgn_cfg = {
131 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
132 IWL_DEVICE_2000,
133 .ht_params = &iwl2000_ht_params,
134};
135
136const struct iwl_cfg iwl2000_2bgn_d_cfg = {
137 .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
138 IWL_DEVICE_2000,
139 .ht_params = &iwl2000_ht_params,
140};
141
142#define IWL_DEVICE_2030 \
143 .fw_name_pre = IWL2030_FW_PRE, \
144 .ucode_api_max = IWL2030_UCODE_API_MAX, \
145 .ucode_api_ok = IWL2030_UCODE_API_OK, \
146 .ucode_api_min = IWL2030_UCODE_API_MIN, \
147 .device_family = IWL_DEVICE_FAMILY_2030, \
148 .max_inst_size = IWL60_RTC_INST_SIZE, \
149 .max_data_size = IWL60_RTC_DATA_SIZE, \
150 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
151 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
152 .base_params = &iwl2030_base_params, \
153 .eeprom_params = &iwl20x0_eeprom_params, \
154 .led_mode = IWL_LED_RF_STATE, \
155 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
156
157const struct iwl_cfg iwl2030_2bgn_cfg = {
158 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
159 IWL_DEVICE_2030,
160 .ht_params = &iwl2000_ht_params,
161};
162
163#define IWL_DEVICE_105 \
164 .fw_name_pre = IWL105_FW_PRE, \
165 .ucode_api_max = IWL105_UCODE_API_MAX, \
166 .ucode_api_ok = IWL105_UCODE_API_OK, \
167 .ucode_api_min = IWL105_UCODE_API_MIN, \
168 .device_family = IWL_DEVICE_FAMILY_105, \
169 .max_inst_size = IWL60_RTC_INST_SIZE, \
170 .max_data_size = IWL60_RTC_DATA_SIZE, \
171 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
172 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
173 .base_params = &iwl2000_base_params, \
174 .eeprom_params = &iwl20x0_eeprom_params, \
175 .led_mode = IWL_LED_RF_STATE, \
176 .rx_with_siso_diversity = true, \
177 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
178
179const struct iwl_cfg iwl105_bgn_cfg = {
180 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
181 IWL_DEVICE_105,
182 .ht_params = &iwl2000_ht_params,
183};
184
185const struct iwl_cfg iwl105_bgn_d_cfg = {
186 .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
187 IWL_DEVICE_105,
188 .ht_params = &iwl2000_ht_params,
189};
190
191#define IWL_DEVICE_135 \
192 .fw_name_pre = IWL135_FW_PRE, \
193 .ucode_api_max = IWL135_UCODE_API_MAX, \
194 .ucode_api_ok = IWL135_UCODE_API_OK, \
195 .ucode_api_min = IWL135_UCODE_API_MIN, \
196 .device_family = IWL_DEVICE_FAMILY_135, \
197 .max_inst_size = IWL60_RTC_INST_SIZE, \
198 .max_data_size = IWL60_RTC_DATA_SIZE, \
199 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
200 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
201 .base_params = &iwl2030_base_params, \
202 .eeprom_params = &iwl20x0_eeprom_params, \
203 .led_mode = IWL_LED_RF_STATE, \
204 .rx_with_siso_diversity = true, \
205 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
206
207const struct iwl_cfg iwl135_bgn_cfg = {
208 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
209 IWL_DEVICE_135,
210 .ht_params = &iwl2000_ht_params,
211};
212
213MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
214MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
215MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
216MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
new file mode 100644
index 000000000000..724194e23414
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
@@ -0,0 +1,178 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "iwl-csr.h"
32
33/* Highest firmware API version supported */
34#define IWL5000_UCODE_API_MAX 5
35#define IWL5150_UCODE_API_MAX 2
36
37/* Oldest version we won't warn about */
38#define IWL5000_UCODE_API_OK 5
39#define IWL5150_UCODE_API_OK 2
40
41/* Lowest firmware API version supported */
42#define IWL5000_UCODE_API_MIN 1
43#define IWL5150_UCODE_API_MIN 1
44
45/* EEPROM versions */
46#define EEPROM_5000_TX_POWER_VERSION (4)
47#define EEPROM_5000_EEPROM_VERSION (0x11A)
48#define EEPROM_5050_TX_POWER_VERSION (4)
49#define EEPROM_5050_EEPROM_VERSION (0x21E)
50
51#define IWL5000_FW_PRE "iwlwifi-5000-"
52#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
53
54#define IWL5150_FW_PRE "iwlwifi-5150-"
55#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
56
57static const struct iwl_base_params iwl5000_base_params = {
58 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
59 .num_of_queues = IWLAGN_NUM_QUEUES,
60 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
61 .led_compensation = 51,
62 .wd_timeout = IWL_WATCHDOG_DISABLED,
63 .max_event_log_size = 512,
64 .scd_chain_ext_wa = true,
65};
66
67static const struct iwl_ht_params iwl5000_ht_params = {
68 .ht_greenfield_support = true,
69 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
70};
71
72static const struct iwl_eeprom_params iwl5000_eeprom_params = {
73 .regulatory_bands = {
74 EEPROM_REG_BAND_1_CHANNELS,
75 EEPROM_REG_BAND_2_CHANNELS,
76 EEPROM_REG_BAND_3_CHANNELS,
77 EEPROM_REG_BAND_4_CHANNELS,
78 EEPROM_REG_BAND_5_CHANNELS,
79 EEPROM_REG_BAND_24_HT40_CHANNELS,
80 EEPROM_REG_BAND_52_HT40_CHANNELS
81 },
82};
83
84#define IWL_DEVICE_5000 \
85 .fw_name_pre = IWL5000_FW_PRE, \
86 .ucode_api_max = IWL5000_UCODE_API_MAX, \
87 .ucode_api_ok = IWL5000_UCODE_API_OK, \
88 .ucode_api_min = IWL5000_UCODE_API_MIN, \
89 .device_family = IWL_DEVICE_FAMILY_5000, \
90 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
91 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
92 .nvm_ver = EEPROM_5000_EEPROM_VERSION, \
93 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
94 .base_params = &iwl5000_base_params, \
95 .eeprom_params = &iwl5000_eeprom_params, \
96 .led_mode = IWL_LED_BLINK, \
97 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
98
99const struct iwl_cfg iwl5300_agn_cfg = {
100 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
101 IWL_DEVICE_5000,
102 /* at least EEPROM 0x11A has wrong info */
103 .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
104 .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
105 .ht_params = &iwl5000_ht_params,
106};
107
108const struct iwl_cfg iwl5100_bgn_cfg = {
109 .name = "Intel(R) WiFi Link 5100 BGN",
110 IWL_DEVICE_5000,
111 .valid_tx_ant = ANT_B, /* .cfg overwrite */
112 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
113 .ht_params = &iwl5000_ht_params,
114};
115
116const struct iwl_cfg iwl5100_abg_cfg = {
117 .name = "Intel(R) WiFi Link 5100 ABG",
118 IWL_DEVICE_5000,
119 .valid_tx_ant = ANT_B, /* .cfg overwrite */
120 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
121};
122
123const struct iwl_cfg iwl5100_agn_cfg = {
124 .name = "Intel(R) WiFi Link 5100 AGN",
125 IWL_DEVICE_5000,
126 .valid_tx_ant = ANT_B, /* .cfg overwrite */
127 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
128 .ht_params = &iwl5000_ht_params,
129};
130
131const struct iwl_cfg iwl5350_agn_cfg = {
132 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
133 .fw_name_pre = IWL5000_FW_PRE,
134 .ucode_api_max = IWL5000_UCODE_API_MAX,
135 .ucode_api_ok = IWL5000_UCODE_API_OK,
136 .ucode_api_min = IWL5000_UCODE_API_MIN,
137 .device_family = IWL_DEVICE_FAMILY_5000,
138 .max_inst_size = IWLAGN_RTC_INST_SIZE,
139 .max_data_size = IWLAGN_RTC_DATA_SIZE,
140 .nvm_ver = EEPROM_5050_EEPROM_VERSION,
141 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
142 .base_params = &iwl5000_base_params,
143 .eeprom_params = &iwl5000_eeprom_params,
144 .ht_params = &iwl5000_ht_params,
145 .led_mode = IWL_LED_BLINK,
146 .internal_wimax_coex = true,
147};
148
149#define IWL_DEVICE_5150 \
150 .fw_name_pre = IWL5150_FW_PRE, \
151 .ucode_api_max = IWL5150_UCODE_API_MAX, \
152 .ucode_api_ok = IWL5150_UCODE_API_OK, \
153 .ucode_api_min = IWL5150_UCODE_API_MIN, \
154 .device_family = IWL_DEVICE_FAMILY_5150, \
155 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
156 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
157 .nvm_ver = EEPROM_5050_EEPROM_VERSION, \
158 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
159 .base_params = &iwl5000_base_params, \
160 .eeprom_params = &iwl5000_eeprom_params, \
161 .led_mode = IWL_LED_BLINK, \
162 .internal_wimax_coex = true, \
163 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
164
165const struct iwl_cfg iwl5150_agn_cfg = {
166 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
167 IWL_DEVICE_5150,
168 .ht_params = &iwl5000_ht_params,
169
170};
171
172const struct iwl_cfg iwl5150_abg_cfg = {
173 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
174 IWL_DEVICE_5150,
175};
176
177MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
178MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
new file mode 100644
index 000000000000..21b2630763dc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -0,0 +1,389 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "dvm/commands.h" /* needed for BT for now */
32
33/* Highest firmware API version supported */
34#define IWL6000_UCODE_API_MAX 6
35#define IWL6050_UCODE_API_MAX 5
36#define IWL6000G2_UCODE_API_MAX 6
37#define IWL6035_UCODE_API_MAX 6
38
39/* Oldest version we won't warn about */
40#define IWL6000_UCODE_API_OK 4
41#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6
44#define IWL6035_UCODE_API_OK 6
45
46/* Lowest firmware API version supported */
47#define IWL6000_UCODE_API_MIN 4
48#define IWL6050_UCODE_API_MIN 4
49#define IWL6000G2_UCODE_API_MIN 5
50#define IWL6035_UCODE_API_MIN 6
51
52/* EEPROM versions */
53#define EEPROM_6000_TX_POWER_VERSION (4)
54#define EEPROM_6000_EEPROM_VERSION (0x423)
55#define EEPROM_6050_TX_POWER_VERSION (4)
56#define EEPROM_6050_EEPROM_VERSION (0x532)
57#define EEPROM_6150_TX_POWER_VERSION (6)
58#define EEPROM_6150_EEPROM_VERSION (0x553)
59#define EEPROM_6005_TX_POWER_VERSION (6)
60#define EEPROM_6005_EEPROM_VERSION (0x709)
61#define EEPROM_6030_TX_POWER_VERSION (6)
62#define EEPROM_6030_EEPROM_VERSION (0x709)
63#define EEPROM_6035_TX_POWER_VERSION (6)
64#define EEPROM_6035_EEPROM_VERSION (0x753)
65
66#define IWL6000_FW_PRE "iwlwifi-6000-"
67#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
68
69#define IWL6050_FW_PRE "iwlwifi-6050-"
70#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
71
72#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
73#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
74
75#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
76#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
77
78static const struct iwl_base_params iwl6000_base_params = {
79 .eeprom_size = OTP_LOW_IMAGE_SIZE,
80 .num_of_queues = IWLAGN_NUM_QUEUES,
81 .pll_cfg_val = 0,
82 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
83 .shadow_ram_support = true,
84 .led_compensation = 51,
85 .wd_timeout = IWL_DEF_WD_TIMEOUT,
86 .max_event_log_size = 512,
87 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
88 .scd_chain_ext_wa = true,
89};
90
91static const struct iwl_base_params iwl6050_base_params = {
92 .eeprom_size = OTP_LOW_IMAGE_SIZE,
93 .num_of_queues = IWLAGN_NUM_QUEUES,
94 .pll_cfg_val = 0,
95 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
96 .shadow_ram_support = true,
97 .led_compensation = 51,
98 .wd_timeout = IWL_DEF_WD_TIMEOUT,
99 .max_event_log_size = 1024,
100 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
101 .scd_chain_ext_wa = true,
102};
103
104static const struct iwl_base_params iwl6000_g2_base_params = {
105 .eeprom_size = OTP_LOW_IMAGE_SIZE,
106 .num_of_queues = IWLAGN_NUM_QUEUES,
107 .pll_cfg_val = 0,
108 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
109 .shadow_ram_support = true,
110 .led_compensation = 57,
111 .wd_timeout = IWL_LONG_WD_TIMEOUT,
112 .max_event_log_size = 512,
113 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
114 .scd_chain_ext_wa = true,
115};
116
117static const struct iwl_ht_params iwl6000_ht_params = {
118 .ht_greenfield_support = true,
119 .use_rts_for_aggregation = true, /* use rts/cts protection */
120 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
121};
122
123static const struct iwl_eeprom_params iwl6000_eeprom_params = {
124 .regulatory_bands = {
125 EEPROM_REG_BAND_1_CHANNELS,
126 EEPROM_REG_BAND_2_CHANNELS,
127 EEPROM_REG_BAND_3_CHANNELS,
128 EEPROM_REG_BAND_4_CHANNELS,
129 EEPROM_REG_BAND_5_CHANNELS,
130 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
131 EEPROM_REG_BAND_52_HT40_CHANNELS
132 },
133 .enhanced_txpower = true,
134};
135
136#define IWL_DEVICE_6005 \
137 .fw_name_pre = IWL6005_FW_PRE, \
138 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
139 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
140 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
141 .device_family = IWL_DEVICE_FAMILY_6005, \
142 .max_inst_size = IWL60_RTC_INST_SIZE, \
143 .max_data_size = IWL60_RTC_DATA_SIZE, \
144 .nvm_ver = EEPROM_6005_EEPROM_VERSION, \
145 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
146 .base_params = &iwl6000_g2_base_params, \
147 .eeprom_params = &iwl6000_eeprom_params, \
148 .led_mode = IWL_LED_RF_STATE, \
149 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
150
151const struct iwl_cfg iwl6005_2agn_cfg = {
152 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
153 IWL_DEVICE_6005,
154 .ht_params = &iwl6000_ht_params,
155};
156
157const struct iwl_cfg iwl6005_2abg_cfg = {
158 .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
159 IWL_DEVICE_6005,
160};
161
162const struct iwl_cfg iwl6005_2bg_cfg = {
163 .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
164 IWL_DEVICE_6005,
165};
166
167const struct iwl_cfg iwl6005_2agn_sff_cfg = {
168 .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
169 IWL_DEVICE_6005,
170 .ht_params = &iwl6000_ht_params,
171};
172
173const struct iwl_cfg iwl6005_2agn_d_cfg = {
174 .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
175 IWL_DEVICE_6005,
176 .ht_params = &iwl6000_ht_params,
177};
178
179const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
180 .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
181 IWL_DEVICE_6005,
182 .ht_params = &iwl6000_ht_params,
183};
184
185const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
186 .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
187 IWL_DEVICE_6005,
188 .ht_params = &iwl6000_ht_params,
189};
190
191#define IWL_DEVICE_6030 \
192 .fw_name_pre = IWL6030_FW_PRE, \
193 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
194 .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
195 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
196 .device_family = IWL_DEVICE_FAMILY_6030, \
197 .max_inst_size = IWL60_RTC_INST_SIZE, \
198 .max_data_size = IWL60_RTC_DATA_SIZE, \
199 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
200 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
201 .base_params = &iwl6000_g2_base_params, \
202 .eeprom_params = &iwl6000_eeprom_params, \
203 .led_mode = IWL_LED_RF_STATE, \
204 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
205
206const struct iwl_cfg iwl6030_2agn_cfg = {
207 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
208 IWL_DEVICE_6030,
209 .ht_params = &iwl6000_ht_params,
210};
211
212const struct iwl_cfg iwl6030_2abg_cfg = {
213 .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
214 IWL_DEVICE_6030,
215};
216
217const struct iwl_cfg iwl6030_2bgn_cfg = {
218 .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
219 IWL_DEVICE_6030,
220 .ht_params = &iwl6000_ht_params,
221};
222
223const struct iwl_cfg iwl6030_2bg_cfg = {
224 .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
225 IWL_DEVICE_6030,
226};
227
228#define IWL_DEVICE_6035 \
229 .fw_name_pre = IWL6030_FW_PRE, \
230 .ucode_api_max = IWL6035_UCODE_API_MAX, \
231 .ucode_api_ok = IWL6035_UCODE_API_OK, \
232 .ucode_api_min = IWL6035_UCODE_API_MIN, \
233 .device_family = IWL_DEVICE_FAMILY_6030, \
234 .max_inst_size = IWL60_RTC_INST_SIZE, \
235 .max_data_size = IWL60_RTC_DATA_SIZE, \
236 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
237 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
238 .base_params = &iwl6000_g2_base_params, \
239 .eeprom_params = &iwl6000_eeprom_params, \
240 .led_mode = IWL_LED_RF_STATE, \
241 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
242
243const struct iwl_cfg iwl6035_2agn_cfg = {
244 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
245 IWL_DEVICE_6035,
246 .ht_params = &iwl6000_ht_params,
247};
248
249const struct iwl_cfg iwl6035_2agn_sff_cfg = {
250 .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
251 IWL_DEVICE_6035,
252 .ht_params = &iwl6000_ht_params,
253};
254
255const struct iwl_cfg iwl1030_bgn_cfg = {
256 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
257 IWL_DEVICE_6030,
258 .ht_params = &iwl6000_ht_params,
259};
260
261const struct iwl_cfg iwl1030_bg_cfg = {
262 .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
263 IWL_DEVICE_6030,
264};
265
266const struct iwl_cfg iwl130_bgn_cfg = {
267 .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
268 IWL_DEVICE_6030,
269 .ht_params = &iwl6000_ht_params,
270 .rx_with_siso_diversity = true,
271};
272
273const struct iwl_cfg iwl130_bg_cfg = {
274 .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
275 IWL_DEVICE_6030,
276 .rx_with_siso_diversity = true,
277};
278
279/*
280 * "i": Internal configuration, use internal Power Amplifier
281 */
282#define IWL_DEVICE_6000i \
283 .fw_name_pre = IWL6000_FW_PRE, \
284 .ucode_api_max = IWL6000_UCODE_API_MAX, \
285 .ucode_api_ok = IWL6000_UCODE_API_OK, \
286 .ucode_api_min = IWL6000_UCODE_API_MIN, \
287 .device_family = IWL_DEVICE_FAMILY_6000i, \
288 .max_inst_size = IWL60_RTC_INST_SIZE, \
289 .max_data_size = IWL60_RTC_DATA_SIZE, \
290 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
291 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
292 .nvm_ver = EEPROM_6000_EEPROM_VERSION, \
293 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
294 .base_params = &iwl6000_base_params, \
295 .eeprom_params = &iwl6000_eeprom_params, \
296 .led_mode = IWL_LED_BLINK, \
297 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
298
299const struct iwl_cfg iwl6000i_2agn_cfg = {
300 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
301 IWL_DEVICE_6000i,
302 .ht_params = &iwl6000_ht_params,
303};
304
305const struct iwl_cfg iwl6000i_2abg_cfg = {
306 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
307 IWL_DEVICE_6000i,
308};
309
310const struct iwl_cfg iwl6000i_2bg_cfg = {
311 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
312 IWL_DEVICE_6000i,
313};
314
315#define IWL_DEVICE_6050 \
316 .fw_name_pre = IWL6050_FW_PRE, \
317 .ucode_api_max = IWL6050_UCODE_API_MAX, \
318 .ucode_api_min = IWL6050_UCODE_API_MIN, \
319 .device_family = IWL_DEVICE_FAMILY_6050, \
320 .max_inst_size = IWL60_RTC_INST_SIZE, \
321 .max_data_size = IWL60_RTC_DATA_SIZE, \
322 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
323 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
324 .nvm_ver = EEPROM_6050_EEPROM_VERSION, \
325 .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
326 .base_params = &iwl6050_base_params, \
327 .eeprom_params = &iwl6000_eeprom_params, \
328 .led_mode = IWL_LED_BLINK, \
329 .internal_wimax_coex = true, \
330 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
331
332const struct iwl_cfg iwl6050_2agn_cfg = {
333 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
334 IWL_DEVICE_6050,
335 .ht_params = &iwl6000_ht_params,
336};
337
338const struct iwl_cfg iwl6050_2abg_cfg = {
339 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
340 IWL_DEVICE_6050,
341};
342
343#define IWL_DEVICE_6150 \
344 .fw_name_pre = IWL6050_FW_PRE, \
345 .ucode_api_max = IWL6050_UCODE_API_MAX, \
346 .ucode_api_min = IWL6050_UCODE_API_MIN, \
347 .device_family = IWL_DEVICE_FAMILY_6150, \
348 .max_inst_size = IWL60_RTC_INST_SIZE, \
349 .max_data_size = IWL60_RTC_DATA_SIZE, \
350 .nvm_ver = EEPROM_6150_EEPROM_VERSION, \
351 .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
352 .base_params = &iwl6050_base_params, \
353 .eeprom_params = &iwl6000_eeprom_params, \
354 .led_mode = IWL_LED_BLINK, \
355 .internal_wimax_coex = true, \
356 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
357
358const struct iwl_cfg iwl6150_bgn_cfg = {
359 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
360 IWL_DEVICE_6150,
361 .ht_params = &iwl6000_ht_params,
362};
363
364const struct iwl_cfg iwl6150_bg_cfg = {
365 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
366 IWL_DEVICE_6150,
367};
368
369const struct iwl_cfg iwl6000_3agn_cfg = {
370 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
371 .fw_name_pre = IWL6000_FW_PRE,
372 .ucode_api_max = IWL6000_UCODE_API_MAX,
373 .ucode_api_ok = IWL6000_UCODE_API_OK,
374 .ucode_api_min = IWL6000_UCODE_API_MIN,
375 .device_family = IWL_DEVICE_FAMILY_6000,
376 .max_inst_size = IWL60_RTC_INST_SIZE,
377 .max_data_size = IWL60_RTC_DATA_SIZE,
378 .nvm_ver = EEPROM_6000_EEPROM_VERSION,
379 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
380 .base_params = &iwl6000_base_params,
381 .eeprom_params = &iwl6000_eeprom_params,
382 .ht_params = &iwl6000_ht_params,
383 .led_mode = IWL_LED_BLINK,
384};
385
386MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
387MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
388MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
389MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
new file mode 100644
index 000000000000..1a73c7a1da77
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -0,0 +1,346 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/module.h>
67#include <linux/stringify.h>
68#include "iwl-config.h"
69#include "iwl-agn-hw.h"
70
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX	17

/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK	13

/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN	13

/* NVM versions */
#define IWL7260_NVM_VERSION		0x0a1d
#define IWL7260_TX_POWER_VERSION	0xffff /* meaningless */
#define IWL3160_NVM_VERSION		0x709
#define IWL3160_TX_POWER_VERSION	0xffff /* meaningless */
#define IWL3165_NVM_VERSION		0x709
#define IWL3165_TX_POWER_VERSION	0xffff /* meaningless */
#define IWL7265_NVM_VERSION		0x0a1d
#define IWL7265_TX_POWER_VERSION	0xffff /* meaningless */
#define IWL7265D_NVM_VERSION		0x0c11
/*
 * Fix: this line used to redefine IWL7265_TX_POWER_VERSION (a duplicate of
 * the definition above); the 7265D variant gets its own macro instead.
 * Same 0xffff value, so existing users are unaffected.
 */
#define IWL7265D_TX_POWER_VERSION	0xffff /* meaningless */

/* DCCM offsets and lengths */
#define IWL7000_DCCM_OFFSET		0x800000
#define IWL7260_DCCM_LEN		0x14000
#define IWL3160_DCCM_LEN		0x10000
#define IWL7265_DCCM_LEN		0x17A00

/* Firmware file name prefixes: "<prefix><api>.ucode" */
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"

#define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"

#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"

#define IWL7265D_FW_PRE "iwlwifi-7265D-"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"

/* NVM section index holding the HW section for family 7000 */
#define NVM_HW_SECTION_NUM_FAMILY_7000	0
111
112static const struct iwl_base_params iwl7000_base_params = {
113 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
114 .num_of_queues = 31,
115 .pll_cfg_val = 0,
116 .shadow_ram_support = true,
117 .led_compensation = 57,
118 .wd_timeout = IWL_LONG_WD_TIMEOUT,
119 .max_event_log_size = 512,
120 .shadow_reg_enable = true,
121 .pcie_l1_allowed = true,
122 .apmg_wake_up_wa = true,
123};
124
125static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
126 .ct_kill_entry = 118,
127 .ct_kill_exit = 96,
128 .ct_kill_duration = 5,
129 .dynamic_smps_entry = 114,
130 .dynamic_smps_exit = 110,
131 .tx_protection_entry = 114,
132 .tx_protection_exit = 108,
133 .tx_backoff = {
134 {.temperature = 112, .backoff = 300},
135 {.temperature = 113, .backoff = 800},
136 {.temperature = 114, .backoff = 1500},
137 {.temperature = 115, .backoff = 3000},
138 {.temperature = 116, .backoff = 5000},
139 {.temperature = 117, .backoff = 10000},
140 },
141 .support_ct_kill = true,
142 .support_dynamic_smps = true,
143 .support_tx_protection = true,
144 .support_tx_backoff = true,
145};
146
147static const struct iwl_ht_params iwl7000_ht_params = {
148 .stbc = true,
149 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
150};
151
/*
 * Fields shared by all 7000-family configs; each iwl_cfg entry below adds
 * its own name, firmware prefix, NVM versions and device quirks.
 */
#define IWL_DEVICE_7000							\
	.ucode_api_max = IWL7260_UCODE_API_MAX,				\
	.ucode_api_ok = IWL7260_UCODE_API_OK,				\
	.ucode_api_min = IWL7260_UCODE_API_MIN,				\
	.device_family = IWL_DEVICE_FAMILY_7000,			\
	.max_inst_size = IWL60_RTC_INST_SIZE,				\
	.max_data_size = IWL60_RTC_DATA_SIZE,				\
	.base_params = &iwl7000_base_params,				\
	.led_mode = IWL_LED_RF_STATE,					\
	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000,		\
	.non_shared_ant = ANT_A,					\
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,		\
	.dccm_offset = IWL7000_DCCM_OFFSET
165
166const struct iwl_cfg iwl7260_2ac_cfg = {
167 .name = "Intel(R) Dual Band Wireless AC 7260",
168 .fw_name_pre = IWL7260_FW_PRE,
169 IWL_DEVICE_7000,
170 .ht_params = &iwl7000_ht_params,
171 .nvm_ver = IWL7260_NVM_VERSION,
172 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
173 .host_interrupt_operation_mode = true,
174 .lp_xtal_workaround = true,
175 .dccm_len = IWL7260_DCCM_LEN,
176};
177
178const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
179 .name = "Intel(R) Dual Band Wireless AC 7260",
180 .fw_name_pre = IWL7260_FW_PRE,
181 IWL_DEVICE_7000,
182 .ht_params = &iwl7000_ht_params,
183 .nvm_ver = IWL7260_NVM_VERSION,
184 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
185 .high_temp = true,
186 .host_interrupt_operation_mode = true,
187 .lp_xtal_workaround = true,
188 .dccm_len = IWL7260_DCCM_LEN,
189 .thermal_params = &iwl7000_high_temp_tt_params,
190};
191
192const struct iwl_cfg iwl7260_2n_cfg = {
193 .name = "Intel(R) Dual Band Wireless N 7260",
194 .fw_name_pre = IWL7260_FW_PRE,
195 IWL_DEVICE_7000,
196 .ht_params = &iwl7000_ht_params,
197 .nvm_ver = IWL7260_NVM_VERSION,
198 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
199 .host_interrupt_operation_mode = true,
200 .lp_xtal_workaround = true,
201 .dccm_len = IWL7260_DCCM_LEN,
202};
203
204const struct iwl_cfg iwl7260_n_cfg = {
205 .name = "Intel(R) Wireless N 7260",
206 .fw_name_pre = IWL7260_FW_PRE,
207 IWL_DEVICE_7000,
208 .ht_params = &iwl7000_ht_params,
209 .nvm_ver = IWL7260_NVM_VERSION,
210 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
211 .host_interrupt_operation_mode = true,
212 .lp_xtal_workaround = true,
213 .dccm_len = IWL7260_DCCM_LEN,
214};
215
216const struct iwl_cfg iwl3160_2ac_cfg = {
217 .name = "Intel(R) Dual Band Wireless AC 3160",
218 .fw_name_pre = IWL3160_FW_PRE,
219 IWL_DEVICE_7000,
220 .ht_params = &iwl7000_ht_params,
221 .nvm_ver = IWL3160_NVM_VERSION,
222 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
223 .host_interrupt_operation_mode = true,
224 .dccm_len = IWL3160_DCCM_LEN,
225};
226
227const struct iwl_cfg iwl3160_2n_cfg = {
228 .name = "Intel(R) Dual Band Wireless N 3160",
229 .fw_name_pre = IWL3160_FW_PRE,
230 IWL_DEVICE_7000,
231 .ht_params = &iwl7000_ht_params,
232 .nvm_ver = IWL3160_NVM_VERSION,
233 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
234 .host_interrupt_operation_mode = true,
235 .dccm_len = IWL3160_DCCM_LEN,
236};
237
238const struct iwl_cfg iwl3160_n_cfg = {
239 .name = "Intel(R) Wireless N 3160",
240 .fw_name_pre = IWL3160_FW_PRE,
241 IWL_DEVICE_7000,
242 .ht_params = &iwl7000_ht_params,
243 .nvm_ver = IWL3160_NVM_VERSION,
244 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
245 .host_interrupt_operation_mode = true,
246 .dccm_len = IWL3160_DCCM_LEN,
247};
248
249static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
250 {.pwr = 1600, .backoff = 0},
251 {.pwr = 1300, .backoff = 467},
252 {.pwr = 900, .backoff = 1900},
253 {.pwr = 800, .backoff = 2630},
254 {.pwr = 700, .backoff = 3720},
255 {.pwr = 600, .backoff = 5550},
256 {.pwr = 500, .backoff = 9350},
257 {0},
258};
259
260static const struct iwl_ht_params iwl7265_ht_params = {
261 .stbc = true,
262 .ldpc = true,
263 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
264};
265
266const struct iwl_cfg iwl3165_2ac_cfg = {
267 .name = "Intel(R) Dual Band Wireless AC 3165",
268 .fw_name_pre = IWL7265D_FW_PRE,
269 IWL_DEVICE_7000,
270 .ht_params = &iwl7000_ht_params,
271 .nvm_ver = IWL3165_NVM_VERSION,
272 .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
273 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
274 .dccm_len = IWL7265_DCCM_LEN,
275};
276
277const struct iwl_cfg iwl7265_2ac_cfg = {
278 .name = "Intel(R) Dual Band Wireless AC 7265",
279 .fw_name_pre = IWL7265_FW_PRE,
280 IWL_DEVICE_7000,
281 .ht_params = &iwl7265_ht_params,
282 .nvm_ver = IWL7265_NVM_VERSION,
283 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
284 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
285 .dccm_len = IWL7265_DCCM_LEN,
286};
287
288const struct iwl_cfg iwl7265_2n_cfg = {
289 .name = "Intel(R) Dual Band Wireless N 7265",
290 .fw_name_pre = IWL7265_FW_PRE,
291 IWL_DEVICE_7000,
292 .ht_params = &iwl7265_ht_params,
293 .nvm_ver = IWL7265_NVM_VERSION,
294 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
295 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
296 .dccm_len = IWL7265_DCCM_LEN,
297};
298
299const struct iwl_cfg iwl7265_n_cfg = {
300 .name = "Intel(R) Wireless N 7265",
301 .fw_name_pre = IWL7265_FW_PRE,
302 IWL_DEVICE_7000,
303 .ht_params = &iwl7265_ht_params,
304 .nvm_ver = IWL7265_NVM_VERSION,
305 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
306 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
307 .dccm_len = IWL7265_DCCM_LEN,
308};
309
310const struct iwl_cfg iwl7265d_2ac_cfg = {
311 .name = "Intel(R) Dual Band Wireless AC 7265",
312 .fw_name_pre = IWL7265D_FW_PRE,
313 IWL_DEVICE_7000,
314 .ht_params = &iwl7265_ht_params,
315 .nvm_ver = IWL7265D_NVM_VERSION,
316 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
317 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
318 .dccm_len = IWL7265_DCCM_LEN,
319};
320
321const struct iwl_cfg iwl7265d_2n_cfg = {
322 .name = "Intel(R) Dual Band Wireless N 7265",
323 .fw_name_pre = IWL7265D_FW_PRE,
324 IWL_DEVICE_7000,
325 .ht_params = &iwl7265_ht_params,
326 .nvm_ver = IWL7265D_NVM_VERSION,
327 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
328 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
329 .dccm_len = IWL7265_DCCM_LEN,
330};
331
332const struct iwl_cfg iwl7265d_n_cfg = {
333 .name = "Intel(R) Wireless N 7265",
334 .fw_name_pre = IWL7265D_FW_PRE,
335 IWL_DEVICE_7000,
336 .ht_params = &iwl7265_ht_params,
337 .nvm_ver = IWL7265D_NVM_VERSION,
338 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
339 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
340 .dccm_len = IWL7265_DCCM_LEN,
341};
342
/* Advertise the firmware images the 7000-family driver may request.
 * All families share the 7260 "API OK" version here. */
343MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
344MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
345MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
346MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
new file mode 100644
index 000000000000..0116e5a4c393
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -0,0 +1,229 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/module.h>
67#include <linux/stringify.h>
68#include "iwl-config.h"
69#include "iwl-agn-hw.h"
70
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX	17

/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK	13

/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN	13

/* NVM versions */
#define IWL8000_NVM_VERSION		0x0a1d
#define IWL8000_TX_POWER_VERSION	0xffff /* meaningless */

/* Memory offsets and lengths */
#define IWL8260_DCCM_OFFSET		0x800000
#define IWL8260_DCCM_LEN		0x18000
#define IWL8260_DCCM2_OFFSET		0x880000
#define IWL8260_DCCM2_LEN		0x8000
#define IWL8260_SMEM_OFFSET		0x400000
#define IWL8260_SMEM_LEN		0x68000

/* Firmware file name: "iwlwifi-8000-<api>.ucode" */
#define IWL8000_FW_PRE "iwlwifi-8000"
#define IWL8000_MODULE_FIRMWARE(api) \
	IWL8000_FW_PRE "-" __stringify(api) ".ucode"

#define NVM_HW_SECTION_NUM_FAMILY_8000		10
#define DEFAULT_NVM_FILE_FAMILY_8000B		"nvmData-8000B"
#define DEFAULT_NVM_FILE_FAMILY_8000C		"nvmData-8000C"

/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO	21
#define MAX_TX_AGG_SIZE_8260_SDIO	40

/* Max A-MPDU exponent for HT and VHT */
#define MAX_HT_AMPDU_EXPONENT_8260_SDIO		IEEE80211_HT_MAX_AMPDU_32K
#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO	IEEE80211_VHT_MAX_AMPDU_32K
107
108static const struct iwl_base_params iwl8000_base_params = {
109 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
110 .num_of_queues = 31,
111 .pll_cfg_val = 0,
112 .shadow_ram_support = true,
113 .led_compensation = 57,
114 .wd_timeout = IWL_LONG_WD_TIMEOUT,
115 .max_event_log_size = 512,
116 .shadow_reg_enable = true,
117 .pcie_l1_allowed = true,
118};
119
120static const struct iwl_ht_params iwl8000_ht_params = {
121 .stbc = true,
122 .ldpc = true,
123 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
124};
125
126static const struct iwl_tt_params iwl8000_tt_params = {
127 .ct_kill_entry = 115,
128 .ct_kill_exit = 93,
129 .ct_kill_duration = 5,
130 .dynamic_smps_entry = 111,
131 .dynamic_smps_exit = 107,
132 .tx_protection_entry = 112,
133 .tx_protection_exit = 105,
134 .tx_backoff = {
135 {.temperature = 110, .backoff = 200},
136 {.temperature = 111, .backoff = 600},
137 {.temperature = 112, .backoff = 1200},
138 {.temperature = 113, .backoff = 2000},
139 {.temperature = 114, .backoff = 4000},
140 },
141 .support_ct_kill = true,
142 .support_dynamic_smps = true,
143 .support_tx_protection = true,
144 .support_tx_backoff = true,
145};
146
/*
 * Fields shared by all 8000-family configs: D0i3 power state, RX checksum
 * offload, dual DCCM regions, SMEM, NVM override files and thermal table.
 */
#define IWL_DEVICE_8000							\
	.ucode_api_max = IWL8000_UCODE_API_MAX,				\
	.ucode_api_ok = IWL8000_UCODE_API_OK,				\
	.ucode_api_min = IWL8000_UCODE_API_MIN,				\
	.device_family = IWL_DEVICE_FAMILY_8000,			\
	.max_inst_size = IWL60_RTC_INST_SIZE,				\
	.max_data_size = IWL60_RTC_DATA_SIZE,				\
	.base_params = &iwl8000_base_params,				\
	.led_mode = IWL_LED_RF_STATE,					\
	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,		\
	.d0i3 = true,							\
	.features = NETIF_F_RXCSUM,					\
	.non_shared_ant = ANT_A,					\
	.dccm_offset = IWL8260_DCCM_OFFSET,				\
	.dccm_len = IWL8260_DCCM_LEN,					\
	.dccm2_offset = IWL8260_DCCM2_OFFSET,				\
	.dccm2_len = IWL8260_DCCM2_LEN,					\
	.smem_offset = IWL8260_SMEM_OFFSET,				\
	.smem_len = IWL8260_SMEM_LEN,					\
	.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,	\
	.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,	\
	.thermal_params = &iwl8000_tt_params,				\
	.apmg_not_supported = true
170
171const struct iwl_cfg iwl8260_2n_cfg = {
172 .name = "Intel(R) Dual Band Wireless N 8260",
173 .fw_name_pre = IWL8000_FW_PRE,
174 IWL_DEVICE_8000,
175 .ht_params = &iwl8000_ht_params,
176 .nvm_ver = IWL8000_NVM_VERSION,
177 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
178};
179
180const struct iwl_cfg iwl8260_2ac_cfg = {
181 .name = "Intel(R) Dual Band Wireless AC 8260",
182 .fw_name_pre = IWL8000_FW_PRE,
183 IWL_DEVICE_8000,
184 .ht_params = &iwl8000_ht_params,
185 .nvm_ver = IWL8000_NVM_VERSION,
186 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
187 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
188};
189
190const struct iwl_cfg iwl4165_2ac_cfg = {
191 .name = "Intel(R) Dual Band Wireless AC 4165",
192 .fw_name_pre = IWL8000_FW_PRE,
193 IWL_DEVICE_8000,
194 .ht_params = &iwl8000_ht_params,
195 .nvm_ver = IWL8000_NVM_VERSION,
196 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
197 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
198};
199
200const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
201 .name = "Intel(R) Dual Band Wireless-AC 8260",
202 .fw_name_pre = IWL8000_FW_PRE,
203 IWL_DEVICE_8000,
204 .ht_params = &iwl8000_ht_params,
205 .nvm_ver = IWL8000_NVM_VERSION,
206 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
207 .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
208 .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
209 .disable_dummy_notification = true,
210 .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
211 .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
212};
213
214const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
215 .name = "Intel(R) Dual Band Wireless-AC 4165",
216 .fw_name_pre = IWL8000_FW_PRE,
217 IWL_DEVICE_8000,
218 .ht_params = &iwl8000_ht_params,
219 .nvm_ver = IWL8000_NVM_VERSION,
220 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
221 .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
222 .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
223 .bt_shared_single_ant = true,
224 .disable_dummy_notification = true,
225 .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
226 .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
227};
228
229MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
new file mode 100644
index 000000000000..04a483d38659
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
@@ -0,0 +1,117 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
/*
 * Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
 */

#ifndef __iwl_agn_hw_h__
#define __iwl_agn_hw_h__

/* Runtime (RTC) SRAM layout on AGN devices: instruction and data windows. */
#define IWLAGN_RTC_INST_LOWER_BOUND	(0x000000)
#define IWLAGN_RTC_INST_UPPER_BOUND	(0x020000)

#define IWLAGN_RTC_DATA_LOWER_BOUND	(0x800000)
#define IWLAGN_RTC_DATA_UPPER_BOUND	(0x80C000)

#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
			      IWLAGN_RTC_INST_LOWER_BOUND)
#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
			      IWLAGN_RTC_DATA_LOWER_BOUND)

/* 6000-series and later parts have a larger instruction/data SRAM. */
#define IWL60_RTC_INST_LOWER_BOUND	(0x000000)
#define IWL60_RTC_INST_UPPER_BOUND	(0x040000)
#define IWL60_RTC_DATA_LOWER_BOUND	(0x800000)
#define IWL60_RTC_DATA_UPPER_BOUND	(0x814000)
#define IWL60_RTC_INST_SIZE \
	(IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
#define IWL60_RTC_DATA_SIZE \
	(IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)

/* RSSI to dBm */
#define IWLAGN_RSSI_OFFSET	44

/* Tx retry limits */
#define IWLAGN_DEFAULT_TX_RETRY			15
#define IWLAGN_MGMT_DFAULT_RETRY_LIMIT		3
#define IWLAGN_RTS_DFAULT_RETRY_LIMIT		60
#define IWLAGN_BAR_DFAULT_RETRY_LIMIT		60
#define IWLAGN_LOW_RETRY_LIMIT			7

/* Limit range of txpower output target to be between these values */
#define IWLAGN_TX_POWER_TARGET_POWER_MIN	(0)  /* 0 dBm: 1 milliwatt */
#define IWLAGN_TX_POWER_TARGET_POWER_MAX	(16) /* 16 dBm */

/* EEPROM */
#define IWLAGN_EEPROM_IMG_SIZE		2048

/* high blocks contain PAPD data */
#define OTP_HIGH_IMAGE_SIZE_6x00	(6 * 512 * sizeof(u16)) /* 6 KB */
#define OTP_HIGH_IMAGE_SIZE_1000	(0x200 * sizeof(u16)) /* 1024 bytes */
#define OTP_MAX_LL_ITEMS_1000		(3)	/* OTP blocks for 1000 */
#define OTP_MAX_LL_ITEMS_6x00		(4)	/* OTP blocks for 6x00 */
#define OTP_MAX_LL_ITEMS_6x50		(7)	/* OTP blocks for 6x50 */
#define OTP_MAX_LL_ITEMS_2x00		(4)	/* OTP blocks for 2x00 */

#define IWLAGN_NUM_QUEUES	20

#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
new file mode 100644
index 000000000000..910970858f98
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -0,0 +1,437 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __IWL_CONFIG_H__
64#define __IWL_CONFIG_H__
65
66#include <linux/types.h>
67#include <net/mac80211.h>
68
69
70enum iwl_device_family {
71 IWL_DEVICE_FAMILY_UNDEFINED,
72 IWL_DEVICE_FAMILY_1000,
73 IWL_DEVICE_FAMILY_100,
74 IWL_DEVICE_FAMILY_2000,
75 IWL_DEVICE_FAMILY_2030,
76 IWL_DEVICE_FAMILY_105,
77 IWL_DEVICE_FAMILY_135,
78 IWL_DEVICE_FAMILY_5000,
79 IWL_DEVICE_FAMILY_5150,
80 IWL_DEVICE_FAMILY_6000,
81 IWL_DEVICE_FAMILY_6000i,
82 IWL_DEVICE_FAMILY_6005,
83 IWL_DEVICE_FAMILY_6030,
84 IWL_DEVICE_FAMILY_6050,
85 IWL_DEVICE_FAMILY_6150,
86 IWL_DEVICE_FAMILY_7000,
87 IWL_DEVICE_FAMILY_8000,
88};
89
90static inline bool iwl_has_secure_boot(u32 hw_rev,
91 enum iwl_device_family family)
92{
93 /* return 1 only for family 8000 B0 */
94 if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
95 return true;
96
97 return false;
98}
99
100/*
101 * LED mode
102 * IWL_LED_DEFAULT: use device default
103 * IWL_LED_RF_STATE: turn LED on/off based on RF state
104 * LED ON = RF ON
105 * LED OFF = RF OFF
106 * IWL_LED_BLINK: adjust led blink rate based on blink table
107 * IWL_LED_DISABLE: led disabled
108 */
109enum iwl_led_mode {
110 IWL_LED_DEFAULT,
111 IWL_LED_RF_STATE,
112 IWL_LED_BLINK,
113 IWL_LED_DISABLE,
114};
115
116/*
117 * This is the threshold value of plcp error rate per 100mSecs. It is
118 * used to set and check for the validity of plcp_delta.
119 */
120#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1
121#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50
122#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100
123#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200
124#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255
125#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
126
127/* TX queue watchdog timeouts in mSecs */
128#define IWL_WATCHDOG_DISABLED 0
129#define IWL_DEF_WD_TIMEOUT 2500
130#define IWL_LONG_WD_TIMEOUT 10000
131#define IWL_MAX_WD_TIMEOUT 120000
132
133#define IWL_DEFAULT_MAX_TX_POWER 22
134
135/* Antenna presence definitions */
136#define ANT_NONE 0x0
137#define ANT_A BIT(0)
138#define ANT_B BIT(1)
139#define ANT_C BIT(2)
140#define ANT_AB (ANT_A | ANT_B)
141#define ANT_AC (ANT_A | ANT_C)
142#define ANT_BC (ANT_B | ANT_C)
143#define ANT_ABC (ANT_A | ANT_B | ANT_C)
144
145static inline u8 num_of_ant(u8 mask)
146{
147 return !!((mask) & ANT_A) +
148 !!((mask) & ANT_B) +
149 !!((mask) & ANT_C);
150}
151
152/*
153 * @max_ll_items: max number of OTP blocks
154 * @shadow_ram_support: shadow support for OTP memory
155 * @led_compensation: compensate on the led on/off time per HW according
156 * to the deviation to achieve the desired led frequency.
157 * The detail algorithm is described in iwl-led.c
158 * @wd_timeout: TX queues watchdog timeout
159 * @max_event_log_size: size of event log buffer size for ucode event logging
160 * @shadow_reg_enable: HW shadow register support
161 * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
162 * is in flight. This is due to a HW bug in 7260, 3160 and 7265.
163 * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
164 */
165struct iwl_base_params {
166 int eeprom_size;
167 int num_of_queues; /* def: HW dependent */
168 /* for iwl_pcie_apm_init() */
169 u32 pll_cfg_val;
170
171 const u16 max_ll_items;
172 const bool shadow_ram_support;
173 u16 led_compensation;
174 unsigned int wd_timeout;
175 u32 max_event_log_size;
176 const bool shadow_reg_enable;
177 const bool pcie_l1_allowed;
178 const bool apmg_wake_up_wa;
179 const bool scd_chain_ext_wa;
180};
181
182/*
183 * @stbc: support Tx STBC and 1*SS Rx STBC
184 * @ldpc: support Tx/Rx with LDPC
185 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
186 * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
187 */
188struct iwl_ht_params {
189 enum ieee80211_smps_mode smps_mode;
190 const bool ht_greenfield_support; /* if used set to true */
191 const bool stbc;
192 const bool ldpc;
193 bool use_rts_for_aggregation;
194 u8 ht40_bands;
195};
196
197/*
198 * Tx-backoff threshold
199 * @temperature: The threshold in Celsius
200 * @backoff: The tx-backoff in uSec
201 */
202struct iwl_tt_tx_backoff {
203 s32 temperature;
204 u32 backoff;
205};
206
207#define TT_TX_BACKOFF_SIZE 6
208
209/**
210 * struct iwl_tt_params - thermal throttling parameters
211 * @ct_kill_entry: CT Kill entry threshold
212 * @ct_kill_exit: CT Kill exit threshold
213 * @ct_kill_duration: The time intervals (in uSec) in which the driver needs
214 * to checks whether to exit CT Kill.
215 * @dynamic_smps_entry: Dynamic SMPS entry threshold
216 * @dynamic_smps_exit: Dynamic SMPS exit threshold
217 * @tx_protection_entry: TX protection entry threshold
218 * @tx_protection_exit: TX protection exit threshold
219 * @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
220 * @support_ct_kill: Support CT Kill?
221 * @support_dynamic_smps: Support dynamic SMPS?
222 * @support_tx_protection: Support tx protection?
223 * @support_tx_backoff: Support tx-backoff?
224 */
225struct iwl_tt_params {
226 u32 ct_kill_entry;
227 u32 ct_kill_exit;
228 u32 ct_kill_duration;
229 u32 dynamic_smps_entry;
230 u32 dynamic_smps_exit;
231 u32 tx_protection_entry;
232 u32 tx_protection_exit;
233 struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
234 bool support_ct_kill;
235 bool support_dynamic_smps;
236 bool support_tx_protection;
237 bool support_tx_backoff;
238};
239
240/*
241 * information on how to parse the EEPROM
242 */
243#define EEPROM_REG_BAND_1_CHANNELS 0x08
244#define EEPROM_REG_BAND_2_CHANNELS 0x26
245#define EEPROM_REG_BAND_3_CHANNELS 0x42
246#define EEPROM_REG_BAND_4_CHANNELS 0x5C
247#define EEPROM_REG_BAND_5_CHANNELS 0x74
248#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82
249#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92
250#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
251#define EEPROM_REGULATORY_BAND_NO_HT40 0
252
253/* lower blocks contain EEPROM image and calibration data */
254#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
255#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
256#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
257
258struct iwl_eeprom_params {
259 const u8 regulatory_bands[7];
260 bool enhanced_txpower;
261};
262
263/* Tx-backoff power threshold
264 * @pwr: The power limit in mw
265 * @backoff: The tx-backoff in uSec
266 */
267struct iwl_pwr_tx_backoff {
268 u32 pwr;
269 u32 backoff;
270};
271
272/**
273 * struct iwl_cfg
274 * @name: Official name of the device
275 * @fw_name_pre: Firmware filename prefix. The api version and extension
276 * (.ucode) will be added to filename before loading from disk. The
277 * filename is constructed as fw_name_pre<api>.ucode.
278 * @ucode_api_max: Highest version of uCode API supported by driver.
279 * @ucode_api_ok: oldest version of the uCode API that is OK to load
280 * without a warning, for use in transitions
281 * @ucode_api_min: Lowest version of uCode API supported by driver.
282 * @max_inst_size: The maximal length of the fw inst section
283 * @max_data_size: The maximal length of the fw data section
284 * @valid_tx_ant: valid transmit antenna
285 * @valid_rx_ant: valid receive antenna
286 * @non_shared_ant: the antenna that is for WiFi only
287 * @nvm_ver: NVM version
288 * @nvm_calib_ver: NVM calibration version
289 * @lib: pointer to the lib ops
290 * @base_params: pointer to basic parameters
291 * @ht_params: point to ht parameters
292 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
293 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
294 * @internal_wimax_coex: internal wifi/wimax combo device
295 * @high_temp: Is this NIC is designated to be in high temperature.
296 * @host_interrupt_operation_mode: device needs host interrupt operation
297 * mode set
298 * @d0i3: device uses d0i3 instead of d3
299 * @nvm_hw_section_num: the ID of the HW NVM section
300 * @features: hw features, any combination of feature_whitelist
301 * @pwr_tx_backoffs: translation table between power limits and backoffs
302 * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
303 * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
304 * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
305 * station can receive in HT
306 * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
307 * station can receive in VHT
308 * @dccm_offset: offset from which DCCM begins
309 * @dccm_len: length of DCCM (including runtime stack CCM)
310 * @dccm2_offset: offset from which the second DCCM begins
311 * @dccm2_len: length of the second DCCM
312 * @smem_offset: offset from which the SMEM begins
313 * @smem_len: the length of SMEM
314 *
315 * We enable the driver to be backward compatible wrt. hardware features.
316 * API differences in uCode shouldn't be handled here but through TLVs
317 * and/or the uCode API version instead.
318 */
319struct iwl_cfg {
320 /* params specific to an individual device within a device family */
321 const char *name;
322 const char *fw_name_pre;
323 const unsigned int ucode_api_max;
324 const unsigned int ucode_api_ok;
325 const unsigned int ucode_api_min;
326 const enum iwl_device_family device_family;
327 const u32 max_data_size;
328 const u32 max_inst_size;
329 u8 valid_tx_ant;
330 u8 valid_rx_ant;
331 u8 non_shared_ant;
332 bool bt_shared_single_ant;
333 u16 nvm_ver;
334 u16 nvm_calib_ver;
335 /* params not likely to change within a device family */
336 const struct iwl_base_params *base_params;
337 /* params likely to change within a device family */
338 const struct iwl_ht_params *ht_params;
339 const struct iwl_eeprom_params *eeprom_params;
340 enum iwl_led_mode led_mode;
341 const bool rx_with_siso_diversity;
342 const bool internal_wimax_coex;
343 const bool host_interrupt_operation_mode;
344 bool high_temp;
345 bool d0i3;
346 u8 nvm_hw_section_num;
347 bool lp_xtal_workaround;
348 const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
349 bool no_power_up_nic_in_init;
350 const char *default_nvm_file_B_step;
351 const char *default_nvm_file_C_step;
352 netdev_features_t features;
353 unsigned int max_rx_agg_size;
354 bool disable_dummy_notification;
355 unsigned int max_tx_agg_size;
356 unsigned int max_ht_ampdu_exponent;
357 unsigned int max_vht_ampdu_exponent;
358 const u32 dccm_offset;
359 const u32 dccm_len;
360 const u32 dccm2_offset;
361 const u32 dccm2_len;
362 const u32 smem_offset;
363 const u32 smem_len;
364 const struct iwl_tt_params *thermal_params;
365 bool apmg_not_supported;
366};
367
368/*
369 * This list declares the config structures for all devices.
370 */
371#if IS_ENABLED(CONFIG_IWLDVM)
372extern const struct iwl_cfg iwl5300_agn_cfg;
373extern const struct iwl_cfg iwl5100_agn_cfg;
374extern const struct iwl_cfg iwl5350_agn_cfg;
375extern const struct iwl_cfg iwl5100_bgn_cfg;
376extern const struct iwl_cfg iwl5100_abg_cfg;
377extern const struct iwl_cfg iwl5150_agn_cfg;
378extern const struct iwl_cfg iwl5150_abg_cfg;
379extern const struct iwl_cfg iwl6005_2agn_cfg;
380extern const struct iwl_cfg iwl6005_2abg_cfg;
381extern const struct iwl_cfg iwl6005_2bg_cfg;
382extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
383extern const struct iwl_cfg iwl6005_2agn_d_cfg;
384extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
385extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
386extern const struct iwl_cfg iwl1030_bgn_cfg;
387extern const struct iwl_cfg iwl1030_bg_cfg;
388extern const struct iwl_cfg iwl6030_2agn_cfg;
389extern const struct iwl_cfg iwl6030_2abg_cfg;
390extern const struct iwl_cfg iwl6030_2bgn_cfg;
391extern const struct iwl_cfg iwl6030_2bg_cfg;
392extern const struct iwl_cfg iwl6000i_2agn_cfg;
393extern const struct iwl_cfg iwl6000i_2abg_cfg;
394extern const struct iwl_cfg iwl6000i_2bg_cfg;
395extern const struct iwl_cfg iwl6000_3agn_cfg;
396extern const struct iwl_cfg iwl6050_2agn_cfg;
397extern const struct iwl_cfg iwl6050_2abg_cfg;
398extern const struct iwl_cfg iwl6150_bgn_cfg;
399extern const struct iwl_cfg iwl6150_bg_cfg;
400extern const struct iwl_cfg iwl1000_bgn_cfg;
401extern const struct iwl_cfg iwl1000_bg_cfg;
402extern const struct iwl_cfg iwl100_bgn_cfg;
403extern const struct iwl_cfg iwl100_bg_cfg;
404extern const struct iwl_cfg iwl130_bgn_cfg;
405extern const struct iwl_cfg iwl130_bg_cfg;
406extern const struct iwl_cfg iwl2000_2bgn_cfg;
407extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
408extern const struct iwl_cfg iwl2030_2bgn_cfg;
409extern const struct iwl_cfg iwl6035_2agn_cfg;
410extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
411extern const struct iwl_cfg iwl105_bgn_cfg;
412extern const struct iwl_cfg iwl105_bgn_d_cfg;
413extern const struct iwl_cfg iwl135_bgn_cfg;
414#endif /* CONFIG_IWLDVM */
415#if IS_ENABLED(CONFIG_IWLMVM)
416extern const struct iwl_cfg iwl7260_2ac_cfg;
417extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
418extern const struct iwl_cfg iwl7260_2n_cfg;
419extern const struct iwl_cfg iwl7260_n_cfg;
420extern const struct iwl_cfg iwl3160_2ac_cfg;
421extern const struct iwl_cfg iwl3160_2n_cfg;
422extern const struct iwl_cfg iwl3160_n_cfg;
423extern const struct iwl_cfg iwl3165_2ac_cfg;
424extern const struct iwl_cfg iwl7265_2ac_cfg;
425extern const struct iwl_cfg iwl7265_2n_cfg;
426extern const struct iwl_cfg iwl7265_n_cfg;
427extern const struct iwl_cfg iwl7265d_2ac_cfg;
428extern const struct iwl_cfg iwl7265d_2n_cfg;
429extern const struct iwl_cfg iwl7265d_n_cfg;
430extern const struct iwl_cfg iwl8260_2n_cfg;
431extern const struct iwl_cfg iwl8260_2ac_cfg;
432extern const struct iwl_cfg iwl4165_2ac_cfg;
433extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
434extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
435#endif /* CONFIG_IWLMVM */
436
437#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
new file mode 100644
index 000000000000..543abeaffcf0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -0,0 +1,552 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#ifndef __iwl_csr_h__
66#define __iwl_csr_h__
67/*
68 * CSR (control and status registers)
69 *
70 * CSR registers are mapped directly into PCI bus space, and are accessible
71 * whenever platform supplies power to device, even when device is in
72 * low power states due to driver-invoked device resets
73 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
74 *
75 * Use iwl_write32() and iwl_read32() family to access these registers;
76 * these provide simple PCI bus access, without waking up the MAC.
77 * Do not use iwl_write_direct32() family for these registers;
78 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
79 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
80 * the CSR registers.
81 *
82 * NOTE: Device does need to be awake in order to read this memory
83 * via CSR_EEPROM and CSR_OTP registers
84 */
85#define CSR_BASE (0x000)
86
87#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
88#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
89#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
90#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
91#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
92#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
93#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
94#define CSR_GP_CNTRL (CSR_BASE+0x024)
95
96/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
97#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
98
99/*
100 * Hardware revision info
101 * Bit fields:
102 * 31-16: Reserved
103 * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
104 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
105 * 1-0: "Dash" (-) value, as in A-1, etc.
106 */
107#define CSR_HW_REV (CSR_BASE+0x028)
108
109/*
110 * EEPROM and OTP (one-time-programmable) memory reads
111 *
112 * NOTE: Device must be awake, initialized via apm_ops.init(),
113 * in order to read.
114 */
115#define CSR_EEPROM_REG (CSR_BASE+0x02c)
116#define CSR_EEPROM_GP (CSR_BASE+0x030)
117#define CSR_OTP_GP_REG (CSR_BASE+0x034)
118
119#define CSR_GIO_REG (CSR_BASE+0x03C)
120#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
121#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
122
123/*
124 * UCODE-DRIVER GP (general purpose) mailbox registers.
125 * SET/CLR registers set/clear bit(s) if "1" is written.
126 */
127#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
128#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
129#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
130#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
131
132#define CSR_MBOX_SET_REG (CSR_BASE + 0x88)
133
134#define CSR_LED_REG (CSR_BASE+0x094)
135#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
136#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
137
138
139/* GIO Chicken Bits (PCI Express bus link power management) */
140#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
141
142/* Analog phase-lock-loop configuration */
143#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
144
145/*
146 * CSR HW resources monitor registers
147 */
148#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214)
149#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228)
150#define CSR_MONITOR_XTAL_RESOURCES (0x00000010)
151
152/*
153 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
154 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
155 * See also CSR_HW_REV register.
156 * Bit fields:
157 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
158 * 1-0: "Dash" (-) value, as in C-1, etc.
159 */
160#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
161
162#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
163#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
164
165/* Bits for CSR_HW_IF_CONFIG_REG */
166#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
167#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
168#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
169#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
170#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
171#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
172#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
173#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
174
175#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
176#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
177#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
178#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
179#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
180#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
181
182#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
183#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
184#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
185#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
186#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
187#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
188#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
189
190#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
191
192#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
193#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
194
195/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
196 * acknowledged (reset) by host writing "1" to flagged bits. */
197#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
198#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
199#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
200#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
201#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
202#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
203#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */
204#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
205#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
206#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
207#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
208#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
209
210#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
211 CSR_INT_BIT_HW_ERR | \
212 CSR_INT_BIT_FH_TX | \
213 CSR_INT_BIT_SW_ERR | \
214 CSR_INT_BIT_PAGING | \
215 CSR_INT_BIT_RF_KILL | \
216 CSR_INT_BIT_SW_RX | \
217 CSR_INT_BIT_WAKEUP | \
218 CSR_INT_BIT_ALIVE | \
219 CSR_INT_BIT_RX_PERIODIC)
220
221/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
222#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
223#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
224#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
225#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
226#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
227#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
228
229#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
230 CSR_FH_INT_BIT_RX_CHNL1 | \
231 CSR_FH_INT_BIT_RX_CHNL0)
232
233#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
234 CSR_FH_INT_BIT_TX_CHNL0)
235
236/* GPIO */
237#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
238#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
239#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
240
241/* RESET */
242#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
243#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
244#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
245#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
246#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
247#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
248
249/*
250 * GP (general purpose) CONTROL REGISTER
251 * Bit fields:
252 * 27: HW_RF_KILL_SW
253 * Indicates state of (platform's) hardware RF-Kill switch
254 * 26-24: POWER_SAVE_TYPE
255 * Indicates current power-saving mode:
256 * 000 -- No power saving
257 * 001 -- MAC power-down
258 * 010 -- PHY (radio) power-down
259 * 011 -- Error
260 * 10: XTAL ON request
261 * 9-6: SYS_CONFIG
262 * Indicates current system configuration, reflecting pins on chip
263 * as forced high/low by device circuit board.
264 * 4: GOING_TO_SLEEP
265 * Indicates MAC is entering a power-saving sleep power-down.
266 * Not a good time to access device-internal resources.
267 * 3: MAC_ACCESS_REQ
268 * Host sets this to request and maintain MAC wakeup, to allow host
269 * access to device-internal resources. Host must wait for
270 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
271 * device registers.
272 * 2: INIT_DONE
273 * Host sets this to put device into fully operational D0 power mode.
274 * Host resets this after SW_RESET to put device into low power mode.
275 * 0: MAC_CLOCK_READY
276 * Indicates MAC (ucode processor, etc.) is powered up and can run.
277 * Internal resources are accessible.
278 * NOTE: This does not indicate that the processor is actually running.
279 * NOTE: This does not indicate that device has completed
280 * init or post-power-down restore of internal SRAM memory.
281 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
282 * SRAM is restored and uCode is in normal operation mode.
283 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
284 * do not need to save/restore it.
285 * NOTE: After device reset, this bit remains "0" until host sets
286 * INIT_DONE
287 */
288#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
289#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
290#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
291#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
292#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
293
294#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
295
296#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
297#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
298#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
299
300
301/* HW REV */
302#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
303#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
304
305
306/**
307 * hw_rev values
308 */
309enum {
310 SILICON_A_STEP = 0,
311 SILICON_B_STEP,
312 SILICON_C_STEP,
313};
314
315
316#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
317#define CSR_HW_REV_TYPE_5300 (0x0000020)
318#define CSR_HW_REV_TYPE_5350 (0x0000030)
319#define CSR_HW_REV_TYPE_5100 (0x0000050)
320#define CSR_HW_REV_TYPE_5150 (0x0000040)
321#define CSR_HW_REV_TYPE_1000 (0x0000060)
322#define CSR_HW_REV_TYPE_6x00 (0x0000070)
323#define CSR_HW_REV_TYPE_6x50 (0x0000080)
324#define CSR_HW_REV_TYPE_6150 (0x0000084)
325#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
326#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
327#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
328#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
329#define CSR_HW_REV_TYPE_2x00 (0x0000100)
330#define CSR_HW_REV_TYPE_105 (0x0000110)
331#define CSR_HW_REV_TYPE_135 (0x0000120)
332#define CSR_HW_REV_TYPE_7265D (0x0000210)
333#define CSR_HW_REV_TYPE_NONE (0x00001F0)
334
335/* EEPROM REG */
336#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
337#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
338#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
339#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
340
341/* EEPROM GP */
342#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
343#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
344#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000)
345#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001)
346#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
347#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
348
349/* One-time-programmable memory general purpose reg */
350#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */
351#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */
352#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */
353#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */
354
355/* GP REG */
356#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
357#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
358#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
359#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
360#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
361
362
363/* CSR GIO */
364#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
365
366/*
367 * UCODE-DRIVER GP (general purpose) mailbox register 1
368 * Host driver and uCode write and/or read this register to communicate with
369 * each other.
370 * Bit fields:
371 * 4: UCODE_DISABLE
372 * Host sets this to request permanent halt of uCode, same as
373 * sending CARD_STATE command with "halt" bit set.
374 * 3: CT_KILL_EXIT
375 * Host sets this to request exit from CT_KILL state, i.e. host thinks
376 * device temperature is low enough to continue normal operation.
377 * 2: CMD_BLOCKED
378 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
379 * to release uCode to clear all Tx and command queues, enter
380 * unassociated mode, and power down.
381 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
382 * 1: SW_BIT_RFKILL
383 * Host sets this when issuing CARD_STATE command to request
384 * device sleep.
385 * 0: MAC_SLEEP
386 * uCode sets this when preparing a power-saving power-down.
387 * uCode resets this when power-up is complete and SRAM is sane.
388 * NOTE: device saves internal SRAM data to host when powering down,
389 * and must restore this data after powering back up.
390 * MAC_SLEEP is the best indication that restore is complete.
391 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
392 * do not need to save/restore it.
393 */
394#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
395#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
396#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
397#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
398#define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020)
399
400/* GP Driver */
401#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003)
402#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000)
403#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001)
404#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002)
405#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
406#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
407
408#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080)
409
410/* GIO Chicken Bits (PCI Express bus link power management) */
411#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
412#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
413
414/* LED */
415#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
416#define CSR_LED_REG_TURN_ON (0x60)
417#define CSR_LED_REG_TURN_OFF (0x20)
418
419/* ANA_PLL */
420#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
421
422/* HPET MEM debug */
423#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
424
425/* DRAM INT TABLE */
426#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
427#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
428#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
429
430/*
431 * SHR target access (Shared block memory space)
432 *
433 * Shared internal registers can be accessed directly from PCI bus through SHR
434 * arbiter without need for the MAC HW to be powered up. This is possible due to
435 * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and
436 * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers.
437 *
438 * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW
439 * need not be powered up so no "grab inc access" is required.
440 */
441
442/*
443 * Registers for accessing shared registers (e.g. SHR_APMG_GP1,
444 * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC),
445 * first, write to the control register:
446 * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
447 * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
448 * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
449 *
450 * To write the register, first, write to the data register
451 * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then:
452 * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
453 * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
454 */
455#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec)
456#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4)
457
458/*
459 * HBUS (Host-side Bus)
460 *
461 * HBUS registers are mapped directly into PCI bus space, but are used
462 * to indirectly access device's internal memory or registers that
463 * may be powered-down.
464 *
465 * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
466 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
467 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
468 * internal resources.
469 *
470 * Do not use iwl_write32()/iwl_read32() family to access these registers;
471 * these provide only simple PCI bus access, without waking up the MAC.
472 */
473#define HBUS_BASE (0x400)
474
475/*
476 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
477 * structures, error log, event log, verifying uCode load).
478 * First write to address register, then read from or write to data register
479 * to complete the job. Once the address register is set up, accesses to
480 * data registers auto-increment the address by one dword.
481 * Bit usage for address registers (read or write):
482 * 0-31: memory address within device
483 */
484#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
485#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
486#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
487#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
488
489/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
490#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
491#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
492
493/*
494 * Registers for accessing device's internal peripheral registers
495 * (e.g. SCD, BSM, etc.). First write to address register,
496 * then read from or write to data register to complete the job.
497 * Bit usage for address registers (read or write):
498 * 0-15: register address (offset) within device
499 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
500 */
501#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
502#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
503#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
504#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
505
506/* Used to enable DBGM */
507#define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c)
508
509/*
510 * Per-Tx-queue write pointer (index, really!)
511 * Indicates index to next TFD that driver will fill (1 past latest filled).
512 * Bit usage:
513 * 0-7: queue write index
514 * 11-8: queue selector
515 */
516#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
517
518/**********************************************************
519 * CSR values
520 **********************************************************/
521 /*
522 * host interrupt timeout value
523 * used with setting interrupt coalescing timer
524 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
525 *
526 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
527 */
528#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
529#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
530#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
531#define IWL_HOST_INT_OPER_MODE BIT(31)
532
533/*****************************************************************************
534 * 7000/3000 series SHR DTS addresses *
535 *****************************************************************************/
536
537/* Diode Results Register Structure: bit-field masks within the DTS
 * (digital thermal sensor) diode results register.
 * NOTE(review): the enum tag "dtd" looks like a typo for "dts" -- kept
 * unchanged since renaming would break any user of the type. */
538enum dtd_diode_reg {
539	DTS_DIODE_REG_DIG_VAL		= 0x000000FF, /* bits [7:0] */
540	DTS_DIODE_REG_VREF_LOW		= 0x0000FF00, /* bits [15:8] */
541	DTS_DIODE_REG_VREF_HIGH		= 0x00FF0000, /* bits [23:16] */
542	DTS_DIODE_REG_VREF_ID		= 0x03000000, /* bits [25:24] */
543	DTS_DIODE_REG_PASS_ONCE		= 0x80000000, /* bits [31:31] */
544	DTS_DIODE_REG_FLAGS_MSK		= 0xFF000000, /* bits [31:24] */
545/* Those are the masks INSIDE the flags bit-field, i.e. relative to the
 * byte extracted with DTS_DIODE_REG_FLAGS_MSK: */
546	DTS_DIODE_REG_FLAGS_VREFS_ID_POS	= 0,
547	DTS_DIODE_REG_FLAGS_VREFS_ID	= 0x00000003, /* bits [1:0] */
548	DTS_DIODE_REG_FLAGS_PASS_ONCE_POS	= 7,
549	DTS_DIODE_REG_FLAGS_PASS_ONCE	= 0x00000080, /* bits [7:7] */
550};
551
552#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
new file mode 100644
index 000000000000..09feff4fa226
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
@@ -0,0 +1,136 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/device.h>
65#include <linux/interrupt.h>
66#include <linux/export.h>
67#include "iwl-drv.h"
68#include "iwl-debug.h"
69#include "iwl-devtrace.h"
70
/*
 * __iwl_fn - generate __iwl_warn()/__iwl_info()/__iwl_crit().
 * Each generated function packs its varargs into a struct va_format so
 * the message is formatted exactly once ("%pV") and delivered both to
 * the corresponding dev_<fn>() log helper and to the matching
 * trace_iwlwifi_<fn> tracepoint.
 */
71#define __iwl_fn(fn)						\
72void __iwl_ ##fn(struct device *dev, const char *fmt, ...)	\
73{								\
74	struct va_format vaf = {				\
75		.fmt = fmt,					\
76	};							\
77	va_list args;						\
78								\
79	va_start(args, fmt);					\
80	vaf.va = &args;						\
81	dev_ ##fn(dev, "%pV", &vaf);				\
82	trace_iwlwifi_ ##fn(&vaf);				\
83	va_end(args);					\
84}
85
/* Instantiate and export the three log-level helpers. */
86__iwl_fn(warn)
87IWL_EXPORT_SYMBOL(__iwl_warn);
88__iwl_fn(info)
89IWL_EXPORT_SYMBOL(__iwl_info);
90__iwl_fn(crit)
91IWL_EXPORT_SYMBOL(__iwl_crit);
92
93void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
94 const char *fmt, ...)
95{
96 struct va_format vaf = {
97 .fmt = fmt,
98 };
99 va_list args;
100
101 va_start(args, fmt);
102 vaf.va = &args;
103 if (!trace_only) {
104 if (rfkill_prefix)
105 dev_err(dev, "(RFKILL) %pV", &vaf);
106 else
107 dev_err(dev, "%pV", &vaf);
108 }
109 trace_iwlwifi_err(&vaf);
110 va_end(args);
111}
112IWL_EXPORT_SYMBOL(__iwl_err);
113
114#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
/*
 * __iwl_dbg - emit a driver debug message.
 * Printed to the kernel log only when CONFIG_IWLWIFI_DEBUG is set, the
 * requested debug @level is enabled, and (when @limit is set) the
 * net_ratelimit() budget allows it.  Always fed to the iwlwifi_dbg
 * tracepoint regardless of the print decision.  The 'I'/'U' prefix in
 * the log line marks interrupt vs. user (process) context.
 */
115void __iwl_dbg(struct device *dev,
116	       u32 level, bool limit, const char *function,
117	       const char *fmt, ...)
118{
119	struct va_format vaf = {
120		.fmt = fmt,
121	};
122	va_list args;
123
124	va_start(args, fmt);
125	vaf.va = &args;
126#ifdef CONFIG_IWLWIFI_DEBUG
127	if (iwl_have_debug_level(level) &&
128	    (!limit || net_ratelimit()))
129		dev_printk(KERN_DEBUG, dev, "%c %s %pV",
130			   in_interrupt() ? 'I' : 'U', function, &vaf);
131#endif
132	trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
133	va_end(args);
134}
135IWL_EXPORT_SYMBOL(__iwl_dbg);
136#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
new file mode 100644
index 000000000000..9bb36d79c2bd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -0,0 +1,225 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_debug_h__
30#define __iwl_debug_h__
31
32#include "iwl-modparams.h"
33
34
35static inline bool iwl_have_debug_level(u32 level)
36{
37#ifdef CONFIG_IWLWIFI_DEBUG
38 return iwlwifi_mod_params.debug_level & level;
39#else
40 return false;
41#endif
42}
43
44void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
45 const char *fmt, ...) __printf(4, 5);
46void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
47void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
48void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
49
50/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
/* Compile-time check that a string-literal format ends in '\n':
 * f[sizeof(f) - 2] is the last character before the terminating NUL.
 * BUILD_BUG_ON() makes a missing newline a build failure. */
51#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
52
53/* No matter what is m (priv, bus, trans), this will work */
/* IWL_ERR_DEV takes a struct device * directly; IWL_ERR takes any
 * object with a ->dev member.  All variants enforce the trailing
 * newline and forward to the __iwl_*() helpers in iwl-debug.c. */
54#define IWL_ERR_DEV(d, f, a...)					\
55	do {							\
56		CHECK_FOR_NEWLINE(f);				\
57		__iwl_err((d), false, false, f, ## a);		\
58	} while (0)
59#define IWL_ERR(m, f, a...)					\
60	IWL_ERR_DEV((m)->dev, f, ## a)
61#define IWL_WARN(m, f, a...)					\
62	do {							\
63		CHECK_FOR_NEWLINE(f);				\
64		__iwl_warn((m)->dev, f, ## a);			\
65	} while (0)
66#define IWL_INFO(m, f, a...)					\
67	do {							\
68		CHECK_FOR_NEWLINE(f);				\
69		__iwl_info((m)->dev, f, ## a);			\
70	} while (0)
71#define IWL_CRIT(m, f, a...)					\
72	do {							\
73		CHECK_FOR_NEWLINE(f);				\
74		__iwl_crit((m)->dev, f, ## a);			\
75	} while (0)
76
77#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
78void __iwl_dbg(struct device *dev,
79 u32 level, bool limit, const char *function,
80 const char *fmt, ...) __printf(5, 6);
81#else
82__printf(5, 6) static inline void
83__iwl_dbg(struct device *dev,
84 u32 level, bool limit, const char *function,
85 const char *fmt, ...)
86{}
87#endif
88
89#define iwl_print_hex_error(m, p, len) \
90do { \
91 print_hex_dump(KERN_ERR, "iwl data: ", \
92 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
93} while (0)
94
95#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) \
96 do { \
97 CHECK_FOR_NEWLINE(fmt); \
98 __iwl_dbg(dev, level, limit, __func__, fmt, ##args); \
99 } while (0)
100#define IWL_DEBUG(m, level, fmt, args...) \
101 __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
102#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
103 __IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
104#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
105 __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
106
107#ifdef CONFIG_IWLWIFI_DEBUG
108#define iwl_print_hex_dump(m, level, p, len) \
109do { \
110 if (iwl_have_debug_level(level)) \
111 print_hex_dump(KERN_DEBUG, "iwl data: ", \
112 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
113} while (0)
114#else
115#define iwl_print_hex_dump(m, level, p, len)
116#endif /* CONFIG_IWLWIFI_DEBUG */
117
118/*
119 * To use the debug system:
120 *
121 * If you are defining a new debug classification, simply add it to the #define
122 * list here in the form of
123 *
124 * #define IWL_DL_xxxx VALUE
125 *
126 * where xxxx should be the name of the classification (for example, WEP).
127 *
128 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
129 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
130 * to send output to that classification.
131 *
132 * The active debug levels can be accessed via files
133 *
134 * /sys/module/iwlwifi/parameters/debug
135 * when CONFIG_IWLWIFI_DEBUG=y.
136 *
137 * /sys/kernel/debug/phy0/iwlwifi/debug/debug_level
138 * when CONFIG_IWLWIFI_DEBUGFS=y.
139 *
140 */
141
142/* 0x0000000F - 0x00000001 */
143#define IWL_DL_INFO 0x00000001
144#define IWL_DL_MAC80211 0x00000002
145#define IWL_DL_HCMD 0x00000004
146#define IWL_DL_TDLS 0x00000008
147/* 0x000000F0 - 0x00000010 */
148#define IWL_DL_QUOTA 0x00000010
149#define IWL_DL_TE 0x00000020
150#define IWL_DL_EEPROM 0x00000040
151#define IWL_DL_RADIO 0x00000080
152/* 0x00000F00 - 0x00000100 */
153#define IWL_DL_POWER 0x00000100
154#define IWL_DL_TEMP 0x00000200
155#define IWL_DL_RPM 0x00000400
156#define IWL_DL_SCAN 0x00000800
157/* 0x0000F000 - 0x00001000 */
158#define IWL_DL_ASSOC 0x00001000
159#define IWL_DL_DROP 0x00002000
160#define IWL_DL_LAR 0x00004000
161#define IWL_DL_COEX 0x00008000
162/* 0x000F0000 - 0x00010000 */
163#define IWL_DL_FW 0x00010000
164#define IWL_DL_RF_KILL 0x00020000
165#define IWL_DL_FW_ERRORS 0x00040000
166#define IWL_DL_LED 0x00080000
167/* 0x00F00000 - 0x00100000 */
168#define IWL_DL_RATE 0x00100000
169#define IWL_DL_CALIB 0x00200000
170#define IWL_DL_WEP 0x00400000
171#define IWL_DL_TX 0x00800000
172/* 0x0F000000 - 0x01000000 */
173#define IWL_DL_RX 0x01000000
174#define IWL_DL_ISR 0x02000000
175#define IWL_DL_HT 0x04000000
176#define IWL_DL_EXTERNAL 0x08000000
177/* 0xF0000000 - 0x10000000 */
178#define IWL_DL_11H 0x10000000
179#define IWL_DL_STATS 0x20000000
180#define IWL_DL_TX_REPLY 0x40000000
181#define IWL_DL_TX_QUEUES 0x80000000
182
183#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
185#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
186#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
187#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
188#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
189#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
190#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
191#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
192#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
193#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
194#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
195#define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a)
196#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a)
197#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
198#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
199#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
200#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
201#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
202#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
203#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
204 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
205#define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ## a)
206#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
207#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
208 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
209#define IWL_DEBUG_ASSOC(p, f, a...) \
210 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
211#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
212 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
213#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
214#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
215#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
216 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
217#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
218#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
219#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
220#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
221#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
222#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
223#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
224
225#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
new file mode 100644
index 000000000000..71a78cede9b0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -0,0 +1,80 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_DEVICE_TRACE_DATA
29
30#include <linux/tracepoint.h>
31
32#undef TRACE_SYSTEM
33#define TRACE_SYSTEM iwlwifi_data
34
/* Capture TX frame payload (after the 802.11 header) into the trace
 * buffer -- but only when iwl_trace_data(skb) permits it; otherwise the
 * dynamic array is sized 0 and nothing is copied. */
35TRACE_EVENT(iwlwifi_dev_tx_data,
36	 TP_PROTO(const struct device *dev,
37		  struct sk_buff *skb,
38		  u8 hdr_len, size_t data_len),
39	 TP_ARGS(dev, skb, hdr_len, data_len),
40	 TP_STRUCT__entry(
41		DEV_ENTRY
42
43		__dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
44	 ),
45	 TP_fast_assign(
46		DEV_ASSIGN;
47		if (iwl_trace_data(skb))
48			skb_copy_bits(skb, hdr_len,
49				      __get_dynamic_array(data), data_len);
50	 ),
51	 TP_printk("[%s] TX frame data", __get_str(dev))
52);
53
/* Capture the tail of an RX buffer: iwl_rx_trace_len() returns how many
 * leading bytes another tracepoint already recorded; only the remainder
 * (if any) is copied here. */
54TRACE_EVENT(iwlwifi_dev_rx_data,
55	 TP_PROTO(const struct device *dev,
56		  const struct iwl_trans *trans,
57		  void *rxbuf, size_t len),
58	 TP_ARGS(dev, trans, rxbuf, len),
59	 TP_STRUCT__entry(
60		DEV_ENTRY
61
62		__dynamic_array(u8, data,
63				len - iwl_rx_trace_len(trans, rxbuf, len))
64	 ),
65	 TP_fast_assign(
66		size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
67		DEV_ASSIGN;
68		if (offs < len)
69			memcpy(__get_dynamic_array(data),
70			       ((u8 *)rxbuf) + offs, len - offs);
71	 ),
72	 TP_printk("[%s] RX frame data", __get_str(dev))
73);
74#endif /* __IWLWIFI_DEVICE_TRACE_DATA */
75
76#undef TRACE_INCLUDE_PATH
77#define TRACE_INCLUDE_PATH .
78#undef TRACE_INCLUDE_FILE
79#define TRACE_INCLUDE_FILE iwl-devtrace-data
80#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
new file mode 100644
index 000000000000..f62c54485852
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
@@ -0,0 +1,155 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_DEVICE_TRACE_IO
29
30#include <linux/tracepoint.h>
31
32#undef TRACE_SYSTEM
33#define TRACE_SYSTEM iwlwifi_io
34
/* Trace a 32-bit CSR read: records the register offset and the value
 * returned by the device. */
35TRACE_EVENT(iwlwifi_dev_ioread32,
36	TP_PROTO(const struct device *dev, u32 offs, u32 val),
37	TP_ARGS(dev, offs, val),
38	TP_STRUCT__entry(
39		DEV_ENTRY
40		__field(u32, offs)
41		__field(u32, val)
42	),
43	TP_fast_assign(
44		DEV_ASSIGN;
45		__entry->offs = offs;
46		__entry->val = val;
47	),
48	TP_printk("[%s] read io[%#x] = %#x",
49		  __get_str(dev), __entry->offs, __entry->val)
50);
51
52TRACE_EVENT(iwlwifi_dev_iowrite8,
53 TP_PROTO(const struct device *dev, u32 offs, u8 val),
54 TP_ARGS(dev, offs, val),
55 TP_STRUCT__entry(
56 DEV_ENTRY
57 __field(u32, offs)
58 __field(u8, val)
59 ),
60 TP_fast_assign(
61 DEV_ASSIGN;
62 __entry->offs = offs;
63 __entry->val = val;
64 ),
65 TP_printk("[%s] write io[%#x] = %#x)",
66 __get_str(dev), __entry->offs, __entry->val)
67);
68
69TRACE_EVENT(iwlwifi_dev_iowrite32,
70 TP_PROTO(const struct device *dev, u32 offs, u32 val),
71 TP_ARGS(dev, offs, val),
72 TP_STRUCT__entry(
73 DEV_ENTRY
74 __field(u32, offs)
75 __field(u32, val)
76 ),
77 TP_fast_assign(
78 DEV_ASSIGN;
79 __entry->offs = offs;
80 __entry->val = val;
81 ),
82 TP_printk("[%s] write io[%#x] = %#x)",
83 __get_str(dev), __entry->offs, __entry->val)
84);
85
86TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
87 TP_PROTO(const struct device *dev, u32 offs, u32 val),
88 TP_ARGS(dev, offs, val),
89 TP_STRUCT__entry(
90 DEV_ENTRY
91 __field(u32, offs)
92 __field(u32, val)
93 ),
94 TP_fast_assign(
95 DEV_ASSIGN;
96 __entry->offs = offs;
97 __entry->val = val;
98 ),
99 TP_printk("[%s] write PRPH[%#x] = %#x)",
100 __get_str(dev), __entry->offs, __entry->val)
101);
102
/* Trace a 32-bit read from a peripheral (PRPH) register: records the
 * peripheral offset and the value returned. */
103TRACE_EVENT(iwlwifi_dev_ioread_prph32,
104	TP_PROTO(const struct device *dev, u32 offs, u32 val),
105	TP_ARGS(dev, offs, val),
106	TP_STRUCT__entry(
107		DEV_ENTRY
108		__field(u32, offs)
109		__field(u32, val)
110	),
111	TP_fast_assign(
112		DEV_ASSIGN;
113		__entry->offs = offs;
114		__entry->val = val;
115	),
116	TP_printk("[%s] read PRPH[%#x] = %#x",
117		  __get_str(dev), __entry->offs, __entry->val)
118);
119
/* Mark that the device raised an interrupt; no payload beyond the
 * device name is recorded. */
120TRACE_EVENT(iwlwifi_dev_irq,
121	TP_PROTO(const struct device *dev),
122	TP_ARGS(dev),
123	TP_STRUCT__entry(
124		DEV_ENTRY
125	),
126	TP_fast_assign(
127		DEV_ASSIGN;
128	),
129	/* TP_printk("") doesn't compile -- emit a constant instead */
130	TP_printk("%d", 0)
131);
132
/* Trace one read from the ICT (interrupt cause table): records the
 * table index and the value found there. */
133TRACE_EVENT(iwlwifi_dev_ict_read,
134	TP_PROTO(const struct device *dev, u32 index, u32 value),
135	TP_ARGS(dev, index, value),
136	TP_STRUCT__entry(
137		DEV_ENTRY
138		__field(u32, index)
139		__field(u32, value)
140	),
141	TP_fast_assign(
142		DEV_ASSIGN;
143		__entry->index = index;
144		__entry->value = value;
145	),
146	TP_printk("[%s] read ict[%d] = %#.8x",
147		  __get_str(dev), __entry->index, __entry->value)
148);
149#endif /* __IWLWIFI_DEVICE_TRACE_IO */
150
151#undef TRACE_INCLUDE_PATH
152#define TRACE_INCLUDE_PATH .
153#undef TRACE_INCLUDE_FILE
154#define TRACE_INCLUDE_FILE iwl-devtrace-io
155#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
new file mode 100644
index 000000000000..eb4b99a1c8cd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -0,0 +1,209 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2015 Intel Mobile Communications GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 * Contact Information:
23 * Intel Linux Wireless <ilw@linux.intel.com>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 *****************************************************************************/
27
28#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ)
29#define __IWLWIFI_DEVICE_TRACE_IWLWIFI
30
31#include <linux/tracepoint.h>
32
33#undef TRACE_SYSTEM
34#define TRACE_SYSTEM iwlwifi
35
/*
 * Trace a host command sent to the firmware.  Captures the command
 * flags plus one contiguous copy of the command: the (possibly wide)
 * header followed by every non-empty data fragment.
 */
36TRACE_EVENT(iwlwifi_dev_hcmd,
37 TP_PROTO(const struct device *dev,
38 struct iwl_host_cmd *cmd, u16 total_size,
39 struct iwl_cmd_header_wide *hdr),
40 TP_ARGS(dev, cmd, total_size, hdr),
41 TP_STRUCT__entry(
42 DEV_ENTRY
43 __dynamic_array(u8, hcmd, total_size)
44 __field(u32, flags)
45 ),
46 TP_fast_assign(
47 int i, offset = sizeof(struct iwl_cmd_header);
48
/* a non-zero group_id means the wide header format is in use */
49 if (hdr->group_id)
50 offset = sizeof(struct iwl_cmd_header_wide);
51
52 DEV_ASSIGN;
53 __entry->flags = cmd->flags;
54 memcpy(__get_dynamic_array(hcmd), hdr, offset);
55
/* append each used TB's payload right after the header */
56 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
57 if (!cmd->len[i])
58 continue;
59 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
60 cmd->data[i], cmd->len[i]);
61 offset += cmd->len[i];
62 }
63 ),
64 TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
65 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
66 ((u8 *)__get_dynamic_array(hcmd))[0],
67 __entry->flags & CMD_ASYNC ? "a" : "")
68);
69
/*
 * Trace a packet received from the firmware.  The amount captured is
 * limited by iwl_rx_trace_len(), which trims received data-frame
 * payloads so user traffic is not recorded in the trace buffer.
 */
70TRACE_EVENT(iwlwifi_dev_rx,
71 TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
72 struct iwl_rx_packet *pkt, size_t len),
73 TP_ARGS(dev, trans, pkt, len),
74 TP_STRUCT__entry(
75 DEV_ENTRY
76 __field(u8, cmd)
77 __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
78 ),
79 TP_fast_assign(
80 DEV_ASSIGN;
81 __entry->cmd = pkt->hdr.cmd;
82 memcpy(__get_dynamic_array(rxbuf), pkt,
83 iwl_rx_trace_len(trans, pkt, len));
84 ),
85 TP_printk("[%s] RX cmd %#.2x",
86 __get_str(dev), __entry->cmd)
87);
88
/*
 * Trace a transmitted frame: the TFD plus the first two buffers.  For
 * data frames buf1 (the payload) is omitted — see iwl_trace_data().
 */
89TRACE_EVENT(iwlwifi_dev_tx,
90 TP_PROTO(const struct device *dev, struct sk_buff *skb,
91 void *tfd, size_t tfdlen,
92 void *buf0, size_t buf0_len,
93 void *buf1, size_t buf1_len),
94 TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
95 TP_STRUCT__entry(
96 DEV_ENTRY
97
98 __field(size_t, framelen)
99 __dynamic_array(u8, tfd, tfdlen)
100
101 /*
102 * Do not insert between or below these items,
103 * we want to keep the frame together (except
104 * for the possible padding).
105 */
106 __dynamic_array(u8, buf0, buf0_len)
107 __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
108 ),
109 TP_fast_assign(
110 DEV_ASSIGN;
111 __entry->framelen = buf0_len + buf1_len;
112 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
113 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
114 if (!iwl_trace_data(skb))
115 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
116 ),
117 TP_printk("[%s] TX %.2x (%zu bytes)",
118 __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
119 __entry->framelen)
120);
121
/*
 * Trace a firmware (uCode) error report: a straight dump of the
 * error-log registers read from the device, one u32 per field.
 */
122TRACE_EVENT(iwlwifi_dev_ucode_error,
123 TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
124 u32 data1, u32 data2, u32 line, u32 blink1,
125 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
126 u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver,
127 u32 brd_ver),
128 TP_ARGS(dev, desc, tsf_low, data1, data2, line,
129 blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
130 gp3, major, minor, hw_ver, brd_ver),
131 TP_STRUCT__entry(
132 DEV_ENTRY
133 __field(u32, desc)
134 __field(u32, tsf_low)
135 __field(u32, data1)
136 __field(u32, data2)
137 __field(u32, line)
138 __field(u32, blink1)
139 __field(u32, blink2)
140 __field(u32, ilink1)
141 __field(u32, ilink2)
142 __field(u32, bcon_time)
143 __field(u32, gp1)
144 __field(u32, gp2)
145 __field(u32, gp3)
146 __field(u32, major)
147 __field(u32, minor)
148 __field(u32, hw_ver)
149 __field(u32, brd_ver)
150 ),
151 TP_fast_assign(
152 DEV_ASSIGN;
153 __entry->desc = desc;
154 __entry->tsf_low = tsf_low;
155 __entry->data1 = data1;
156 __entry->data2 = data2;
157 __entry->line = line;
158 __entry->blink1 = blink1;
159 __entry->blink2 = blink2;
160 __entry->ilink1 = ilink1;
161 __entry->ilink2 = ilink2;
162 __entry->bcon_time = bcon_time;
163 __entry->gp1 = gp1;
164 __entry->gp2 = gp2;
165 __entry->gp3 = gp3;
166 __entry->major = major;
167 __entry->minor = minor;
168 __entry->hw_ver = hw_ver;
169 __entry->brd_ver = brd_ver;
170 ),
171 TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
172 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
173 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X "
174 "minor 0x%08X hw 0x%08X brd 0x%08X",
175 __get_str(dev), __entry->desc, __entry->tsf_low,
176 __entry->data1,
177 __entry->data2, __entry->line, __entry->blink1,
178 __entry->blink2, __entry->ilink1, __entry->ilink2,
179 __entry->bcon_time, __entry->gp1, __entry->gp2,
180 __entry->gp3, __entry->major, __entry->minor,
181 __entry->hw_ver, __entry->brd_ver)
182);
183
/* Trace one firmware event-log entry: timestamp, data word, event id. */
184TRACE_EVENT(iwlwifi_dev_ucode_event,
185 TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
186 TP_ARGS(dev, time, data, ev),
187 TP_STRUCT__entry(
188 DEV_ENTRY
189
190 __field(u32, time)
191 __field(u32, data)
192 __field(u32, ev)
193 ),
194 TP_fast_assign(
195 DEV_ASSIGN;
196 __entry->time = time;
197 __entry->data = data;
198 __entry->ev = ev;
199 ),
200 TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
201 __get_str(dev), __entry->time, __entry->data, __entry->ev)
202);
203#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI */
204
205#undef TRACE_INCLUDE_PATH
206#define TRACE_INCLUDE_PATH .
207#undef TRACE_INCLUDE_FILE
208#define TRACE_INCLUDE_FILE iwl-devtrace-iwlwifi
209#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
new file mode 100644
index 000000000000..a3b3c2465f89
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -0,0 +1,97 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_DEVICE_TRACE_MSG
29
30#include <linux/tracepoint.h>
31
32#undef TRACE_SYSTEM
33#define TRACE_SYSTEM iwlwifi_msg
34
35#define MAX_MSG_LEN 110
36
/*
 * Event class for driver log messages: snapshots the formatted message
 * (truncated to MAX_MSG_LEN bytes) into the trace buffer.
 */
37DECLARE_EVENT_CLASS(iwlwifi_msg_event,
38 TP_PROTO(struct va_format *vaf),
39 TP_ARGS(vaf),
40 TP_STRUCT__entry(
41 __dynamic_array(char, msg, MAX_MSG_LEN)
42 ),
43 TP_fast_assign(
/* warn (once) if the message did not fit and was truncated */
44 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
45 MAX_MSG_LEN, vaf->fmt,
46 *vaf->va) >= MAX_MSG_LEN);
47 ),
48 TP_printk("%s", __get_str(msg))
49);
50
/* One instance of the class per log level: err, warn, info, crit. */
51DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
52 TP_PROTO(struct va_format *vaf),
53 TP_ARGS(vaf)
54);
55
56DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn,
57 TP_PROTO(struct va_format *vaf),
58 TP_ARGS(vaf)
59);
60
61DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info,
62 TP_PROTO(struct va_format *vaf),
63 TP_ARGS(vaf)
64);
65
66DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
67 TP_PROTO(struct va_format *vaf),
68 TP_ARGS(vaf)
69);
70
/*
 * Debug message trace: also records the debug level, whether the
 * message came from interrupt context, and the calling function name.
 */
71TRACE_EVENT(iwlwifi_dbg,
72 TP_PROTO(u32 level, bool in_interrupt, const char *function,
73 struct va_format *vaf),
74 TP_ARGS(level, in_interrupt, function, vaf),
75 TP_STRUCT__entry(
76 __field(u32, level)
77 __field(u8, in_interrupt)
78 __string(function, function)
79 __dynamic_array(char, msg, MAX_MSG_LEN)
80 ),
81 TP_fast_assign(
82 __entry->level = level;
83 __entry->in_interrupt = in_interrupt;
84 __assign_str(function, function);
85 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
86 MAX_MSG_LEN, vaf->fmt,
87 *vaf->va) >= MAX_MSG_LEN);
88 ),
89 TP_printk("%s", __get_str(msg))
90);
91#endif /* __IWLWIFI_DEVICE_TRACE_MSG */
92
93#undef TRACE_INCLUDE_PATH
94#define TRACE_INCLUDE_PATH .
95#undef TRACE_INCLUDE_FILE
96#define TRACE_INCLUDE_FILE iwl-devtrace-msg
97#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
new file mode 100644
index 000000000000..10839fae9cd9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
@@ -0,0 +1,81 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_DEVICE_TRACE_UCODE
29
30#include <linux/tracepoint.h>
31
32#undef TRACE_SYSTEM
33#define TRACE_SYSTEM iwlwifi_ucode
34
/* Trace one continuous-event-log entry: timestamp, data word, event id. */
35TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
36 TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
37 TP_ARGS(dev, time, data, ev),
38 TP_STRUCT__entry(
39 DEV_ENTRY
40
41 __field(u32, time)
42 __field(u32, data)
43 __field(u32, ev)
44 ),
45 TP_fast_assign(
46 DEV_ASSIGN;
47 __entry->time = time;
48 __entry->data = data;
49 __entry->ev = ev;
50 ),
51 TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
52 __get_str(dev), __entry->time, __entry->data, __entry->ev)
53);
54
/* Trace a wrap of the firmware event log: wrap count and old/new entry. */
55TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
56 TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry),
57 TP_ARGS(dev, wraps, n_entry, p_entry),
58 TP_STRUCT__entry(
59 DEV_ENTRY
60
61 __field(u32, wraps)
62 __field(u32, n_entry)
63 __field(u32, p_entry)
64 ),
65 TP_fast_assign(
66 DEV_ASSIGN;
67 __entry->wraps = wraps;
68 __entry->n_entry = n_entry;
69 __entry->p_entry = p_entry;
70 ),
71 TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X",
72 __get_str(dev), __entry->wraps, __entry->n_entry,
73 __entry->p_entry)
74);
75#endif /* __IWLWIFI_DEVICE_TRACE_UCODE */
76
77#undef TRACE_INCLUDE_PATH
78#define TRACE_INCLUDE_PATH .
79#undef TRACE_INCLUDE_FILE
80#define TRACE_INCLUDE_FILE iwl-devtrace-ucode
81#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
new file mode 100644
index 000000000000..90987d6f348e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -0,0 +1,43 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-trans.h"
32
/* expand the trace headers into tracepoint definitions (exactly once) */
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
/* exported so other iwlwifi modules can fire these tracepoints */
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
43#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
new file mode 100644
index 000000000000..b87acd6a229b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -0,0 +1,89 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __IWLWIFI_DEVICE_TRACE
28#include <linux/skbuff.h>
29#include <linux/ieee80211.h>
30#include <net/cfg80211.h>
31#include "iwl-trans.h"
32#if !defined(__IWLWIFI_DEVICE_TRACE)
33static inline bool iwl_trace_data(struct sk_buff *skb)
34{
35 struct ieee80211_hdr *hdr = (void *)skb->data;
36 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
37
38 if (!ieee80211_is_data(hdr->frame_control))
39 return false;
40 return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
41}
42
/*
 * How many bytes of an RX buffer to capture in the trace.  Non-MPDU
 * notifications are captured in full; for received data frames only the
 * RX command header plus the 802.11 header is kept, so user payloads
 * don't end up in trace logs.
 */
43static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
44 void *rxbuf, size_t len)
45{
/* the buffer starts with a __le32 length/flags word before the header */
46 struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
47 struct ieee80211_hdr *hdr;
48
49 if (cmd->cmd != trans->rx_mpdu_cmd)
50 return len;
51
52 hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
53 trans->rx_mpdu_cmd_hdr_size);
54 if (!ieee80211_is_data(hdr->frame_control))
55 return len;
56 /* maybe try to identify EAPOL frames? */
57 return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
58 ieee80211_hdrlen(hdr->frame_control);
59}
60#endif
61
62#define __IWLWIFI_DEVICE_TRACE
63
64#include <linux/tracepoint.h>
65#include <linux/device.h>
66#include "iwl-trans.h"
67
68
69#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
70#undef TRACE_EVENT
71#define TRACE_EVENT(name, proto, ...) \
72static inline void trace_ ## name(proto) {}
73#undef DECLARE_EVENT_CLASS
74#define DECLARE_EVENT_CLASS(...)
75#undef DEFINE_EVENT
76#define DEFINE_EVENT(evt_class, name, proto, ...) \
77static inline void trace_ ## name(proto) {}
78#endif
79
80#define DEV_ENTRY __string(dev, dev_name(dev))
81#define DEV_ASSIGN __assign_str(dev, dev_name(dev))
82
83#include "iwl-devtrace-io.h"
84#include "iwl-devtrace-ucode.h"
85#include "iwl-devtrace-msg.h"
86#include "iwl-devtrace-data.h"
87#include "iwl-devtrace-iwlwifi.h"
88
89#endif /* __IWLWIFI_DEVICE_TRACE */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
new file mode 100644
index 000000000000..463cadfbfccb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -0,0 +1,1706 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/completion.h>
66#include <linux/dma-mapping.h>
67#include <linux/firmware.h>
68#include <linux/module.h>
69#include <linux/vmalloc.h>
70
71#include "iwl-drv.h"
72#include "iwl-csr.h"
73#include "iwl-debug.h"
74#include "iwl-trans.h"
75#include "iwl-op-mode.h"
76#include "iwl-agn-hw.h"
77#include "iwl-fw.h"
78#include "iwl-config.h"
79#include "iwl-modparams.h"
80
81/******************************************************************************
82 *
83 * module boiler plate
84 *
85 ******************************************************************************/
86
87#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
88MODULE_DESCRIPTION(DRV_DESCRIPTION);
89MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
90MODULE_LICENSE("GPL");
91
92#ifdef CONFIG_IWLWIFI_DEBUGFS
93static struct dentry *iwl_dbgfs_root;
94#endif
95
96/**
97 * struct iwl_drv - drv common data
98 * @list: list of drv structures using this opmode
99 * @fw: the iwl_fw structure
100 * @op_mode: the running op_mode
101 * @trans: transport layer
102 * @dev: for debug prints only
103 * @cfg: configuration struct
104 * @fw_index: firmware revision to try loading
105 * @firmware_name: composite filename of ucode file to load
106 * @request_firmware_complete: the firmware has been obtained from user space
 * @dbgfs_drv: top-level debugfs directory for this device
 * @dbgfs_trans: debugfs subdirectory handed to the transport layer
 * @dbgfs_op_mode: debugfs subdirectory handed to the op_mode
107 */
108struct iwl_drv {
109 struct list_head list;
110 struct iwl_fw fw;
111
112 struct iwl_op_mode *op_mode;
113 struct iwl_trans *trans;
114 struct device *dev;
115 const struct iwl_cfg *cfg;
116
117 int fw_index; /* firmware we're trying to load */
118 char firmware_name[32]; /* name of firmware file to load */
119
120 struct completion request_firmware_complete;
121
122#ifdef CONFIG_IWLWIFI_DEBUGFS
123 struct dentry *dbgfs_drv;
124 struct dentry *dbgfs_trans;
125 struct dentry *dbgfs_op_mode;
126#endif
127};
128
129enum {
130 DVM_OP_MODE = 0,
131 MVM_OP_MODE = 1,
132};
133
134/* Protects the table contents, i.e. the ops pointer & drv list */
135static struct mutex iwlwifi_opmode_table_mtx;
136static struct iwlwifi_opmode_table {
137 const char *name; /* name: iwldvm, iwlmvm, etc */
138 const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
139 struct list_head drv; /* list of devices using this op_mode */
140} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
141 [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
142 [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
143};
144
145#define IWL_DEFAULT_SCAN_CHANNELS 40
146
147/*
148 * struct fw_sec: Just for the image parsing process.
149 * For the fw storage we are using struct fw_desc.
150 */
151struct fw_sec {
152 const void *data; /* the sec data */
153 size_t size; /* section size */
154 u32 offset; /* offset of writing in the device */
155};
156
157static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
158{
159 vfree(desc->data);
160 desc->data = NULL;
161 desc->len = 0;
162}
163
164static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
165{
166 int i;
167 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++)
168 iwl_free_fw_desc(drv, &img->sec[i]);
169}
170
171static void iwl_dealloc_ucode(struct iwl_drv *drv)
172{
173 int i;
174
175 kfree(drv->fw.dbg_dest_tlv);
176 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
177 kfree(drv->fw.dbg_conf_tlv[i]);
178 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
179 kfree(drv->fw.dbg_trigger_tlv[i]);
180
181 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
182 iwl_free_fw_img(drv, drv->fw.img + i);
183}
184
185static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
186 struct fw_sec *sec)
187{
188 void *data;
189
190 desc->data = NULL;
191
192 if (!sec || !sec->size)
193 return -EINVAL;
194
195 data = vmalloc(sec->size);
196 if (!data)
197 return -ENOMEM;
198
199 desc->len = sec->size;
200 desc->offset = sec->offset;
201 memcpy(data, sec->data, desc->len);
202 desc->data = data;
203
204 return 0;
205}
206
207static void iwl_req_fw_callback(const struct firmware *ucode_raw,
208 void *context);
209
210#define UCODE_EXPERIMENTAL_INDEX 100
211#define UCODE_EXPERIMENTAL_TAG "exp"
212
/*
 * Request the next firmware image to try, asynchronously.
 *
 * On the first call the highest supported uCode API version is tried
 * (or the "exp" experimental image when
 * CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is set; note the #ifdef below
 * deliberately interleaves with the if/else chain).  Each later call
 * steps the API version down by one; -ENOENT is returned once it drops
 * below ucode_api_min.  The result arrives via iwl_req_fw_callback().
 */
213static int iwl_request_firmware(struct iwl_drv *drv, bool first)
214{
215 const char *name_pre = drv->cfg->fw_name_pre;
216 char tag[8];
217
218 if (first) {
219#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
220 drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
221 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
222 } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
223#endif
224 drv->fw_index = drv->cfg->ucode_api_max;
225 sprintf(tag, "%d", drv->fw_index);
226 } else {
227 drv->fw_index--;
228 sprintf(tag, "%d", drv->fw_index);
229 }
230
231 if (drv->fw_index < drv->cfg->ucode_api_min) {
232 IWL_ERR(drv, "no suitable firmware found!\n");
233 return -ENOENT;
234 }
235
236 snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
237 name_pre, tag);
238
239 /*
240 * Starting 8000B - FW name format has changed. This overwrites the
241 * previous name and uses the new format.
242 */
243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
245
246 snprintf(drv->firmware_name, sizeof(drv->firmware_name),
247 "%s%c-%s.ucode", name_pre, rev_step, tag);
248 }
249
250 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
251 (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
252 ? "EXPERIMENTAL " : "",
253 drv->firmware_name);
254
255 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
256 drv->trans->dev,
257 GFP_KERNEL, drv, iwl_req_fw_callback);
258}
259
260struct fw_img_parsing {
261 struct fw_sec sec[IWL_UCODE_SECTION_MAX];
262 int sec_counter;
263};
264
265/*
266 * struct fw_sec_parsing: to extract fw section and it's offset from tlv
267 */
268struct fw_sec_parsing {
269 __le32 offset;
270 const u8 data[];
271} __packed;
272
273/**
274 * struct iwl_tlv_calib_data - parse the default calib data from TLV
275 *
276 * @ucode_type: the uCode to which the following default calib relates.
277 * @calib: default calibrations.
278 */
279struct iwl_tlv_calib_data {
280 __le32 ucode_type;
281 struct iwl_tlv_calib_ctrl calib;
282} __packed;
283
284struct iwl_firmware_pieces {
285 struct fw_img_parsing img[IWL_UCODE_TYPE_MAX];
286
287 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
288 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
289
290 /* FW debug data parsed for driver usage */
291 struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
292 struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
293 size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
294 struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
295 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
296};
297
298/*
299 * These functions are just to extract uCode section data from the pieces
300 * structure.
301 */
302static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
303 enum iwl_ucode_type type,
304 int sec)
305{
306 return &pieces->img[type].sec[sec];
307}
308
309static void set_sec_data(struct iwl_firmware_pieces *pieces,
310 enum iwl_ucode_type type,
311 int sec,
312 const void *data)
313{
314 pieces->img[type].sec[sec].data = data;
315}
316
317static void set_sec_size(struct iwl_firmware_pieces *pieces,
318 enum iwl_ucode_type type,
319 int sec,
320 size_t size)
321{
322 pieces->img[type].sec[sec].size = size;
323}
324
325static size_t get_sec_size(struct iwl_firmware_pieces *pieces,
326 enum iwl_ucode_type type,
327 int sec)
328{
329 return pieces->img[type].sec[sec].size;
330}
331
332static void set_sec_offset(struct iwl_firmware_pieces *pieces,
333 enum iwl_ucode_type type,
334 int sec,
335 u32 offset)
336{
337 pieces->img[type].sec[sec].offset = offset;
338}
339
/*
 * Parse the cipher-scheme TLV into mac80211 cipher-scheme descriptors.
 *
 * Copies at most IWL_UCODE_MAX_CS entries with a non-zero cipher suite
 * selector into fw->cs[].  Returns 0 on success, -EINVAL when @len is
 * too short to hold the advertised l->size entries.
 */
340static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
341{
342 int i, j;
343 struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
344 struct iwl_fw_cipher_scheme *fwcs;
345 struct ieee80211_cipher_scheme *cs;
346 u32 cipher;
347
348 if (len < sizeof(*l) ||
349 len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
350 return -EINVAL;
351
/*
 * NOTE(review): the source entry is indexed by j, not i, so once a
 * zero-cipher entry is hit it is re-read on every later iteration and
 * effectively terminates the copy — confirm this matches the TLV intent.
 */
352 for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
353 fwcs = &l->cs[j];
354 cipher = le32_to_cpu(fwcs->cipher);
355
356 /* we skip schemes with zero cipher suite selector */
357 if (!cipher)
358 continue;
359
360 cs = &fw->cs[j++];
361 cs->cipher = cipher;
362 cs->iftype = BIT(NL80211_IFTYPE_STATION);
363 cs->hdr_len = fwcs->hdr_len;
364 cs->pn_len = fwcs->pn_len;
365 cs->pn_off = fwcs->pn_off;
366 cs->key_idx_off = fwcs->key_idx_off;
367 cs->key_idx_mask = fwcs->key_idx_mask;
368 cs->key_idx_shift = fwcs->key_idx_shift;
369 cs->mic_len = fwcs->mic_len;
370 }
371
372 return 0;
373}
374
375static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
376 const u32 len)
377{
378 struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
379 struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
380
381 if (len < sizeof(*fw_capa))
382 return -EINVAL;
383
384 capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
385 capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
386 capa->max_ap_cache_per_scan =
387 le32_to_cpu(fw_capa->max_ap_cache_per_scan);
388 capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
389 capa->max_scan_reporting_threshold =
390 le32_to_cpu(fw_capa->max_scan_reporting_threshold);
391 capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
392 capa->max_significant_change_aps =
393 le32_to_cpu(fw_capa->max_significant_change_aps);
394 capa->max_bssid_history_entries =
395 le32_to_cpu(fw_capa->max_bssid_history_entries);
396 return 0;
397}
398
399/*
400 * Gets uCode section from tlv.
401 */
402static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
403 const void *data, enum iwl_ucode_type type,
404 int size)
405{
406 struct fw_img_parsing *img;
407 struct fw_sec *sec;
408 struct fw_sec_parsing *sec_parse;
409
410 if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
411 return -1;
412
413 sec_parse = (struct fw_sec_parsing *)data;
414
415 img = &pieces->img[type];
416 sec = &img->sec[img->sec_counter];
417
418 sec->offset = le32_to_cpu(sec_parse->offset);
419 sec->data = sec_parse->data;
420 sec->size = size - sizeof(sec_parse->offset);
421
422 ++img->sec_counter;
423
424 return 0;
425}
426
/*
 * Store the default-calibration TLV for one uCode type: copies the
 * flow/event trigger bitmaps into drv->fw.default_calib[].  Returns
 * -EINVAL for an out-of-range ucode_type, 0 otherwise.
 */
427static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
428{
429 struct iwl_tlv_calib_data *def_calib =
430 (struct iwl_tlv_calib_data *)data;
431 u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
432 if (ucode_type >= IWL_UCODE_TYPE_MAX) {
433 IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
434 ucode_type);
435 return -EINVAL;
436 }
437 drv->fw.default_calib[ucode_type].flow_trigger =
438 def_calib->calib.flow_trigger;
439 drv->fw.default_calib[ucode_type].event_trigger =
440 def_calib->calib.event_trigger;
441
442 return 0;
443}
444
445static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
446 struct iwl_ucode_capabilities *capa)
447{
448 const struct iwl_ucode_api *ucode_api = (void *)data;
449 u32 api_index = le32_to_cpu(ucode_api->api_index);
450 u32 api_flags = le32_to_cpu(ucode_api->api_flags);
451 int i;
452
453 if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
454 IWL_ERR(drv, "api_index larger than supported by driver\n");
455 /* don't return an error so we can load FW that has more bits */
456 return 0;
457 }
458
459 for (i = 0; i < 32; i++) {
460 if (api_flags & BIT(i))
461 __set_bit(i + 32 * api_index, capa->_api);
462 }
463
464 return 0;
465}
466
467static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
468 struct iwl_ucode_capabilities *capa)
469{
470 const struct iwl_ucode_capa *ucode_capa = (void *)data;
471 u32 api_index = le32_to_cpu(ucode_capa->api_index);
472 u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
473 int i;
474
475 if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
476 IWL_ERR(drv, "api_index larger than supported by driver\n");
477 /* don't return an error so we can load FW that has more bits */
478 return 0;
479 }
480
481 for (i = 0; i < 32; i++) {
482 if (api_flags & BIT(i))
483 __set_bit(i + 32 * api_index, capa->_capa);
484 }
485
486 return 0;
487}
488
/*
 * Parse a legacy (pre-TLV, header v1/v2) firmware file.
 *
 * Fills in the sizes, data pointers and RTC load offsets of the
 * runtime and init sections in @pieces, and the version string in
 * drv->fw.  The data pointers reference @ucode_raw->data directly.
 *
 * Returns 0 on success, -EINVAL if the file is truncated or its total
 * size does not match the section sizes advertised in the header.
 */
static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
				    const struct firmware *ucode_raw,
				    struct iwl_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size, build;
	char buildstr[25];
	const u8 *src;

	drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(drv->fw.ucode_ver);

	/*
	 * default case first on purpose: any API version other than
	 * 0..2 uses the larger v2 header layout.
	 */
	switch (api_ver) {
	default:
		hdr_size = 28;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(drv, "File size too small!\n");
			return -EINVAL;
		}
		build = le32_to_cpu(ucode->u.v2.build);
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v2.inst_size));
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v2.data_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v2.init_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v2.init_data_size));
		src = ucode->u.v2.data;
		break;
	case 0:
	case 1:
	case 2:
		/* v1 header: no build number field */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(drv, "File size too small!\n");
			return -EINVAL;
		}
		build = 0;
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v1.inst_size));
		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v1.data_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
			     le32_to_cpu(ucode->u.v1.init_size));
		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
			     le32_to_cpu(ucode->u.v1.init_data_size));
		src = ucode->u.v1.data;
		break;
	}

	/* " build " + up to 10 digits + " (EXP)" + NUL fits in 25 bytes */
	if (build)
		sprintf(buildstr, " build %u%s", build,
		       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? " (EXP)" : "");
	else
		buildstr[0] = '\0';

	snprintf(drv->fw.fw_version,
		 sizeof(drv->fw.fw_version),
		 "%u.%u.%u.%u%s",
		 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
		 IWL_UCODE_MINOR(drv->fw.ucode_ver),
		 IWL_UCODE_API(drv->fw.ucode_ver),
		 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
		 buildstr);

	/* Verify size of file vs. image size info in file's header */

	if (ucode_raw->size != hdr_size +
	    get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
	    get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
	    get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
	    get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {

		IWL_ERR(drv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}


	/*
	 * Sections are laid out back-to-back after the header in the
	 * order: runtime inst, runtime data, init inst, init data.
	 */
	set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src);
	src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST);
	set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
		       IWLAGN_RTC_INST_LOWER_BOUND);
	set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src);
	src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA);
	set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
		       IWLAGN_RTC_DATA_LOWER_BOUND);
	set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src);
	src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST);
	set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
		       IWLAGN_RTC_INST_LOWER_BOUND);
	set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src);
	src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA);
	set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
		       IWLAGN_RTC_DATA_LOWER_BOUND);
	return 0;
}
589
/*
 * Parse a TLV-formatted (post-v2) firmware file.
 *
 * Walks the TLV stream after the header, recording section data into
 * @pieces and filling capabilities, debug TLVs, paging info etc. into
 * drv->fw and @capa.  Pointers stored in @pieces reference
 * @ucode_raw->data and are only valid until the firmware buffer is
 * released; iwl_req_fw_callback() copies what it keeps.
 *
 * Returns 0 on success, -EINVAL on a malformed file.
 */
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				const struct firmware *ucode_raw,
				struct iwl_firmware_pieces *pieces,
				struct iwl_ucode_capabilities *capa)
{
	struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
	struct iwl_ucode_tlv *tlv;
	size_t len = ucode_raw->size;
	const u8 *data;
	u32 tlv_len;
	u32 usniffer_img;
	enum iwl_ucode_tlv_type tlv_type;
	const u8 *tlv_data;
	char buildstr[25];
	u32 build, paging_mem_size;
	int num_of_cpus;
	bool usniffer_images = false;
	bool usniffer_req = false;
	bool gscan_capa = false;

	if (len < sizeof(*ucode)) {
		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
		return -EINVAL;
	}

	if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
		IWL_ERR(drv, "invalid uCode magic: 0X%x\n",
			le32_to_cpu(ucode->magic));
		return -EINVAL;
	}

	drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
	memcpy(drv->fw.human_readable, ucode->human_readable,
	       sizeof(drv->fw.human_readable));
	build = le32_to_cpu(ucode->build);

	/* " build " + up to 10 digits + " (EXP)" + NUL fits in 25 bytes */
	if (build)
		sprintf(buildstr, " build %u%s", build,
		       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? " (EXP)" : "");
	else
		buildstr[0] = '\0';

	snprintf(drv->fw.fw_version,
		 sizeof(drv->fw.fw_version),
		 "%u.%u.%u.%u%s",
		 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
		 IWL_UCODE_MINOR(drv->fw.ucode_ver),
		 IWL_UCODE_API(drv->fw.ucode_ver),
		 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
		 buildstr);

	data = ucode->data;

	len -= sizeof(*ucode);

	/* Walk the TLV stream; each TLV payload is padded to 4 bytes. */
	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			IWL_ERR(drv, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		/*
		 * NOTE(review): if the file ends with an unaligned TLV,
		 * ALIGN(tlv_len, 4) can exceed the remaining len and this
		 * size_t subtraction wraps, so the loop keeps reading past
		 * the buffer until the trailing "if (len)" check is never
		 * reached -- confirm only trusted firmware reaches here.
		 */
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		switch (tlv_type) {
		case IWL_UCODE_TLV_INST:
			set_sec_data(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_INST, tlv_data);
			set_sec_size(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_INST, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_REGULAR,
				       IWL_UCODE_SECTION_INST,
				       IWLAGN_RTC_INST_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_DATA:
			set_sec_data(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_DATA, tlv_data);
			set_sec_size(pieces, IWL_UCODE_REGULAR,
				     IWL_UCODE_SECTION_DATA, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_REGULAR,
				       IWL_UCODE_SECTION_DATA,
				       IWLAGN_RTC_DATA_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_INIT:
			set_sec_data(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_INST, tlv_data);
			set_sec_size(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_INST, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_INIT,
				       IWL_UCODE_SECTION_INST,
				       IWLAGN_RTC_INST_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_INIT_DATA:
			set_sec_data(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_DATA, tlv_data);
			set_sec_size(pieces, IWL_UCODE_INIT,
				     IWL_UCODE_SECTION_DATA, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_INIT,
				       IWL_UCODE_SECTION_DATA,
				       IWLAGN_RTC_DATA_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_BOOT:
			IWL_ERR(drv, "Found unexpected BOOT ucode\n");
			break;
		case IWL_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->max_probe_length =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_PAN:
			if (tlv_len)
				goto invalid_tlv_len;
			capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
			break;
		case IWL_UCODE_TLV_FLAGS:
			/* must be at least one u32 */
			if (tlv_len < sizeof(u32))
				goto invalid_tlv_len;
			/* and a proper number of u32s */
			if (tlv_len % sizeof(u32))
				goto invalid_tlv_len;
			/*
			 * This driver only reads the first u32 as
			 * right now no more features are defined,
			 * if that changes then either the driver
			 * will not work with the new firmware, or
			 * it'll not take advantage of new features.
			 */
			capa->flags = le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_API_CHANGES_SET:
			if (tlv_len != sizeof(struct iwl_ucode_api))
				goto invalid_tlv_len;
			if (iwl_set_ucode_api_flags(drv, tlv_data, capa))
				goto tlv_error;
			break;
		case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
			if (tlv_len != sizeof(struct iwl_ucode_capa))
				goto invalid_tlv_len;
			if (iwl_set_ucode_capabilities(drv, tlv_data, capa))
				goto tlv_error;
			break;
		case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_evtlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_evtlog_size =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->init_errlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_evtlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_evtlog_size =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			pieces->inst_errlog_ptr =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
			if (tlv_len)
				goto invalid_tlv_len;
			drv->fw.enhance_sensitivity_table = true;
			break;
		case IWL_UCODE_TLV_WOWLAN_INST:
			set_sec_data(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_INST, tlv_data);
			set_sec_size(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_INST, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_WOWLAN,
				       IWL_UCODE_SECTION_INST,
				       IWLAGN_RTC_INST_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_WOWLAN_DATA:
			set_sec_data(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_DATA, tlv_data);
			set_sec_size(pieces, IWL_UCODE_WOWLAN,
				     IWL_UCODE_SECTION_DATA, tlv_len);
			set_sec_offset(pieces, IWL_UCODE_WOWLAN,
				       IWL_UCODE_SECTION_DATA,
				       IWLAGN_RTC_DATA_LOWER_BOUND);
			break;
		case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->standard_phy_calibration_size =
					le32_to_cpup((__le32 *)tlv_data);
			break;
		/* the SEC_* TLVs imply an mvm (new-style) firmware */
		case IWL_UCODE_TLV_SEC_RT:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_SEC_INIT:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_SEC_WOWLAN:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwl_tlv_calib_data))
				goto invalid_tlv_len;
			if (iwl_set_default_calib(drv, tlv_data))
				goto tlv_error;
			break;
		case IWL_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
			drv->fw.valid_tx_ant = (drv->fw.phy_config &
						FW_PHY_CFG_TX_CHAIN) >>
						FW_PHY_CFG_TX_CHAIN_POS;
			drv->fw.valid_rx_ant = (drv->fw.phy_config &
						FW_PHY_CFG_RX_CHAIN) >>
						FW_PHY_CFG_RX_CHAIN_POS;
			break;
		case IWL_UCODE_TLV_SECURE_SEC_RT:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_SECURE_SEC_INIT:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
					    tlv_len);
			drv->fw.mvm_fw = true;
			break;
		case IWL_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			num_of_cpus =
				le32_to_cpup((__le32 *)tlv_data);

			if (num_of_cpus == 2) {
				drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
					true;
				drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
					true;
				drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
					true;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				IWL_ERR(drv, "Driver support upto 2 CPUs\n");
				return -EINVAL;
			}
			break;
		case IWL_UCODE_TLV_CSCHEME:
			if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
				goto invalid_tlv_len;
			break;
		case IWL_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			capa->n_scan_channels =
				le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_FW_VERSION: {
			__le32 *ptr = (void *)tlv_data;
			u32 major, minor;
			u8 local_comp;

			if (tlv_len != sizeof(u32) * 3)
				goto invalid_tlv_len;

			major = le32_to_cpup(ptr++);
			minor = le32_to_cpup(ptr++);
			/* third word is truncated to 8 bits for display */
			local_comp = le32_to_cpup(ptr);

			snprintf(drv->fw.fw_version,
				 sizeof(drv->fw.fw_version), "%u.%u.%u",
				 major, minor, local_comp);
			break;
		}
		case IWL_UCODE_TLV_FW_DBG_DEST: {
			struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;

			/* only the first destination TLV is honoured */
			if (pieces->dbg_dest_tlv) {
				IWL_ERR(drv,
					"dbg destination ignored, already exists\n");
				break;
			}

			pieces->dbg_dest_tlv = dest;
			IWL_INFO(drv, "Found debug destination: %s\n",
				 get_fw_dbg_mode_string(dest->monitor_mode));

			drv->fw.dbg_dest_reg_num =
				tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
						   reg_ops);
			drv->fw.dbg_dest_reg_num /=
				sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);

			break;
		}
		case IWL_UCODE_TLV_FW_DBG_CONF: {
			struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;

			if (!pieces->dbg_dest_tlv) {
				IWL_ERR(drv,
					"Ignore dbg config %d - no destination configured\n",
					conf->id);
				break;
			}

			if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
				IWL_ERR(drv,
					"Skip unknown configuration: %d\n",
					conf->id);
				break;
			}

			if (pieces->dbg_conf_tlv[conf->id]) {
				IWL_ERR(drv,
					"Ignore duplicate dbg config %d\n",
					conf->id);
				break;
			}

			if (conf->usniffer)
				usniffer_req = true;

			IWL_INFO(drv, "Found debug configuration: %d\n",
				 conf->id);

			pieces->dbg_conf_tlv[conf->id] = conf;
			pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}
		case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
			struct iwl_fw_dbg_trigger_tlv *trigger =
				(void *)tlv_data;
			u32 trigger_id = le32_to_cpu(trigger->id);

			if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
				IWL_ERR(drv,
					"Skip unknown trigger: %u\n",
					trigger->id);
				break;
			}

			if (pieces->dbg_trigger_tlv[trigger_id]) {
				IWL_ERR(drv,
					"Ignore duplicate dbg trigger %u\n",
					trigger->id);
				break;
			}

			IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);

			pieces->dbg_trigger_tlv[trigger_id] = trigger;
			pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
			break;
		}
		case IWL_UCODE_TLV_SEC_RT_USNIFFER:
			usniffer_images = true;
			iwl_store_ucode_sec(pieces, tlv_data,
					    IWL_UCODE_REGULAR_USNIFFER,
					    tlv_len);
			break;
		case IWL_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			paging_mem_size = le32_to_cpup((__le32 *)tlv_data);

			IWL_DEBUG_FW(drv,
				     "Paging: paging enabled (size = %u bytes)\n",
				     paging_mem_size);

			if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
				IWL_ERR(drv,
					"Paging: driver supports up to %lu bytes for paging image\n",
					MAX_PAGING_IMAGE_SIZE);
				return -EINVAL;
			}

			if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
				IWL_ERR(drv,
					"Paging: image isn't multiple %lu\n",
					FW_PAGING_SIZE);
				return -EINVAL;
			}

			drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
				paging_mem_size;
			usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
			drv->fw.img[usniffer_img].paging_mem_size =
				paging_mem_size;
			break;
		case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
			if (tlv_len != sizeof(u32))
				goto invalid_tlv_len;
			drv->fw.sdio_adma_addr =
				le32_to_cpup((__le32 *)tlv_data);
			break;
		case IWL_UCODE_TLV_FW_GSCAN_CAPA:
			if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
				goto invalid_tlv_len;
			gscan_capa = true;
			break;
		default:
			/* unknown TLVs are skipped, not fatal */
			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
			break;
		}
	}

	if (usniffer_req && !usniffer_images) {
		IWL_ERR(drv,
			"user selected to work with usniffer but usniffer image isn't available in ucode package\n");
		return -EINVAL;
	}

	/* any leftover bytes mean the stream was malformed */
	if (len) {
		IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
		iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
		return -EINVAL;
	}

	/*
	 * If ucode advertises that it supports GSCAN but GSCAN
	 * capabilities TLV is not present, warn and continue without GSCAN.
	 */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
	    WARN(!gscan_capa,
		 "GSCAN is supported but capabilities TLV is unavailable\n"))
		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
			    capa->_capa);

	return 0;

 invalid_tlv_len:
	IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
 tlv_error:
	iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);

	return -EINVAL;
}
1061
1062static int iwl_alloc_ucode(struct iwl_drv *drv,
1063 struct iwl_firmware_pieces *pieces,
1064 enum iwl_ucode_type type)
1065{
1066 int i;
1067 for (i = 0;
1068 i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
1069 i++)
1070 if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
1071 get_sec(pieces, type, i)))
1072 return -ENOMEM;
1073 return 0;
1074}
1075
1076static int validate_sec_sizes(struct iwl_drv *drv,
1077 struct iwl_firmware_pieces *pieces,
1078 const struct iwl_cfg *cfg)
1079{
1080 IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
1081 get_sec_size(pieces, IWL_UCODE_REGULAR,
1082 IWL_UCODE_SECTION_INST));
1083 IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
1084 get_sec_size(pieces, IWL_UCODE_REGULAR,
1085 IWL_UCODE_SECTION_DATA));
1086 IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
1087 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
1088 IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
1089 get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
1090
1091 /* Verify that uCode images will fit in card's SRAM. */
1092 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
1093 cfg->max_inst_size) {
1094 IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
1095 get_sec_size(pieces, IWL_UCODE_REGULAR,
1096 IWL_UCODE_SECTION_INST));
1097 return -1;
1098 }
1099
1100 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
1101 cfg->max_data_size) {
1102 IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
1103 get_sec_size(pieces, IWL_UCODE_REGULAR,
1104 IWL_UCODE_SECTION_DATA));
1105 return -1;
1106 }
1107
1108 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
1109 cfg->max_inst_size) {
1110 IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
1111 get_sec_size(pieces, IWL_UCODE_INIT,
1112 IWL_UCODE_SECTION_INST));
1113 return -1;
1114 }
1115
1116 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
1117 cfg->max_data_size) {
1118 IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
1119 get_sec_size(pieces, IWL_UCODE_REGULAR,
1120 IWL_UCODE_SECTION_DATA));
1121 return -1;
1122 }
1123 return 0;
1124}
1125
1126static struct iwl_op_mode *
1127_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
1128{
1129 const struct iwl_op_mode_ops *ops = op->ops;
1130 struct dentry *dbgfs_dir = NULL;
1131 struct iwl_op_mode *op_mode = NULL;
1132
1133#ifdef CONFIG_IWLWIFI_DEBUGFS
1134 drv->dbgfs_op_mode = debugfs_create_dir(op->name,
1135 drv->dbgfs_drv);
1136 if (!drv->dbgfs_op_mode) {
1137 IWL_ERR(drv,
1138 "failed to create opmode debugfs directory\n");
1139 return op_mode;
1140 }
1141 dbgfs_dir = drv->dbgfs_op_mode;
1142#endif
1143
1144 op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
1145
1146#ifdef CONFIG_IWLWIFI_DEBUGFS
1147 if (!op_mode) {
1148 debugfs_remove_recursive(drv->dbgfs_op_mode);
1149 drv->dbgfs_op_mode = NULL;
1150 }
1151#endif
1152
1153 return op_mode;
1154}
1155
1156static void _iwl_op_mode_stop(struct iwl_drv *drv)
1157{
1158 /* op_mode can be NULL if its start failed */
1159 if (drv->op_mode) {
1160 iwl_op_mode_stop(drv->op_mode);
1161 drv->op_mode = NULL;
1162
1163#ifdef CONFIG_IWLWIFI_DEBUGFS
1164 debugfs_remove_recursive(drv->dbgfs_op_mode);
1165 drv->dbgfs_op_mode = NULL;
1166#endif
1167 }
1168}
1169
1170/**
1171 * iwl_req_fw_callback - callback when firmware was loaded
1172 *
1173 * If loaded successfully, copies the firmware into buffers
1174 * for the card to fetch (via DMA).
1175 */
1176static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1177{
1178 struct iwl_drv *drv = context;
1179 struct iwl_fw *fw = &drv->fw;
1180 struct iwl_ucode_header *ucode;
1181 struct iwlwifi_opmode_table *op;
1182 int err;
1183 struct iwl_firmware_pieces *pieces;
1184 const unsigned int api_max = drv->cfg->ucode_api_max;
1185 unsigned int api_ok = drv->cfg->ucode_api_ok;
1186 const unsigned int api_min = drv->cfg->ucode_api_min;
1187 size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
1188 u32 api_ver;
1189 int i;
1190 bool load_module = false;
1191
1192 fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
1193 fw->ucode_capa.standard_phy_calibration_size =
1194 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1195 fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
1196
1197 if (!api_ok)
1198 api_ok = api_max;
1199
1200 pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
1201 if (!pieces)
1202 return;
1203
1204 if (!ucode_raw) {
1205 if (drv->fw_index <= api_ok)
1206 IWL_ERR(drv,
1207 "request for firmware file '%s' failed.\n",
1208 drv->firmware_name);
1209 goto try_again;
1210 }
1211
1212 IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
1213 drv->firmware_name, ucode_raw->size);
1214
1215 /* Make sure that we got at least the API version number */
1216 if (ucode_raw->size < 4) {
1217 IWL_ERR(drv, "File size way too small!\n");
1218 goto try_again;
1219 }
1220
1221 /* Data from ucode file: header followed by uCode images */
1222 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1223
1224 if (ucode->ver)
1225 err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
1226 else
1227 err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces,
1228 &fw->ucode_capa);
1229
1230 if (err)
1231 goto try_again;
1232
1233 if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
1234 api_ver = drv->fw.ucode_ver;
1235 else
1236 api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
1237
1238 /*
1239 * api_ver should match the api version forming part of the
1240 * firmware filename ... but we don't check for that and only rely
1241 * on the API version read from firmware header from here on forward
1242 */
1243 /* no api version check required for experimental uCode */
1244 if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
1245 if (api_ver < api_min || api_ver > api_max) {
1246 IWL_ERR(drv,
1247 "Driver unable to support your firmware API. "
1248 "Driver supports v%u, firmware is v%u.\n",
1249 api_max, api_ver);
1250 goto try_again;
1251 }
1252
1253 if (api_ver < api_ok) {
1254 if (api_ok != api_max)
1255 IWL_ERR(drv, "Firmware has old API version, "
1256 "expected v%u through v%u, got v%u.\n",
1257 api_ok, api_max, api_ver);
1258 else
1259 IWL_ERR(drv, "Firmware has old API version, "
1260 "expected v%u, got v%u.\n",
1261 api_max, api_ver);
1262 IWL_ERR(drv, "New firmware can be obtained from "
1263 "http://www.intellinuxwireless.org/.\n");
1264 }
1265 }
1266
1267 /*
1268 * In mvm uCode there is no difference between data and instructions
1269 * sections.
1270 */
1271 if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg))
1272 goto try_again;
1273
1274 /* Allocate ucode buffers for card's bus-master loading ... */
1275
1276 /* Runtime instructions and 2 copies of data:
1277 * 1) unmodified from disk
1278 * 2) backup cache for save/restore during power-downs */
1279 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
1280 if (iwl_alloc_ucode(drv, pieces, i))
1281 goto out_free_fw;
1282
1283 if (pieces->dbg_dest_tlv) {
1284 drv->fw.dbg_dest_tlv =
1285 kmemdup(pieces->dbg_dest_tlv,
1286 sizeof(*pieces->dbg_dest_tlv) +
1287 sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
1288 drv->fw.dbg_dest_reg_num, GFP_KERNEL);
1289
1290 if (!drv->fw.dbg_dest_tlv)
1291 goto out_free_fw;
1292 }
1293
1294 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
1295 if (pieces->dbg_conf_tlv[i]) {
1296 drv->fw.dbg_conf_tlv_len[i] =
1297 pieces->dbg_conf_tlv_len[i];
1298 drv->fw.dbg_conf_tlv[i] =
1299 kmemdup(pieces->dbg_conf_tlv[i],
1300 drv->fw.dbg_conf_tlv_len[i],
1301 GFP_KERNEL);
1302 if (!drv->fw.dbg_conf_tlv[i])
1303 goto out_free_fw;
1304 }
1305 }
1306
1307 memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz));
1308
1309 trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] =
1310 sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
1311 trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0;
1312 trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] =
1313 sizeof(struct iwl_fw_dbg_trigger_cmd);
1314 trigger_tlv_sz[FW_DBG_TRIGGER_MLME] =
1315 sizeof(struct iwl_fw_dbg_trigger_mlme);
1316 trigger_tlv_sz[FW_DBG_TRIGGER_STATS] =
1317 sizeof(struct iwl_fw_dbg_trigger_stats);
1318 trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] =
1319 sizeof(struct iwl_fw_dbg_trigger_low_rssi);
1320 trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] =
1321 sizeof(struct iwl_fw_dbg_trigger_txq_timer);
1322 trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
1323 sizeof(struct iwl_fw_dbg_trigger_time_event);
1324 trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
1325 sizeof(struct iwl_fw_dbg_trigger_ba);
1326
1327 for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
1328 if (pieces->dbg_trigger_tlv[i]) {
1329 /*
1330 * If the trigger isn't long enough, WARN and exit.
1331 * Someone is trying to debug something and he won't
1332 * be able to catch the bug he is trying to chase.
1333 * We'd better be noisy to be sure he knows what's
1334 * going on.
1335 */
1336 if (WARN_ON(pieces->dbg_trigger_tlv_len[i] <
1337 (trigger_tlv_sz[i] +
1338 sizeof(struct iwl_fw_dbg_trigger_tlv))))
1339 goto out_free_fw;
1340 drv->fw.dbg_trigger_tlv_len[i] =
1341 pieces->dbg_trigger_tlv_len[i];
1342 drv->fw.dbg_trigger_tlv[i] =
1343 kmemdup(pieces->dbg_trigger_tlv[i],
1344 drv->fw.dbg_trigger_tlv_len[i],
1345 GFP_KERNEL);
1346 if (!drv->fw.dbg_trigger_tlv[i])
1347 goto out_free_fw;
1348 }
1349 }
1350
1351 /* Now that we can no longer fail, copy information */
1352
1353 /*
1354 * The (size - 16) / 12 formula is based on the information recorded
1355 * for each event, which is of mode 1 (including timestamp) for all
1356 * new microcodes that include this information.
1357 */
1358 fw->init_evtlog_ptr = pieces->init_evtlog_ptr;
1359 if (pieces->init_evtlog_size)
1360 fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
1361 else
1362 fw->init_evtlog_size =
1363 drv->cfg->base_params->max_event_log_size;
1364 fw->init_errlog_ptr = pieces->init_errlog_ptr;
1365 fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
1366 if (pieces->inst_evtlog_size)
1367 fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
1368 else
1369 fw->inst_evtlog_size =
1370 drv->cfg->base_params->max_event_log_size;
1371 fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
1372
1373 /*
1374 * figure out the offset of chain noise reset and gain commands
1375 * base on the size of standard phy calibration commands table size
1376 */
1377 if (fw->ucode_capa.standard_phy_calibration_size >
1378 IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
1379 fw->ucode_capa.standard_phy_calibration_size =
1380 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1381
1382 /* We have our copies now, allow OS release its copies */
1383 release_firmware(ucode_raw);
1384
1385 mutex_lock(&iwlwifi_opmode_table_mtx);
1386 if (fw->mvm_fw)
1387 op = &iwlwifi_opmode_table[MVM_OP_MODE];
1388 else
1389 op = &iwlwifi_opmode_table[DVM_OP_MODE];
1390
1391 IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
1392 drv->fw.fw_version, op->name);
1393
1394 /* add this device to the list of devices using this op_mode */
1395 list_add_tail(&drv->list, &op->drv);
1396
1397 if (op->ops) {
1398 drv->op_mode = _iwl_op_mode_start(drv, op);
1399
1400 if (!drv->op_mode) {
1401 mutex_unlock(&iwlwifi_opmode_table_mtx);
1402 goto out_unbind;
1403 }
1404 } else {
1405 load_module = true;
1406 }
1407 mutex_unlock(&iwlwifi_opmode_table_mtx);
1408
1409 /*
1410 * Complete the firmware request last so that
1411 * a driver unbind (stop) doesn't run while we
1412 * are doing the start() above.
1413 */
1414 complete(&drv->request_firmware_complete);
1415
1416 /*
1417 * Load the module last so we don't block anything
1418 * else from proceeding if the module fails to load
1419 * or hangs loading.
1420 */
1421 if (load_module) {
1422 err = request_module("%s", op->name);
1423#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
1424 if (err)
1425 IWL_ERR(drv,
1426 "failed to load module %s (error %d), is dynamic loading enabled?\n",
1427 op->name, err);
1428#endif
1429 }
1430 kfree(pieces);
1431 return;
1432
1433 try_again:
1434 /* try next, if any */
1435 release_firmware(ucode_raw);
1436 if (iwl_request_firmware(drv, false))
1437 goto out_unbind;
1438 kfree(pieces);
1439 return;
1440
1441 out_free_fw:
1442 IWL_ERR(drv, "failed to allocate pci memory\n");
1443 iwl_dealloc_ucode(drv);
1444 release_firmware(ucode_raw);
1445 out_unbind:
1446 kfree(pieces);
1447 complete(&drv->request_firmware_complete);
1448 device_release_driver(drv->trans->dev);
1449}
1450
/*
 * iwl_drv_start - allocate a driver instance for @trans/@cfg, create
 * the debugfs hierarchy (when enabled) and kick off the asynchronous
 * firmware request; loading continues in iwl_req_fw_callback().
 *
 * Returns the new instance or an ERR_PTR() on failure.
 */
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
			      const struct iwl_cfg *cfg)
{
	struct iwl_drv *drv;
	int ret;

	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
	if (!drv) {
		ret = -ENOMEM;
		goto err;
	}

	drv->trans = trans;
	drv->dev = trans->dev;
	drv->cfg = cfg;

	/* completed by iwl_req_fw_callback(); iwl_drv_stop() waits on it */
	init_completion(&drv->request_firmware_complete);
	INIT_LIST_HEAD(&drv->list);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* Create the device debugfs entries. */
	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
					    iwl_dbgfs_root);

	if (!drv->dbgfs_drv) {
		IWL_ERR(drv, "failed to create debugfs directory\n");
		ret = -ENOMEM;
		goto err_free_drv;
	}

	/* Create transport layer debugfs dir */
	drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);

	if (!drv->trans->dbgfs_dir) {
		IWL_ERR(drv, "failed to create transport debugfs directory\n");
		ret = -ENOMEM;
		goto err_free_dbgfs;
	}
#endif

	ret = iwl_request_firmware(drv, true);
	if (ret) {
		IWL_ERR(trans, "Couldn't request the fw\n");
		goto err_fw;
	}

	return drv;

err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
err_free_dbgfs:
	debugfs_remove_recursive(drv->dbgfs_drv);
err_free_drv:
#endif
	kfree(drv);
err:
	return ERR_PTR(ret);
}
1509
1510void iwl_drv_stop(struct iwl_drv *drv)
1511{
1512 wait_for_completion(&drv->request_firmware_complete);
1513
1514 _iwl_op_mode_stop(drv);
1515
1516 iwl_dealloc_ucode(drv);
1517
1518 mutex_lock(&iwlwifi_opmode_table_mtx);
1519 /*
1520 * List is empty (this item wasn't added)
1521 * when firmware loading failed -- in that
1522 * case we can't remove it from any list.
1523 */
1524 if (!list_empty(&drv->list))
1525 list_del(&drv->list);
1526 mutex_unlock(&iwlwifi_opmode_table_mtx);
1527
1528#ifdef CONFIG_IWLWIFI_DEBUGFS
1529 debugfs_remove_recursive(drv->dbgfs_drv);
1530#endif
1531
1532 kfree(drv);
1533}
1534
1535
1536/* shared module parameters */
1537struct iwl_mod_params iwlwifi_mod_params = {
1538 .restart_fw = true,
1539 .bt_coex_active = true,
1540 .power_level = IWL_POWER_INDEX_1,
1541 .d0i3_disable = true,
1542#ifndef CONFIG_IWLWIFI_UAPSD
1543 .uapsd_disable = true,
1544#endif /* CONFIG_IWLWIFI_UAPSD */
1545 /* the rest are 0 by default */
1546};
1547IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
1548
1549int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1550{
1551 int i;
1552 struct iwl_drv *drv;
1553 struct iwlwifi_opmode_table *op;
1554
1555 mutex_lock(&iwlwifi_opmode_table_mtx);
1556 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1557 op = &iwlwifi_opmode_table[i];
1558 if (strcmp(op->name, name))
1559 continue;
1560 op->ops = ops;
1561 /* TODO: need to handle exceptional case */
1562 list_for_each_entry(drv, &op->drv, list)
1563 drv->op_mode = _iwl_op_mode_start(drv, op);
1564
1565 mutex_unlock(&iwlwifi_opmode_table_mtx);
1566 return 0;
1567 }
1568 mutex_unlock(&iwlwifi_opmode_table_mtx);
1569 return -EIO;
1570}
1571IWL_EXPORT_SYMBOL(iwl_opmode_register);
1572
1573void iwl_opmode_deregister(const char *name)
1574{
1575 int i;
1576 struct iwl_drv *drv;
1577
1578 mutex_lock(&iwlwifi_opmode_table_mtx);
1579 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1580 if (strcmp(iwlwifi_opmode_table[i].name, name))
1581 continue;
1582 iwlwifi_opmode_table[i].ops = NULL;
1583
1584 /* call the stop routine for all devices */
1585 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1586 _iwl_op_mode_stop(drv);
1587
1588 mutex_unlock(&iwlwifi_opmode_table_mtx);
1589 return;
1590 }
1591 mutex_unlock(&iwlwifi_opmode_table_mtx);
1592}
1593IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
1594
1595static int __init iwl_drv_init(void)
1596{
1597 int i;
1598
1599 mutex_init(&iwlwifi_opmode_table_mtx);
1600
1601 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
1602 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
1603
1604 pr_info(DRV_DESCRIPTION "\n");
1605 pr_info(DRV_COPYRIGHT "\n");
1606
1607#ifdef CONFIG_IWLWIFI_DEBUGFS
1608 /* Create the root of iwlwifi debugfs subsystem. */
1609 iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
1610
1611 if (!iwl_dbgfs_root)
1612 return -EFAULT;
1613#endif
1614
1615 return iwl_pci_register_driver();
1616}
1617module_init(iwl_drv_init);
1618
1619static void __exit iwl_drv_exit(void)
1620{
1621 iwl_pci_unregister_driver();
1622
1623#ifdef CONFIG_IWLWIFI_DEBUGFS
1624 debugfs_remove_recursive(iwl_dbgfs_root);
1625#endif
1626}
1627module_exit(iwl_drv_exit);
1628
/*
 * Module parameters. Each knob writes straight into the corresponding
 * field of the shared iwlwifi_mod_params structure above; the defaults
 * are those set in its initializer (unlisted fields default to 0).
 */
#ifdef CONFIG_IWLWIFI_DEBUG
/* bitmask selecting debug message categories; writable at runtime */
module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
/* parameter name starts with a digit: only reachable as iwlwifi.11n_disable */
module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
MODULE_PARM_DESC(11n_disable,
	"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");

module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
		   int, S_IRUGO);
MODULE_PARM_DESC(antenna_coupling,
		 "specify antenna coupling in dB (default: 0 dB)");

module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
MODULE_PARM_DESC(nvm_file, "NVM file name");

module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
		   bool, S_IRUGO);
MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");

module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
		   bool, S_IRUGO);
MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");

/* default depends on CONFIG_IWLWIFI_UAPSD; see iwlwifi_mod_params above */
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
		   bool, S_IRUGO | S_IWUSR);
#ifdef CONFIG_IWLWIFI_UAPSD
MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
#else
MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
#endif

/*
 * set bt_coex_active to true, uCode will do kill/defer
 * every time the priority line is asserted (BT is sending signals on the
 * priority line in the PCIx).
 * set bt_coex_active to false, uCode will ignore the BT activity and
 * perform the normal operation
 *
 * User might experience transmit issue on some platform due to WiFi/BT
 * co-exist problem. The possible behaviors are:
 *   Able to scan and finding all the available AP
 *   Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false"
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
		   bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");

module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
		"1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");

module_param_named(power_save, iwlwifi_mod_params.power_save,
		   bool, S_IRUGO);
MODULE_PARM_DESC(power_save,
		 "enable WiFi power management (default: disable)");

module_param_named(power_level, iwlwifi_mod_params.power_level,
		int, S_IRUGO);
MODULE_PARM_DESC(power_level,
		 "default power save level (range from 1 - 5, default: 1)");

module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, S_IRUGO);
MODULE_PARM_DESC(fw_monitor,
		 "firmware monitor - to debug FW (default: false - needs lots of memory)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
new file mode 100644
index 000000000000..cda746b33db1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -0,0 +1,155 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __iwl_drv_h__
66#define __iwl_drv_h__
67#include <linux/export.h>
68
69/* for all modules */
70#define DRV_NAME "iwlwifi"
71#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation"
72#define DRV_AUTHOR "<ilw@linux.intel.com>"
73
74/* radio config bits (actual values from NVM definition) */
75#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
76#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
77#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
78#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
79#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
80#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
81
82#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF)
83#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF)
84#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF)
85#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF)
86#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF)
87#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF)
88
89/**
90 * DOC: Driver system flows - drv component
91 *
92 * This component implements the system flows such as bus enumeration, bus
93 * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in
94 * bus specific files (transport files). This is the code that is common among
95 * different buses.
96 *
97 * This component is also in charge of managing the several implementations of
98 * the wifi flows: it will allow to have several fw API implementation. These
99 * different implementations will differ in the way they implement mac80211's
100 * handlers too.
 *
102 * The init flow wrt to the drv component looks like this:
103 * 1) The bus specific component is called from module_init
104 * 2) The bus specific component registers the bus driver
105 * 3) The bus driver calls the probe function
106 * 4) The bus specific component configures the bus
107 * 5) The bus specific component calls to the drv bus agnostic part
108 * (iwl_drv_start)
109 * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
110 * 7) iwl_req_fw_callback parses the fw file
111 * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw
112 */
113
114struct iwl_drv;
115struct iwl_trans;
116struct iwl_cfg;
117/**
118 * iwl_drv_start - start the drv
119 *
120 * @trans_ops: the ops of the transport
121 * @cfg: device specific constants / virtual functions
122 *
123 * starts the driver: fetches the firmware. This should be called by bus
124 * specific system flows implementations. For example, the bus specific probe
125 * function should do bus related operations only, and then call to this
 * function. It returns the driver object or an ERR_PTR() encoded error
 * on failure (it never returns %NULL).
127 */
128struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
129 const struct iwl_cfg *cfg);
130
131/**
132 * iwl_drv_stop - stop the drv
133 *
 * @drv: the driver object, as returned by iwl_drv_start()
135 *
136 * Stop the driver. This should be called by bus specific system flows
137 * implementations. For example, the bus specific remove function should first
138 * call this function and then do the bus related operations only.
139 */
140void iwl_drv_stop(struct iwl_drv *drv);
141
142/*
143 * exported symbol management
144 *
145 * The driver can be split into multiple modules, in which case some symbols
146 * must be exported for the sub-modules. However, if it's not split and
147 * everything is built-in, then we can avoid that.
148 */
149#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
150#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
151#else
152#define IWL_EXPORT_SYMBOL(sym)
153#endif
154
155#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
new file mode 100644
index 000000000000..acc3d186c5c1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -0,0 +1,947 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64#include <linux/types.h>
65#include <linux/slab.h>
66#include <linux/export.h>
67#include "iwl-drv.h"
68#include "iwl-modparams.h"
69#include "iwl-eeprom-parse.h"
70
71/* EEPROM offset definitions */
72
73/* indirect access definitions */
74#define ADDRESS_MSK 0x0000FFFF
75#define INDIRECT_TYPE_MSK 0x000F0000
76#define INDIRECT_HOST 0x00010000
77#define INDIRECT_GENERAL 0x00020000
78#define INDIRECT_REGULATORY 0x00030000
79#define INDIRECT_CALIBRATION 0x00040000
80#define INDIRECT_PROCESS_ADJST 0x00050000
81#define INDIRECT_OTHERS 0x00060000
82#define INDIRECT_TXP_LIMIT 0x00070000
83#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
84#define INDIRECT_ADDRESS 0x00100000
85
86/* corresponding link offsets in EEPROM */
87#define EEPROM_LINK_HOST (2*0x64)
88#define EEPROM_LINK_GENERAL (2*0x65)
89#define EEPROM_LINK_REGULATORY (2*0x66)
90#define EEPROM_LINK_CALIBRATION (2*0x67)
91#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
92#define EEPROM_LINK_OTHERS (2*0x69)
93#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
94#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
95
96/* General */
97#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
98#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
99#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
100#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
101#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
102#define EEPROM_VERSION (2*0x44) /* 2 bytes */
103#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
104#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
105#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
106#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
107
108/* calibration */
109struct iwl_eeprom_calib_hdr {
110 u8 version;
111 u8 pa_type;
112 __le16 voltage;
113} __packed;
114
115#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
116#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
117
118/* temperature */
119#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
120#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
121
122/* SKU Capabilities (actual values from EEPROM definition) */
123enum eeprom_sku_bits {
124 EEPROM_SKU_CAP_BAND_24GHZ = BIT(4),
125 EEPROM_SKU_CAP_BAND_52GHZ = BIT(5),
126 EEPROM_SKU_CAP_11N_ENABLE = BIT(6),
127 EEPROM_SKU_CAP_AMT_ENABLE = BIT(7),
128 EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8)
129};
130
131/* radio config bits (actual values from EEPROM definition) */
132#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
133#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
134#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
135#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
136#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
137#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
138
139
140/*
141 * EEPROM bands
142 * These are the channel numbers from each band in the order
143 * that they are stored in the EEPROM band information. Note
144 * that EEPROM bands aren't the same as mac80211 bands, and
145 * there are even special "ht40 bands" in the EEPROM.
146 */
147static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
148 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
149};
150
151static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
152 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
153};
154
155static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
156 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
157};
158
159static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
160 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
161};
162
163static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
164 145, 149, 153, 157, 161, 165
165};
166
167static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
168 1, 2, 3, 4, 5, 6, 7
169};
170
171static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
172 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
173};
174
175#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \
176 ARRAY_SIZE(iwl_eeprom_band_2) + \
177 ARRAY_SIZE(iwl_eeprom_band_3) + \
178 ARRAY_SIZE(iwl_eeprom_band_4) + \
179 ARRAY_SIZE(iwl_eeprom_band_5))
180
181/* rate data (static) */
182static struct ieee80211_rate iwl_cfg80211_rates[] = {
183 { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
184 { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
185 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
186 { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
187 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
188 { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
189 .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
190 { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
191 { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
192 { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
193 { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
194 { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
195 { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
196 { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
197 { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
198};
199#define RATES_24_OFFS 0
200#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
201#define RATES_52_OFFS 4
202#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
203
204/* EEPROM reading functions */
205
206static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
207{
208 if (WARN_ON(offset + sizeof(u16) > eeprom_size))
209 return 0;
210 return le16_to_cpup((__le16 *)(eeprom + offset));
211}
212
213static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
214 u32 address)
215{
216 u16 offset = 0;
217
218 if ((address & INDIRECT_ADDRESS) == 0)
219 return address;
220
221 switch (address & INDIRECT_TYPE_MSK) {
222 case INDIRECT_HOST:
223 offset = iwl_eeprom_query16(eeprom, eeprom_size,
224 EEPROM_LINK_HOST);
225 break;
226 case INDIRECT_GENERAL:
227 offset = iwl_eeprom_query16(eeprom, eeprom_size,
228 EEPROM_LINK_GENERAL);
229 break;
230 case INDIRECT_REGULATORY:
231 offset = iwl_eeprom_query16(eeprom, eeprom_size,
232 EEPROM_LINK_REGULATORY);
233 break;
234 case INDIRECT_TXP_LIMIT:
235 offset = iwl_eeprom_query16(eeprom, eeprom_size,
236 EEPROM_LINK_TXP_LIMIT);
237 break;
238 case INDIRECT_TXP_LIMIT_SIZE:
239 offset = iwl_eeprom_query16(eeprom, eeprom_size,
240 EEPROM_LINK_TXP_LIMIT_SIZE);
241 break;
242 case INDIRECT_CALIBRATION:
243 offset = iwl_eeprom_query16(eeprom, eeprom_size,
244 EEPROM_LINK_CALIBRATION);
245 break;
246 case INDIRECT_PROCESS_ADJST:
247 offset = iwl_eeprom_query16(eeprom, eeprom_size,
248 EEPROM_LINK_PROCESS_ADJST);
249 break;
250 case INDIRECT_OTHERS:
251 offset = iwl_eeprom_query16(eeprom, eeprom_size,
252 EEPROM_LINK_OTHERS);
253 break;
254 default:
255 WARN_ON(1);
256 break;
257 }
258
259 /* translate the offset from words to byte */
260 return (address & ADDRESS_MSK) + (offset << 1);
261}
262
263static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
264 u32 offset)
265{
266 u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
267
268 if (WARN_ON(address >= eeprom_size))
269 return NULL;
270
271 return &eeprom[address];
272}
273
274static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
275 struct iwl_nvm_data *data)
276{
277 struct iwl_eeprom_calib_hdr *hdr;
278
279 hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
280 EEPROM_CALIB_ALL);
281 if (!hdr)
282 return -ENODATA;
283 data->calib_version = hdr->version;
284 data->calib_voltage = hdr->voltage;
285
286 return 0;
287}
288
289/**
290 * enum iwl_eeprom_channel_flags - channel flags in EEPROM
291 * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
292 * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
293 * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
294 * @EEPROM_CHANNEL_RADAR: radar detection required
295 * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
296 * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
297 */
298enum iwl_eeprom_channel_flags {
299 EEPROM_CHANNEL_VALID = BIT(0),
300 EEPROM_CHANNEL_IBSS = BIT(1),
301 EEPROM_CHANNEL_ACTIVE = BIT(3),
302 EEPROM_CHANNEL_RADAR = BIT(4),
303 EEPROM_CHANNEL_WIDE = BIT(5),
304 EEPROM_CHANNEL_DFS = BIT(7),
305};
306
307/**
308 * struct iwl_eeprom_channel - EEPROM channel data
309 * @flags: %EEPROM_CHANNEL_* flags
310 * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
311 */
312struct iwl_eeprom_channel {
313 u8 flags;
314 s8 max_power_avg;
315} __packed;
316
317
318enum iwl_eeprom_enhanced_txpwr_flags {
319 IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
320 IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
321 IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
322 IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
323 IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
324 IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
325 IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
326 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
327};
328
329/**
330 * iwl_eeprom_enhanced_txpwr structure
331 * @flags: entry flags
332 * @channel: channel number
333 * @chain_a_max_pwr: chain a max power in 1/2 dBm
334 * @chain_b_max_pwr: chain b max power in 1/2 dBm
335 * @chain_c_max_pwr: chain c max power in 1/2 dBm
336 * @delta_20_in_40: 20-in-40 deltas (hi/lo)
337 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
338 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
339 *
340 * This structure presents the enhanced regulatory tx power limit layout
341 * in an EEPROM image.
342 */
343struct iwl_eeprom_enhanced_txpwr {
344 u8 flags;
345 u8 channel;
346 s8 chain_a_max;
347 s8 chain_b_max;
348 s8 chain_c_max;
349 u8 delta_20_in_40;
350 s8 mimo2_max;
351 s8 mimo3_max;
352} __packed;
353
354static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
355 struct iwl_eeprom_enhanced_txpwr *txp)
356{
357 s8 result = 0; /* (.5 dBm) */
358
359 /* Take the highest tx power from any valid chains */
360 if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
361 result = txp->chain_a_max;
362
363 if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
364 result = txp->chain_b_max;
365
366 if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
367 result = txp->chain_c_max;
368
369 if ((data->valid_tx_ant == ANT_AB ||
370 data->valid_tx_ant == ANT_BC ||
371 data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
372 result = txp->mimo2_max;
373
374 if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
375 result = txp->mimo3_max;
376
377 return result;
378}
379
380#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
381#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
382#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
383
384#define TXP_CHECK_AND_PRINT(x) \
385 ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
386
387static void
388iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
389 struct iwl_eeprom_enhanced_txpwr *txp,
390 int n_channels, s8 max_txpower_avg)
391{
392 int ch_idx;
393 enum ieee80211_band band;
394
395 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
396 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
397
398 for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
399 struct ieee80211_channel *chan = &data->channels[ch_idx];
400
401 /* update matching channel or from common data only */
402 if (txp->channel != 0 && chan->hw_value != txp->channel)
403 continue;
404
405 /* update matching band only */
406 if (band != chan->band)
407 continue;
408
409 if (chan->max_power < max_txpower_avg &&
410 !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
411 chan->max_power = max_txpower_avg;
412 }
413}
414
/*
 * Parse the enhanced TX power table from the EEPROM: walk every valid
 * entry, raise the affected channels' max_power via
 * iwl_eeprom_enh_txp_read_element(), and track the overall maximum in
 * data->max_tx_pwr_half_dbm. All entry values are in half-dBm units
 * (see struct iwl_eeprom_enhanced_txpwr).
 */
static void iwl_eeprom_enhanced_txpower(struct device *dev,
					struct iwl_nvm_data *data,
					const u8 *eeprom, size_t eeprom_size,
					int n_channels)
{
	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
	int idx, entries;
	__le16 *txp_len;
	s8 max_txp_avg_halfdbm;

	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);

	/* the length is in 16-bit words, but we want entries */
	txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
						  EEPROM_TXP_SZ_OFFS);
	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;

	txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
						  EEPROM_TXP_OFFS);

	for (idx = 0; idx < entries; idx++) {
		txp = &txp_array[idx];
		/* skip invalid entries */
		if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
			continue;

		/* dump the entry's flags and raw limits for debugging */
		IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
				 (txp->channel && (txp->flags &
					IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
					"Common " : (txp->channel) ?
					"Channel" : "Common",
				 (txp->channel),
				 TXP_CHECK_AND_PRINT(VALID),
				 TXP_CHECK_AND_PRINT(BAND_52G),
				 TXP_CHECK_AND_PRINT(OFDM),
				 TXP_CHECK_AND_PRINT(40MHZ),
				 TXP_CHECK_AND_PRINT(HT_AP),
				 TXP_CHECK_AND_PRINT(RES1),
				 TXP_CHECK_AND_PRINT(RES2),
				 TXP_CHECK_AND_PRINT(COMMON_TYPE),
				 txp->flags);
		IWL_DEBUG_EEPROM(dev,
				 "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n",
				 txp->chain_a_max, txp->chain_b_max,
				 txp->chain_c_max);
		IWL_DEBUG_EEPROM(dev,
				 "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
				 txp->mimo2_max, txp->mimo3_max,
				 ((txp->delta_20_in_40 & 0xf0) >> 4),
				 (txp->delta_20_in_40 & 0x0f));

		max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);

		/* round half-dBm up to whole dBm for the channel limit */
		iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
				DIV_ROUND_UP(max_txp_avg_halfdbm, 2));

		if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
			data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
	}
}
475
476static void iwl_init_band_reference(const struct iwl_cfg *cfg,
477 const u8 *eeprom, size_t eeprom_size,
478 int eeprom_band, int *eeprom_ch_count,
479 const struct iwl_eeprom_channel **ch_info,
480 const u8 **eeprom_ch_array)
481{
482 u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
483
484 offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
485
486 *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
487
488 switch (eeprom_band) {
489 case 1: /* 2.4GHz band */
490 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
491 *eeprom_ch_array = iwl_eeprom_band_1;
492 break;
493 case 2: /* 4.9GHz band */
494 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
495 *eeprom_ch_array = iwl_eeprom_band_2;
496 break;
497 case 3: /* 5.2GHz band */
498 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
499 *eeprom_ch_array = iwl_eeprom_band_3;
500 break;
501 case 4: /* 5.5GHz band */
502 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
503 *eeprom_ch_array = iwl_eeprom_band_4;
504 break;
505 case 5: /* 5.7GHz band */
506 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
507 *eeprom_ch_array = iwl_eeprom_band_5;
508 break;
509 case 6: /* 2.4GHz ht40 channels */
510 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
511 *eeprom_ch_array = iwl_eeprom_band_6;
512 break;
513 case 7: /* 5 GHz ht40 channels */
514 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
515 *eeprom_ch_array = iwl_eeprom_band_7;
516 break;
517 default:
518 *eeprom_ch_count = 0;
519 *eeprom_ch_array = NULL;
520 WARN_ON(1);
521 }
522}
523
524#define CHECK_AND_PRINT(x) \
525 ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
526
527static void iwl_mod_ht40_chan_info(struct device *dev,
528 struct iwl_nvm_data *data, int n_channels,
529 enum ieee80211_band band, u16 channel,
530 const struct iwl_eeprom_channel *eeprom_ch,
531 u8 clear_ht40_extension_channel)
532{
533 struct ieee80211_channel *chan = NULL;
534 int i;
535
536 for (i = 0; i < n_channels; i++) {
537 if (data->channels[i].band != band)
538 continue;
539 if (data->channels[i].hw_value != channel)
540 continue;
541 chan = &data->channels[i];
542 break;
543 }
544
545 if (!chan)
546 return;
547
548 IWL_DEBUG_EEPROM(dev,
549 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
550 channel,
551 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
552 CHECK_AND_PRINT(IBSS),
553 CHECK_AND_PRINT(ACTIVE),
554 CHECK_AND_PRINT(RADAR),
555 CHECK_AND_PRINT(WIDE),
556 CHECK_AND_PRINT(DFS),
557 eeprom_ch->flags,
558 eeprom_ch->max_power_avg,
559 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
560 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
561 : "not ");
562
563 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
564 chan->flags &= ~clear_ht40_extension_channel;
565}
566
567#define CHECK_AND_PRINT_I(x) \
568 ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
569
/*
 * iwl_init_channel_map - build data->channels[] from the EEPROM
 *
 * Walks EEPROM regulatory bands 1-5, appending every EEPROM-valid channel
 * to data->channels[] with its band, center frequency, regulatory flags
 * and max power.  Then derives data->max_tx_pwr_half_dbm (either from the
 * enhanced-txpower section or from the channel map) and, if the config
 * has HT40 bands (EEPROM bands 6-7), clears the NO_HT40PLUS/MINUS flags
 * on channels where the EEPROM allows the extension.
 *
 * Returns the number of entries populated in data->channels[].
 */
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
				struct iwl_nvm_data *data,
				const u8 *eeprom, size_t eeprom_size)
{
	int band, ch_idx;
	const struct iwl_eeprom_channel *eeprom_ch_info;
	const u8 *eeprom_ch_array;
	int eeprom_ch_count;
	int n_channels = 0;

	/*
	 * Loop through the 5 EEPROM bands and add them to the parse list
	 */
	for (band = 1; band <= 5; band++) {
		struct ieee80211_channel *channel;

		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
					&eeprom_ch_count, &eeprom_ch_info,
					&eeprom_ch_array);

		/* Loop through each band adding each of the channels */
		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
			const struct iwl_eeprom_channel *eeprom_ch;

			eeprom_ch = &eeprom_ch_info[ch_idx];

			/* channels the EEPROM marks invalid are not added */
			if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
				IWL_DEBUG_EEPROM(dev,
						 "Ch. %d Flags %x [%sGHz] - No traffic\n",
						 eeprom_ch_array[ch_idx],
						 eeprom_ch_info[ch_idx].flags,
						 (band != 1) ? "5.2" : "2.4");
				continue;
			}

			channel = &data->channels[n_channels];
			n_channels++;

			channel->hw_value = eeprom_ch_array[ch_idx];
			/* only EEPROM band 1 is 2.4 GHz; bands 2-5 are 5 GHz */
			channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
						    : IEEE80211_BAND_5GHZ;
			channel->center_freq =
				ieee80211_channel_to_frequency(
					channel->hw_value, channel->band);

			/* set no-HT40, will enable as appropriate later */
			channel->flags = IEEE80211_CHAN_NO_HT40;

			/* either missing IBSS or ACTIVE forbids initiating radiation */
			if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
				channel->flags |= IEEE80211_CHAN_NO_IR;

			if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
				channel->flags |= IEEE80211_CHAN_NO_IR;

			if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
				channel->flags |= IEEE80211_CHAN_RADAR;

			/* Initialize regulatory-based run-time data */
			channel->max_power =
				eeprom_ch_info[ch_idx].max_power_avg;
			IWL_DEBUG_EEPROM(dev,
					 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
					 channel->hw_value,
					 (band != 1) ? "5.2" : "2.4",
					 CHECK_AND_PRINT_I(VALID),
					 CHECK_AND_PRINT_I(IBSS),
					 CHECK_AND_PRINT_I(ACTIVE),
					 CHECK_AND_PRINT_I(RADAR),
					 CHECK_AND_PRINT_I(WIDE),
					 CHECK_AND_PRINT_I(DFS),
					 eeprom_ch_info[ch_idx].flags,
					 eeprom_ch_info[ch_idx].max_power_avg,
					 ((eeprom_ch_info[ch_idx].flags &
					   EEPROM_CHANNEL_IBSS) &&
					  !(eeprom_ch_info[ch_idx].flags &
					    EEPROM_CHANNEL_RADAR))
						? "" : "not ");
		}
	}

	if (cfg->eeprom_params->enhanced_txpower) {
		/*
		 * for newer device (6000 series and up)
		 * EEPROM contain enhanced tx power information
		 * driver need to process addition information
		 * to determine the max channel tx power limits
		 */
		iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
					    n_channels);
	} else {
		/* All others use data from channel map */
		int i;

		data->max_tx_pwr_half_dbm = -128;

		/* max_power is in dBm; track the maximum in half-dBm units */
		for (i = 0; i < n_channels; i++)
			data->max_tx_pwr_half_dbm =
				max_t(s8, data->max_tx_pwr_half_dbm,
				      data->channels[i].max_power * 2);
	}

	/* Check if we do have HT40 channels */
	if (cfg->eeprom_params->regulatory_bands[5] ==
	    EEPROM_REGULATORY_BAND_NO_HT40 &&
	    cfg->eeprom_params->regulatory_bands[6] ==
	    EEPROM_REGULATORY_BAND_NO_HT40)
		return n_channels;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
					&eeprom_ch_count, &eeprom_ch_info,
					&eeprom_ch_array);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
				       : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
			/* Set up driver's info for lower half */
			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
					       eeprom_ch_array[ch_idx],
					       &eeprom_ch_info[ch_idx],
					       IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
					       eeprom_ch_array[ch_idx] + 4,
					       &eeprom_ch_info[ch_idx],
					       IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return n_channels;
}
708
709int iwl_init_sband_channels(struct iwl_nvm_data *data,
710 struct ieee80211_supported_band *sband,
711 int n_channels, enum ieee80211_band band)
712{
713 struct ieee80211_channel *chan = &data->channels[0];
714 int n = 0, idx = 0;
715
716 while (idx < n_channels && chan->band != band)
717 chan = &data->channels[++idx];
718
719 sband->channels = &data->channels[idx];
720
721 while (idx < n_channels && chan->band == band) {
722 chan = &data->channels[++idx];
723 n++;
724 }
725
726 sband->n_channels = n;
727
728 return n;
729}
730
731#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
732#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
733
734void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
735 struct iwl_nvm_data *data,
736 struct ieee80211_sta_ht_cap *ht_info,
737 enum ieee80211_band band,
738 u8 tx_chains, u8 rx_chains)
739{
740 int max_bit_rate = 0;
741
742 tx_chains = hweight8(tx_chains);
743 if (cfg->rx_with_siso_diversity)
744 rx_chains = 1;
745 else
746 rx_chains = hweight8(rx_chains);
747
748 if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
749 ht_info->ht_supported = false;
750 return;
751 }
752
753 if (data->sku_cap_mimo_disabled)
754 rx_chains = 1;
755
756 ht_info->ht_supported = true;
757 ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
758
759 if (cfg->ht_params->stbc) {
760 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
761
762 if (tx_chains > 1)
763 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
764 }
765
766 if (cfg->ht_params->ldpc)
767 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
768
769 if (iwlwifi_mod_params.amsdu_size_8K)
770 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
771
772 ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
773 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
774
775 ht_info->mcs.rx_mask[0] = 0xFF;
776 if (rx_chains >= 2)
777 ht_info->mcs.rx_mask[1] = 0xFF;
778 if (rx_chains >= 3)
779 ht_info->mcs.rx_mask[2] = 0xFF;
780
781 if (cfg->ht_params->ht_greenfield_support)
782 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
783 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
784
785 max_bit_rate = MAX_BIT_RATE_20_MHZ;
786
787 if (cfg->ht_params->ht40_bands & BIT(band)) {
788 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
789 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
790 max_bit_rate = MAX_BIT_RATE_40_MHZ;
791 }
792
793 /* Highest supported Rx data rate */
794 max_bit_rate *= rx_chains;
795 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
796 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
797
798 /* Tx MCS capabilities */
799 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
800 if (tx_chains != rx_chains) {
801 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
802 ht_info->mcs.tx_params |= ((tx_chains - 1) <<
803 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
804 }
805}
806
807static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
808 struct iwl_nvm_data *data,
809 const u8 *eeprom, size_t eeprom_size)
810{
811 int n_channels = iwl_init_channel_map(dev, cfg, data,
812 eeprom, eeprom_size);
813 int n_used = 0;
814 struct ieee80211_supported_band *sband;
815
816 sband = &data->bands[IEEE80211_BAND_2GHZ];
817 sband->band = IEEE80211_BAND_2GHZ;
818 sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
819 sband->n_bitrates = N_RATES_24;
820 n_used += iwl_init_sband_channels(data, sband, n_channels,
821 IEEE80211_BAND_2GHZ);
822 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
823 data->valid_tx_ant, data->valid_rx_ant);
824
825 sband = &data->bands[IEEE80211_BAND_5GHZ];
826 sband->band = IEEE80211_BAND_5GHZ;
827 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
828 sband->n_bitrates = N_RATES_52;
829 n_used += iwl_init_sband_channels(data, sband, n_channels,
830 IEEE80211_BAND_5GHZ);
831 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
832 data->valid_tx_ant, data->valid_rx_ant);
833
834 if (n_channels != n_used)
835 IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
836 n_used, n_channels);
837}
838
839/* EEPROM data functions */
840
/*
 * Parse the raw EEPROM image into a freshly allocated iwl_nvm_data.
 * Allocates room for IWL_NUM_CHANNELS trailing channel entries; any
 * missing mandatory EEPROM section aborts the parse and returns NULL.
 * Caller owns the returned struct (free with iwl_free_nvm_data()).
 */
struct iwl_nvm_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
		      const u8 *eeprom, size_t eeprom_size)
{
	struct iwl_nvm_data *data;
	const void *tmp;
	u16 radio_cfg, sku;

	if (WARN_ON(!cfg || !cfg->eeprom_params))
		return NULL;

	data = kzalloc(sizeof(*data) +
		       sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
		       GFP_KERNEL);
	if (!data)
		return NULL;

	/* get MAC address(es) */
	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
	if (!tmp)
		goto err_free;
	memcpy(data->hw_addr, tmp, ETH_ALEN);
	data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
					      EEPROM_NUM_MAC_ADDRESS);

	/* fills in calibration fields of *data; non-zero means bad EEPROM */
	if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
		goto err_free;

	/* crystal calibration words */
	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
	if (!tmp)
		goto err_free;
	memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));

	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
				    EEPROM_RAW_TEMPERATURE);
	if (!tmp)
		goto err_free;
	data->raw_temperature = *(__le16 *)tmp;

	/* kelvin section holds temperature followed by voltage */
	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
				    EEPROM_KELVIN_TEMPERATURE);
	if (!tmp)
		goto err_free;
	data->kelvin_temperature = *(__le16 *)tmp;
	data->kelvin_voltage = *((__le16 *)tmp + 1);

	/* unpack the radio configuration word into its bit fields */
	radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
				       EEPROM_RADIO_CONFIG);
	data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
	data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
	data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
	data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);

	/* SKU capability bits */
	sku = iwl_eeprom_query16(eeprom, eeprom_size,
				 EEPROM_SKU_CAP);
	data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
	data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
	data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
	/* module parameter may force 11n off regardless of the SKU */
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
		data->sku_cap_11n_enable = false;

	data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size,
					       EEPROM_VERSION);

	/* check overrides (some devices have wrong EEPROM) */
	if (cfg->valid_tx_ant)
		data->valid_tx_ant = cfg->valid_tx_ant;
	if (cfg->valid_rx_ant)
		data->valid_rx_ant = cfg->valid_rx_ant;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
			    data->valid_tx_ant, data->valid_rx_ant);
		goto err_free;
	}

	iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);

	return data;
 err_free:
	kfree(data);
	return NULL;
}
928IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
929
930/* helper functions */
931int iwl_nvm_check_version(struct iwl_nvm_data *data,
932 struct iwl_trans *trans)
933{
934 if (data->nvm_version >= trans->cfg->nvm_ver ||
935 data->calib_version >= trans->cfg->nvm_calib_ver) {
936 IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
937 data->nvm_version, data->calib_version);
938 return 0;
939 }
940
941 IWL_ERR(trans,
942 "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
943 data->nvm_version, trans->cfg->nvm_ver,
944 data->calib_version, trans->cfg->nvm_calib_ver);
945 return -EINVAL;
946}
947IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
new file mode 100644
index 000000000000..750c8c9ee70d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -0,0 +1,144 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64#ifndef __iwl_eeprom_parse_h__
65#define __iwl_eeprom_parse_h__
66
67#include <linux/types.h>
68#include <linux/if_ether.h>
69#include "iwl-trans.h"
70
/* Parsed NVM/EEPROM contents; allocated by iwl_parse_eeprom_data() with
 * the flexible channels[] array sized for the device's channel count. */
struct iwl_nvm_data {
	/* number of MAC addresses the NVM provides */
	int n_hw_addrs;
	/* first/base MAC address read from the NVM */
	u8 hw_addr[ETH_ALEN];

	/* calibration data — filled in by iwl_eeprom_read_calib() */
	u8 calib_version;
	__le16 calib_voltage;

	/* raw/kelvin temperature and voltage readings from the NVM */
	__le16 raw_temperature;
	__le16 kelvin_temperature;
	__le16 kelvin_voltage;
	/* crystal calibration words */
	__le16 xtal_calib[2];

	/* SKU capability bits parsed from the NVM SKU word */
	bool sku_cap_band_24GHz_enable;
	bool sku_cap_band_52GHz_enable;
	bool sku_cap_11n_enable;
	bool sku_cap_11ac_enable;
	bool sku_cap_amt_enable;
	bool sku_cap_ipan_enable;
	bool sku_cap_mimo_disabled;

	/* radio configuration word, unpacked into its fields */
	u16 radio_cfg_type;
	u8 radio_cfg_step;
	u8 radio_cfg_dash;
	u8 radio_cfg_pnum;
	/* antenna bitmasks (possibly overridden by the device config) */
	u8 valid_tx_ant, valid_rx_ant;

	u32 nvm_version;
	/* highest channel tx power, in half-dBm units */
	s8 max_tx_pwr_half_dbm;

	/* NOTE(review): set elsewhere (location-aware regulatory?) — confirm */
	bool lar_enabled;
	/* per-band data; channels/ht_cap point into this struct */
	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
	/* flexible array: all channels, grouped contiguously by band */
	struct ieee80211_channel channels[];
};
104
105/**
106 * iwl_parse_eeprom_data - parse EEPROM data and return values
107 *
108 * @dev: device pointer we're parsing for, for debug only
109 * @cfg: device configuration for parsing and overrides
110 * @eeprom: the EEPROM data
111 * @eeprom_size: length of the EEPROM data
112 *
113 * This function parses all EEPROM values we need and then
114 * returns a (newly allocated) struct containing all the
115 * relevant values for driver use. The struct must be freed
116 * later with iwl_free_nvm_data().
117 */
118struct iwl_nvm_data *
119iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
120 const u8 *eeprom, size_t eeprom_size);
121
/**
 * iwl_free_nvm_data - free NVM data
 * @data: the data to free; may be NULL (kfree() ignores NULL)
 *
 * Releases a struct previously returned by iwl_parse_eeprom_data().
 */
static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
{
	kfree(data);
}
130
131int iwl_nvm_check_version(struct iwl_nvm_data *data,
132 struct iwl_trans *trans);
133
134int iwl_init_sband_channels(struct iwl_nvm_data *data,
135 struct ieee80211_supported_band *sband,
136 int n_channels, enum ieee80211_band band);
137
138void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
139 struct iwl_nvm_data *data,
140 struct ieee80211_sta_ht_cap *ht_info,
141 enum ieee80211_band band,
142 u8 tx_chains, u8 rx_chains);
143
144#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
new file mode 100644
index 000000000000..219ca8acca62
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -0,0 +1,464 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#include <linux/types.h>
63#include <linux/slab.h>
64#include <linux/export.h>
65
66#include "iwl-drv.h"
67#include "iwl-debug.h"
68#include "iwl-eeprom-read.h"
69#include "iwl-io.h"
70#include "iwl-prph.h"
71#include "iwl-csr.h"
72
73/*
74 * EEPROM access time values:
75 *
76 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
77 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
78 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
79 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
80 */
81#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
82
83#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
84#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
85
86
87/*
88 * The device's EEPROM semaphore prevents conflicts between driver and uCode
89 * when accessing the EEPROM; each access is a series of pulses to/from the
90 * EEPROM chip, not a single event, so even reads could conflict if they
91 * weren't arbitrated by the semaphore.
92 */
93
94#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
95#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
96
97static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
98{
99 u16 count;
100 int ret;
101
102 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
103 /* Request semaphore */
104 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
105 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
106
107 /* See if we got it */
108 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
109 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
110 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
111 EEPROM_SEM_TIMEOUT);
112 if (ret >= 0) {
113 IWL_DEBUG_EEPROM(trans->dev,
114 "Acquired semaphore after %d tries.\n",
115 count+1);
116 return ret;
117 }
118 }
119
120 return ret;
121}
122
/* Drop EEPROM ownership so the uCode may access it again. */
static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
{
	iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
		      CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
128
129static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
130{
131 u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
132
133 IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
134
135 switch (gp) {
136 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
137 if (!nvm_is_otp) {
138 IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
139 gp);
140 return -ENOENT;
141 }
142 return 0;
143 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
144 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
145 if (nvm_is_otp) {
146 IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
147 return -ENOENT;
148 }
149 return 0;
150 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
151 default:
152 IWL_ERR(trans,
153 "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
154 nvm_is_otp ? "OTP" : "EEPROM", gp);
155 return -ENOENT;
156 }
157}
158
159/******************************************************************************
160 *
161 * OTP related functions
162 *
163******************************************************************************/
164
/* Switch the OTP to absolute addressing mode (used to walk the OTP
 * link list in iwl_find_otp_image()). */
static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
{
	/* NOTE(review): read with discarded result — presumably required by
	 * the HW before changing the access mode; confirm */
	iwl_read32(trans, CSR_OTP_GP_REG);

	/* clearing the mode bit selects absolute addressing */
	iwl_clear_bit(trans, CSR_OTP_GP_REG,
		      CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
172
173static int iwl_nvm_is_otp(struct iwl_trans *trans)
174{
175 u32 otpgp;
176
177 /* OTP only valid for CP/PP and after */
178 switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
179 case CSR_HW_REV_TYPE_NONE:
180 IWL_ERR(trans, "Unknown hardware type\n");
181 return -EIO;
182 case CSR_HW_REV_TYPE_5300:
183 case CSR_HW_REV_TYPE_5350:
184 case CSR_HW_REV_TYPE_5100:
185 case CSR_HW_REV_TYPE_5150:
186 return 0;
187 default:
188 otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
189 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
190 return 1;
191 return 0;
192 }
193}
194
195static int iwl_init_otp_access(struct iwl_trans *trans)
196{
197 int ret;
198
199 /* Enable 40MHz radio clock */
200 iwl_write32(trans, CSR_GP_CNTRL,
201 iwl_read32(trans, CSR_GP_CNTRL) |
202 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
203
204 /* wait for clock to be ready */
205 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
206 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
207 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
208 25000);
209 if (ret < 0) {
210 IWL_ERR(trans, "Time out access OTP\n");
211 } else {
212 iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
213 APMG_PS_CTRL_VAL_RESET_REQ);
214 udelay(5);
215 iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
216 APMG_PS_CTRL_VAL_RESET_REQ);
217
218 /*
219 * CSR auto clock gate disable bit -
220 * this is only applicable for HW with OTP shadow RAM
221 */
222 if (trans->cfg->base_params->shadow_ram_support)
223 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
224 CSR_RESET_LINK_PWR_MGMT_DISABLED);
225 }
226 return ret;
227}
228
/*
 * iwl_read_otp_word - read one 16-bit word from the OTP
 *
 * Issues the byte address (addr << 1) to CSR_EEPROM_REG, polls for the
 * read-valid bit, then checks the OTP ECC status: an uncorrectable ECC
 * error aborts the read (-EINVAL), a correctable one is acknowledged and
 * the read continues.  On success the word (taken from bits 31-16 of the
 * register) is stored little-endian in *eeprom_data and 0 is returned.
 */
static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
			     __le16 *eeprom_data)
{
	int ret = 0;
	u32 r;
	u32 otpgp;

	iwl_write32(trans, CSR_EEPROM_REG,
		    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
	ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   IWL_EEPROM_ACCESS_TIMEOUT);
	if (ret < 0) {
		IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
		return ret;
	}
	r = iwl_read32(trans, CSR_EEPROM_REG);
	/* check for ECC errors: */
	otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
	if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
		/* stop in this case */
		/* set the uncorrectable OTP ECC bit for acknowledgment */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
		return -EINVAL;
	}
	if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
		/* continue in this case */
		/* set the correctable OTP ECC bit for acknowledgment */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
		IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
	}
	/* data word lives in the upper 16 bits of the register */
	*eeprom_data = cpu_to_le16(r >> 16);
	return 0;
}
267
268/*
269 * iwl_is_otp_empty: check for empty OTP
270 */
271static bool iwl_is_otp_empty(struct iwl_trans *trans)
272{
273 u16 next_link_addr = 0;
274 __le16 link_value;
275 bool is_empty = false;
276
277 /* locate the beginning of OTP link list */
278 if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
279 if (!link_value) {
280 IWL_ERR(trans, "OTP is empty\n");
281 is_empty = true;
282 }
283 } else {
284 IWL_ERR(trans, "Unable to read first block of OTP list.\n");
285 is_empty = true;
286 }
287
288 return is_empty;
289}
290
291
292/*
293 * iwl_find_otp_image: find EEPROM image in OTP
294 * finding the OTP block that contains the EEPROM image.
295 * the last valid block on the link list (the block _before_ the last block)
296 * is the block we should read and used to configure the device.
297 * If all the available OTP blocks are full, the last block will be the block
298 * we should read and used to configure the device.
299 * only perform this operation if shadow RAM is disabled
300 */
/*
 * iwl_find_otp_image: find EEPROM image in OTP
 * finding the OTP block that contains the EEPROM image.
 * the last valid block on the link list (the block _before_ the last block)
 * is the block we should read and used to configure the device.
 * If all the available OTP blocks are full, the last block will be the block
 * we should read and used to configure the device.
 * only perform this operation if shadow RAM is disabled
 *
 * On success, *validblockaddr is set to the first data byte of that block
 * (past its 2-byte link pointer) and 0 is returned; -EINVAL on an empty
 * OTP, a read error, or an over-long link list.
 */
static int iwl_find_otp_image(struct iwl_trans *trans,
			      u16 *validblockaddr)
{
	u16 next_link_addr = 0, valid_addr;
	__le16 link_value = 0;
	int usedblocks = 0;

	/* set addressing mode to absolute to traverse the link list */
	iwl_set_otp_access_absolute(trans);

	/* checking for empty OTP or error */
	if (iwl_is_otp_empty(trans))
		return -EINVAL;

	/*
	 * start traverse link list
	 * until reach the max number of OTP blocks
	 * different devices have different number of OTP blocks
	 */
	do {
		/* save current valid block address
		 * check for more block on the link list
		 */
		valid_addr = next_link_addr;
		/* link values count 16-bit words; convert to a byte address */
		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
		IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
				 usedblocks, next_link_addr);
		if (iwl_read_otp_word(trans, next_link_addr, &link_value))
			return -EINVAL;
		if (!link_value) {
			/*
			 * reach the end of link list, return success and
			 * set address point to the starting address
			 * of the image
			 */
			*validblockaddr = valid_addr;
			/* skip first 2 bytes (link list pointer) */
			*validblockaddr += 2;
			return 0;
		}
		/* more in the link list, continue */
		usedblocks++;
	} while (usedblocks <= trans->cfg->base_params->max_ll_items);

	/* OTP has no valid blocks */
	IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
	return -EINVAL;
}
349
/**
 * iwl_read_eeprom - read EEPROM contents
 * @trans: transport to read through
 * @eeprom: on success, set to a newly kmalloc'd buffer holding the image;
 *	caller owns and must kfree() it
 * @eeprom_size: on success, set to the image size in bytes
 *
 * Load the EEPROM contents from adapter and return it
 * and its size.
 *
 * Detects OTP vs. EEPROM, verifies the signature, acquires the EEPROM
 * semaphore, then reads the image word by word (for OTP, optionally
 * walking the link list first when there is no shadow RAM).
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
{
	__le16 *e;
	u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;
	u16 validblockaddr = 0;
	u16 cache_addr = 0;
	int nvm_is_otp;

	if (!eeprom || !eeprom_size)
		return -EINVAL;

	nvm_is_otp = iwl_nvm_is_otp(trans);
	if (nvm_is_otp < 0)
		return nvm_is_otp;

	sz = trans->cfg->base_params->eeprom_size;
	IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);

	e = kmalloc(sz, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
	if (ret < 0) {
		IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		goto err_free;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = iwl_eeprom_acquire_semaphore(trans);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
		goto err_free;
	}

	if (nvm_is_otp) {
		ret = iwl_init_otp_access(trans);
		if (ret) {
			IWL_ERR(trans, "Failed to initialize OTP access.\n");
			goto err_unlock;
		}

		/* take interface ownership of the NVM */
		iwl_write32(trans, CSR_EEPROM_GP,
			    iwl_read32(trans, CSR_EEPROM_GP) &
			    ~CSR_EEPROM_GP_IF_OWNER_MSK);

		/* acknowledge any stale ECC status before reading */
		iwl_set_bit(trans, CSR_OTP_GP_REG,
			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
		/* traversing the linked list if no shadow ram supported */
		if (!trans->cfg->base_params->shadow_ram_support) {
			ret = iwl_find_otp_image(trans, &validblockaddr);
			if (ret)
				goto err_unlock;
		}
		/* copy the image block word by word into e[] */
		for (addr = validblockaddr; addr < validblockaddr + sz;
		     addr += sizeof(u16)) {
			__le16 eeprom_data;

			ret = iwl_read_otp_word(trans, addr, &eeprom_data);
			if (ret)
				goto err_unlock;
			e[cache_addr / 2] = eeprom_data;
			cache_addr += sizeof(u16);
		}
	} else {
		/* eeprom is an array of 16bit values */
		for (addr = 0; addr < sz; addr += sizeof(u16)) {
			u32 r;

			/* issue the byte address, then poll for read-valid */
			iwl_write32(trans, CSR_EEPROM_REG,
				    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

			ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   CSR_EEPROM_REG_READ_VALID_MSK,
					   IWL_EEPROM_ACCESS_TIMEOUT);
			if (ret < 0) {
				IWL_ERR(trans,
					"Time out reading EEPROM[%d]\n", addr);
				goto err_unlock;
			}
			r = iwl_read32(trans, CSR_EEPROM_REG);
			/* data word lives in the upper 16 bits */
			e[addr / 2] = cpu_to_le16(r >> 16);
		}
	}

	IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
			 nvm_is_otp ? "OTP" : "EEPROM");

	iwl_eeprom_release_semaphore(trans);

	*eeprom_size = sz;
	*eeprom = (u8 *)e;
	return 0;

 err_unlock:
	iwl_eeprom_release_semaphore(trans);
 err_free:
	kfree(e);

	return ret;
}
464IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
new file mode 100644
index 000000000000..a6d3bdf82cdd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
@@ -0,0 +1,70 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66#include "iwl-trans.h"
67
68int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
69
70#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
new file mode 100644
index 000000000000..d56064861a9c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -0,0 +1,535 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_fh_h__
64#define __iwl_fh_h__
65
66#include <linux/types.h>
67
68/****************************/
69/* Flow Handler Definitions */
70/****************************/
71
72/**
73 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
74 * Addresses are offsets from device's PCI hardware base address.
75 */
76#define FH_MEM_LOWER_BOUND (0x1000)
77#define FH_MEM_UPPER_BOUND (0x2000)
78
79/**
80 * Keep-Warm (KW) buffer base address.
81 *
82 * Driver must allocate a 4KByte buffer that is for keeping the
83 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
84 * DRAM access when doing Txing or Rxing. The dummy accesses prevent host
85 * from going into a power-savings mode that would cause higher DRAM latency,
86 * and possible data over/under-runs, before all Tx/Rx is complete.
87 *
88 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
89 * of the buffer, which must be 4K aligned. Once this is set up, the device
90 * automatically invokes keep-warm accesses when normal accesses might not
91 * be sufficient to maintain fast DRAM response.
92 *
93 * Bit fields:
94 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
95 */
96#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
97
98
99/**
100 * TFD Circular Buffers Base (CBBC) addresses
101 *
102 * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
103 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
104 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
105 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
106 * aligned (address bits 0-7 must be 0).
107 * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
108 * for them are in different places.
109 *
110 * Bit fields in each pointer register:
111 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
112 */
113#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
114#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
115#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0)
116#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
117#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
118#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
119
120/* Find TFD CB base pointer for given queue */
121static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
122{
123 if (chnl < 16)
124 return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
125 if (chnl < 20)
126 return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
127 WARN_ON_ONCE(chnl >= 32);
128 return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
129}
130
131
132/**
133 * Rx SRAM Control and Status Registers (RSCSR)
134 *
135 * These registers provide handshake between driver and device for the Rx queue
136 * (this queue handles *all* command responses, notifications, Rx data, etc.
137 * sent from uCode to host driver). Unlike Tx, there is only one Rx
138 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
139 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
140 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
141 * mapping between RBDs and RBs.
142 *
143 * Driver must allocate host DRAM memory for the following, and set the
144 * physical address of each into device registers:
145 *
146 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
147 * entries (although any power of 2, up to 4096, is selectable by driver).
148 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
149 * (typically 4K, although 8K or 16K are also selectable by driver).
150 * Driver sets up RB size and number of RBDs in the CB via Rx config
151 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
152 *
153 * Bit fields within one RBD:
154 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
155 *
156 * Driver sets physical address [35:8] of base of RBD circular buffer
157 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
158 *
159 * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers
160 * (RBs) have been filled, via a "write pointer", actually the index of
161 * the RB's corresponding RBD within the circular buffer. Driver sets
162 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
163 *
164 * Bit fields in lower dword of Rx status buffer (upper dword not used
 * by driver):
166 * 31-12: Not used by driver
167 * 11- 0: Index of last filled Rx buffer descriptor
168 * (device writes, driver reads this value)
169 *
170 * As the driver prepares Receive Buffers (RBs) for device to fill, driver must
171 * enter pointers to these RBs into contiguous RBD circular buffer entries,
172 * and update the device's "write" index register,
173 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
174 *
175 * This "write" index corresponds to the *next* RBD that the driver will make
176 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
177 * the circular buffer. This value should initially be 0 (before preparing any
178 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
179 * wrap back to 0 at the end of the circular buffer (but don't wrap before
180 * "read" index has advanced past 1! See below).
181 * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
182 *
183 * As the device fills RBs (referenced from contiguous RBDs within the circular
184 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
185 * to tell the driver the index of the latest filled RBD. The driver must
186 * read this "read" index from DRAM after receiving an Rx interrupt from device
187 *
188 * The driver must also internally keep track of a third index, which is the
189 * next RBD to process. When receiving an Rx interrupt, driver should process
190 * all filled but unprocessed RBs up to, but not including, the RB
191 * corresponding to the "read" index. For example, if "read" index becomes "1",
192 * driver may process the RB pointed to by RBD 0. Depending on volume of
193 * traffic, there may be many RBs to process.
194 *
195 * If read index == write index, device thinks there is no room to put new data.
196 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
197 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
198 * and "read" indexes; that is, make sure that there are no more than 254
199 * buffers waiting to be filled.
200 */
201#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
202#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
203#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
204
205/**
206 * Physical base address of 8-byte Rx Status buffer.
207 * Bit fields:
208 * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
209 */
210#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
211
212/**
213 * Physical base address of Rx Buffer Descriptor Circular Buffer.
214 * Bit fields:
 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
216 */
217#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
218
219/**
220 * Rx write pointer (index, really!).
221 * Bit fields:
222 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
223 * NOTE: For 256-entry circular buffer, use only bits [7:0].
224 */
225#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
226#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
227
228#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c)
229#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
230
231/**
232 * Rx Config/Status Registers (RCSR)
233 * Rx Config Reg for channel 0 (only channel used)
234 *
235 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
236 * normal operation (see bit fields).
237 *
238 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
239 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
240 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
241 *
242 * Bit fields:
243 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
244 * '10' operate normally
245 * 29-24: reserved
246 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
247 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
248 * 19-18: reserved
249 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
250 * '10' 12K, '11' 16K.
251 * 15-14: reserved
252 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
253 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
254 * typical value 0x10 (about 1/2 msec)
255 * 3- 0: reserved
256 */
257#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
258#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
259#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
260
261#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
262#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR (FH_MEM_RCSR_CHNL0 + 0x8)
263#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (FH_MEM_RCSR_CHNL0 + 0x10)
264
265#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
267#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
268#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
269#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
270#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
271
272#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
273#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
274#define RX_RB_TIMEOUT (0x11)
275
276#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
277#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
278#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
279
280#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
281#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
282#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
283#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
284
285#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
286#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
287#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
288
289/**
290 * Rx Shared Status Registers (RSSR)
291 *
292 * After stopping Rx DMA channel (writing 0 to
293 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
294 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
295 *
296 * Bit fields:
297 * 24: 1 = Channel 0 is idle
298 *
299 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
300 * contain default values that should not be altered by the driver.
301 */
302#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
303#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
304
305#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
306#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
307#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
308 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
309
310#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
311
312#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
313#define FH_MEM_TB_MAX_LENGTH (0x00020000)
314
315/* TFDB Area - TFDs buffer table */
316#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
317#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
318#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
319#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
320#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
321
322/**
323 * Transmit DMA Channel Control/Status Registers (TCSR)
324 *
325 * Device has one configuration register for each of 8 Tx DMA/FIFO channels
326 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
327 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
328 *
329 * To use a Tx DMA channel, driver must initialize its
330 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
331 *
332 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
333 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
334 *
335 * All other bits should be 0.
336 *
337 * Bit fields:
338 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
339 * '10' operate normally
340 * 29- 4: Reserved, set to "0"
341 * 3: Enable internal DMA requests (1, normal operation), disable (0)
342 * 2- 0: Reserved, set to "0"
343 */
344#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
345#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
346
347/* Find Control/Status reg for given Tx DMA/FIFO channel */
348#define FH_TCSR_CHNL_NUM (8)
349
350/* TCSR: tx_config register values */
351#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
352 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
353#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
354 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
355#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
356 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
357
358#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
359#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
360
361#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
362#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
363
364#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
365#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
366#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
367
368#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
369#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
370#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
371
372#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
373#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
374#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
375
376#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
377#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
378#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
379
380#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
381#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
382
383/**
384 * Tx Shared Status Registers (TSSR)
385 *
386 * After stopping Tx DMA channel (writing 0 to
387 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
388 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
389 * (channel's buffers empty | no pending requests).
390 *
391 * Bit fields:
392 * 31-24: 1 = Channel buffers empty (channel 7:0)
393 * 23-16: 1 = No pending requests (channel 7:0)
394 */
395#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
396#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
397
398#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
399
400/**
401 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
402 * 31: Indicates an address error when accessed to internal memory
403 * uCode/driver must write "1" in order to clear this flag
404 * 30: Indicates that Host did not send the expected number of dwords to FH
405 * uCode/driver must write "1" in order to clear this flag
406 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
407 * command was received from the scheduler while the TRB was already full
408 * with previous command
409 * uCode/driver must write "1" in order to clear this flag
410 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
411 * bit is set, it indicates that the FH has received a full indication
412 * from the RTC TxFIFO and the current value of the TxCredit counter was
413 * not equal to zero. This mean that the credit mechanism was not
414 * synchronized to the TxFIFO status
415 * uCode/driver must write "1" in order to clear this flag
416 */
417#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
418#define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008)
419
420#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
421
422/* Tx service channels */
423#define FH_SRVC_CHNL (9)
424#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
425#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
426#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
427 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
428
429#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
430#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
431
432/* Instruct FH to increment the retry count of a packet when
433 * it is brought from the memory to TX-FIFO
434 */
435#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
436
437#define RX_QUEUE_SIZE 256
438#define RX_QUEUE_MASK 255
439#define RX_QUEUE_SIZE_LOG 8
440
/**
 * struct iwl_rb_status - receive buffer status
 *	host memory mapped FH registers
 *
 * Hardware-defined layout: 8 bytes of status written by the device into
 * host DRAM (see the RSCSR description above), followed by an unused dword.
 *
 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
 * @finished_rb_num [0:11] - Indicates the index of the current RB
 *	in which the last frame was written to
 * @finished_fr_nam [0:11] - Indicates the index of the RX Frame
 *	which was transferred (field name typo'd "nam" for "num";
 *	kept as-is for source compatibility with existing users)
 */
struct iwl_rb_status {
	__le16 closed_rb_num;
	__le16 closed_fr_num;
	__le16 finished_rb_num;
	__le16 finished_fr_nam;
	__le32 __unused;	/* upper dword of the status buffer; not read by driver */
} __packed;
458
459
460#define TFD_QUEUE_SIZE_MAX (256)
461#define TFD_QUEUE_SIZE_BC_DUP (64)
462#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
463#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
464#define IWL_NUM_OF_TBS 20
465
466static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
467{
468 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
469}
/**
 * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
 *
 * This structure contains the dma address and length of one Tx buffer (TB)
 * referenced by a TFD.
 *
 * @lo: low [31:0] portion of the dma address of the TX buffer;
 *	every even TB is unaligned on a 16 bit boundary
 * @hi_n_len: bits 0-3 hold the [35:32] portion of the dma address,
 *	bits 4-15 hold the length of the tx buffer
 */
struct iwl_tfd_tb {
	__le32 lo;
	__le16 hi_n_len;
} __packed;
484
485/**
486 * struct iwl_tfd
487 *
488 * Transmit Frame Descriptor (TFD)
489 *
490 * @ __reserved1[3] reserved
491 * @ num_tbs 0-4 number of active tbs
492 * 5 reserved
493 * 6-7 padding (not used)
494 * @ tbs[20] transmit frame buffer descriptors
495 * @ __pad padding
496 *
497 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
498 * Both driver and device share these circular buffers, each of which must be
499 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
500 *
501 * Driver must indicate the physical address of the base of each
502 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
503 *
504 * Each TFD contains pointer/size information for up to 20 data buffers
505 * in host DRAM. These buffers collectively contain the (one) frame described
506 * by the TFD. Each buffer must be a single contiguous block of memory within
507 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
 * of (4K - 4). The device concatenates all of a TFD's buffers into a single
509 * Tx frame, up to 8 KBytes in size.
510 *
511 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
512 */
struct iwl_tfd {
	u8 __reserved1[3];
	/* bits 0-4: number of active TBs; bit 5 reserved; bits 6-7 padding */
	u8 num_tbs;
	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
	__le32 __pad;
} __packed;
519
520/* Keep Warm Size */
521#define IWL_KW_SIZE 0x1000 /* 4k */
522
523/* Fixed (non-configurable) rx data from phy */
524
/**
 * struct iwlagn_scd_bc_tbl - scheduler byte count table
 *	base physical address provided by SCD_DRAM_BASE_ADDR
 *
 * One entry per TFD queue slot, plus TFD_QUEUE_SIZE_BC_DUP duplicate
 * entries (TFD_QUEUE_BC_SIZE total).
 *
 * @tfd_offset: per-TFD word:
 *	0-12 - tx command byte count
 *	12-16 - station index
 */
struct iwlagn_scd_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
534
535#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
new file mode 100644
index 000000000000..9dbe19cbb4dd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -0,0 +1,320 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __fw_error_dump_h__
66#define __fw_error_dump_h__
67
68#include <linux/types.h>
69
70#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
71
/**
 * enum iwl_fw_error_dump_type - types of data in the dump file
 * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
 * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents, structured as
 *	&struct iwl_fw_error_dump_fifo
 * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
 *	&struct iwl_fw_error_dump_txcmd packets
 * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info
 *	info on the device / firmware.
 * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
 * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
 *	sections like this in a single file.
 * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents, structured as
 *	&struct iwl_fw_error_dump_fifo
 * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
 * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
 * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
 *	Structured as &struct iwl_fw_error_dump_trigger_desc.
 * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
 *	&struct iwl_fw_error_dump_rb
 * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
 *	paged to the DRAM.
 * @IWL_FW_ERROR_DUMP_MAX: sentinel, one past the last valid type
 */
enum iwl_fw_error_dump_type {
	/* 0 is deprecated */
	IWL_FW_ERROR_DUMP_CSR = 1,
	IWL_FW_ERROR_DUMP_RXF = 2,
	IWL_FW_ERROR_DUMP_TXCMD = 3,
	IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
	IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
	IWL_FW_ERROR_DUMP_PRPH = 6,
	IWL_FW_ERROR_DUMP_TXF = 7,
	IWL_FW_ERROR_DUMP_FH_REGS = 8,
	IWL_FW_ERROR_DUMP_MEM = 9,
	IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
	IWL_FW_ERROR_DUMP_RB = 11,
	IWL_FW_ERROR_DUMP_PAGING = 12,

	IWL_FW_ERROR_DUMP_MAX,
};
109
/**
 * struct iwl_fw_error_dump_data - header of one section of the dump file
 * @type: section type, one of &enum iwl_fw_error_dump_type
 * @len: length of the section payload, starting from @data
 * @data: the section payload itself, @len bytes
 */
struct iwl_fw_error_dump_data {
	__le32 type;
	__le32 len;
	__u8 data[];
} __packed;
121
122/**
123 * struct iwl_fw_error_dump_file - the layout of the header of the file
124 * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
125 * @file_len: the length of all the file starting from %barker
126 * @data: array of %struct iwl_fw_error_dump_data
127 */
128struct iwl_fw_error_dump_file {
129 __le32 barker;
130 __le32 file_len;
131 u8 data[0];
132} __packed;
133
/**
 * struct iwl_fw_error_dump_txcmd - TX command data
 * @cmdlen: original length of the command
 * @caplen: captured length of the command (may be less than @cmdlen)
 * @data: captured command data, @caplen bytes
 */
struct iwl_fw_error_dump_txcmd {
	__le32 cmdlen;
	__le32 caplen;
	u8 data[];
} __packed;
145
/**
 * struct iwl_fw_error_dump_fifo - RX/TX FIFO data
 *
 * Used for both %IWL_FW_ERROR_DUMP_RXF and %IWL_FW_ERROR_DUMP_TXF sections.
 *
 * @fifo_num: number of FIFO (starting from 0)
 * @available_bytes: num of bytes available in FIFO (may be less than FIFO size)
 * @wr_ptr: position of write pointer
 * @rd_ptr: position of read pointer
 * @fence_ptr: position of fence pointer
 * @fence_mode: the current mode of the fence (before locking) -
 *	0=follow RD pointer ; 1 = freeze
 * @data: all of the FIFO's data
 */
struct iwl_fw_error_dump_fifo {
	__le32 fifo_num;
	__le32 available_bytes;
	__le32 wr_ptr;
	__le32 rd_ptr;
	__le32 fence_ptr;
	__le32 fence_mode;
	u8 data[];
} __packed;
166
/**
 * enum iwl_fw_error_dump_family - device family recorded in the dump
 * @IWL_FW_ERROR_DUMP_FAMILY_7: family 7 device
 * @IWL_FW_ERROR_DUMP_FAMILY_8: family 8 device
 */
enum iwl_fw_error_dump_family {
	IWL_FW_ERROR_DUMP_FAMILY_7 = 7,
	IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
};
171
/**
 * struct iwl_fw_error_dump_info - info on the device / firmware
 * @device_family: the family of the device, one of
 *	&enum iwl_fw_error_dump_family (7 / 8)
 * @hw_step: the step of the device
 * @fw_human_readable: human readable FW version, NUL-padded
 * @dev_human_readable: name of the device, NUL-padded
 * @bus_human_readable: name of the bus used, NUL-padded
 */
struct iwl_fw_error_dump_info {
	__le32 device_family;
	__le32 hw_step;
	u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
	u8 dev_human_readable[64];
	u8 bus_human_readable[8];
} __packed;
187
/**
 * struct iwl_fw_error_dump_fw_mon - FW monitor data
 * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
 * @fw_mon_base_ptr: base pointer of the data
 * @fw_mon_cycle_cnt: number of wraparounds of the cyclic buffer
 * @reserved: for future use
 * @data: captured data
 */
struct iwl_fw_error_dump_fw_mon {
	__le32 fw_mon_wr_ptr;
	__le32 fw_mon_base_ptr;
	__le32 fw_mon_cycle_cnt;
	__le32 reserved[3];
	u8 data[];
} __packed;
203
/**
 * struct iwl_fw_error_dump_prph - periphery registers data
 * @prph_start: address of the first register in this chunk
 * @data: the content of the registers
 *
 * NOTE(review): not tagged __packed, unlike the header structs above —
 * harmless here since all members are naturally aligned __le32, but
 * confirm this asymmetry is intentional.
 */
struct iwl_fw_error_dump_prph {
	__le32 prph_start;
	__le32 data[];
};
213
/**
 * enum iwl_fw_error_dump_mem_type - memory type of an
 *	%IWL_FW_ERROR_DUMP_MEM section
 * @IWL_FW_ERROR_DUMP_MEM_SRAM: the chunk was read from SRAM
 * @IWL_FW_ERROR_DUMP_MEM_SMEM: the chunk was read from SMEM
 */
enum iwl_fw_error_dump_mem_type {
	IWL_FW_ERROR_DUMP_MEM_SRAM,
	IWL_FW_ERROR_DUMP_MEM_SMEM,
};
218
/**
 * struct iwl_fw_error_dump_mem - chunk of memory
 * @type: one of &enum iwl_fw_error_dump_mem_type
 * @offset: the offset from which the memory was read
 * @data: the content of the memory
 */
struct iwl_fw_error_dump_mem {
	__le32 type;
	__le32 offset;
	u8 data[];
};
230
/**
 * struct iwl_fw_error_dump_rb - content of a Receive Buffer
 * @index: the index of the Receive Buffer in the Rx queue
 * @rxq: the RB's Rx queue
 * @reserved: reserved, currently unused
 * @data: the content of the Receive Buffer
 */
struct iwl_fw_error_dump_rb {
	__le32 index;
	__le32 rxq;
	__le32 reserved;
	u8 data[];
};
244
/**
 * struct iwl_fw_error_dump_paging - content of the UMAC's image page
 *	block on DRAM
 * @index: the index of the page block
 * @reserved: reserved, currently unused
 * @data: the content of the page block
 */
struct iwl_fw_error_dump_paging {
	__le32 index;
	__le32 reserved;
	u8 data[];
};
257
258/**
259 * iwl_fw_error_next_data - advance fw error dump data pointer
260 * @data: previous data block
261 * Returns: next data block
262 */
263static inline struct iwl_fw_error_dump_data *
264iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
265{
266 return (void *)(data->data + le32_to_cpu(data->len));
267}
268
/**
 * enum iwl_fw_dbg_trigger - triggers available
 *
 * @FW_DBG_TRIGGER_INVALID: invalid trigger value, must not be used
 * @FW_DBG_TRIGGER_USER: trigger log collection by user
 *	This should not be defined as a trigger to the driver, but a value the
 *	driver should set to indicate that the trigger was initiated by the
 *	user.
 * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
 * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
 *	missed.
 * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
 * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
 *	command response or a notification.
 * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
 * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
 * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
 *	goes below a threshold.
 * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
 *	detection.
 * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
 *	events.
 * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
 * @FW_DBG_TRIGGER_MAX: sentinel, one past the last valid trigger
 */
enum iwl_fw_dbg_trigger {
	FW_DBG_TRIGGER_INVALID = 0,
	FW_DBG_TRIGGER_USER,
	FW_DBG_TRIGGER_FW_ASSERT,
	FW_DBG_TRIGGER_MISSED_BEACONS,
	FW_DBG_TRIGGER_CHANNEL_SWITCH,
	FW_DBG_TRIGGER_FW_NOTIF,
	FW_DBG_TRIGGER_MLME,
	FW_DBG_TRIGGER_STATS,
	FW_DBG_TRIGGER_RSSI,
	FW_DBG_TRIGGER_TXQ_TIMERS,
	FW_DBG_TRIGGER_TIME_EVENT,
	FW_DBG_TRIGGER_BA,

	/* must be last */
	FW_DBG_TRIGGER_MAX,
};
309
/**
 * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
 * @type: the trigger that fired, one of &enum iwl_fw_dbg_trigger
 * @data: raw data about what happened
 */
struct iwl_fw_error_dump_trigger_desc {
	__le32 type;
	u8 data[];
};
319
320#endif /* __fw_error_dump_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
new file mode 100644
index 000000000000..08303db0000f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -0,0 +1,768 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __iwl_fw_file_h__
66#define __iwl_fw_file_h__
67
68#include <linux/netdevice.h>
69#include <linux/nl80211.h>
70
/*
 * v1/v2 uCode file layout
 *
 * Identified by a non-zero first word (ver); the newer TLV layout
 * (struct iwl_tlv_ucode_header below) starts with four zero bytes
 * instead.  v1 and v2 differ only in the extra leading build number
 * of v2.
 */
struct iwl_ucode_header {
	__le32 ver;	/* major/minor/API/serial */
	union {
		struct {
			__le32 inst_size;	/* bytes of runtime code */
			__le32 data_size;	/* bytes of runtime data */
			__le32 init_size;	/* bytes of init code */
			__le32 init_data_size;	/* bytes of init data */
			__le32 boot_size;	/* bytes of bootstrap code */
			u8 data[0];		/* in same order as sizes */
		} v1;
		struct {
			__le32 build;		/* build number */
			__le32 inst_size;	/* bytes of runtime code */
			__le32 data_size;	/* bytes of runtime data */
			__le32 init_size;	/* bytes of init code */
			__le32 init_data_size;	/* bytes of init data */
			__le32 boot_size;	/* bytes of bootstrap code */
			u8 data[0];		/* in same order as sizes */
		} v2;
	} u;
};
94
95/*
96 * new TLV uCode file layout
97 *
98 * The new TLV file format contains TLVs, that each specify
99 * some piece of data.
100 */
101
/* Type codes carried in struct iwl_ucode_tlv::type of the TLV-style
 * uCode file.  Note the numbering has gaps (33 and 37 are not defined
 * here).
 */
enum iwl_ucode_tlv_type {
	IWL_UCODE_TLV_INVALID		= 0, /* unused */
	IWL_UCODE_TLV_INST		= 1,
	IWL_UCODE_TLV_DATA		= 2,
	IWL_UCODE_TLV_INIT		= 3,
	IWL_UCODE_TLV_INIT_DATA		= 4,
	IWL_UCODE_TLV_BOOT		= 5,
	IWL_UCODE_TLV_PROBE_MAX_LEN	= 6, /* a u32 value */
	IWL_UCODE_TLV_PAN		= 7,
	IWL_UCODE_TLV_RUNT_EVTLOG_PTR	= 8,
	IWL_UCODE_TLV_RUNT_EVTLOG_SIZE	= 9,
	IWL_UCODE_TLV_RUNT_ERRLOG_PTR	= 10,
	IWL_UCODE_TLV_INIT_EVTLOG_PTR	= 11,
	IWL_UCODE_TLV_INIT_EVTLOG_SIZE	= 12,
	IWL_UCODE_TLV_INIT_ERRLOG_PTR	= 13,
	IWL_UCODE_TLV_ENHANCE_SENS_TBL	= 14,
	IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
	IWL_UCODE_TLV_WOWLAN_INST	= 16,
	IWL_UCODE_TLV_WOWLAN_DATA	= 17,
	IWL_UCODE_TLV_FLAGS		= 18,
	IWL_UCODE_TLV_SEC_RT		= 19,
	IWL_UCODE_TLV_SEC_INIT		= 20,
	IWL_UCODE_TLV_SEC_WOWLAN	= 21,
	IWL_UCODE_TLV_DEF_CALIB		= 22,
	IWL_UCODE_TLV_PHY_SKU		= 23,
	IWL_UCODE_TLV_SECURE_SEC_RT	= 24,
	IWL_UCODE_TLV_SECURE_SEC_INIT	= 25,
	IWL_UCODE_TLV_SECURE_SEC_WOWLAN	= 26,
	IWL_UCODE_TLV_NUM_OF_CPU	= 27,
	IWL_UCODE_TLV_CSCHEME		= 28,
	IWL_UCODE_TLV_API_CHANGES_SET	= 29,
	IWL_UCODE_TLV_ENABLED_CAPABILITIES	= 30,
	IWL_UCODE_TLV_N_SCAN_CHANNELS		= 31,
	IWL_UCODE_TLV_PAGING		= 32,
	IWL_UCODE_TLV_SEC_RT_USNIFFER	= 34,
	IWL_UCODE_TLV_SDIO_ADMA_ADDR	= 35,
	IWL_UCODE_TLV_FW_VERSION	= 36,
	IWL_UCODE_TLV_FW_DBG_DEST	= 38,
	IWL_UCODE_TLV_FW_DBG_CONF	= 39,
	IWL_UCODE_TLV_FW_DBG_TRIGGER	= 40,
	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
};
144
/* A single TLV of the TLV-style uCode file; each TLV is padded to a
 * multiple of 4 bytes (see struct iwl_tlv_ucode_header below).
 */
struct iwl_ucode_tlv {
	__le32 type;		/* see enum iwl_ucode_tlv_type above */
	__le32 length;		/* not including type/length fields */
	u8 data[0];
};
150
151#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
152#define FW_VER_HUMAN_READABLE_SZ 64
153
struct iwl_tlv_ucode_header {
	/*
	 * The TLV style ucode header is distinguished from
	 * the v1/v2 style header by first four bytes being
	 * zero, as such is an invalid combination of
	 * major/minor/API/serial versions.
	 */
	__le32 zero;
	__le32 magic;			/* %IWL_TLV_UCODE_MAGIC */
	u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
	/* major/minor/API/serial or major in new format */
	__le32 ver;
	__le32 build;
	__le64 ignore;			/* skipped on parse, per the name */
	/*
	 * The data contained herein has a TLV layout,
	 * see above for the TLV header and types.
	 * Note that each TLV is padded to a length
	 * that is a multiple of 4 for alignment.
	 */
	u8 data[0];
};
176
177/*
178 * ucode TLVs
179 *
180 * ability to get extension for: flags & capabilities from ucode binaries files
181 */
/* Payload of an IWL_UCODE_TLV_API_CHANGES_SET TLV.
 * NOTE(review): presumably @api_index selects a 32-bit word of the API
 * bitmap and @api_flags carries its bits (the enum values below exceed
 * 31) — confirm against the TLV parser.
 */
struct iwl_ucode_api {
	__le32 api_index;
	__le32 api_flags;
} __packed;
186
/* Payload of an IWL_UCODE_TLV_ENABLED_CAPABILITIES TLV.
 * NOTE(review): presumably @api_index selects a 32-bit word of the
 * capability bitmap and @api_capa carries its bits — confirm against
 * the TLV parser.
 */
struct iwl_ucode_capa {
	__le32 api_index;
	__le32 api_capa;
} __packed;
191
/**
 * enum iwl_ucode_tlv_flag - ucode API flags
 * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
 *	was a separate TLV but moved here to save space.
 * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
 *	treats good CRC threshold as a boolean
 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
 * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
 *	offload profile config command.
 * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
 *	(rather than two) IPv6 addresses
 * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
 *	from the probe request template.
 * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
 * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
 * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
 * @IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM: support power save on BSS station and
 *	P2P client interfaces simultaneously if they are in different bindings.
 * @IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM: support power save on BSS station and
 *	P2P client interfaces simultaneously if they are in same bindings.
 * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
 * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
 */
enum iwl_ucode_tlv_flag {
	IWL_UCODE_TLV_FLAGS_PAN			= BIT(0),
	IWL_UCODE_TLV_FLAGS_NEWSCAN		= BIT(1),
	IWL_UCODE_TLV_FLAGS_MFP			= BIT(2),
	IWL_UCODE_TLV_FLAGS_P2P			= BIT(3),
	IWL_UCODE_TLV_FLAGS_DW_BC_TABLE		= BIT(4),
	IWL_UCODE_TLV_FLAGS_SHORT_BL		= BIT(7),
	IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS	= BIT(10),
	IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID	= BIT(12),
	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL	= BIT(15),
	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE	= BIT(16),
	IWL_UCODE_TLV_FLAGS_P2P_PM		= BIT(21),
	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM	= BIT(22),
	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM	= BIT(23),
	IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT	= BIT(24),
	IWL_UCODE_TLV_FLAGS_EBS_SUPPORT		= BIT(25),
	IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD	= BIT(26),
	IWL_UCODE_TLV_FLAGS_BCAST_FILTERING	= BIT(29),
	IWL_UCODE_TLV_FLAGS_GO_UAPSD		= BIT(30),
};
241
242typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
243
/**
 * enum iwl_ucode_tlv_api - ucode api
 *
 * Values are bit positions within the ucode API bitmap (see
 * struct iwl_ucode_api above).
 *
 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
 *	longer than the passive one, which is essential for fragmented scan.
 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
 * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
 * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
 * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
 * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
 *	instead of 3.
 * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
 *	(command version 3) that supports per-chain limits
 *
 * @NUM_IWL_UCODE_TLV_API: number of bits used
 */
enum iwl_ucode_tlv_api {
	IWL_UCODE_TLV_API_BT_COEX_SPLIT		= (__force iwl_ucode_tlv_api_t)3,
	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= (__force iwl_ucode_tlv_api_t)8,
	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
	IWL_UCODE_TLV_API_WIDE_CMD_HDR		= (__force iwl_ucode_tlv_api_t)14,
	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,

	NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
		/* sparse says it cannot increment the previous enum member */
		= 128
#endif
};
276
277typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
278
/**
 * enum iwl_ucode_tlv_capa - ucode capabilities
 * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
 * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
 * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
 * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
 * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
 * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
 *	tx power value into TPC Report action frame and Link Measurement Report
 *	action frame
 * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
 *	channel in DS parameter set element in probe requests.
 * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
 *	probe requests.
 * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
 *	which also implies support for the scheduler configuration command
 * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
 * @IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT: supports DC2DC Config command
 * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
 * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
 * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
 *	sources for the MCC. This TLV bit is a future replacement to
 *	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
 *	is supported.
 * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
 * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
 * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
 * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */
enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
	IWL_UCODE_TLV_CAPA_LAR_SUPPORT			= (__force iwl_ucode_tlv_capa_t)1,
	IWL_UCODE_TLV_CAPA_UMAC_SCAN			= (__force iwl_ucode_tlv_capa_t)2,
	IWL_UCODE_TLV_CAPA_BEAMFORMER			= (__force iwl_ucode_tlv_capa_t)3,
	IWL_UCODE_TLV_CAPA_TOF_SUPPORT			= (__force iwl_ucode_tlv_capa_t)5,
	IWL_UCODE_TLV_CAPA_TDLS_SUPPORT			= (__force iwl_ucode_tlv_capa_t)6,
	IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT	= (__force iwl_ucode_tlv_capa_t)8,
	IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)9,
	IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)10,
	IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT		= (__force iwl_ucode_tlv_capa_t)11,
	IWL_UCODE_TLV_CAPA_DQA_SUPPORT			= (__force iwl_ucode_tlv_capa_t)12,
	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH		= (__force iwl_ucode_tlv_capa_t)13,
	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)18,
	IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT		= (__force iwl_ucode_tlv_capa_t)19,
	IWL_UCODE_TLV_CAPA_CSUM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)21,
	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS		= (__force iwl_ucode_tlv_capa_t)22,
	IWL_UCODE_TLV_CAPA_BT_COEX_PLCR			= (__force iwl_ucode_tlv_capa_t)28,
	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS		= (__force iwl_ucode_tlv_capa_t)65,

	NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
		/* sparse says it cannot increment the previous enum member */
		= 128
#endif
};
344
345/* The default calibrate table size if not specified by firmware file */
346#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
347#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
348#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
349
350/* The default max probe length if not specified by the firmware file */
351#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
352
353/*
354 * For 16.0 uCode and above, there is no differentiation between sections,
355 * just an offset to the HW address.
356 */
357#define IWL_UCODE_SECTION_MAX 16
358#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
359#define PAGING_SEPARATOR_SECTION 0xAAAABBBB
360
361/* uCode version contains 4 values: Major/Minor/API/Serial */
362#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
363#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
364#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
365#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
366
/**
 * struct iwl_tlv_calib_ctrl - calibration control
 *
 * Sent as part of the phy configuration command.
 *
 * @flow_trigger: bitmap for which calibrations to perform according to
 *	flow triggers.
 * @event_trigger: bitmap for which calibrations to perform according to
 *	event triggers.
 */
struct iwl_tlv_calib_ctrl {
	__le32 flow_trigger;
	__le32 event_trigger;
} __packed;
379
/* Bit layout of the PHY configuration word: 2-bit radio type, step and
 * dash fields in the low bits, and 4-bit TX / RX chain masks at bit
 * positions 16 and 20.  Each _POS value is the shift of the matching mask.
 */
enum iwl_fw_phy_cfg {
	FW_PHY_CFG_RADIO_TYPE_POS = 0,
	FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
	FW_PHY_CFG_RADIO_STEP_POS = 2,
	FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
	FW_PHY_CFG_RADIO_DASH_POS = 4,
	FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
	FW_PHY_CFG_TX_CHAIN_POS = 16,
	FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
	FW_PHY_CFG_RX_CHAIN_POS = 20,
	FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
};
392
393#define IWL_UCODE_MAX_CS 1
394
/**
 * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
 *
 * Parsed from an %IWL_UCODE_TLV_CSCHEME TLV; at most %IWL_UCODE_MAX_CS
 * schemes are supported.
 *
 * @cipher: a cipher suite selector
 * @flags: cipher scheme flags (currently reserved for a future use)
 * @hdr_len: a size of MPDU security header
 * @pn_len: a size of PN
 * @pn_off: an offset of pn from the beginning of the security header
 * @key_idx_off: an offset of key index byte in the security header
 * @key_idx_mask: a bit mask of key_idx bits
 * @key_idx_shift: bit shift needed to get key_idx
 * @mic_len: mic length in bytes
 * @hw_cipher: a HW cipher index used in host commands
 */
struct iwl_fw_cipher_scheme {
	__le32 cipher;
	u8 flags;
	u8 hdr_len;
	u8 pn_len;
	u8 pn_off;
	u8 key_idx_off;
	u8 key_idx_mask;
	u8 key_idx_shift;
	u8 mic_len;
	u8 hw_cipher;
} __packed;
420
/* Operators for struct iwl_fw_dbg_reg_op::op - how to apply @val at
 * @addr: assign / set-bits / clear-bits, through the CSR, periphery
 * (PRPH) or indirect access method.
 */
enum iwl_fw_dbg_reg_operator {
	CSR_ASSIGN,
	CSR_SETBIT,
	CSR_CLEARBIT,

	PRPH_ASSIGN,
	PRPH_SETBIT,
	PRPH_CLEARBIT,

	INDIRECT_ASSIGN,
	INDIRECT_SETBIT,
	INDIRECT_CLEARBIT,

	PRPH_BLOCKBIT,
};
436
/**
 * struct iwl_fw_dbg_reg_op - an operation on a register
 *
 * @op: operation to perform, one of &enum iwl_fw_dbg_reg_operator
 * @reserved: reserved, for alignment
 * @addr: offset of the register
 * @val: value to apply according to @op
 */
struct iwl_fw_dbg_reg_op {
	u8 op;
	u8 reserved[3];
	__le32 addr;
	__le32 val;
} __packed;
450
/**
 * enum iwl_fw_dbg_monitor_mode - available monitor recording modes
 *
 * Carried in &struct iwl_fw_dbg_dest_tlv monitor_mode field.
 *
 * @SMEM_MODE: monitor stores the data in SMEM
 * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
 * @MARBH_MODE: monitor stores the data in MARBH buffer
 * @MIPI_MODE: monitor outputs the data through the MIPI interface
 */
enum iwl_fw_dbg_monitor_mode {
	SMEM_MODE = 0,
	EXTERNAL_MODE = 1,
	MARBH_MODE = 2,
	MIPI_MODE = 3,
};
465
/**
 * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
 *
 * @version: version of the TLV - currently 0
 * @monitor_mode: one of &enum iwl_fw_dbg_monitor_mode
 * @size_power: buffer size will be 2^(size_power + 11)
 * @reserved: reserved, for alignment
 * @base_reg: addr of the base addr register (PRPH)
 * @end_reg: addr of the end addr register (PRPH)
 * @write_ptr_reg: the addr of the reg of the write pointer
 * @wrap_count: the addr of the reg of the wrap_count
 * @base_shift: shift right of the base addr reg
 * @end_shift: shift right of the end addr reg
 * @reg_ops: array of registers operations
 *
 * This parses IWL_UCODE_TLV_FW_DBG_DEST
 */
struct iwl_fw_dbg_dest_tlv {
	u8 version;
	u8 monitor_mode;
	u8 size_power;
	u8 reserved;
	__le32 base_reg;
	__le32 end_reg;
	__le32 write_ptr_reg;
	__le32 wrap_count;
	u8 base_shift;
	u8 end_shift;
	struct iwl_fw_dbg_reg_op reg_ops[0];
} __packed;
495
/**
 * struct iwl_fw_dbg_conf_hcmd - a host command as part of a debug
 *	configuration
 *
 * @id: the command's ID
 * @reserved: reserved, for alignment
 * @len: length of @data, in bytes
 * @data: the command payload
 */
struct iwl_fw_dbg_conf_hcmd {
	u8 id;
	u8 reserved;
	__le16 len;
	u8 data[0];
} __packed;
502
/**
 * enum iwl_fw_dbg_trigger_mode - triggers functionalities
 *
 * Bit flags; carried in &struct iwl_fw_dbg_trigger_tlv mode field.
 *
 * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
 * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
 * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
 *	collect only monitor data
 */
enum iwl_fw_dbg_trigger_mode {
	IWL_FW_DBG_TRIGGER_START = BIT(0),
	IWL_FW_DBG_TRIGGER_STOP = BIT(1),
	IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
};
516
517/**
518 * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger
519 * @IWL_FW_DBG_CONF_VIF_ANY: any vif type
520 * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode
521 * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode
522 * @IWL_FW_DBG_CONF_VIF_AP: AP mode
523 * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
524 * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
525 * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
526 */
527enum iwl_fw_dbg_trigger_vif_type {
528 IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
529 IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
530 IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
531 IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
532 IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
533 IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
534 IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
535};
536
537/**
538 * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
539 * @id: %enum iwl_fw_dbg_trigger
540 * @vif_type: %enum iwl_fw_dbg_trigger_vif_type
541 * @stop_conf_ids: bitmap of configurations this trigger relates to.
542 * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
543 * to the currently running configuration is set, the data should be
544 * collected.
545 * @stop_delay: how many milliseconds to wait before collecting the data
546 * after the STOP trigger fires.
547 * @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both
548 * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
549 * configuration should be applied when the triggers kicks in.
550 * @occurrences: number of occurrences. 0 means the trigger will never fire.
551 */
552struct iwl_fw_dbg_trigger_tlv {
553 __le32 id;
554 __le32 vif_type;
555 __le32 stop_conf_ids;
556 __le32 stop_delay;
557 u8 mode;
558 u8 start_conf_id;
559 __le16 occurrences;
560 __le32 reserved[2];
561
562 u8 data[0];
563} __packed;
564
/* configuration id applied from the ALIVE notification onwards */
#define FW_DBG_START_FROM_ALIVE 0
/* size of the dbg_conf_tlv array in struct iwl_fw */
#define FW_DBG_CONF_MAX 32
/* marker for "no debug configuration" */
#define FW_DBG_INVALID 0xff

/**
 * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
 * @stop_consec_missed_bcon: stop recording if threshold is crossed.
 * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
 * @reserved2: reserved
 * @start_consec_missed_bcon: start recording if threshold is crossed.
 * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
 * @reserved1: reserved
 */
struct iwl_fw_dbg_trigger_missed_bcon {
	__le32 stop_consec_missed_bcon;
	__le32 stop_consec_missed_bcon_since_rx;
	__le32 reserved2[2];
	__le32 start_consec_missed_bcon;
	__le32 start_consec_missed_bcon_since_rx;
	__le32 reserved1[2];
} __packed;

/**
 * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
 * @cmds: the list of commands to trigger the collection on
 */
struct iwl_fw_dbg_trigger_cmd {
	/* NOTE(review): the "struct cmd" tag lands in the global struct
	 * namespace - confirm nothing else declares a "struct cmd".
	 */
	struct cmd {
		u8 cmd_id;
		u8 group_id;
	} __packed cmds[16];
} __packed;
597
598/**
599 * iwl_fw_dbg_trigger_stats - configures trigger for statistics
600 * @stop_offset: the offset of the value to be monitored
601 * @stop_threshold: the threshold above which to collect
602 * @start_offset: the offset of the value to be monitored
603 * @start_threshold: the threshold above which to start recording
604 */
605struct iwl_fw_dbg_trigger_stats {
606 __le32 stop_offset;
607 __le32 stop_threshold;
608 __le32 start_offset;
609 __le32 start_threshold;
610} __packed;
611
612/**
613 * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
614 * @rssi: RSSI value to trigger at
615 */
616struct iwl_fw_dbg_trigger_low_rssi {
617 __le32 rssi;
618} __packed;
619
620/**
621 * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events
622 * @stop_auth_denied: number of denied authentication to collect
623 * @stop_auth_timeout: number of authentication timeout to collect
624 * @stop_rx_deauth: number of Rx deauth before to collect
625 * @stop_tx_deauth: number of Tx deauth before to collect
626 * @stop_assoc_denied: number of denied association to collect
627 * @stop_assoc_timeout: number of association timeout to collect
628 * @stop_connection_loss: number of connection loss to collect
629 * @start_auth_denied: number of denied authentication to start recording
630 * @start_auth_timeout: number of authentication timeout to start recording
631 * @start_rx_deauth: number of Rx deauth to start recording
632 * @start_tx_deauth: number of Tx deauth to start recording
633 * @start_assoc_denied: number of denied association to start recording
634 * @start_assoc_timeout: number of association timeout to start recording
635 * @start_connection_loss: number of connection loss to start recording
636 */
637struct iwl_fw_dbg_trigger_mlme {
638 u8 stop_auth_denied;
639 u8 stop_auth_timeout;
640 u8 stop_rx_deauth;
641 u8 stop_tx_deauth;
642
643 u8 stop_assoc_denied;
644 u8 stop_assoc_timeout;
645 u8 stop_connection_loss;
646 u8 reserved;
647
648 u8 start_auth_denied;
649 u8 start_auth_timeout;
650 u8 start_rx_deauth;
651 u8 start_tx_deauth;
652
653 u8 start_assoc_denied;
654 u8 start_assoc_timeout;
655 u8 start_connection_loss;
656 u8 reserved2;
657} __packed;
658
659/**
660 * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
661 * @command_queue: timeout for the command queue in ms
662 * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
663 * @softap: timeout for the queues of a softAP in ms
664 * @p2p_go: timeout for the queues of a P2P GO in ms
665 * @p2p_client: timeout for the queues of a P2P client in ms
666 * @p2p_device: timeout for the queues of a P2P device in ms
667 * @ibss: timeout for the queues of an IBSS in ms
668 * @tdls: timeout for the queues of a TDLS station in ms
669 */
670struct iwl_fw_dbg_trigger_txq_timer {
671 __le32 command_queue;
672 __le32 bss;
673 __le32 softap;
674 __le32 p2p_go;
675 __le32 p2p_client;
676 __le32 p2p_device;
677 __le32 ibss;
678 __le32 tdls;
679 __le32 reserved[4];
680} __packed;
681
682/**
683 * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger
684 * time_Events: a list of tuples <id, action_bitmap>. The driver will issue a
685 * trigger each time a time event notification that relates to time event
686 * id with one of the actions in the bitmap is received and
687 * BIT(notif->status) is set in status_bitmap.
688 *
689 */
690struct iwl_fw_dbg_trigger_time_event {
691 struct {
692 __le32 id;
693 __le32 action_bitmap;
694 __le32 status_bitmap;
695 } __packed time_events[16];
696} __packed;
697
698/**
699 * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
700 * rx_ba_start: tid bitmap to configure on what tid the trigger should occur
701 * when an Rx BlockAck session is started.
702 * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
703 * when an Rx BlockAck session is stopped.
704 * tx_ba_start: tid bitmap to configure on what tid the trigger should occur
705 * when a Tx BlockAck session is started.
706 * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
707 * when a Tx BlockAck session is stopped.
708 * rx_bar: tid bitmap to configure on what tid the trigger should occur
709 * when a BAR is received (for a Tx BlockAck session).
710 * tx_bar: tid bitmap to configure on what tid the trigger should occur
711 * when a BAR is send (for an Rx BlocAck session).
712 * frame_timeout: tid bitmap to configure on what tid the trigger should occur
713 * when a frame times out in the reodering buffer.
714 */
715struct iwl_fw_dbg_trigger_ba {
716 __le16 rx_ba_start;
717 __le16 rx_ba_stop;
718 __le16 tx_ba_start;
719 __le16 tx_ba_stop;
720 __le16 rx_bar;
721 __le16 tx_bar;
722 __le16 frame_timeout;
723} __packed;
724
725/**
726 * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
727 * @id: conf id
728 * @usniffer: should the uSniffer image be used
729 * @num_of_hcmds: how many HCMDs to send are present here
730 * @hcmd: a variable length host command to be sent to apply the configuration.
731 * If there is more than one HCMD to send, they will appear one after the
732 * other and be sent in the order that they appear in.
733 * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up-to
734 * %FW_DBG_CONF_MAX configuration per run.
735 */
736struct iwl_fw_dbg_conf_tlv {
737 u8 id;
738 u8 usniffer;
739 u8 reserved;
740 u8 num_of_hcmds;
741 struct iwl_fw_dbg_conf_hcmd hcmd;
742} __packed;
743
744/**
745 * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
746 * @max_scan_cache_size: total space allocated for scan results (in bytes).
747 * @max_scan_buckets: maximum number of channel buckets.
748 * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
749 * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
750 * @max_scan_reporting_threshold: max possible report threshold. in percentage.
751 * @max_hotlist_aps: maximum number of entries for hotlist APs.
752 * @max_significant_change_aps: maximum number of entries for significant
753 * change APs.
754 * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
755 * hold.
756 */
757struct iwl_fw_gscan_capabilities {
758 __le32 max_scan_cache_size;
759 __le32 max_scan_buckets;
760 __le32 max_ap_cache_per_scan;
761 __le32 max_rssi_sample_size;
762 __le32 max_scan_reporting_threshold;
763 __le32 max_hotlist_aps;
764 __le32 max_significant_change_aps;
765 __le32 max_bssid_history_entries;
766} __packed;
767
768#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
new file mode 100644
index 000000000000..84ec0cefb62a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -0,0 +1,322 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __iwl_fw_h__
66#define __iwl_fw_h__
67#include <linux/types.h>
68#include <net/mac80211.h>
69
70#include "iwl-fw-file.h"
71#include "iwl-fw-error-dump.h"
72
73/**
74 * enum iwl_ucode_type
75 *
76 * The type of ucode.
77 *
78 * @IWL_UCODE_REGULAR: Normal runtime ucode
79 * @IWL_UCODE_INIT: Initial ucode
80 * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
81 * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image
82 */
83enum iwl_ucode_type {
84 IWL_UCODE_REGULAR,
85 IWL_UCODE_INIT,
86 IWL_UCODE_WOWLAN,
87 IWL_UCODE_REGULAR_USNIFFER,
88 IWL_UCODE_TYPE_MAX,
89};
90
91/*
92 * enumeration of ucode section.
93 * This enumeration is used directly for older firmware (before 16.0).
94 * For new firmware, there can be up to 4 sections (see below) but the
95 * first one packaged into the firmware file is the DATA section and
96 * some debugging code accesses that.
97 */
98enum iwl_ucode_sec {
99 IWL_UCODE_SECTION_DATA,
100 IWL_UCODE_SECTION_INST,
101};
102
/**
 * struct iwl_ucode_capabilities - capabilities advertised by the ucode file
 * @max_probe_length: maximum supported probe request length
 * @n_scan_channels: number of channels supported per scan request
 *	(NOTE(review): inferred from the name - confirm against the parser)
 * @standard_phy_calibration_size: size of the standard PHY calibration
 * @flags: capability flags (presumably IWL_UCODE_TLV_FLAGS_* - verify)
 * @_api: bitmap of TLV API flags; query through fw_has_api()
 * @_capa: bitmap of TLV capability flags; query through fw_has_capa()
 */
struct iwl_ucode_capabilities {
	u32 max_probe_length;
	u32 n_scan_channels;
	u32 standard_phy_calibration_size;
	u32 flags;
	unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
	unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
};

/* Test whether TLV API flag @api was advertised by the ucode file. */
static inline bool
fw_has_api(const struct iwl_ucode_capabilities *capabilities,
	   iwl_ucode_tlv_api_t api)
{
	return test_bit((__force long)api, capabilities->_api);
}

/* Test whether TLV capability flag @capa was advertised by the ucode file. */
static inline bool
fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
	    iwl_ucode_tlv_capa_t capa)
{
	return test_bit((__force long)capa, capabilities->_capa);
}
125
/* one for each uCode image (inst/data, init/runtime/wowlan) */
struct fw_desc {
	const void *data;	/* vmalloc'ed data */
	u32 len;		/* size in bytes */
	u32 offset;		/* offset in the device */
};

/* One complete ucode image: its sections plus per-image attributes. */
struct fw_img {
	struct fw_desc sec[IWL_UCODE_SECTION_MAX];
	bool is_dual_cpus;	/* NOTE(review): presumably set for dual-CPU
				 * device images - confirm against the parser
				 */
	u32 paging_mem_size;	/* total paging memory size for this image */
};

/* An address/size pair describing a device memory region. */
struct iwl_sf_region {
	u32 addr;
	u32 size;
};
143
144/*
145 * Block paging calculations
146 */
147#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
148#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
149#define PAGE_PER_GROUP_2_EXP_SIZE 3
150/* 8 pages per group */
151#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
152/* don't change, support only 32KB size */
153#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
154/* 32K == 2^15 */
155#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
156
157/*
158 * Image paging calculations
159 */
160#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
161/* 2^5 == 32 blocks per image */
162#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
163/* maximum image size 1024KB */
164#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
165
166/* Virtual address signature */
167#define PAGING_ADDR_SIG 0xAA000000
168
169#define PAGING_CMD_IS_SECURED BIT(9)
170#define PAGING_CMD_IS_ENABLED BIT(8)
171#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
172#define PAGING_TLV_SECURE_MASK 1
173
174/**
175 * struct iwl_fw_paging
176 * @fw_paging_phys: page phy pointer
177 * @fw_paging_block: pointer to the allocated block
178 * @fw_paging_size: page size
179 */
180struct iwl_fw_paging {
181 dma_addr_t fw_paging_phys;
182 struct page *fw_paging_block;
183 u32 fw_paging_size;
184};
185
186/**
187 * struct iwl_fw_cscheme_list - a cipher scheme list
188 * @size: a number of entries
189 * @cs: cipher scheme entries
190 */
191struct iwl_fw_cscheme_list {
192 u8 size;
193 struct iwl_fw_cipher_scheme cs[];
194} __packed;
195
196/**
197 * struct iwl_gscan_capabilities - gscan capabilities supported by FW
198 * @max_scan_cache_size: total space allocated for scan results (in bytes).
199 * @max_scan_buckets: maximum number of channel buckets.
200 * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
201 * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
202 * @max_scan_reporting_threshold: max possible report threshold. in percentage.
203 * @max_hotlist_aps: maximum number of entries for hotlist APs.
204 * @max_significant_change_aps: maximum number of entries for significant
205 * change APs.
206 * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
207 * hold.
208 */
209struct iwl_gscan_capabilities {
210 u32 max_scan_cache_size;
211 u32 max_scan_buckets;
212 u32 max_ap_cache_per_scan;
213 u32 max_rssi_sample_size;
214 u32 max_scan_reporting_threshold;
215 u32 max_hotlist_aps;
216 u32 max_significant_change_aps;
217 u32 max_bssid_history_entries;
218};
219
220/**
221 * struct iwl_fw - variables associated with the firmware
222 *
223 * @ucode_ver: ucode version from the ucode file
224 * @fw_version: firmware version string
225 * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan.
226 * @ucode_capa: capabilities parsed from the ucode file.
227 * @enhance_sensitivity_table: device can do enhanced sensitivity.
228 * @init_evtlog_ptr: event log offset for init ucode.
229 * @init_evtlog_size: event log size for init ucode.
230 * @init_errlog_ptr: error log offfset for init ucode.
231 * @inst_evtlog_ptr: event log offset for runtime ucode.
232 * @inst_evtlog_size: event log size for runtime ucode.
233 * @inst_errlog_ptr: error log offfset for runtime ucode.
234 * @mvm_fw: indicates this is MVM firmware
235 * @cipher_scheme: optional external cipher scheme.
236 * @human_readable: human readable version
237 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
238 * we get the ALIVE from the uCode
239 * @dbg_dest_tlv: points to the destination TLV for debug
240 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
241 * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
242 * @dbg_trigger_tlv: array of pointers to triggers TLVs
243 * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
244 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
245 */
246struct iwl_fw {
247 u32 ucode_ver;
248
249 char fw_version[ETHTOOL_FWVERS_LEN];
250
251 /* ucode images */
252 struct fw_img img[IWL_UCODE_TYPE_MAX];
253
254 struct iwl_ucode_capabilities ucode_capa;
255 bool enhance_sensitivity_table;
256
257 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
258 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
259
260 struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
261 u32 phy_config;
262 u8 valid_tx_ant;
263 u8 valid_rx_ant;
264
265 bool mvm_fw;
266
267 struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
268 u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
269
270 u32 sdio_adma_addr;
271
272 struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
273 struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
274 size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
275 struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
276 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
277 u8 dbg_dest_reg_num;
278 struct iwl_gscan_capabilities gscan_capa;
279};
280
281static inline const char *get_fw_dbg_mode_string(int mode)
282{
283 switch (mode) {
284 case SMEM_MODE:
285 return "SMEM";
286 case EXTERNAL_MODE:
287 return "EXTERNAL_DRAM";
288 case MARBH_MODE:
289 return "MARBH";
290 case MIPI_MODE:
291 return "MIPI";
292 default:
293 return "UNKNOWN";
294 }
295}
296
297static inline bool
298iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
299{
300 const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
301
302 if (!conf_tlv)
303 return false;
304
305 return conf_tlv->usniffer;
306}
307
/*
 * iwl_fw_dbg_trigger_enabled - true if trigger TLV @id was loaded from
 * the firmware file. unlikely() because triggers are normally absent.
 */
#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
	void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \
	unlikely(__dbg_trigger); \
})

/* Return the trigger TLV for @id, or NULL (with a WARN) when out of range. */
static inline struct iwl_fw_dbg_trigger_tlv*
iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, u8 id)
{
	if (WARN_ON(id >= ARRAY_SIZE(fw->dbg_trigger_tlv)))
		return NULL;

	return fw->dbg_trigger_tlv[id];
}
321
322#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
new file mode 100644
index 000000000000..0bd9d4aad0c0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -0,0 +1,289 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28#include <linux/delay.h>
29#include <linux/device.h>
30#include <linux/export.h>
31
32#include "iwl-drv.h"
33#include "iwl-io.h"
34#include "iwl-csr.h"
35#include "iwl-debug.h"
36#include "iwl-prph.h"
37#include "iwl-fh.h"
38
/* Write a byte to a device register, emitting a tracepoint first. */
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
	iwl_trans_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_write8);

/* Write a 32-bit value to a device register, emitting a tracepoint first. */
void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
	iwl_trans_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_write32);

/* Read a 32-bit device register, tracing the value that was read. */
u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
	u32 val = iwl_trans_read32(trans, ofs);

	trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
	return val;
}
IWL_EXPORT_SYMBOL(iwl_read32);
61
62#define IWL_POLL_INTERVAL 10 /* microseconds */
63
64int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
65 u32 bits, u32 mask, int timeout)
66{
67 int t = 0;
68
69 do {
70 if ((iwl_read32(trans, addr) & mask) == (bits & mask))
71 return t;
72 udelay(IWL_POLL_INTERVAL);
73 t += IWL_POLL_INTERVAL;
74 } while (t < timeout);
75
76 return -ETIMEDOUT;
77}
78IWL_EXPORT_SYMBOL(iwl_poll_bit);
79
80u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
81{
82 u32 value = 0x5a5a5a5a;
83 unsigned long flags;
84 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
85 value = iwl_read32(trans, reg);
86 iwl_trans_release_nic_access(trans, &flags);
87 }
88
89 return value;
90}
91IWL_EXPORT_SYMBOL(iwl_read_direct32);
92
/*
 * Write a device register under a temporary NIC-access grab.
 * The write is silently dropped if NIC access cannot be acquired.
 */
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
	unsigned long flags;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, reg, value);
		iwl_trans_release_nic_access(trans, &flags);
	}
}
IWL_EXPORT_SYMBOL(iwl_write_direct32);
103
104int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
105 int timeout)
106{
107 int t = 0;
108
109 do {
110 if ((iwl_read_direct32(trans, addr) & mask) == mask)
111 return t;
112 udelay(IWL_POLL_INTERVAL);
113 t += IWL_POLL_INTERVAL;
114 } while (t < timeout);
115
116 return -ETIMEDOUT;
117}
118IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
119
120u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
121{
122 u32 val = iwl_trans_read_prph(trans, ofs);
123 trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
124 return val;
125}
126
127void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
128{
129 trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
130 iwl_trans_write_prph(trans, ofs, val);
131}
132
/*
 * Read a periphery register, grabbing NIC access around the low-level
 * read. Returns the poison value 0x5a5a5a5a when NIC access cannot be
 * acquired.
 */
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
	unsigned long flags;
	u32 val = 0x5a5a5a5a;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		val = __iwl_read_prph(trans, ofs);
		iwl_trans_release_nic_access(trans, &flags);
	}
	return val;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);

/*
 * Write a periphery register, grabbing NIC access around the low-level
 * write. The write is silently dropped if access cannot be acquired.
 */
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
	unsigned long flags;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		__iwl_write_prph(trans, ofs, val);
		iwl_trans_release_nic_access(trans, &flags);
	}
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
156
157int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
158 u32 bits, u32 mask, int timeout)
159{
160 int t = 0;
161
162 do {
163 if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
164 return t;
165 udelay(IWL_POLL_INTERVAL);
166 t += IWL_POLL_INTERVAL;
167 } while (t < timeout);
168
169 return -ETIMEDOUT;
170}
171
/*
 * OR @mask into a periphery register (read-modify-write under NIC
 * access); skipped silently if NIC access cannot be acquired.
 */
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
	unsigned long flags;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		__iwl_write_prph(trans, ofs,
				 __iwl_read_prph(trans, ofs) | mask);
		iwl_trans_release_nic_access(trans, &flags);
	}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_prph);

/*
 * Write (reg & @mask) | @bits to a periphery register under NIC access;
 * skipped silently if NIC access cannot be acquired.
 */
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
			    u32 bits, u32 mask)
{
	unsigned long flags;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		__iwl_write_prph(trans, ofs,
				 (__iwl_read_prph(trans, ofs) & mask) | bits);
		iwl_trans_release_nic_access(trans, &flags);
	}
}
IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
196
197void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
198{
199 unsigned long flags;
200 u32 val;
201
202 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
203 val = __iwl_read_prph(trans, ofs);
204 __iwl_write_prph(trans, ofs, (val & ~mask));
205 iwl_trans_release_nic_access(trans, &flags);
206 }
207}
208IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
209
/*
 * Force the firmware to take an NMI.  Pre-8000 family devices are
 * kicked through DEVICE_SET_NMI_REG (driver value, then HW value);
 * 8000-family devices have the dedicated 8000 NMI register written
 * first, then DEVICE_SET_NMI_REG with the driver value.
 */
void iwl_force_nmi(struct iwl_trans *trans)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		iwl_write_prph(trans, DEVICE_SET_NMI_REG,
			       DEVICE_SET_NMI_VAL_DRV);
		iwl_write_prph(trans, DEVICE_SET_NMI_REG,
			       DEVICE_SET_NMI_VAL_HW);
	} else {
		iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG,
			       DEVICE_SET_NMI_8000_VAL);
		iwl_write_prph(trans, DEVICE_SET_NMI_REG,
			       DEVICE_SET_NMI_VAL_DRV);
	}
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
225
/* Map a flow-handler register address to its symbolic name for logging. */
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
244
/*
 * Dump the flow-handler (FH) register set.
 *
 * When @buf is non-NULL and CONFIG_IWLWIFI_DEBUGFS is enabled, a buffer
 * is kmalloc'ed (the caller owns and must kfree it), filled with the
 * formatted dump, and the number of bytes written is returned (or
 * -ENOMEM on allocation failure).  Otherwise the registers are printed
 * to the error log and 0 is returned.
 */
int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		/* 48 bytes per formatted register line plus the header */
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
				" %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
new file mode 100644
index 000000000000..501d0560c061
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -0,0 +1,73 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_io_h__
30#define __iwl_io_h__
31
32#include "iwl-devtrace.h"
33#include "iwl-trans.h"
34
35void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val);
36void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val);
37u32 iwl_read32(struct iwl_trans *trans, u32 ofs);
38
/* Set the bits of @mask in a device register via the transport's RMW op. */
static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
	iwl_trans_set_bits_mask(trans, reg, mask, mask);
}

/* Clear the bits of @mask in a device register (write 0 under the mask). */
static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
	iwl_trans_set_bits_mask(trans, reg, mask, 0);
}
48
49int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
50 u32 bits, u32 mask, int timeout);
51int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
52 int timeout);
53
54u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
55void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
56
57
58u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs);
59u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
60void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
61void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
62int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
63 u32 bits, u32 mask, int timeout);
64void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
65void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
66 u32 bits, u32 mask);
67void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
68void iwl_force_nmi(struct iwl_trans *trans);
69
70/* Error handling */
71int iwl_dump_fh(struct iwl_trans *trans, char **buf);
72
73#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
new file mode 100644
index 000000000000..ac2b90df8413
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -0,0 +1,129 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
#ifndef __iwl_modparams_h__
#define __iwl_modparams_h__

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <net/mac80211.h>

/* Module parameters shared by all iwlwifi opmodes */
extern struct iwl_mod_params iwlwifi_mod_params;

/* Values selectable through the "power_level" module parameter */
enum iwl_power_level {
	IWL_POWER_INDEX_1,
	IWL_POWER_INDEX_2,
	IWL_POWER_INDEX_3,
	IWL_POWER_INDEX_4,
	IWL_POWER_INDEX_5,
	IWL_POWER_NUM
};

/*
 * Bit values for the "disable_11n" module parameter.  Note that
 * IWL_ENABLE_HT_TXAGG force-enables TX aggregation rather than
 * disabling anything.
 */
enum iwl_disable_11n {
	IWL_DISABLE_HT_ALL = BIT(0),
	IWL_DISABLE_HT_TXAGG = BIT(1),
	IWL_DISABLE_HT_RXAGG = BIT(2),
	IWL_ENABLE_HT_TXAGG = BIT(3),
};

/**
 * struct iwl_mod_params
 *
 * Holds the module parameters
 *
 * @sw_crypto: using hardware encryption, default = 0
 * @disable_11n: disable 11n capabilities, default = 0,
 *	use IWL_[DIS,EN]ABLE_HT_* constants
 * @amsdu_size_8K: enable 8K amsdu size, default = 0
 * @restart_fw: restart firmware, default = 1
 * @bt_coex_active: enable bt coex, default = true
 * @led_mode: system default, default = 0
 * @power_save: enable power save, default = false
 * @power_level: power level, default = 1
 * @debug_level: levels are IWL_DL_*
 * @ant_coupling: antenna coupling in dB, default = 0
 * @nvm_file: specifies an external NVM file to use instead of the
 *	device's own NVM, default = NULL
 * @uapsd_disable: disable U-APSD
 * @d0i3_disable: disable d0i3, default = 1,
 * @lar_disable: disable LAR (regulatory), default = 0
 * @fw_monitor: allow to use firmware monitor
 */
struct iwl_mod_params {
	int sw_crypto;
	unsigned int disable_11n;
	int amsdu_size_8K;
	bool restart_fw;
	bool bt_coex_active;
	int led_mode;
	bool power_save;
	int power_level;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 debug_level;
#endif
	int ant_coupling;
	char *nvm_file;
	bool uapsd_disable;
	bool d0i3_disable;
	bool lar_disable;
	bool fw_monitor;
};

#endif /* __iwl_modparams_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
new file mode 100644
index 000000000000..6caf2affbbb5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
@@ -0,0 +1,193 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Deutschland GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
46 * distribution.
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *
63 *****************************************************************************/
64#include <linux/sched.h>
65#include <linux/export.h>
66
67#include "iwl-drv.h"
68#include "iwl-notif-wait.h"
69
70
/* Initialize the notification-wait machinery embedded in @notif_wait */
void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
{
	spin_lock_init(&notif_wait->notif_wait_lock);
	INIT_LIST_HEAD(&notif_wait->notif_waits);
	init_waitqueue_head(&notif_wait->notif_waitq);
}
IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
78
/*
 * Match an RX packet against all pending waiters.
 *
 * Every waiter that lists this packet's wide command ID and whose filter
 * function (if any) returns true is marked triggered; all triggered
 * waiters are woken with a single wake_up_all() after the lock is dropped.
 *
 * NOTE(review): this path takes plain spin_lock() while the add/remove
 * paths use spin_lock_bh() - presumably this function is only ever called
 * from BH (RX) context; confirm with the callers.
 */
void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt)
{
	bool triggered = false;

	if (!list_empty(&notif_wait->notif_waits)) {
		struct iwl_notification_wait *w;

		spin_lock(&notif_wait->notif_wait_lock);
		list_for_each_entry(w, &notif_wait->notif_waits, list) {
			int i;
			bool found = false;

			/*
			 * If it already finished (triggered) or has been
			 * aborted then don't evaluate it again to avoid races,
			 * Otherwise the function could be called again even
			 * though it returned true before
			 */
			if (w->triggered || w->aborted)
				continue;

			/* does this waiter care about this command ID? */
			for (i = 0; i < w->n_cmds; i++) {
				if (w->cmds[i] ==
				    WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
					found = true;
					break;
				}
			}
			if (!found)
				continue;

			/* no filter function means any listed cmd triggers */
			if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
				w->triggered = true;
				triggered = true;
			}
		}
		spin_unlock(&notif_wait->notif_wait_lock);

	}

	/* wake outside the lock; waiters re-check their own flags */
	if (triggered)
		wake_up_all(&notif_wait->notif_waitq);
}
IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
124
125void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
126{
127 struct iwl_notification_wait *wait_entry;
128
129 spin_lock(&notif_wait->notif_wait_lock);
130 list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
131 wait_entry->aborted = true;
132 spin_unlock(&notif_wait->notif_wait_lock);
133
134 wake_up_all(&notif_wait->notif_waitq);
135}
136IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
137
/*
 * Register @wait_entry (typically declared on the caller's stack) to wait
 * for any of the @n_cmds command IDs in @cmds.  @fn, if non-NULL, filters
 * matching packets (see iwl_notification_wait_notify()); @fn_data is
 * passed through to it.  @n_cmds is clamped to MAX_NOTIF_CMDS (with a
 * WARN_ON) so the memcpy below cannot overflow wait_entry->cmds.
 */
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
			   struct iwl_notification_wait *wait_entry,
			   const u16 *cmds, int n_cmds,
			   bool (*fn)(struct iwl_notif_wait_data *notif_wait,
				      struct iwl_rx_packet *pkt, void *data),
			   void *fn_data)
{
	if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
		n_cmds = MAX_NOTIF_CMDS;

	wait_entry->fn = fn;
	wait_entry->fn_data = fn_data;
	wait_entry->n_cmds = n_cmds;
	memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
	wait_entry->triggered = false;
	wait_entry->aborted = false;

	/* publish the entry only after it is fully initialized */
	spin_lock_bh(&notif_wait->notif_wait_lock);
	list_add(&wait_entry->list, &notif_wait->notif_waits);
	spin_unlock_bh(&notif_wait->notif_wait_lock);
}
IWL_EXPORT_SYMBOL(iwl_init_notification_wait);
161
/*
 * Sleep until @wait_entry is triggered or aborted, or until @timeout
 * (in jiffies) elapses.  The entry is always removed from the list
 * before returning, regardless of outcome.
 *
 * Return: 0 if the notification arrived, -EIO if the wait was aborted,
 * -ETIMEDOUT if the timeout expired first.
 */
int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
			  struct iwl_notification_wait *wait_entry,
			  unsigned long timeout)
{
	int ret;

	ret = wait_event_timeout(notif_wait->notif_waitq,
				 wait_entry->triggered || wait_entry->aborted,
				 timeout);

	spin_lock_bh(&notif_wait->notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&notif_wait->notif_wait_lock);

	/* abort takes precedence over a concurrent trigger */
	if (wait_entry->aborted)
		return -EIO;

	/* return value is always >= 0; 0 means the timeout elapsed */
	if (ret <= 0)
		return -ETIMEDOUT;
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_wait_notification);
185
/* Unregister @wait_entry without waiting for it (e.g. on an error path) */
void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_notification_wait *wait_entry)
{
	spin_lock_bh(&notif_wait->notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&notif_wait->notif_wait_lock);
}
IWL_EXPORT_SYMBOL(iwl_remove_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
new file mode 100644
index 000000000000..dbe8234521de
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
@@ -0,0 +1,139 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Deutschland GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
 45 *    the documentation and/or other materials provided with the
 *    distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_notif_wait_h__
64#define __iwl_notif_wait_h__
65
66#include <linux/wait.h>
67
68#include "iwl-trans.h"
69
/**
 * struct iwl_notif_wait_data - shared state for notification waiting
 * @notif_waits: list of pending struct iwl_notification_wait entries
 * @notif_wait_lock: protects @notif_waits
 * @notif_waitq: waitqueue that waiters sleep on
 */
struct iwl_notif_wait_data {
	struct list_head notif_waits;
	spinlock_t notif_wait_lock;
	wait_queue_head_t notif_waitq;
};
75
76#define MAX_NOTIF_CMDS 5
77
/**
 * struct iwl_notification_wait - notification wait entry
 * @list: list head for global list
 * @fn: Function called with the notification. If the function
 *	returns true, the wait is over, if it returns false then
 *	the waiter stays blocked. If no function is given, any
 *	of the listed commands will unblock the waiter.
 * @fn_data: opaque pointer passed through to @fn
 * @cmds: command IDs
 * @n_cmds: number of command IDs
 * @triggered: waiter should be woken up
 * @aborted: wait was aborted
 *
 * This structure is not used directly, to wait for a
 * notification declare it on the stack, and call
 * iwl_init_notification_wait() with appropriate
 * parameters. Then do whatever will cause the ucode
 * to notify the driver, and to wait for that then
 * call iwl_wait_notification().
 *
 * Each notification is one-shot. If at some point we
 * need to support multi-shot notifications (which
 * can't be allocated on the stack) we need to modify
 * the code for them.
 */
struct iwl_notification_wait {
	struct list_head list;

	bool (*fn)(struct iwl_notif_wait_data *notif_data,
		   struct iwl_rx_packet *pkt, void *data);
	void *fn_data;

	u16 cmds[MAX_NOTIF_CMDS];
	u8 n_cmds;
	bool triggered, aborted;
};
113
114
115/* caller functions */
116void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
117void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
118 struct iwl_rx_packet *pkt);
119void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
120
121/* user functions */
122void __acquires(wait_entry)
123iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
124 struct iwl_notification_wait *wait_entry,
125 const u16 *cmds, int n_cmds,
126 bool (*fn)(struct iwl_notif_wait_data *notif_data,
127 struct iwl_rx_packet *pkt, void *data),
128 void *fn_data);
129
130int __must_check __releases(wait_entry)
131iwl_wait_notification(struct iwl_notif_wait_data *notif_data,
132 struct iwl_notification_wait *wait_entry,
133 unsigned long timeout);
134
135void __releases(wait_entry)
136iwl_remove_notification(struct iwl_notif_wait_data *notif_data,
137 struct iwl_notification_wait *wait_entry);
138
139#endif /* __iwl_notif_wait_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
new file mode 100644
index 000000000000..d82984912e04
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -0,0 +1,844 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64#include <linux/types.h>
65#include <linux/slab.h>
66#include <linux/export.h>
67#include <linux/etherdevice.h>
68#include <linux/pci.h>
69#include "iwl-drv.h"
70#include "iwl-modparams.h"
71#include "iwl-nvm-parse.h"
72
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	NVM_SW_SECTION = 0x1C0,
	NVM_VERSION = 0,
	RADIO_CFG = 1,
	SKU = 2,
	N_HW_ADDRS = 3,
	NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	NVM_CALIB_SECTION = 0x2B8,
	XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
};

/*
 * Same layout information, family 8000 devices.
 * NOTE(review): RADIO_CFG_FAMILY_8000 is 0, the same word as
 * NVM_VERSION_FAMILY_8000 - presumably intentional for this family's NVM
 * layout; confirm against the family 8000 NVM specification.
 */
enum family_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	HW_ADDR0_WFPM_FAMILY_8000 = 0x12,
	HW_ADDR1_WFPM_FAMILY_8000 = 0x16,
	HW_ADDR0_PCIE_FAMILY_8000 = 0x8A,
	HW_ADDR1_PCIE_FAMILY_8000 = 0x8E,
	MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
	NVM_VERSION_FAMILY_8000 = 0,
	RADIO_CFG_FAMILY_8000 = 0,
	SKU_FAMILY_8000 = 2,
	N_HW_ADDRS_FAMILY_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	NVM_CHANNELS_FAMILY_8000 = 0,
	NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
	NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
	NVM_LAR_ENABLED_FAMILY_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
	XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000
};

/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	NVM_SKU_CAP_BAND_24GHZ = BIT(0),
	NVM_SKU_CAP_BAND_52GHZ = BIT(1),
	NVM_SKU_CAP_11N_ENABLE = BIT(2),
	NVM_SKU_CAP_11AC_ENABLE = BIT(3),
	NVM_SKU_CAP_MIMO_DISABLE = BIT(5),
};
125
126/*
127 * These are the channel numbers in the order that they are stored in the NVM
128 */
129static const u8 iwl_nvm_channels[] = {
130 /* 2.4 GHz */
131 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
132 /* 5 GHz */
133 36, 40, 44 , 48, 52, 56, 60, 64,
134 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
135 149, 153, 157, 161, 165
136};
137
/* Channel order in the NVM for family 8000 devices (extended 5 GHz set) */
static const u8 iwl_nvm_channels_family_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
146
147#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
148#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
149#define NUM_2GHZ_CHANNELS 14
150#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
151#define FIRST_2GHZ_HT_MINUS 5
152#define LAST_2GHZ_HT_PLUS 9
153#define LAST_5GHZ_HT 165
154#define LAST_5GHZ_HT_FAMILY_8000 181
155#define N_HW_ADDR_MASK 0xF
156
/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
	/* .bitrate is in units of 100 Kbps, hence the "* 10" */
	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
	{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
	{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
	{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
	{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
	{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
	{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
	{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
	{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
};
/* the 5 GHz band skips the first four (1/2/5.5/11 Mbps) entries */
#define RATES_24_OFFS	0
#define N_RATES_24	ARRAY_SIZE(iwl_cfg80211_rates)
#define RATES_52_OFFS	4
#define N_RATES_52	(N_RATES_24 - RATES_52_OFFS)
179
/**
 * enum iwl_nvm_channel_flags - channel flags in NVM
 * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @NVM_CHANNEL_ACTIVE: active scanning allowed
 * @NVM_CHANNEL_RADAR: radar detection required
 * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
 * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
 *	on same channel on 2.4 or same UNII band on 5.2
 * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * NOTE(review): BIT(2) and BIT(7) are skipped here - presumably reserved
 * in the NVM layout; confirm against the NVM specification.
 */
enum iwl_nvm_channel_flags {
	NVM_CHANNEL_VALID = BIT(0),
	NVM_CHANNEL_IBSS = BIT(1),
	NVM_CHANNEL_ACTIVE = BIT(3),
	NVM_CHANNEL_RADAR = BIT(4),
	NVM_CHANNEL_INDOOR_ONLY = BIT(5),
	NVM_CHANNEL_GO_CONCURRENT = BIT(6),
	NVM_CHANNEL_WIDE = BIT(8),
	NVM_CHANNEL_40MHZ = BIT(9),
	NVM_CHANNEL_80MHZ = BIT(10),
	NVM_CHANNEL_160MHZ = BIT(11),
};

/* expands to the flag name (for debug printing) if set in ch_flags */
#define CHECK_AND_PRINT_I(x)	\
	((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
209
/*
 * iwl_get_channel_flags - compute cfg80211 channel flags from NVM flags
 * @ch_num: channel number
 * @ch_idx: index of this channel in the NVM channel table
 * @is_5ghz: true for 5.2 GHz channels
 * @nvm_flags: NVM_CHANNEL_* flags read from the NVM
 * @cfg: device configuration
 *
 * Starts from "no HT40 at all" and clears the HT40+/- restrictions where
 * the NVM allows 40 MHz operation, then maps the remaining NVM bits onto
 * their IEEE80211_CHAN_* counterparts.
 */
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
				 u16 nvm_flags, const struct iwl_cfg *cfg)
{
	u32 flags = IEEE80211_CHAN_NO_HT40;
	u32 last_5ghz_ht = LAST_5GHZ_HT;

	/* family 8000 NVMs list channels up to 181 */
	if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;

	if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
		/* 2.4 GHz: the secondary channel must stay inside the band */
		if (ch_num <= LAST_2GHZ_HT_PLUS)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		if (ch_num >= FIRST_2GHZ_HT_MINUS)
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	} else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
		/*
		 * 5 GHz: channels alternate between HT40+ and HT40-
		 * in the order they appear in the NVM table
		 */
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		else
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	}
	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= IEEE80211_CHAN_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= IEEE80211_CHAN_NO_160MHZ;

	/* either missing bit (IBSS or active scan) forbids initiating radiation */
	if (!(nvm_flags & NVM_CHANNEL_IBSS))
		flags |= IEEE80211_CHAN_NO_IR;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= IEEE80211_CHAN_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= IEEE80211_CHAN_RADAR;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= IEEE80211_CHAN_INDOOR_ONLY;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & IEEE80211_CHAN_NO_IR))
		flags |= IEEE80211_CHAN_IR_CONCURRENT;

	return flags;
}
256
/*
 * iwl_init_channel_map - fill data->channels from the NVM channel section
 * @dev: device (for debug logging)
 * @cfg: device configuration (selects the channel table)
 * @data: parsed NVM data; channels[] is filled in here
 * @nvm_ch_flags: per-channel flag words from the NVM (little endian)
 * @lar_supported: LAR (location-aware regulatory) is available
 *
 * Return: the number of channels actually added to data->channels.
 */
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
				struct iwl_nvm_data *data,
				const __le16 * const nvm_ch_flags,
				bool lar_supported)
{
	int ch_idx;
	int n_channels = 0;
	struct ieee80211_channel *channel;
	u16 ch_flags;
	bool is_5ghz;
	int num_of_ch, num_2ghz_channels;
	const u8 *nvm_chan;

	/* pick the channel table matching the device family */
	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		num_of_ch = IWL_NUM_CHANNELS;
		nvm_chan = &iwl_nvm_channels[0];
		num_2ghz_channels = NUM_2GHZ_CHANNELS;
	} else {
		num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
		nvm_chan = &iwl_nvm_channels_family_8000[0];
		num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
	}

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);

		/* skip 5.2 GHz channels if the SKU doesn't support the band */
		if (ch_idx >= num_2ghz_channels &&
		    !data->sku_cap_band_52GHz_enable)
			continue;

		if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
			/*
			 * Channels might become valid later if lar is
			 * supported, hence we still want to add them to
			 * the list of supported channels to cfg80211.
			 */
			IWL_DEBUG_EEPROM(dev,
					 "Ch. %d Flags %x [%sGHz] - No traffic\n",
					 nvm_chan[ch_idx],
					 ch_flags,
					 (ch_idx >= num_2ghz_channels) ?
					 "5.2" : "2.4");
			continue;
		}

		channel = &data->channels[n_channels];
		n_channels++;

		channel->hw_value = nvm_chan[ch_idx];
		channel->band = (ch_idx < num_2ghz_channels) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
		channel->center_freq =
			ieee80211_channel_to_frequency(
				channel->hw_value, channel->band);

		/* Initialize regulatory-based run-time data */

		/*
		 * Default value - highest tx power value. max_power
		 * is not used in mvm, and is used for backwards compatibility
		 */
		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
		is_5ghz = channel->band == IEEE80211_BAND_5GHZ;

		/* don't put limitations in case we're using LAR */
		if (!lar_supported)
			channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
							       ch_idx, is_5ghz,
							       ch_flags, cfg);
		else
			channel->flags = 0;

		IWL_DEBUG_EEPROM(dev,
				 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
				 channel->hw_value,
				 is_5ghz ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(INDOOR_ONLY),
				 CHECK_AND_PRINT_I(GO_CONCURRENT),
				 ch_flags,
				 channel->max_power,
				 ((ch_flags & NVM_CHANNEL_IBSS) &&
				  !(ch_flags & NVM_CHANNEL_RADAR))
					? "" : "not ");
	}

	return n_channels;
}
349
/*
 * iwl_init_vht_hw_capab - build the VHT capabilities advertised to mac80211
 * @cfg: device configuration
 * @data: parsed NVM data (SKU capabilities)
 * @vht_cap: capability structure to fill
 * @tx_chains: bitmap of TX antennas
 * @rx_chains: bitmap of RX antennas
 */
static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
				  struct iwl_nvm_data *data,
				  struct ieee80211_sta_vht_cap *vht_cap,
				  u8 tx_chains, u8 rx_chains)
{
	int num_rx_ants = num_of_ant(rx_chains);
	int num_tx_ants = num_of_ant(tx_chains);
	/* cfg may override the max A-MPDU length exponent; 1024K otherwise */
	unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
					   IEEE80211_VHT_MAX_AMPDU_1024K);

	vht_cap->vht_supported = true;

	vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
		       IEEE80211_VHT_CAP_RXSTBC_1 |
		       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
		       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
		       max_ampdu_exponent <<
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	if (cfg->ht_params->ldpc)
		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;

	/* the SKU may force single-stream operation regardless of chains */
	if (data->sku_cap_mimo_disabled) {
		num_rx_ants = 1;
		num_tx_ants = 1;
	}

	if (num_tx_ants > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;

	if (iwlwifi_mod_params.amsdu_size_8K)
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;

	/* MCS 0-9 on the first two streams, everything else unsupported */
	vht_cap->vht_mcs.rx_mcs_map =
		cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
			    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);

	if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
		vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
		/* this works because NOT_SUPPORTED == 3 */
		vht_cap->vht_mcs.rx_mcs_map |=
			cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
	}

	vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
404
/*
 * Initialize both supported bands (2.4 and 5.2 GHz): channel lists,
 * bitrates and HT capabilities, plus VHT if the SKU enables 11ac.
 * Logs an error if not every parsed channel was placed in a band.
 */
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
			    struct iwl_nvm_data *data,
			    const __le16 *ch_section,
			    u8 tx_chains, u8 rx_chains, bool lar_supported)
{
	int n_channels;
	int n_used = 0;
	struct ieee80211_supported_band *sband;

	/* the channel list lives at a family-dependent offset in ch_section */
	if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
		n_channels = iwl_init_channel_map(
				dev, cfg, data,
				&ch_section[NVM_CHANNELS], lar_supported);
	else
		n_channels = iwl_init_channel_map(
				dev, cfg, data,
				&ch_section[NVM_CHANNELS_FAMILY_8000],
				lar_supported);

	/* 2.4 GHz band */
	sband = &data->bands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
	sband->n_bitrates = N_RATES_24;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  IEEE80211_BAND_2GHZ);
	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
			     tx_chains, rx_chains);

	/* 5.2 GHz band (the only one that can carry VHT) */
	sband = &data->bands[IEEE80211_BAND_5GHZ];
	sband->band = IEEE80211_BAND_5GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
	sband->n_bitrates = N_RATES_52;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  IEEE80211_BAND_5GHZ);
	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
			     tx_chains, rx_chains);
	if (data->sku_cap_11ac_enable)
		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
				      tx_chains, rx_chains);

	if (n_channels != n_used)
		IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
			    n_used, n_channels);
}
449
450static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
451 const __le16 *phy_sku)
452{
453 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
454 return le16_to_cpup(nvm_sw + SKU);
455
456 return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
457}
458
459static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
460{
461 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
462 return le16_to_cpup(nvm_sw + NVM_VERSION);
463 else
464 return le32_to_cpup((__le32 *)(nvm_sw +
465 NVM_VERSION_FAMILY_8000));
466}
467
468static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
469 const __le16 *phy_sku)
470{
471 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
472 return le16_to_cpup(nvm_sw + RADIO_CFG);
473
474 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
475
476}
477
478static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
479{
480 int n_hw_addr;
481
482 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
483 return le16_to_cpup(nvm_sw + N_HW_ADDRS);
484
485 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
486
487 return n_hw_addr & N_HW_ADDR_MASK;
488}
489
/*
 * Decode the radio configuration word into its component fields;
 * family 8000 uses a different bit layout and additionally carries
 * the valid TX/RX antenna masks.
 */
static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
			      struct iwl_nvm_data *data,
			      u32 radio_cfg)
{
	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg);
	data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
	data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
	/* NOTE(review): pnum is taken from the FLAVOR mask here — confirm */
	data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
	data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg);
	data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg);
}
510
511static void iwl_set_hw_address(const struct iwl_cfg *cfg,
512 struct iwl_nvm_data *data,
513 const __le16 *nvm_sec)
514{
515 const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);
516
517 /* The byte order is little endian 16 bit, meaning 214365 */
518 data->hw_addr[0] = hw_addr[1];
519 data->hw_addr[1] = hw_addr[0];
520 data->hw_addr[2] = hw_addr[3];
521 data->hw_addr[3] = hw_addr[2];
522 data->hw_addr[4] = hw_addr[5];
523 data->hw_addr[5] = hw_addr[4];
524}
525
/*
 * Set the MAC address for family 8000 devices: prefer the NVM MAC
 * override section, fall back to the OTP/HW registers if the override
 * is absent, reserved or invalid.
 */
static void iwl_set_hw_address_family_8000(struct device *dev,
					   const struct iwl_cfg *cfg,
					   struct iwl_nvm_data *data,
					   const __le16 *mac_override,
					   const __le16 *nvm_hw,
					   u32 mac_addr0, u32 mac_addr1)
{
	const u8 *hw_addr;

	if (mac_override) {
		static const u8 reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const u8 *)(mac_override +
				 MAC_ADDRESS_OVERRIDE_FAMILY_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETH_ALEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (is_valid_ether_addr(data->hw_addr) &&
		    memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
			return;

		IWL_ERR_DEV(dev,
			    "mac address from nvm override section is not valid\n");
	}

	if (nvm_hw) {
		/* read the MAC address from HW registers */
		hw_addr = (const u8 *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const u8 *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		if (!is_valid_ether_addr(data->hw_addr))
			IWL_ERR_DEV(dev,
				    "mac address from hw section is not valid\n");

		return;
	}

	IWL_ERR_DEV(dev, "mac address is not found\n");
}
582
583#define IWL_4165_DEVICE_ID 0x5501
584
/*
 * Parse all NVM sections and return a newly-allocated iwl_nvm_data
 * holding the decoded values (version, radio config, SKU capabilities,
 * MAC address, supported bands). Returns NULL on allocation failure or
 * when a required section is missing; the caller owns the result.
 */
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
		   const __le16 *nvm_hw, const __le16 *nvm_sw,
		   const __le16 *nvm_calib, const __le16 *regulatory,
		   const __le16 *mac_override, const __le16 *phy_sku,
		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
		   u32 mac_addr0, u32 mac_addr1, u32 hw_id)
{
	struct iwl_nvm_data *data;
	u32 sku;
	u32 radio_cfg;
	u16 lar_config;

	/* the trailing channel array size depends on the device family */
	if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
		data = kzalloc(sizeof(*data) +
			       sizeof(struct ieee80211_channel) *
			       IWL_NUM_CHANNELS,
			       GFP_KERNEL);
	else
		data = kzalloc(sizeof(*data) +
			       sizeof(struct ieee80211_channel) *
			       IWL_NUM_CHANNELS_FAMILY_8000,
			       GFP_KERNEL);
	if (!data)
		return NULL;

	data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);

	radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
	iwl_set_radio_cfg(cfg, data, radio_cfg);
	/* restrict the chain masks to the antennas the NVM declares valid */
	if (data->valid_tx_ant)
		tx_chains &= data->valid_tx_ant;
	if (data->valid_rx_ant)
		rx_chains &= data->valid_rx_ant;

	sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
		data->sku_cap_11n_enable = false;
	/* 11ac requires 11n to be enabled too */
	data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
				    (sku & NVM_SKU_CAP_11AC_ENABLE);
	data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;

	/*
	 * OTP 0x52 bug work around
	 * define antenna 1x1 according to MIMO disabled
	 */
	if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) {
		data->valid_tx_ant = ANT_B;
		data->valid_rx_ant = ANT_B;
		tx_chains = ANT_B;
		rx_chains = ANT_B;
	}

	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);

	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		/* Checking for required sections */
		if (!nvm_calib) {
			IWL_ERR_DEV(dev,
				    "Can't parse empty Calib NVM sections\n");
			kfree(data);
			return NULL;
		}
		/* in family 8000 Xtal calibration values moved to OTP */
		data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
		data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
	}

	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		iwl_set_hw_address(cfg, data, nvm_hw);

		iwl_init_sbands(dev, cfg, data, nvm_sw,
				tx_chains, rx_chains, lar_fw_supported);
	} else {
		/* the LAR config word moved between NVM versions */
		u16 lar_offset = data->nvm_version < 0xE39 ?
				 NVM_LAR_OFFSET_FAMILY_8000_OLD :
				 NVM_LAR_OFFSET_FAMILY_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       NVM_LAR_ENABLED_FAMILY_8000);

		/* MAC address in family 8000 */
		iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
					       nvm_hw, mac_addr0, mac_addr1);

		iwl_init_sbands(dev, cfg, data, regulatory,
				tx_chains, rx_chains,
				lar_fw_supported && data->lar_enabled);
	}

	data->calib_version = 255;

	return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
684
/*
 * Translate the NVM channel flags of one channel into regulatory rule
 * flags (NL80211_RRF_*). Starts with HT40 forbidden and clears the
 * plus/minus restriction only where the NVM allows 40 MHz operation.
 */
static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
				       int ch_idx, u16 nvm_flags,
				       const struct iwl_cfg *cfg)
{
	u32 flags = NL80211_RRF_NO_HT40;
	u32 last_5ghz_ht = LAST_5GHZ_HT;

	if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;

	if (ch_idx < NUM_2GHZ_CHANNELS &&
	    (nvm_flags & NVM_CHANNEL_40MHZ)) {
		/* 2.4 GHz: allowed secondary-channel side depends on channel number */
		if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	} else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
		   (nvm_flags & NVM_CHANNEL_40MHZ)) {
		/* 5 GHz: channels alternate between HT40+ and HT40- */
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		else
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	}

	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= NL80211_RRF_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= NL80211_RRF_NO_160MHZ;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= NL80211_RRF_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= NL80211_RRF_DFS;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= NL80211_RRF_NO_OUTDOOR;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & NL80211_RRF_NO_IR))
		flags |= NL80211_RRF_GO_CONCURRENT;

	return flags;
}
732
/*
 * Build an ieee80211_regdomain from the per-channel flags received in
 * an MCC_UPDATE_CMD response. Adjacent channels with identical flags
 * (and no frequency gap > 20 MHz) are merged into a single rule.
 * Returns a newly-allocated regdomain or an ERR_PTR on failure;
 * kernel-doc lives in iwl-nvm-parse.h.
 */
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
		       int num_of_ch, __le32 *channels, u16 fw_mcc)
{
	int ch_idx;
	u16 ch_flags, prev_ch_flags = 0;
	const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
			     iwl_nvm_channels_family_8000 : iwl_nvm_channels;
	struct ieee80211_regdomain *regd;
	int size_of_regd;
	struct ieee80211_reg_rule *rule;
	enum ieee80211_band band;
	int center_freq, prev_center_freq = 0;
	int valid_rules = 0;
	bool new_rule;
	int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
			 IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;

	if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(num_of_ch > max_num_ch))
		num_of_ch = max_num_ch;

	IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
		      num_of_ch);

	/* build a regdomain rule for every valid channel */
	size_of_regd =
		sizeof(struct ieee80211_regdomain) +
		num_of_ch * sizeof(struct ieee80211_reg_rule);

	regd = kzalloc(size_of_regd, GFP_KERNEL);
	if (!regd)
		return ERR_PTR(-ENOMEM);

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
		band = (ch_idx < NUM_2GHZ_CHANNELS) ?
		       IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
		center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
							     band);
		new_rule = false;

		/* invalid channels produce no rule at all */
		if (!(ch_flags & NVM_CHANNEL_VALID)) {
			IWL_DEBUG_DEV(dev, IWL_DL_LAR,
				      "Ch. %d Flags %x [%sGHz] - No traffic\n",
				      nvm_chan[ch_idx],
				      ch_flags,
				      (ch_idx >= NUM_2GHZ_CHANNELS) ?
				      "5.2" : "2.4");
			continue;
		}

		/* we can't continue the same rule */
		if (ch_idx == 0 || prev_ch_flags != ch_flags ||
		    center_freq - prev_center_freq > 20) {
			valid_rules++;
			new_rule = true;
		}

		rule = &regd->reg_rules[valid_rules - 1];

		if (new_rule)
			rule->freq_range.start_freq_khz =
						MHZ_TO_KHZ(center_freq - 10);

		/* extend the current rule to cover this channel */
		rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);

		/* this doesn't matter - not used by FW */
		rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
		rule->power_rule.max_eirp =
			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);

		rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
							  ch_flags, cfg);

		/* rely on auto-calculation to merge BW of contiguous chans */
		rule->flags |= NL80211_RRF_AUTO_BW;
		rule->freq_range.max_bandwidth_khz = 0;

		prev_ch_flags = ch_flags;
		prev_center_freq = center_freq;

		IWL_DEBUG_DEV(dev, IWL_DL_LAR,
			      "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
			      center_freq,
			      band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
			      CHECK_AND_PRINT_I(VALID),
			      CHECK_AND_PRINT_I(ACTIVE),
			      CHECK_AND_PRINT_I(RADAR),
			      CHECK_AND_PRINT_I(WIDE),
			      CHECK_AND_PRINT_I(40MHZ),
			      CHECK_AND_PRINT_I(80MHZ),
			      CHECK_AND_PRINT_I(160MHZ),
			      CHECK_AND_PRINT_I(INDOOR_ONLY),
			      CHECK_AND_PRINT_I(GO_CONCURRENT),
			      ch_flags,
			      ((ch_flags & NVM_CHANNEL_ACTIVE) &&
			       !(ch_flags & NVM_CHANNEL_RADAR))
				       ? "" : "not ");
	}

	regd->n_reg_rules = valid_rules;

	/* set alpha2 from FW. */
	regd->alpha2[0] = fw_mcc >> 8;
	regd->alpha2[1] = fw_mcc & 0xff;

	return regd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
new file mode 100644
index 000000000000..9f44d8188c5c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -0,0 +1,97 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_nvm_parse_h__
63#define __iwl_nvm_parse_h__
64
65#include <net/cfg80211.h>
66#include "iwl-eeprom-parse.h"
67
68/**
69 * iwl_parse_nvm_data - parse NVM data and return values
70 *
71 * This function parses all NVM values we need and then
72 * returns a (newly allocated) struct containing all the
73 * relevant values for driver use. The struct must be freed
74 * later with iwl_free_nvm_data().
75 */
76struct iwl_nvm_data *
77iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
78 const __le16 *nvm_hw, const __le16 *nvm_sw,
79 const __le16 *nvm_calib, const __le16 *regulatory,
80 const __le16 *mac_override, const __le16 *phy_sku,
81 u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
82 u32 mac_addr0, u32 mac_addr1, u32 hw_id);
83
/**
 * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
 *
 * This function parses the regulatory channel data received as a
 * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
 * to be fed into the regulatory core. An ERR_PTR is returned on error.
 * If not given to the regulatory core, the user is responsible for freeing
 * the regdomain returned here with kfree.
 */
93struct ieee80211_regdomain *
94iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
95 int num_of_ch, __le32 *channels, u16 fw_mcc);
96
97#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
new file mode 100644
index 000000000000..2a58d6833224
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -0,0 +1,271 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67#ifndef __iwl_op_mode_h__
68#define __iwl_op_mode_h__
69
70#include <linux/netdevice.h>
71#include <linux/debugfs.h>
72
73struct iwl_op_mode;
74struct iwl_trans;
75struct sk_buff;
76struct iwl_device_cmd;
77struct iwl_rx_cmd_buffer;
78struct iwl_fw;
79struct iwl_cfg;
80
81/**
82 * DOC: Operational mode - what is it ?
83 *
84 * The operational mode (a.k.a. op_mode) is the layer that implements
85 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
86 * the transport API to access the HW. The op_mode doesn't need to know how the
87 * underlying HW works, since the transport layer takes care of that.
88 *
89 * There can be several op_mode: i.e. different fw APIs will require two
90 * different op_modes. This is why the op_mode is virtualized.
91 */
92
93/**
94 * DOC: Life cycle of the Operational mode
95 *
96 * The operational mode has a very simple life cycle.
97 *
98 * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
99 * capabilities advertised by the fw file (in TLV format).
100 * 2) The driver layer starts the op_mode (ops->start)
101 * 3) The op_mode registers mac80211
102 * 4) The op_mode is governed by mac80211
103 * 5) The driver layer stops the op_mode
104 */
105
106/**
107 * struct iwl_op_mode_ops - op_mode specific operations
108 *
109 * The op_mode exports its ops so that external components can start it and
110 * interact with it. The driver layer typically calls the start and stop
111 * handlers, the transport layer calls the others.
112 *
113 * All the handlers MUST be implemented, except @rx_rss which can be left
114 * out *iff* the opmode will never run on hardware with multi-queue capability.
115 *
116 * @start: start the op_mode. The transport layer is already allocated.
117 * May sleep
118 * @stop: stop the op_mode. Must free all the memory allocated.
119 * May sleep
120 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
121 * HCMD this Rx responds to. Can't sleep.
122 * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
123 * received on the RSS queue(s). The queue parameter indicates which of the
124 * RSS queues received this frame; it will always be non-zero.
125 * This method must not sleep.
126 * @queue_full: notifies that a HW queue is full.
127 * Must be atomic and called with BH disabled.
128 * @queue_not_full: notifies that a HW queue is not full any more.
129 * Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
 *	the radio is killed. Return %true if the device should be stopped by
 *	the transport immediately after the call. May sleep.
133 * @free_skb: allows the transport layer to free skbs that haven't been
134 * reclaimed by the op_mode. This can happen when the driver is freed and
135 * there are Tx packets pending in the transport layer.
136 * Must be atomic
137 * @nic_error: error notification. Must be atomic and must be called with BH
138 * disabled.
139 * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
140 * called with BH disabled.
141 * @nic_config: configure NIC, called before firmware is started.
142 * May sleep
143 * @wimax_active: invoked when WiMax becomes active. May sleep
144 * @enter_d0i3: configure the fw to enter d0i3. return 1 to indicate d0i3
145 * entrance is aborted (e.g. due to held reference). May sleep.
146 * @exit_d0i3: configure the fw to exit d0i3. May sleep.
147 */
/* Handler table for an op_mode implementation; each member is documented
 * in the kernel-doc comment directly above this struct. */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	/* optional: only needed on multi-queue (RSS) capable hardware */
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode);
	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	/* optional: d0i3 handlers may be left NULL (see inline wrappers) */
	int (*enter_d0i3)(struct iwl_op_mode *op_mode);
	int (*exit_d0i3)(struct iwl_op_mode *op_mode);
};
169
170int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
171void iwl_opmode_deregister(const char *name);
172
173/**
174 * struct iwl_op_mode - operational mode
175 * @ops: pointer to its own ops
176 *
177 * This holds an implementation of the mac80211 / fw API.
178 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	/* trailing per-implementation private data, pointer-aligned */
	char op_mode_specific[0] __aligned(sizeof(void *));
};
184
/* Stop the op_mode, freeing everything it allocated; may sleep. */
static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->stop(op_mode);
}
190
191static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
192 struct napi_struct *napi,
193 struct iwl_rx_cmd_buffer *rxb)
194{
195 return op_mode->ops->rx(op_mode, napi, rxb);
196}
197
/* Deliver an Rx buffer received on RSS queue @queue; can't sleep. */
static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}
205
/* Notify the op_mode that HW queue @queue is full; atomic, BH disabled. */
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}
211
/* Notify the op_mode that HW queue @queue has space again; atomic, BH disabled. */
static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}
217
/* Report an RF-kill state change; returns true if the transport should
 * stop the device immediately. May sleep. */
static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();
	return op_mode->ops->hw_rf_kill(op_mode, state);
}
224
/* Let the op_mode free an skb the transport couldn't reclaim; atomic. */
static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	op_mode->ops->free_skb(op_mode, skb);
}
230
/* Report a NIC error to the op_mode; atomic, BH disabled. */
static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode)
{
	op_mode->ops->nic_error(op_mode);
}
235
/* Report that the command queue is full; atomic, BH disabled. */
static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	op_mode->ops->cmd_queue_full(op_mode);
}
240
/* Ask the op_mode to configure the NIC before firmware start; may sleep. */
static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->nic_config(op_mode);
}
246
/* Notify the op_mode that WiMax became active; may sleep. */
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->wimax_active(op_mode);
}
252
/* Enter d0i3; the handler is optional, so a missing one counts as success.
 * May sleep. */
static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
{
	might_sleep();

	if (!op_mode->ops->enter_d0i3)
		return 0;
	return op_mode->ops->enter_d0i3(op_mode);
}
261
/* Exit d0i3; the handler is optional, so a missing one counts as success.
 * May sleep. */
static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
{
	might_sleep();

	if (!op_mode->ops->exit_d0i3)
		return 0;
	return op_mode->ops->exit_d0i3(op_mode);
}
270
271#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
new file mode 100644
index 000000000000..a105455b6a24
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,471 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/slab.h>
65#include <linux/string.h>
66#include <linux/export.h>
67
68#include "iwl-drv.h"
69#include "iwl-phy-db.h"
70#include "iwl-debug.h"
71#include "iwl-op-mode.h"
72#include "iwl-trans.h"
73
74#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
75#define IWL_NUM_PAPD_CH_GROUPS 9
76#define IWL_NUM_TXP_CH_GROUPS 9
77
/* One stored phy db section: a sized blob copied from a firmware packet. */
struct iwl_phy_db_entry {
	u16 size;	/* length of @data in bytes; 0 when nothing stored */
	u8 *data;	/* kmemdup'ed copy of the section payload */
};
82
/**
 * struct iwl_phy_db - stores phy configuration and calibration data.
 *
 * @cfg: phy configuration.
 * @calib_nch: non channel specific calibration data.
 * @calib_ch_group_papd: calibration data related to papd channel group.
 * @calib_ch_group_txp: calibration data related to tx power channel group.
 * @trans: transport used to send the stored sections to the firmware.
 */
struct iwl_phy_db {
	struct iwl_phy_db_entry cfg;
	struct iwl_phy_db_entry calib_nch;
	struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
	struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];

	struct iwl_trans *trans;
};
100
/*
 * Section types as carried on the wire (le16) in PHY DB notifications
 * and commands.  Values start at 1 and must keep their encoding.
 */
enum iwl_phy_db_section_type {
	IWL_PHY_DB_CFG = 1,
	IWL_PHY_DB_CALIB_NCH,
	IWL_PHY_DB_UNUSED,	/* hole kept so later values keep their wire encoding */
	IWL_PHY_DB_CALIB_CHG_PAPD,
	IWL_PHY_DB_CALIB_CHG_TXP,
	IWL_PHY_DB_MAX
};
109
110#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
111
/*
 * phy db - configure operational ucode
 * Fixed header of the PHY_DB_CMD host command; the section payload
 * follows in @data.
 */
struct iwl_phy_db_cmd {
	__le16 type;	/* enum iwl_phy_db_section_type */
	__le16 length;	/* payload length in bytes */
	u8 data[];
} __packed;
120
/* for parsing of tx power channel group data that comes from the firmware */
struct iwl_phy_db_chg_txp {
	__le32 space;		/* not read by the driver */
	__le16 max_channel_idx;	/* highest channel index this group covers */
} __packed;
126
/*
 * phy db - Receive phy db chunk after calibrations
 * Layout of the calibration-result notification payload.
 */
struct iwl_calib_res_notif_phy_db {
	__le16 type;	/* enum iwl_phy_db_section_type */
	__le16 length;	/* length of @data in bytes */
	u8 data[];
} __packed;
135
136struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
137{
138 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
139 GFP_KERNEL);
140
141 if (!phy_db)
142 return phy_db;
143
144 phy_db->trans = trans;
145
146 /* TODO: add default values of the phy db. */
147 return phy_db;
148}
149IWL_EXPORT_SYMBOL(iwl_phy_db_init);
150
151/*
152 * get phy db section: returns a pointer to a phy db section specified by
153 * type and channel group id.
154 */
155static struct iwl_phy_db_entry *
156iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
157 enum iwl_phy_db_section_type type,
158 u16 chg_id)
159{
160 if (!phy_db || type >= IWL_PHY_DB_MAX)
161 return NULL;
162
163 switch (type) {
164 case IWL_PHY_DB_CFG:
165 return &phy_db->cfg;
166 case IWL_PHY_DB_CALIB_NCH:
167 return &phy_db->calib_nch;
168 case IWL_PHY_DB_CALIB_CHG_PAPD:
169 if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
170 return NULL;
171 return &phy_db->calib_ch_group_papd[chg_id];
172 case IWL_PHY_DB_CALIB_CHG_TXP:
173 if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
174 return NULL;
175 return &phy_db->calib_ch_group_txp[chg_id];
176 default:
177 return NULL;
178 }
179 return NULL;
180}
181
182static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
183 enum iwl_phy_db_section_type type,
184 u16 chg_id)
185{
186 struct iwl_phy_db_entry *entry =
187 iwl_phy_db_get_section(phy_db, type, chg_id);
188 if (!entry)
189 return;
190
191 kfree(entry->data);
192 entry->data = NULL;
193 entry->size = 0;
194}
195
196void iwl_phy_db_free(struct iwl_phy_db *phy_db)
197{
198 int i;
199
200 if (!phy_db)
201 return;
202
203 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
204 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
205 for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
206 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
207 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
208 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
209
210 kfree(phy_db);
211}
212IWL_EXPORT_SYMBOL(iwl_phy_db_free);
213
/*
 * Store a calibration-result notification from the firmware in the
 * matching phy db section, replacing any previously stored data.
 *
 * For the channel-group sections (PAPD/TXP) the first __le16 of the
 * payload is the channel group id.
 *
 * NOTE(review): @size is taken from the firmware packet and is not
 * validated against the actual packet payload length -- assumes the
 * firmware sends a consistent length; confirm against the transport.
 *
 * Returns 0 on success, -EINVAL for a NULL db or unknown section/group,
 * -ENOMEM if copying the payload fails (the entry is then emptied).
 */
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
			   gfp_t alloc_ctx)
{
	struct iwl_calib_res_notif_phy_db *phy_db_notif =
			(struct iwl_calib_res_notif_phy_db *)pkt->data;
	enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type);
	u16 size = le16_to_cpu(phy_db_notif->length);
	struct iwl_phy_db_entry *entry;
	u16 chg_id = 0;

	if (!phy_db)
		return -EINVAL;

	/* channel-group sections carry the group id as the first __le16 */
	if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWL_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);

	entry = iwl_phy_db_get_section(phy_db, type, chg_id);
	if (!entry)
		return -EINVAL;

	/* drop any previously stored data for this section */
	kfree(entry->data);
	entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
	if (!entry->data) {
		entry->size = 0;
		return -ENOMEM;
	}

	entry->size = size;

	IWL_DEBUG_INFO(phy_db->trans,
		       "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
		       __func__, __LINE__, type, size);

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_phy_db_set_section);
251
/*
 * Check whether @ch_id is a channel the phy db knows how to map:
 * 2.4 GHz channels 1-14, or the 5 GHz channels 36-64 / 100-140
 * (multiples of 4) and 145-165 (4n+1).
 * Returns 1 for a known channel, 0 otherwise.
 */
static int is_valid_channel(u16 ch_id)
{
	/*
	 * Channel 0 does not exist; the 2.4 GHz band starts at 1.
	 * (channel_id_to_papd() relies on ch_id >= 1, and accepting 0
	 * here would wrongly map it to PAPD group 3.)
	 */
	if ((ch_id >= 1 && ch_id <= 14) ||
	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
		return 1;
	return 0;
}
261
/*
 * Map a channel number to a zero-based contiguous calibration index:
 *   1..14             -> 0..13
 *   36..64  (step 4)  -> 14..21
 *   100..140 (step 4) -> 22..32
 *   145..165 (4n+1)   -> 33..38
 * WARNs and returns 0xff for an invalid channel.
 */
static u8 ch_id_to_ch_index(u16 ch_id)
{
	if (WARN_ON(!is_valid_channel(ch_id)))
		return 0xff;

	if (ch_id <= 14)
		return ch_id - 1;
	if (ch_id <= 64)
		return (ch_id + 20) / 4;
	if (ch_id <= 140)
		return (ch_id - 12) / 4;
	return (ch_id - 13) / 4;
}
275
276
/*
 * Map a channel number to its PAPD calibration group:
 *   0: 2.4 GHz (1-14),  1: 36-64,  2: 100-140,  3: 145-165.
 * WARNs and returns 0xff for an invalid channel.
 */
static u16 channel_id_to_papd(u16 ch_id)
{
	if (WARN_ON(!is_valid_channel(ch_id)))
		return 0xff;

	if (1 <= ch_id && ch_id <= 14)
		return 0;
	if (36 <= ch_id && ch_id <= 64)
		return 1;
	if (100 <= ch_id && ch_id <= 140)
		return 2;
	return 3;
}
290
/*
 * Map a channel number to its TX-power calibration group: the first
 * stored group whose max_channel_idx covers the channel's index.
 * Returns 0xff when the channel is invalid, a group has no stored
 * data, or no group covers the channel.
 */
static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
{
	struct iwl_phy_db_chg_txp *txp_chg;
	int i;
	u8 ch_index = ch_id_to_ch_index(ch_id);
	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group whose max channel is
		 * higher than the wanted channel.
		 */
		if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
/*
 * Look up a section's stored data.  For the channel-group section types
 * the group id is derived from @ch_id; other types ignore @ch_id.
 * On success *data/*size reference the stored blob (no copy is made).
 * Returns 0 on success, -EINVAL when the section cannot be resolved
 * (an unresolved group id of 0xff is rejected by the range check in
 * iwl_phy_db_get_section()).
 */
static
int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
				u32 type, u8 **data, u16 *size, u16 ch_id)
{
	struct iwl_phy_db_entry *entry;
	u16 ch_group_id = 0;

	if (!phy_db)
		return -EINVAL;

	/* find wanted channel group */
	if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
		ch_group_id = channel_id_to_papd(ch_id);
	else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
		ch_group_id = channel_id_to_txp(phy_db, ch_id);

	entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
	if (!entry)
		return -EINVAL;

	*data = entry->data;
	*size = entry->size;

	IWL_DEBUG_INFO(phy_db->trans,
		       "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
		       __func__, __LINE__, type, *size);

	return 0;
}
341
/*
 * Send one phy db section to the firmware as a PHY_DB_CMD host command.
 * The command has two fragments: the fixed type/length header (copied)
 * and the payload, sent with IWL_HCMD_DFL_NOCOPY so @data must stay
 * valid while the command is in flight.
 * Returns the transport's send result.
 */
static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
			       u16 length, void *data)
{
	struct iwl_phy_db_cmd phy_db_cmd;
	struct iwl_host_cmd cmd = {
		.id = PHY_DB_CMD,
	};

	IWL_DEBUG_INFO(phy_db->trans,
		       "Sending PHY-DB hcmd of type %d, of length %d\n",
		       type, length);

	/* Set phy db cmd variables */
	phy_db_cmd.type = cpu_to_le16(type);
	phy_db_cmd.length = cpu_to_le16(length);

	/* Set hcmd variables */
	cmd.data[0] = &phy_db_cmd;
	cmd.len[0] = sizeof(struct iwl_phy_db_cmd);
	cmd.data[1] = data;
	cmd.len[1] = length;
	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;

	return iwl_trans_send_cmd(phy_db->trans, &cmd);
}
367
368static int iwl_phy_db_send_all_channel_groups(
369 struct iwl_phy_db *phy_db,
370 enum iwl_phy_db_section_type type,
371 u8 max_ch_groups)
372{
373 u16 i;
374 int err;
375 struct iwl_phy_db_entry *entry;
376
377 /* Send all the channel specific groups to operational fw */
378 for (i = 0; i < max_ch_groups; i++) {
379 entry = iwl_phy_db_get_section(phy_db,
380 type,
381 i);
382 if (!entry)
383 return -EINVAL;
384
385 if (!entry->size)
386 continue;
387
388 /* Send the requested PHY DB section */
389 err = iwl_send_phy_db_cmd(phy_db,
390 type,
391 entry->size,
392 entry->data);
393 if (err) {
394 IWL_ERR(phy_db->trans,
395 "Can't SEND phy_db section %d (%d), err %d\n",
396 type, i, err);
397 return err;
398 }
399
400 IWL_DEBUG_INFO(phy_db->trans,
401 "Sent PHY_DB HCMD, type = %d num = %d\n",
402 type, i);
403 }
404
405 return 0;
406}
407
/*
 * Send the whole phy db to the runtime firmware image: the CFG section,
 * the non-channel calibration section, then all PAPD and TXP channel
 * groups.  Returns 0 on success or the first error encountered.
 */
int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
{
	u8 *data = NULL;
	u16 size = 0;
	int err;

	IWL_DEBUG_INFO(phy_db->trans,
		       "Sending phy db data and configuration to runtime image\n");

	/* Send PHY DB CFG section */
	err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
					  &data, &size, 0);
	if (err) {
		IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n");
		return err;
	}

	err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data);
	if (err) {
		IWL_ERR(phy_db->trans,
			"Cannot send HCMD of Phy DB cfg section\n");
		return err;
	}

	/* Send the non-channel-specific calibration section */
	err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH,
					  &data, &size, 0);
	if (err) {
		IWL_ERR(phy_db->trans,
			"Cannot get Phy DB non specific channel section\n");
		return err;
	}

	err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		IWL_ERR(phy_db->trans,
			"Cannot send HCMD of Phy DB non specific channel section\n");
		return err;
	}

	/* Send all the PAPD channel specific data */
	err = iwl_phy_db_send_all_channel_groups(phy_db,
						 IWL_PHY_DB_CALIB_CHG_PAPD,
						 IWL_NUM_PAPD_CH_GROUPS);
	if (err) {
		IWL_ERR(phy_db->trans,
			"Cannot send channel specific PAPD groups\n");
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwl_phy_db_send_all_channel_groups(phy_db,
						 IWL_PHY_DB_CALIB_CHG_TXP,
						 IWL_NUM_TXP_CH_GROUPS);
	if (err) {
		IWL_ERR(phy_db->trans,
			"Cannot send channel specific TX power groups\n");
		return err;
	}

	IWL_DEBUG_INFO(phy_db->trans,
		       "Finished sending phy db non channel data\n");
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
new file mode 100644
index 000000000000..9ee18d0d2d01
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,82 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
#ifndef __IWL_PHYDB_H__
#define __IWL_PHYDB_H__

#include <linux/types.h>

#include "iwl-op-mode.h"
#include "iwl-trans.h"

/* Allocate an empty phy db bound to @trans; NULL on allocation failure. */
struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);

/* Free all stored sections and the phy db itself (NULL is a no-op). */
void iwl_phy_db_free(struct iwl_phy_db *phy_db);

/* Store a firmware calibration-result notification in the phy db. */
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
			   gfp_t alloc_ctx);

/* Send all stored phy db sections to the runtime firmware image. */
int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);

#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
new file mode 100644
index 000000000000..3ab777f79e4f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -0,0 +1,401 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __iwl_prph_h__
66#define __iwl_prph_h__
67
68/*
69 * Registers in this file are internal, not PCI bus memory mapped.
70 * Driver accesses these via HBUS_TARG_PRPH_* registers.
71 */
72#define PRPH_BASE (0x00000)
73#define PRPH_END (0xFFFFF)
74
75/* APMG (power management) constants */
76#define APMG_BASE (PRPH_BASE + 0x3000)
77#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
78#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
79#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
80#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
81#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
82#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
83#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
84#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
85#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
86#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
87
88#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
89#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
90#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
91
92#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
93#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
94#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
95#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
96#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
97#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
98#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
99
100#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
101#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
102#define APMG_PCIDEV_STT_VAL_WAKE_ME (0x00004000)
103
104#define APMG_RTC_INT_STT_RFKILL (0x10000000)
105
106/* Device system time */
107#define DEVICE_SYSTEM_TIME_REG 0xA0206C
108
109/* Device NMI register */
110#define DEVICE_SET_NMI_REG 0x00a01c30
111#define DEVICE_SET_NMI_VAL_HW BIT(0)
112#define DEVICE_SET_NMI_VAL_DRV BIT(7)
113#define DEVICE_SET_NMI_8000_REG 0x00a01c24
114#define DEVICE_SET_NMI_8000_VAL 0x1000000
115
/* Shared registers (0x0..0x3ff, via target indirect or periphery) */
117#define SHR_BASE 0x00a10000
118
119/* Shared GP1 register */
120#define SHR_APMG_GP1_REG 0x01dc
121#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG)
122#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004
123#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000
124
125/* Shared DL_CFG register */
126#define SHR_APMG_DL_CFG_REG 0x01c4
127#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG)
128#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0
129#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080
130#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100
131
132/* Shared APMG_XTAL_CFG register */
133#define SHR_APMG_XTAL_CFG_REG 0x1c0
134#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000
135
136/*
137 * Device reset for family 8000
138 * write to bit 24 in order to reset the CPU
139*/
140#define RELEASE_CPU_RESET (0x300C)
141#define RELEASE_CPU_RESET_BIT BIT(24)
142
143/*****************************************************************************
144 * 7000/3000 series SHR DTS addresses *
145 *****************************************************************************/
146
147#define SHR_MISC_WFM_DTS_EN (0x00a10024)
148#define DTSC_CFG_MODE (0x00a10604)
149#define DTSC_VREF_AVG (0x00a10648)
150#define DTSC_VREF5_AVG (0x00a1064c)
151#define DTSC_CFG_MODE_PERIODIC (0x2)
152#define DTSC_PTAT_AVG (0x00a10650)
153
154
155/**
156 * Tx Scheduler
157 *
158 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
159 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
160 * host DRAM. It steers each frame's Tx command (which contains the frame
161 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
162 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
163 * but one DMA channel may take input from several queues.
164 *
165 * Tx DMA FIFOs have dedicated purposes.
166 *
167 * For 5000 series and up, they are used differently
168 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
169 *
170 * 0 -- EDCA BK (background) frames, lowest priority
171 * 1 -- EDCA BE (best effort) frames, normal priority
172 * 2 -- EDCA VI (video) frames, higher priority
173 * 3 -- EDCA VO (voice) and management frames, highest priority
174 * 4 -- unused
175 * 5 -- unused
176 * 6 -- unused
177 * 7 -- Commands
178 *
179 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
180 * In addition, driver can map the remaining queues to Tx DMA/FIFO
181 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
182 *
183 * The driver sets up each queue to work in one of two modes:
184 *
185 * 1) Scheduler-Ack, in which the scheduler automatically supports a
186 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
187 * contains TFDs for a unique combination of Recipient Address (RA)
188 * and Traffic Identifier (TID), that is, traffic of a given
189 * Quality-Of-Service (QOS) priority, destined for a single station.
190 *
191 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
192 * each frame within the BA window, including whether it's been transmitted,
193 * and whether it's been acknowledged by the receiving station. The device
194 * automatically processes block-acks received from the receiving STA,
195 * and reschedules un-acked frames to be retransmitted (successful
196 * Tx completion may end up being out-of-order).
197 *
198 * The driver must maintain the queue's Byte Count table in host DRAM
199 * for this mode.
200 * This mode does not support fragmentation.
201 *
202 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
203 * The device may automatically retry Tx, but will retry only one frame
204 * at a time, until receiving ACK from receiving station, or reaching
205 * retry limit and giving up.
206 *
207 * The command queue (#4/#9) must use this mode!
208 * This mode does not require use of the Byte Count table in host DRAM.
209 *
210 * Driver controls scheduler operation via 3 means:
211 * 1) Scheduler registers
212 * 2) Shared scheduler data base in internal SRAM
213 * 3) Shared data in host DRAM
214 *
215 * Initialization:
216 *
217 * When loading, driver should allocate memory for:
218 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
219 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
220 * (1024 bytes for each queue).
221 *
222 * After receiving "Alive" response from uCode, driver must initialize
223 * the scheduler (especially for queue #4/#9, the command queue, otherwise
224 * the driver can't issue commands!):
225 */
226#define SCD_MEM_LOWER_BOUND (0x0000)
227
228/**
229 * Max Tx window size is the max number of contiguous TFDs that the scheduler
230 * can keep track of at one time when creating block-ack chains of frames.
231 * Note that "64" matches the number of ack bits in a block-ack packet.
232 */
233#define SCD_WIN_SIZE 64
234#define SCD_FRAME_LIMIT 64
235
236#define SCD_TXFIFO_POS_TID (0)
237#define SCD_TXFIFO_POS_RA (4)
238#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
239
240/* agn SCD */
241#define SCD_QUEUE_STTS_REG_POS_TXF (0)
242#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
243#define SCD_QUEUE_STTS_REG_POS_WSL (4)
244#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
245#define SCD_QUEUE_STTS_REG_MSK (0x017F0000)
246
247#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
248#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
249#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
250#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
251#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
252#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
253#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
254#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
255#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0)
256#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18)
257
258/* Context Data */
259#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
260#define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)
261
262/* Tx status */
263#define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)
264#define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)
265
266/* Translation Data */
267#define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)
268#define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808)
269
270#define SCD_CONTEXT_QUEUE_OFFSET(x)\
271 (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
272
273#define SCD_TX_STTS_QUEUE_OFFSET(x)\
274 (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
275
276#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
277 ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
278
279#define SCD_BASE (PRPH_BASE + 0xa02c00)
280
281#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0)
282#define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8)
283#define SCD_AIT (SCD_BASE + 0x0c)
284#define SCD_TXFACT (SCD_BASE + 0x10)
285#define SCD_ACTIVE (SCD_BASE + 0x14)
286#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
287#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
288#define SCD_AGGR_SEL (SCD_BASE + 0x248)
289#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
290#define SCD_GP_CTRL (SCD_BASE + 0x1a8)
291#define SCD_EN_CTRL (SCD_BASE + 0x254)
292
293/*********************** END TX SCHEDULER *************************************/
294
295/* tcp checksum offload */
296#define RX_EN_CSUM (0x00a00d88)
297
298/* Oscillator clock */
299#define OSC_CLK (0xa04068)
300#define OSC_CLK_FORCE_CONTROL (0x8)
301
302#define FH_UCODE_LOAD_STATUS (0x1AF0)
303#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
/*
 * Values/masks read from the uCode load status registers
 * (FH_UCODE_LOAD_STATUS / CSR_UCODE_LOAD_STATUS_ADDR above);
 * the low bits are stage flags, the upper fields are counters.
 */
enum secure_load_status_reg {
	LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
	LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
	LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
	LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
	LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
};
311
312#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
313#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
314#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
315#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
316
317#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
318#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
319#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
320#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
321
322/* Rx FIFO */
323#define RXF_SIZE_ADDR (0xa00c88)
324#define RXF_RD_D_SPACE (0xa00c40)
325#define RXF_RD_WR_PTR (0xa00c50)
326#define RXF_RD_RD_PTR (0xa00c54)
327#define RXF_RD_FENCE_PTR (0xa00c4c)
328#define RXF_SET_FENCE_MODE (0xa00c14)
329#define RXF_LD_WR2FENCE (0xa00c1c)
330#define RXF_FIFO_RD_FENCE_INC (0xa00c68)
331#define RXF_SIZE_BYTE_CND_POS (7)
332#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
333#define RXF_DIFF_FROM_PREV (0x200)
334
335#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
336#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
337
338/* Tx FIFO */
339#define TXF_FIFO_ITEM_CNT (0xa00438)
340#define TXF_WR_PTR (0xa00414)
341#define TXF_RD_PTR (0xa00410)
342#define TXF_FENCE_PTR (0xa00418)
343#define TXF_LOCK_FENCE (0xa00424)
344#define TXF_LARC_NUM (0xa0043c)
345#define TXF_READ_MODIFY_DATA (0xa00448)
346#define TXF_READ_MODIFY_ADDR (0xa0044c)
347
348/* FW monitor */
349#define MON_BUFF_SAMPLE_CTL (0xa03c00)
350#define MON_BUFF_BASE_ADDR (0xa03c3c)
351#define MON_BUFF_END_ADDR (0xa03c40)
352#define MON_BUFF_WRPTR (0xa03c44)
353#define MON_BUFF_CYCLE_CNT (0xa03c48)
354
355#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
356#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
357
358#define DBGC_IN_SAMPLE (0xa03c00)
359
360/* enable the ID buf for read */
361#define WFPM_PS_CTL_CLR 0xA0300C
362#define WFMP_MAC_ADDR_0 0xA03080
363#define WFMP_MAC_ADDR_1 0xA03084
364#define LMPM_PMG_EN 0xA01CEC
365#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
366#define RFIC_REG_RD 0xAD0470
367#define WFPM_CTRL_REG 0xA03030
368enum {
369 ENABLE_WFPM = BIT(31),
370 WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
371};
372
373#define AUX_MISC_REG 0xA200B0
374enum {
375 HW_STEP_LOCATION_BITS = 24,
376};
377
378#define AUX_MISC_MASTER1_EN 0xA20818
379enum aux_misc_master1_en {
380 AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
381};
382
383#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
384#define RSA_ENABLE 0xA24B08
385#define PREG_AUX_BUS_WPROT_0 0xA04CC0
386#define SB_CPU_1_STATUS 0xA01E30
387#define SB_CPU_2_STATUS 0xA01E34
388
389/* FW chicken bits */
390#define LMPM_CHICK 0xA01FF8
391enum {
392 LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
393};
394
/* FW page pass notification */
396#define LMPM_PAGE_PASS_NOTIF 0xA03824
397enum {
398 LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
399};
400
401#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h
new file mode 100644
index 000000000000..f2353ebf2666
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h
@@ -0,0 +1,143 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Mobile Communications GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2014 Intel Mobile Communications GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __iwl_scd_h__
65#define __iwl_scd_h__
66
67#include "iwl-trans.h"
68#include "iwl-io.h"
69#include "iwl-prph.h"
70
71
72static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans,
73 u16 txq_id)
74{
75 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
76}
77
78static inline void iwl_scd_txq_enable_agg(struct iwl_trans *trans,
79 u16 txq_id)
80{
81 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
82}
83
84static inline void iwl_scd_txq_disable_agg(struct iwl_trans *trans,
85 u16 txq_id)
86{
87 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
88}
89
/*
 * NOTE(review): OR-ing 0 into SCD_AGGR_SEL leaves the register unchanged,
 * so this is effectively a read-modify-write no-op.  If the intent is to
 * clear aggregation on all queues, iwl_write_prph(trans, SCD_AGGR_SEL, 0)
 * would be needed — confirm intent against callers before changing.
 */
static inline void iwl_scd_disable_agg(struct iwl_trans *trans)
{
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0);
}
94
95static inline void iwl_scd_activate_fifos(struct iwl_trans *trans)
96{
97 iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7));
98}
99
/* Disable scheduler TX on all FIFOs. */
static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans)
{
	iwl_write_prph(trans, SCD_TXFACT, 0);
}
104
/* Write @value to SCD_EN_CTRL (used when scd_set_active is configured;
 * NOTE(review): the exact bit semantics are not visible in this file).
 */
static inline void iwl_scd_enable_set_active(struct iwl_trans *trans,
					     u32 value)
{
	iwl_write_prph(trans, SCD_EN_CTRL, value);
}
110
111static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
112{
113 if (chnl < 20)
114 return SCD_BASE + 0x18 + chnl * 4;
115 WARN_ON_ONCE(chnl >= 32);
116 return SCD_BASE + 0x284 + (chnl - 20) * 4;
117}
118
/*
 * Read-pointer register address for TX DMA channel @chnl.
 * NOTE(review): unlike SCD_QUEUE_WRPTR(), the high bank (chnl >= 20) is
 * indexed with the absolute channel number rather than (chnl - 20);
 * presumably this matches the HW register map — confirm before "fixing".
 */
static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
{
	if (chnl < 20)
		return SCD_BASE + 0x68 + chnl * 4;
	WARN_ON_ONCE(chnl >= 32);
	return SCD_BASE + 0x2B4 + chnl * 4;
}
126
/*
 * Status-bits register address for TX queue @chnl.
 * NOTE(review): like SCD_QUEUE_RDPTR() (and unlike SCD_QUEUE_WRPTR()),
 * the high bank is indexed with the absolute channel number — presumably
 * intentional, per the HW register map.
 */
static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
{
	if (chnl < 20)
		return SCD_BASE + 0x10c + chnl * 4;
	WARN_ON_ONCE(chnl >= 32);
	return SCD_BASE + 0x334 + chnl * 4;
}
134
135static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
136 u16 txq_id)
137{
138 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
139 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
140 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
141}
142
143#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
new file mode 100644
index 000000000000..71610968c365
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -0,0 +1,114 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Mobile Communications GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2015 Intel Mobile Communications GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/kernel.h>
64#include "iwl-trans.h"
65
66struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
67 struct device *dev,
68 const struct iwl_cfg *cfg,
69 const struct iwl_trans_ops *ops,
70 size_t dev_cmd_headroom)
71{
72 struct iwl_trans *trans;
73#ifdef CONFIG_LOCKDEP
74 static struct lock_class_key __key;
75#endif
76
77 trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
78 if (!trans)
79 return NULL;
80
81#ifdef CONFIG_LOCKDEP
82 lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
83 &__key, 0);
84#endif
85
86 trans->dev = dev;
87 trans->cfg = cfg;
88 trans->ops = ops;
89 trans->dev_cmd_headroom = dev_cmd_headroom;
90 trans->num_rx_queues = 1;
91
92 snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
93 "iwl_cmd_pool:%s", dev_name(trans->dev));
94 trans->dev_cmd_pool =
95 kmem_cache_create(trans->dev_cmd_pool_name,
96 sizeof(struct iwl_device_cmd)
97 + trans->dev_cmd_headroom,
98 sizeof(void *),
99 SLAB_HWCACHE_ALIGN,
100 NULL);
101 if (!trans->dev_cmd_pool)
102 goto free;
103
104 return trans;
105 free:
106 kfree(trans);
107 return NULL;
108}
109
/* Free a transport allocated with iwl_trans_alloc(). */
void iwl_trans_free(struct iwl_trans *trans)
{
	kmem_cache_destroy(trans->dev_cmd_pool);
	kfree(trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
new file mode 100644
index 000000000000..6f76525088f0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -0,0 +1,1125 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#ifndef __iwl_trans_h__
66#define __iwl_trans_h__
67
68#include <linux/ieee80211.h>
69#include <linux/mm.h> /* for page_address */
70#include <linux/lockdep.h>
71
72#include "iwl-debug.h"
73#include "iwl-config.h"
74#include "iwl-fw.h"
75#include "iwl-op-mode.h"
76
77/**
78 * DOC: Transport layer - what is it ?
79 *
80 * The transport layer is the layer that deals with the HW directly. It provides
81 * an abstraction of the underlying HW to the upper layer. The transport layer
82 * doesn't provide any policy, algorithm or anything of this kind, but only
83 * mechanisms to make the HW do something. It is not completely stateless but
84 * close to it.
85 * We will have an implementation for each different supported bus.
86 */
87
88/**
89 * DOC: Life cycle of the transport layer
90 *
91 * The transport layer has a very precise life cycle.
92 *
93 * 1) A helper function is called during the module initialization and
94 * registers the bus driver's ops with the transport's alloc function.
95 * 2) Bus's probe calls to the transport layer's allocation functions.
96 * Of course this function is bus specific.
97 * 3) This allocation functions will spawn the upper layer which will
98 * register mac80211.
99 *
100 * 4) At some point (i.e. mac80211's start call), the op_mode will call
101 * the following sequence:
102 * start_hw
103 * start_fw
104 *
105 * 5) Then when finished (or reset):
106 * stop_device
107 *
108 * 6) Eventually, the free function will be called.
109 */
110
111/**
112 * DOC: Host command section
113 *
114 * A host command is a command issued by the upper layer to the fw. There are
115 * several versions of fw that have several APIs. The transport layer is
116 * completely agnostic to these differences.
117 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
118 */
/*
 * Sequence-field helpers.  The driver encodes the TFD index (bits 0:7)
 * and the TX queue id (bits 8:12) into the 16-bit sequence field; bit 15
 * marks uCode-originated frames (see struct iwl_cmd_header).
 */
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s)	((s) & 0xff)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_RX_FRAME	cpu_to_le16(0x8000)
124
125/*
126 * those functions retrieve specific information from
127 * the id field in the iwl_host_cmd struct which contains
128 * the command id, the group id and the version of the command
129 * and vice versa
130*/
131static inline u8 iwl_cmd_opcode(u32 cmdid)
132{
133 return cmdid & 0xFF;
134}
135
136static inline u8 iwl_cmd_groupid(u32 cmdid)
137{
138 return ((cmdid & 0xFF00) >> 8);
139}
140
141static inline u8 iwl_cmd_version(u32 cmdid)
142{
143 return ((cmdid & 0xFF0000) >> 16);
144}
145
146static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
147{
148 return opcode + (groupid << 8) + (version << 16);
149}
150
/* make u16 wide id out of u8 group and opcode
 * (arguments fully parenthesized: unparenthesized expansion mis-binds
 * for operator arguments, e.g. a ternary opcode expression)
 */
#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

/* due to the conversion, this group is special; new groups
 * should be defined in the appropriate fw-api header files
 */
#define IWL_ALWAYS_LONG_GROUP	1
158
/**
 * struct iwl_cmd_header - legacy (narrow) command/response header
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 */
struct iwl_cmd_header {
	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
	u8 group_id;	/* command group (see iwl_cmd_groupid()) */
	/*
	 * The driver sets up the sequence number to values of its choosing.
	 * uCode does not use this value, but passes it back to the driver
	 * when sending the response to each driver-originated command, so
	 * the driver can match the response to the command. Since the values
	 * don't get used by uCode, the driver may set up an arbitrary format.
	 *
	 * There is one exception:  uCode sets bit 15 when it originates
	 * the response/notification, i.e. when the response/notification
	 * is not a direct response to a command sent by the driver.  For
	 * example, uCode issues REPLY_RX when it sends a received frame
	 * to the driver; it is not a direct response to any driver command.
	 *
	 * The Linux driver uses the following format:
	 *
	 *  0:7		tfd index - position within TX queue
	 *  8:12	TX queue id
	 *  13:14	reserved
	 *  15		unsolicited RX or uCode-originated notification
	 */
	__le16 sequence;
} __packed;
190
/**
 * struct iwl_cmd_header_wide
 *
 * This header format appears in the beginning of each command sent from the
 * driver, and each response/notification received from uCode.
 * this is the wide version that contains more information about the command
 * like length, version and command type
 */
struct iwl_cmd_header_wide {
	u8 cmd;			/* command id */
	u8 group_id;		/* command group */
	__le16 sequence;	/* same encoding as iwl_cmd_header.sequence */
	/* command length — NOTE(review): whether it includes this header
	 * is not visible here; confirm against the firmware API docs */
	__le16 length;
	u8 reserved;
	u8 version;		/* command version */
} __packed;
207
#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13 */
/* presumably a sentinel written into RBs to detect incomplete DMA —
 * TODO(review) confirm */
#define FH_RSCSR_FRAME_INVALID	0x55550000
#define FH_RSCSR_FRAME_ALIGN	0x40	/* RX frame alignment granularity */

/* An RX packet as delivered by the device: size/flags word, then the
 * legacy command header, then the payload.
 */
struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;	/* command/notification header */
	u8 data[];	/* payload; iwl_rx_packet_payload_len() bytes */
} __packed;
227
228static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
229{
230 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
231}
232
233static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
234{
235 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
236}
237
/**
 * enum CMD_MODE - how to send the host commands ?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even when RFkill is asserted.
 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 *	command queue, but after other high priority commands. Valid only
 *	with CMD_ASYNC.
 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 *	(i.e. mark it as non-idle).
 * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
 *	check that we leave enough room for the TBs bitmap which needs 20 bits.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_HIGH_PRIO		= BIT(3),
	CMD_SEND_IN_IDLE	= BIT(4),
	CMD_MAKE_TRANS_IDLE	= BIT(5),
	CMD_WAKE_UP_TRANS	= BIT(6),

	/* not a flag: bit index where the TB bitmap starts */
	CMD_TB_BITMAP_POS	= 11,
};
265
#define DEF_CMD_PAYLOAD_SIZE 320	/* payload bytes with a narrow header */

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			/* sized so both union members have the same total
			 * size: the wide header's extra bytes come out of
			 * the payload */
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
291
/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/**
 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};
317
/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	/* underscore-prefixed fields are transport-internal; freed via
	 * iwl_free_resp() */
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};
342
/* Free the response pages retained when %CMD_WANT_SKB was set. */
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
347
/* An RX buffer handed to the op_mode, backed by a page. */
struct iwl_rx_cmd_buffer {
	struct page *_page;	/* backing page */
	int _offset;		/* offset of the packet within the page */
	bool _page_stolen;	/* set by rxb_steal_page() */
	u32 _rx_page_order;	/* allocation order of _page */
	/* presumably for skb truesize accounting — TODO(review) confirm */
	unsigned int truesize;
};
355
356static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
357{
358 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
359}
360
/* Offset of the packet within its backing page. */
static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}
365
366static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
367{
368 r->_page_stolen = true;
369 get_page(r->_page);
370 return r->_page;
371}
372
/* Drop the RX buffer's reference on its backing page. */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
377
#define MAX_NO_RECLAIM_CMDS	6

/*
 * Mask of bits lo..hi inclusive, e.g. IWL_MASK(0, 7) == 0xff.
 * NOTE(review): hi == 31 would left-shift into the sign bit of int
 * (undefined behavior) — verify no caller uses that.
 */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TID_COUNT	8
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
390
/**
 * enum iwl_d3_status - WoWLAN image/device status
 * (NOTE(review): the kernel-doc previously named this "iwl_wowlan_status",
 * which does not match the enum tag)
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};
400
/**
 * enum iwl_trans_status: transport status flags
 * (values are bit numbers — presumably used with atomic bitops on a
 * status bitmap in struct iwl_trans; confirm against that struct)
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};
425
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue
 *	(units not visible here — presumably jiffies; confirm at the caller).
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: firmware supports wide host command header
 * @command_names: array of command names, must be 256 entries
 *	(one for each command); for debugging only
 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
 *	we get the ALIVE from the uCode
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	bool rx_buf_size_8k;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	const char *const *command_names;

	u32 sdio_adma_addr;
};
467
/**
 * struct iwl_trans_dump_data - debug dump payload
 * @len: length of @data in bytes
 * @data: the dump itself
 */
struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

/**
 * struct iwl_trans_txq_scd_cfg - scheduler configuration for a TX queue
 * @fifo: FIFO the queue is mapped to
 * @sta_id: station id (signed — a negative value presumably means
 *	"no station"; confirm against txq_enable implementations)
 * @tid: traffic identifier
 * @aggregate: whether the queue carries an aggregation session
 * @frame_limit: scheduler frame limit for this queue
 */
struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	s8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};
482
483/**
484 * struct iwl_trans_ops - transport specific operations
485 *
486 * All the handlers MUST be implemented
487 *
488 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
489 * out of a low power state. From that point on, the HW can send
490 * interrupts. May sleep.
491 * @op_mode_leave: Turn off the HW RF kill indication if on
492 * May sleep
493 * @start_fw: allocates and inits all the resources for the transport
494 * layer. Also kick a fw image.
495 * May sleep
496 * @fw_alive: called when the fw sends alive notification. If the fw provides
497 * the SCD base address in SRAM, then provide it here, or 0 otherwise.
498 * May sleep
499 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
500 * the HW. If low_power is true, the NIC will be put in low power state.
501 * From that point on, the HW will be stopped but will still issue an
502 * interrupt if the HW RF kill switch is triggered.
503 * This callback must do the right thing and not crash even if %start_hw()
504 * was called but not &start_fw(). May sleep.
505 * @d3_suspend: put the device into the correct mode for WoWLAN during
506 * suspend. This is optional, if not implemented WoWLAN will not be
507 * supported. This callback may sleep.
508 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
509 * talk to the WoWLAN image to get its status. This is optional, if not
510 * implemented WoWLAN will not be supported. This callback may sleep.
511 * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
512 * If RFkill is asserted in the middle of a SYNC host command, it must
513 * return -ERFKILL straight away.
514 * May sleep only if CMD_ASYNC is not set
515 * @tx: send an skb
516 * Must be atomic
517 * @reclaim: free packet until ssn. Returns a list of freed packets.
518 * Must be atomic
519 * @txq_enable: setup a queue. To setup an AC queue, use the
520 * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
521 * this one. The op_mode must not configure the HCMD queue. The scheduler
522 * configuration may be %NULL, in which case the hardware will not be
523 * configured. May sleep.
524 * @txq_disable: de-configure a Tx queue to send AMPDUs
525 * Must be atomic
526 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
527 * @freeze_txq_timer: prevents the timer of the queue from firing until the
528 * queue is set to awake. Must be atomic.
529 * @dbgfs_register: add the dbgfs files under this directory. Files will be
530 * automatically deleted.
531 * @write8: write a u8 to a register at offset ofs from the BAR
532 * @write32: write a u32 to a register at offset ofs from the BAR
533 * @read32: read a u32 register at offset ofs from the BAR
534 * @read_prph: read a DWORD from a periphery register
535 * @write_prph: write a DWORD to a periphery register
536 * @read_mem: read device's SRAM in DWORD
537 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
538 * will be zeroed.
539 * @configure: configure parameters required by the transport layer from
540 * the op_mode. May be called several times before start_fw, can't be
541 * called after that.
542 * @set_pmi: set the power pmi state
543 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
544 * Sleeping is not allowed between grab_nic_access and
545 * release_nic_access.
546 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
547 * must be the same one that was sent before to the grab_nic_access.
548 * @set_bits_mask - set SRAM register according to value and mask.
549 * @ref: grab a reference to the transport/FW layers, disallowing
550 * certain low power states
551 * @unref: release a reference previously taken with @ref. Note that
552 * initially the reference count is 1, making an initial @unref
553 * necessary to allow low power states.
554 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
555 * TX'ed commands and similar. The buffer will be vfree'd by the caller.
556 * Note that the transport must fill in the proper file headers.
557 */
struct iwl_trans_ops {

	/* device bring-up / tear-down */
	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	int (*update_sf)(struct iwl_trans *trans,
			 struct iwl_sf_region *st_fwrd_space);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans, bool low_power);

	/* platform suspend/resume (D3) */
	void (*d3_suspend)(struct iwl_trans *trans, bool test);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	/* data path: frame TX and reclaim of completed frames */
	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);

	/* raw register / device-memory access */
	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
				unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	void (*ref)(struct iwl_trans *trans);
	void (*unref)(struct iwl_trans *trans);
	int (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 struct iwl_fw_dbg_trigger_tlv
						 *trigger);
};
618
/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};

/**
 * enum iwl_d0i3_mode - d0i3 mode
 *
 * @IWL_D0I3_MODE_OFF: d0i3 is disabled
 * @IWL_D0I3_MODE_ON_IDLE: enter d0i3 when device is idle
 *	(e.g. no active references)
 * @IWL_D0I3_MODE_ON_SUSPEND: enter d0i3 only on suspend
 *	(in case of 'any' trigger)
 */
enum iwl_d0i3_mode {
	IWL_D0I3_MODE_OFF = 0,
	IWL_D0I3_MODE_ON_IDLE,
	IWL_D0I3_MODE_ON_SUSPEND,
};
644
/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @cfg: pointer to the configuration
 * @state: transport firmware state (&enum iwl_trans_state); maintained by
 *	the iwl_trans_fw_alive()/stop/leave helpers below
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rev: HW revision value -- presumably read from the device at
 *	allocation time; TODO confirm against the transport implementation
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_headroom: room needed for the transport's private use before the
 *	device_cmd for Tx - for internal use only
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dbgfs_dir: iwlwifi debugfs directory for this device
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 * @paging_req_addr: The location were the FW will upload / download the pages
 *	from. The address is set by the opmode
 * @paging_db: Pointer to the opmode paging data base, the pointer is set by
 *	the opmode.
 * @paging_download_buf: Buffer used for copying all of the pages before
 *	downloading them to the FW. The buffer is allocated in the opmode
 * @d0i3_mode: d0i3 entry policy (&enum iwl_d0i3_mode)
 * @wowlan_d0i3: NOTE(review): presumably set when WoWLAN should use d0i3
 *	rather than D3 -- confirm against the opmode
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg *cfg;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	u8 num_rx_queues;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	size_t dev_cmd_headroom;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	u64 dflt_pwr_limit;

	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u8 dbg_dest_reg_num;

	/*
	 * Paging parameters - All of the parameters should be set by the
	 * opmode when paging is enabled
	 */
	u32 paging_req_addr;
	struct iwl_fw_paging *paging_db;
	void *paging_download_buf;

	enum iwl_d0i3_mode d0i3_mode;

	bool wowlan_d0i3;

	/* pointer to trans specific struct */
	/*Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};
737
/*
 * Configure the transport for use by the op_mode; records the op_mode
 * pointer before invoking the transport's configure hook.
 */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
}

/* Low-level start_hw helper; may sleep */
static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
	might_sleep();

	return trans->ops->start_hw(trans, low_power);
}
752
753static inline int iwl_trans_start_hw(struct iwl_trans *trans)
754{
755 return trans->ops->start_hw(trans, true);
756}
757
/*
 * Detach the op_mode from the transport: notify the transport (the hook
 * is optional), then clear the op_mode pointer and mark the fw as gone.
 */
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

/*
 * Called when the fw sent an alive response; the transport state is
 * flipped to IWL_TRANS_FW_ALIVE before the hook runs.
 */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

/*
 * Load and start the given fw image.  The opmode must have assigned
 * @rx_mpdu_cmd first (warned on otherwise); any stale FW error flag is
 * cleared before the new image starts.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
790
791static inline int iwl_trans_update_sf(struct iwl_trans *trans,
792 struct iwl_sf_region *st_fwrd_space)
793{
794 might_sleep();
795
796 if (trans->ops->update_sf)
797 return trans->ops->update_sf(trans, st_fwrd_space);
798
799 return 0;
800}
801
/* Stop the device and mark the firmware as gone; may sleep */
static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
					  bool low_power)
{
	might_sleep();

	trans->ops->stop_device(trans, low_power);

	trans->state = IWL_TRANS_NO_FW;
}
811
/* Stop the device with low power allowed */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	_iwl_trans_stop_device(trans, true);
}

/* Prepare the transport for D3 (system suspend); the hook is optional */
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
	might_sleep();
	if (trans->ops->d3_suspend)
		trans->ops->d3_suspend(trans, test);
}
823
824static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
825 enum iwl_d3_status *status,
826 bool test)
827{
828 might_sleep();
829 if (!trans->ops->d3_resume)
830 return 0;
831
832 return trans->ops->d3_resume(trans, status, test);
833}
834
/*
 * Take a reference on the transport/FW layers, disallowing certain low
 * power states (see struct iwl_trans_ops); the hook is optional.
 */
static inline void iwl_trans_ref(struct iwl_trans *trans)
{
	if (trans->ops->ref)
		trans->ops->ref(trans);
}

/* Release a reference taken with iwl_trans_ref(); the hook is optional */
static inline void iwl_trans_unref(struct iwl_trans *trans)
{
	if (trans->ops->unref)
		trans->ops->unref(trans);
}
846
847static inline int iwl_trans_suspend(struct iwl_trans *trans)
848{
849 if (!trans->ops->suspend)
850 return 0;
851
852 return trans->ops->suspend(trans);
853}
854
/* Resume the transport; the hook is optional */
static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}
860
861static inline struct iwl_trans_dump_data *
862iwl_trans_dump_data(struct iwl_trans *trans,
863 struct iwl_fw_dbg_trigger_tlv *trigger)
864{
865 if (!trans->ops->dump_data)
866 return NULL;
867 return trans->ops->dump_data(trans, trigger);
868}
869
/*
 * Send a host command to the fw.
 *
 * Refused with -ERFKILL while RF-kill is asserted unless the command is
 * flagged CMD_SEND_IN_RFKILL, with -EIO after a FW error or before the
 * fw is alive.  Synchronous (non-CMD_ASYNC) commands are bracketed by a
 * lockdep read acquisition so their lock dependencies are tracked.
 */
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
				     struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL, &trans->status)))
		return -ERFKILL;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	ret = trans->ops->send_cmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	return ret;
}
897
898static inline struct iwl_device_cmd *
899iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
900{
901 u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
902
903 if (unlikely(dev_cmd_ptr == NULL))
904 return NULL;
905
906 return (struct iwl_device_cmd *)
907 (dev_cmd_ptr + trans->dev_cmd_headroom);
908}
909
/*
 * Return a command obtained from iwl_trans_alloc_tx_cmd() to the pool;
 * the pointer is rewound past the private headroom before freeing.
 */
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;

	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
}
917
/*
 * Queue an skb for transmission on @queue.  Fails with -EIO after a FW
 * error; calling before the fw is alive only logs an error and still
 * hands the frame to the transport.
 */
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

/* Reclaim completed frames from @queue, collecting their skbs in @skbs;
 * exact @ssn semantics are defined by the transport implementation.
 */
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

/* Disable TX queue @queue; @configure_scd is passed through to the
 * transport -- presumably it controls scheduler reconfiguration; confirm
 * against the transport implementation.
 */
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}
944
945static inline void
946iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
947 const struct iwl_trans_txq_scd_cfg *cfg,
948 unsigned int queue_wdg_timeout)
949{
950 might_sleep();
951
952 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
953 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
954
955 trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
956}
957
/* Enable a TX queue from individual scheduler parameters */
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		/*
		 * NOTE(review): any valid station (sta_id >= 0) marks the
		 * queue aggregated here -- confirm this matches the intended
		 * aggregation semantics.
		 */
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}
973
/*
 * Enable a non-aggregated (AC) TX queue: no station, max TID, default
 * frame limit, starting at sequence 0.
 */
static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
988
/*
 * Freeze or unfreeze the watchdog timers of the queues in the @txqs
 * bitmap; the hook is optional.
 */
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

/* Wait until the TX queues in the @txqs bitmap are empty */
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
						u32 txqs)
{
	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);

	return trans->ops->wait_tx_queue_empty(trans, txqs);
}

/* Register the transport's debugfs entries under @dir */
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
					   struct dentry *dir)
{
	return trans->ops->dbgfs_register(trans, dir);
}
1014
/* Direct device register accessors - thin wrappers around the transport */
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

/* Read a periphery (prph) register */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}
1034
1035static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1036 u32 val)
1037{
1038 return trans->ops->write_prph(trans, ofs, val);
1039}
1040
/* Read @dwords 32-bit words of device memory at @addr into @buf */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

/* Byte-count variant of iwl_trans_read_mem(); bufsize must be a multiple
 * of sizeof(u32), enforced at build time when it is a constant.
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

/* Read a single 32-bit word of device memory; returns the 0xa5a5a5a5
 * poison value (and warns) when the read fails.
 */
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

/* Write @dwords 32-bit words from @buf to device memory at @addr */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

/* Write a single 32-bit word of device memory.
 * NOTE(review): the return type is u32 while iwl_trans_write_mem()
 * returns int -- confirm callers don't rely on negative error codes.
 */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
1075
/* Set the power-management-indication state; the hook is optional */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

/* Read-modify-write a SRAM register: the bits in @mask take @value
 * (see the set_bits_mask ops documentation above).
 */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

/* Keep the NIC awake for register access; pairs with
 * iwl_trans_release_nic_access().  __cond_lock() lets sparse track the
 * conditional "nic_access" acquisition.
 */
#define iwl_trans_grab_nic_access(trans, silent, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, silent, flags)))

/* Let the NIC go back to sleep; @flags must be the ones passed to the
 * matching iwl_trans_grab_nic_access() (see ops documentation).
 */
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
1098
/*
 * Report a firmware error to the op_mode.  STATUS_FW_ERROR latches, so
 * only the first report for a given error reaches the op_mode.
 */
static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}
1108
/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops,
				  size_t dev_cmd_headroom);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
new file mode 100644
index 000000000000..8c2c3d13b092
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -0,0 +1,12 @@
# Build the iwlwifi MVM (firmware-managed) op-mode module
obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o coex.o coex_legacy.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o

ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
new file mode 100644
index 000000000000..a1376539d2dc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -0,0 +1,211 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <net/mac80211.h>
65#include "fw-api.h"
66#include "mvm.h"
67
/*
 * Accumulator passed to iwl_mvm_iface_iterator() while collecting the
 * MAC contexts that share one PHY context (binding).
 */
struct iwl_mvm_iface_iterator_data {
	struct ieee80211_vif *ignore_vif;	/* vif the caller handles itself */
	int idx;				/* number of entries collected */

	struct iwl_mvm_phy_ctxt *phyctxt;	/* the binding's PHY context */

	u16 ids[MAX_MACS_IN_BINDING];		/* collected MAC context ids */
	u16 colors[MAX_MACS_IN_BINDING];	/* matching MAC context colors */
};
77
78static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
79 struct iwl_mvm_iface_iterator_data *data)
80{
81 struct iwl_binding_cmd cmd;
82 struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
83 int i, ret;
84 u32 status;
85
86 memset(&cmd, 0, sizeof(cmd));
87
88 cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
89 phyctxt->color));
90 cmd.action = cpu_to_le32(action);
91 cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
92 phyctxt->color));
93
94 for (i = 0; i < MAX_MACS_IN_BINDING; i++)
95 cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
96 for (i = 0; i < data->idx; i++)
97 cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
98 data->colors[i]));
99
100 status = 0;
101 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
102 sizeof(cmd), &cmd, &status);
103 if (ret) {
104 IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
105 action, ret);
106 return ret;
107 }
108
109 if (status) {
110 IWL_ERR(mvm, "Binding command failed: %u\n", status);
111 ret = -EIO;
112 }
113
114 return ret;
115}
116
117static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
118 struct ieee80211_vif *vif)
119{
120 struct iwl_mvm_iface_iterator_data *data = _data;
121 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
122
123 if (vif == data->ignore_vif)
124 return;
125
126 if (mvmvif->phy_ctxt != data->phyctxt)
127 return;
128
129 if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
130 return;
131
132 data->ids[data->idx] = mvmvif->id;
133 data->colors[data->idx] = mvmvif->color;
134 data->idx++;
135}
136
/*
 * Re-send the binding for @phyctxt with @vif added to or removed from
 * it.  All other active vifs on the same PHY context are collected
 * first; the FW action is ADD/REMOVE when @vif would be the binding's
 * only member, MODIFY otherwise.  Caller must hold mvm->mutex.
 */
static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
				  struct ieee80211_vif *vif,
				  struct iwl_mvm_phy_ctxt *phyctxt,
				  bool add)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_iface_iterator_data data = {
		.ignore_vif = vif,
		.phyctxt = phyctxt,
	};
	u32 action = FW_CTXT_ACTION_MODIFY;

	lockdep_assert_held(&mvm->mutex);

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_iface_iterator,
						   &data);

	/*
	 * If there are no other interfaces yet we
	 * need to create a new binding.
	 */
	if (data.idx == 0) {
		if (add)
			action = FW_CTXT_ACTION_ADD;
		else
			action = FW_CTXT_ACTION_REMOVE;
	}

	if (add) {
		if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
			return -EINVAL;

		/* append the vif being added itself */
		data.ids[data.idx] = mvmvif->id;
		data.colors[data.idx] = mvmvif->color;
		data.idx++;
	}

	return iwl_mvm_binding_cmd(mvm, action, &data);
}
178
/*
 * Add @vif to the binding of its PHY context.  SF is updated (disabled
 * if needed) first -- see the comment below for why a failure there
 * must fail the whole operation.
 */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
		return -EINVAL;

	/*
	 * Update SF - Disable if needed. if this fails, SF might still be on
	 * while many macs are bound, which is forbidden - so fail the binding.
	 */
	if (iwl_mvm_sf_update(mvm, vif, false))
		return -EINVAL;

	return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
}
195
196int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
197{
198 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
199 int ret;
200
201 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
202 return -EINVAL;
203
204 ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
205
206 if (!ret)
207 if (iwl_mvm_sf_update(mvm, vif, true))
208 IWL_ERR(mvm, "Failed to update SF state\n");
209
210 return ret;
211}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
new file mode 100644
index 000000000000..e290ac67d975
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -0,0 +1,1005 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/ieee80211.h>
67#include <linux/etherdevice.h>
68#include <net/mac80211.h>
69
70#include "fw-api-coex.h"
71#include "iwl-modparams.h"
72#include "mvm.h"
73#include "iwl-debug.h"
74
75/* 20MHz / 40MHz below / 40Mhz above*/
static const __le64 iwl_ci_mask[][3] = {
	/* dummy entry for channel 0 */
	{cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
	{ /* channel 1 */
		cpu_to_le64(0x0000001FFFULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x00007FFFFFULL),
	},
	{ /* channel 2 */
		cpu_to_le64(0x000000FFFFULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x0003FFFFFFULL),
	},
	{ /* channel 3 */
		cpu_to_le64(0x000003FFFCULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x000FFFFFFCULL),
	},
	{ /* channel 4 */
		cpu_to_le64(0x00001FFFE0ULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x007FFFFFE0ULL),
	},
	{ /* channel 5 */
		cpu_to_le64(0x00007FFF80ULL),
		cpu_to_le64(0x00007FFFFFULL),
		cpu_to_le64(0x01FFFFFF80ULL),
	},
	{ /* channel 6 */
		cpu_to_le64(0x0003FFFC00ULL),
		cpu_to_le64(0x0003FFFFFFULL),
		cpu_to_le64(0x0FFFFFFC00ULL),
	},
	{ /* channel 7 */
		cpu_to_le64(0x000FFFF000ULL),
		cpu_to_le64(0x000FFFFFFCULL),
		cpu_to_le64(0x3FFFFFF000ULL),
	},
	{ /* channel 8 */
		cpu_to_le64(0x007FFF8000ULL),
		cpu_to_le64(0x007FFFFFE0ULL),
		cpu_to_le64(0xFFFFFF8000ULL),
	},
	{ /* channel 9 */
		cpu_to_le64(0x01FFFE0000ULL),
		cpu_to_le64(0x01FFFFFF80ULL),
		cpu_to_le64(0xFFFFFE0000ULL),
	},
	{ /* channel 10 */
		cpu_to_le64(0x0FFFF00000ULL),
		cpu_to_le64(0x0FFFFFFC00ULL),
		cpu_to_le64(0x0ULL),
	},
	{ /* channel 11 */
		cpu_to_le64(0x3FFFC00000ULL),
		cpu_to_le64(0x3FFFFFF000ULL),
		cpu_to_le64(0x0)
	},
	{ /* channel 12 */
		cpu_to_le64(0xFFFE000000ULL),
		cpu_to_le64(0xFFFFFF8000ULL),
		cpu_to_le64(0x0)
	},
	{ /* channel 13 */
		cpu_to_le64(0xFFF8000000ULL),
		cpu_to_le64(0xFFFFFE0000ULL),
		cpu_to_le64(0x0)
	},
	{ /* channel 14 */
		cpu_to_le64(0xFFC0000000ULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x0ULL)
	},
};
150
/* One co-running block LUT (20MHz) per antenna-coupling range; @range is
 * the first antenna-coupling value the LUT applies to (see the range
 * table below).
 */
struct corunning_block_luts {
	u8 range;
	__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
};
155
156/*
157 * Ranges for the antenna coupling calibration / co-running block LUT:
158 * LUT0: [ 0, 12[
159 * LUT1: [12, 20[
160 * LUT2: [20, 21[
161 * LUT3: [21, 23[
162 * LUT4: [23, 27[
163 * LUT5: [27, 30[
164 * LUT6: [30, 32[
165 * LUT7: [32, 33[
166 * LUT8: [33, - [
167 */
168static const struct corunning_block_luts antenna_coupling_ranges[] = {
169 {
170 .range = 0,
171 .lut20 = {
172 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
173 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
174 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
175 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
176 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
177 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
178 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
179 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
180 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
181 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
182 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
183 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
184 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
185 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
186 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
187 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
188 },
189 },
190 {
191 .range = 12,
192 .lut20 = {
193 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
194 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
195 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
196 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
197 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
198 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
199 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
200 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
201 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
202 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
203 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
204 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
205 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
206 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
207 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
208 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
209 },
210 },
211 {
212 .range = 20,
213 .lut20 = {
214 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
215 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
216 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
217 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
218 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
219 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
220 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
221 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
222 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
223 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
224 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
225 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
226 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
227 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
228 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
229 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
230 },
231 },
232 {
233 .range = 21,
234 .lut20 = {
235 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
236 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
237 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
238 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
239 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
240 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
241 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
242 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
243 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
244 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
245 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
246 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
247 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
248 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
249 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
250 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
251 },
252 },
253 {
254 .range = 23,
255 .lut20 = {
256 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
257 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
258 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
259 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
260 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
261 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
262 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
263 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
264 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
265 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
266 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
267 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
268 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
269 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
270 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
271 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
272 },
273 },
274 {
275 .range = 27,
276 .lut20 = {
277 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
278 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
279 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
280 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
281 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
282 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
283 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
284 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
285 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
286 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
287 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
288 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
289 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
290 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
291 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
292 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
293 },
294 },
295 {
296 .range = 30,
297 .lut20 = {
298 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
299 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
300 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
301 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
302 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
303 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
304 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
305 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
306 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
307 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
308 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
309 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
310 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
311 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
312 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
313 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
314 },
315 },
316 {
317 .range = 32,
318 .lut20 = {
319 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
320 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
321 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
322 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
323 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
324 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
325 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
326 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
327 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
328 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
329 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
330 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
331 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
332 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
333 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
334 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
335 },
336 },
337 {
338 .range = 33,
339 .lut20 = {
340 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
341 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
342 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
343 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
344 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
345 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
346 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
347 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
348 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
349 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
350 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
351 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
352 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
353 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
354 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
355 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
356 },
357 },
358};
359
360static enum iwl_bt_coex_lut_type
361iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
362{
363 struct ieee80211_chanctx_conf *chanctx_conf;
364 enum iwl_bt_coex_lut_type ret;
365 u16 phy_ctx_id;
366 u32 primary_ch_phy_id, secondary_ch_phy_id;
367
368 /*
369 * Checking that we hold mvm->mutex is a good idea, but the rate
370 * control can't acquire the mutex since it runs in Tx path.
371 * So this is racy in that case, but in the worst case, the AMPDU
372 * size limit will be wrong for a short time which is not a big
373 * issue.
374 */
375
376 rcu_read_lock();
377
378 chanctx_conf = rcu_dereference(vif->chanctx_conf);
379
380 if (!chanctx_conf ||
381 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
382 rcu_read_unlock();
383 return BT_COEX_INVALID_LUT;
384 }
385
386 ret = BT_COEX_TX_DIS_LUT;
387
388 if (mvm->cfg->bt_shared_single_ant) {
389 rcu_read_unlock();
390 return ret;
391 }
392
393 phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
394 primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
395 secondary_ch_phy_id =
396 le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
397
398 if (primary_ch_phy_id == phy_ctx_id)
399 ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
400 else if (secondary_ch_phy_id == phy_ctx_id)
401 ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
402 /* else - default = TX TX disallowed */
403
404 rcu_read_unlock();
405
406 return ret;
407}
408
409int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
410{
411 struct iwl_bt_coex_cmd bt_cmd = {};
412 u32 mode;
413
414 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
415 return iwl_send_bt_init_conf_old(mvm);
416
417 lockdep_assert_held(&mvm->mutex);
418
419 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
420 switch (mvm->bt_force_ant_mode) {
421 case BT_FORCE_ANT_BT:
422 mode = BT_COEX_BT;
423 break;
424 case BT_FORCE_ANT_WIFI:
425 mode = BT_COEX_WIFI;
426 break;
427 default:
428 WARN_ON(1);
429 mode = 0;
430 }
431
432 bt_cmd.mode = cpu_to_le32(mode);
433 goto send_cmd;
434 }
435
436 mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
437 bt_cmd.mode = cpu_to_le32(mode);
438
439 if (IWL_MVM_BT_COEX_SYNC2SCO)
440 bt_cmd.enabled_modules |=
441 cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
442
443 if (iwl_mvm_bt_is_plcr_supported(mvm))
444 bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
445
446 if (IWL_MVM_BT_COEX_MPLUT) {
447 bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
448 bt_cmd.enabled_modules |=
449 cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
450 }
451
452 bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
453
454send_cmd:
455 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
456 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
457
458 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
459}
460
461static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
462 bool enable)
463{
464 struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
465 struct iwl_mvm_sta *mvmsta;
466 u32 value;
467 int ret;
468
469 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
470 if (!mvmsta)
471 return 0;
472
473 /* nothing to do */
474 if (mvmsta->bt_reduced_txpower == enable)
475 return 0;
476
477 value = mvmsta->sta_id;
478
479 if (enable)
480 value |= BT_REDUCED_TX_POWER_BIT;
481
482 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
483 enable ? "en" : "dis", sta_id);
484
485 cmd.reduced_txp = cpu_to_le32(value);
486 mvmsta->bt_reduced_txpower = enable;
487
488 ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC,
489 sizeof(cmd), &cmd);
490
491 return ret;
492}
493
/* State shared across iwl_mvm_bt_notif_iterator() callbacks */
struct iwl_bt_iterator_data {
	struct iwl_bt_coex_profile_notif *notif;	/* last BT notification from fw */
	struct iwl_mvm *mvm;
	struct ieee80211_chanctx_conf *primary;		/* elected primary channel ctx */
	struct ieee80211_chanctx_conf *secondary;	/* elected secondary channel ctx */
	bool primary_ll;	/* primary was claimed by a low-latency vif */
};
501
/*
 * Arm (enable=true) or disarm (enable=false) the beacon-filtering RSSI
 * thresholds used to drive the reduced-Tx-power decision, and record the
 * RSSI value that led to this decision. Disarming zeroes both thresholds.
 */
static inline
void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       bool enable, int rssi)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->bf_data.last_bt_coex_event = rssi;
	/* thresholds are stored negated (RSSI is negative dBm) */
	mvmvif->bf_data.bt_coex_max_thold =
		enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
	mvmvif->bf_data.bt_coex_min_thold =
		enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
}
515
/*
 * Per-vif worker for iwl_mvm_bt_coex_notif_handle(): derives the SMPS
 * request, the reduced-Tx-power decision and the primary/secondary
 * channel-context election from the last BT notification.
 * must be called under rcu_read_lock
 */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_bt_iterator_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct ieee80211_chanctx_conf *chanctx_conf;
	/* default smps_mode is AUTOMATIC - only used for client modes */
	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
	u32 bt_activity_grading;
	int ave_rssi;

	lockdep_assert_held(&mvm->mutex);

	/* only station vifs and active AP vifs are of interest */
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_AP:
		if (!mvmvif->ap_ibss_active)
			return;
		break;
	default:
		return;
	}

	chanctx_conf = rcu_dereference(vif->chanctx_conf);

	/* If channel context is invalid or not on 2.4GHz .. */
	if ((!chanctx_conf ||
	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
		if (vif->type == NL80211_IFTYPE_STATION) {
			/* ... relax constraints and disable rssi events */
			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
					    smps_mode);
			iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
						    false);
			iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
		}
		return;
	}

	/* the busier BT is, the more we restrict our own chain usage */
	bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
	if (bt_activity_grading >= BT_HIGH_TRAFFIC)
		smps_mode = IEEE80211_SMPS_STATIC;
	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
		smps_mode = IEEE80211_SMPS_DYNAMIC;

	/* relax SMPS constraints for next association */
	if (!vif->bss_conf.assoc)
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	/* RRC active on this PHY context: fw manages the chains itself */
	if (mvmvif->phy_ctxt &&
	    IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
			       mvmvif->phy_ctxt->id))
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	IWL_DEBUG_COEX(data->mvm,
		       "mac %d: bt_activity_grading %d smps_req %d\n",
		       mvmvif->id, bt_activity_grading, smps_mode);

	if (vif->type == NL80211_IFTYPE_STATION)
		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
				    smps_mode);

	/* low latency is always primary */
	if (iwl_mvm_vif_low_latency(mvmvif)) {
		data->primary_ll = true;

		data->secondary = data->primary;
		data->primary = chanctx_conf;
	}

	if (vif->type == NL80211_IFTYPE_AP) {
		/* NOTE(review): ap_ibss_active was already checked in the
		 * switch above, so this re-check looks redundant - confirm */
		if (!mvmvif->ap_ibss_active)
			return;

		if (chanctx_conf == data->primary)
			return;

		if (!data->primary_ll) {
			/*
			 * downgrade the current primary no matter what its
			 * type is.
			 */
			data->secondary = data->primary;
			data->primary = chanctx_conf;
		} else {
			/* there is low latency vif - we will be secondary */
			data->secondary = chanctx_conf;
		}
		return;
	}

	/*
	 * STA / P2P Client, try to be primary if first vif. If we are in low
	 * latency mode, we are already in primary and just don't do much
	 */
	if (!data->primary || data->primary == chanctx_conf)
		data->primary = chanctx_conf;
	else if (!data->secondary)
		/* if secondary is not NULL, it might be a GO */
		data->secondary = chanctx_conf;

	/*
	 * don't reduce the Tx power if one of these is true:
	 * we are in LOOSE
	 * single share antenna product
	 * BT is active
	 * we are associated
	 */
	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
	    mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
	    le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
		iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
		return;
	}

	/* try to get the avg rssi from fw */
	ave_rssi = mvmvif->bf_data.ave_beacon_signal;

	/* if the RSSI isn't valid, fake it is very low */
	if (!ave_rssi)
		ave_rssi = -100;
	if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
	} else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
	}

	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
	iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
}
652
653static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
654{
655 struct iwl_bt_iterator_data data = {
656 .mvm = mvm,
657 .notif = &mvm->last_bt_notif,
658 };
659 struct iwl_bt_coex_ci_cmd cmd = {};
660 u8 ci_bw_idx;
661
662 /* Ignore updates if we are in force mode */
663 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
664 return;
665
666 rcu_read_lock();
667 ieee80211_iterate_active_interfaces_atomic(
668 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
669 iwl_mvm_bt_notif_iterator, &data);
670
671 if (data.primary) {
672 struct ieee80211_chanctx_conf *chan = data.primary;
673 if (WARN_ON(!chan->def.chan)) {
674 rcu_read_unlock();
675 return;
676 }
677
678 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
679 ci_bw_idx = 0;
680 } else {
681 if (chan->def.center_freq1 >
682 chan->def.chan->center_freq)
683 ci_bw_idx = 2;
684 else
685 ci_bw_idx = 1;
686 }
687
688 cmd.bt_primary_ci =
689 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
690 cmd.primary_ch_phy_id =
691 cpu_to_le32(*((u16 *)data.primary->drv_priv));
692 }
693
694 if (data.secondary) {
695 struct ieee80211_chanctx_conf *chan = data.secondary;
696 if (WARN_ON(!data.secondary->def.chan)) {
697 rcu_read_unlock();
698 return;
699 }
700
701 if (chan->def.width < NL80211_CHAN_WIDTH_40) {
702 ci_bw_idx = 0;
703 } else {
704 if (chan->def.center_freq1 >
705 chan->def.chan->center_freq)
706 ci_bw_idx = 2;
707 else
708 ci_bw_idx = 1;
709 }
710
711 cmd.bt_secondary_ci =
712 iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
713 cmd.secondary_ch_phy_id =
714 cpu_to_le32(*((u16 *)data.secondary->drv_priv));
715 }
716
717 rcu_read_unlock();
718
719 /* Don't spam the fw with the same command over and over */
720 if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
721 if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
722 sizeof(cmd), &cmd))
723 IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
724 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
725 }
726}
727
728void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
729 struct iwl_rx_cmd_buffer *rxb)
730{
731 struct iwl_rx_packet *pkt = rxb_addr(rxb);
732 struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
733
734 if (!fw_has_api(&mvm->fw->ucode_capa,
735 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
736 iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
737 return;
738 }
739
740 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
741 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
742 IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
743 le32_to_cpu(notif->primary_ch_lut));
744 IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
745 le32_to_cpu(notif->secondary_ch_lut));
746 IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
747 le32_to_cpu(notif->bt_activity_grading));
748
749 /* remember this notification for future use: rssi fluctuations */
750 memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
751
752 iwl_mvm_bt_coex_notif_handle(mvm);
753}
754
755void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
756 enum ieee80211_rssi_event_data rssi_event)
757{
758 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
759 int ret;
760
761 if (!fw_has_api(&mvm->fw->ucode_capa,
762 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
763 iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
764 return;
765 }
766
767 lockdep_assert_held(&mvm->mutex);
768
769 /* Ignore updates if we are in force mode */
770 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
771 return;
772
773 /*
774 * Rssi update while not associated - can happen since the statistics
775 * are handled asynchronously
776 */
777 if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
778 return;
779
780 /* No BT - reports should be disabled */
781 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF)
782 return;
783
784 IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
785 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
786
787 /*
788 * Check if rssi is good enough for reduced Tx power, but not in loose
789 * scheme.
790 */
791 if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
792 iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
793 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
794 false);
795 else
796 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
797
798 if (ret)
799 IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
800}
801
802#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
803#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
804
805u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
806 struct ieee80211_sta *sta)
807{
808 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
809 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
810 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
811 enum iwl_bt_coex_lut_type lut_type;
812
813 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
814 return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
815
816 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
817 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
818
819 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
820 BT_HIGH_TRAFFIC)
821 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
822
823 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
824
825 if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
826 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
827
828 /* tight coex, high bt traffic, reduce AGG time limit */
829 return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
830}
831
832bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
833 struct ieee80211_sta *sta)
834{
835 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
836 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
837 struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
838 enum iwl_bt_coex_lut_type lut_type;
839
840 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
841 return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
842
843 if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
844 return true;
845
846 if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
847 BT_HIGH_TRAFFIC)
848 return true;
849
850 /*
851 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
852 * since BT is already killed.
853 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
854 * we Tx.
855 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
856 */
857 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
858 return lut_type != BT_COEX_LOOSE_LUT;
859}
860
861bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
862{
863 /* there is no other antenna, shared antenna is always available */
864 if (mvm->cfg->bt_shared_single_ant)
865 return true;
866
867 if (ant & mvm->cfg->non_shared_ant)
868 return true;
869
870 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
871 return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
872
873 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
874 BT_HIGH_TRAFFIC;
875}
876
877bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
878{
879 /* there is no other antenna, shared antenna is always available */
880 if (mvm->cfg->bt_shared_single_ant)
881 return true;
882
883 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
884 return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
885
886 return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
887}
888
889bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
890 enum ieee80211_band band)
891{
892 u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
893
894 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
895 return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
896
897 if (band != IEEE80211_BAND_2GHZ)
898 return false;
899
900 return bt_activity >= BT_LOW_TRAFFIC;
901}
902
903u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
904 struct ieee80211_tx_info *info, u8 ac)
905{
906 __le16 fc = hdr->frame_control;
907
908 if (info->band != IEEE80211_BAND_2GHZ)
909 return 0;
910
911 if (unlikely(mvm->bt_tx_prio))
912 return mvm->bt_tx_prio - 1;
913
914 /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
915 if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
916 is_multicast_ether_addr(hdr->addr1) ||
917 ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
918 ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
919 return 3;
920
921 switch (ac) {
922 case IEEE80211_AC_BE:
923 return 1;
924 case IEEE80211_AC_VO:
925 return 3;
926 case IEEE80211_AC_VI:
927 return 2;
928 default:
929 break;
930 }
931
932 return 0;
933}
934
935void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
936{
937 if (!fw_has_api(&mvm->fw->ucode_capa,
938 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
939 iwl_mvm_bt_coex_vif_change_old(mvm);
940 return;
941 }
942
943 iwl_mvm_bt_coex_notif_handle(mvm);
944}
945
/*
 * Handle an antenna-coupling (isolation) notification from the fw:
 * pick the co-running LUT matching the reported isolation value and,
 * if the LUT changed, send BT_COEX_UPDATE_CORUN_LUT with the new tables.
 * Must hold mvm->mutex.
 */
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
	struct iwl_bt_coex_corun_lut_update_cmd cmd = {};
	u8 __maybe_unused lower_bound, upper_bound;
	u8 lut;

	/* legacy firmware: route to the old handler */
	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
		iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
		return;
	}

	if (!iwl_mvm_bt_is_plcr_supported(mvm))
		return;

	lockdep_assert_held(&mvm->mutex);

	/* Ignore updates if we are in force mode */
	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
		return;

	/* isolation did not change - nothing to do */
	if (ant_isolation == mvm->last_ant_isol)
		return;

	/* find the first range whose next lower bound exceeds the value */
	for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
		if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
			break;

	lower_bound = antenna_coupling_ranges[lut].range;

	if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
		upper_bound = antenna_coupling_ranges[lut + 1].range;
	else
		upper_bound = antenna_coupling_ranges[lut].range;

	IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
		       ant_isolation, lower_bound, upper_bound, lut);

	mvm->last_ant_isol = ant_isolation;

	/* same LUT as before - don't bother the fw */
	if (mvm->last_corun_lut == lut)
		return;

	mvm->last_corun_lut = lut;

	/* For the moment, use the same LUT for 20MHz and 40MHz */
	memcpy(&cmd.corun_lut20, antenna_coupling_ranges[lut].lut20,
	       sizeof(cmd.corun_lut20));

	memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
	       sizeof(cmd.corun_lut40));

	if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
				 sizeof(cmd), &cmd))
		IWL_ERR(mvm,
			"failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
new file mode 100644
index 000000000000..61c07b05fcaa
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
@@ -0,0 +1,1315 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/ieee80211.h>
67#include <linux/etherdevice.h>
68#include <net/mac80211.h>
69
70#include "fw-api-coex.h"
71#include "iwl-modparams.h"
72#include "mvm.h"
73#include "iwl-debug.h"
74
/*
 * Build one entry of the BT coex priority table: encode an event's
 * priority and its shared-antenna flag into the single byte layout
 * expected by the BT_COEX_PRIO_TABLE firmware command.
 */
#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)	\
	[(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |	\
		   ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))

/*
 * Per-event priority table sent verbatim as the BT_COEX_PRIO_TABLE
 * command payload; indexed by BT_COEX_PRIO_TBL_EVT_* event id.
 */
static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
		       BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
		       BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
		       BT_COEX_PRIO_TBL_PRIO_LOW, 0),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
		       BT_COEX_PRIO_TBL_PRIO_LOW, 1),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
		       BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
		       BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
		       BT_COEX_PRIO_TBL_DISABLED, 0),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
		       BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
		       BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
		       BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
	0, 0, 0, 0, 0, 0,	/* remaining event slots are unused */
};

/* The helper is only needed to build the table above */
#undef EVENT_PRIO_ANT
104
105static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
106{
107 if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
108 return 0;
109
110 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
111 sizeof(struct iwl_bt_coex_prio_tbl_cmd),
112 &iwl_bt_prio_tbl);
113}
114
/*
 * WiFi priority-boost bit patterns used in the BT_CONFIG command.
 * The percentage comments reflect the fraction of set bits in each
 * 32-bit pattern (e.g. 0xf0f0f0f0 has 16 of 32 bits set = 50%).
 */
static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
	cpu_to_le32(0xf0f0f0f0), /* 50% */
	cpu_to_le32(0xc0c0c0c0), /* 25% */
	cpu_to_le32(0xfcfcfcfc), /* 75% */
	cpu_to_le32(0xfefefefe), /* 87.5% */
};
121
/*
 * Decision LUTs for products where BT and WiFi share a single antenna.
 * Note that all BT_COEX_MAX_LUT (three) entries are identical: with a
 * shared antenna the same arbitration pattern applies regardless of
 * the LUT type selected.
 */
static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
	{
		cpu_to_le32(0x40000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x44000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x40000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x44000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xf0005000),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xf0005000),
	},
	{
		cpu_to_le32(0x40000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x44000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x40000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x44000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xf0005000),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xf0005000),
	},
	{
		cpu_to_le32(0x40000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x44000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x40000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x44000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xf0005000),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xf0005000),
	},
};
166
/*
 * Decision LUTs for dual-antenna products, indexed by coex LUT type:
 * Tight, Loose and "Tx Tx disabled" variants (see per-entry comments).
 * Selected in iwl_send_bt_init_conf_old() when the config does not
 * declare bt_shared_single_ant.
 */
static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
	{
		/* Tight */
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xaeaaaaaa),
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xcc00ff28),
		cpu_to_le32(0x0000aaaa),
		cpu_to_le32(0xcc00aaaa),
		cpu_to_le32(0x0000aaaa),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0x00004000),
		cpu_to_le32(0xf0005000),
		cpu_to_le32(0xf0005000),
	},
	{
		/* Loose */
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xcc00ff28),
		cpu_to_le32(0x0000aaaa),
		cpu_to_le32(0xcc00aaaa),
		cpu_to_le32(0x0000aaaa),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0x00000000),
		cpu_to_le32(0xf0005000),
		cpu_to_le32(0xf0005000),
	},
	{
		/* Tx Tx disabled */
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xeeaaaaaa),
		cpu_to_le32(0xaaaaaaaa),
		cpu_to_le32(0xcc00ff28),
		cpu_to_le32(0x0000aaaa),
		cpu_to_le32(0xcc00aaaa),
		cpu_to_le32(0x0000aaaa),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xc0004000),
		cpu_to_le32(0xf0005000),
		cpu_to_le32(0xf0005000),
	},
};
214
/*
 * Channel-inhibition bitmasks sent in the BT_COEX_CI command, indexed
 * by 2.4GHz channel hw_value.  Each entry carries three masks:
 * 20MHz / 40MHz below / 40MHz above (index chosen via ci_bw_idx in
 * iwl_mvm_bt_coex_notif_handle()).  Index 0 is a dummy since channel
 * numbering starts at 1.
 */
static const __le64 iwl_ci_mask[][3] = {
	/* dummy entry for channel 0 */
	{cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
	{
		cpu_to_le64(0x0000001FFFULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x00007FFFFFULL),
	},
	{
		cpu_to_le64(0x000000FFFFULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x0003FFFFFFULL),
	},
	{
		cpu_to_le64(0x000003FFFCULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x000FFFFFFCULL),
	},
	{
		cpu_to_le64(0x00001FFFE0ULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x007FFFFFE0ULL),
	},
	{
		cpu_to_le64(0x00007FFF80ULL),
		cpu_to_le64(0x00007FFFFFULL),
		cpu_to_le64(0x01FFFFFF80ULL),
	},
	{
		cpu_to_le64(0x0003FFFC00ULL),
		cpu_to_le64(0x0003FFFFFFULL),
		cpu_to_le64(0x0FFFFFFC00ULL),
	},
	{
		cpu_to_le64(0x000FFFF000ULL),
		cpu_to_le64(0x000FFFFFFCULL),
		cpu_to_le64(0x3FFFFFF000ULL),
	},
	{
		cpu_to_le64(0x007FFF8000ULL),
		cpu_to_le64(0x007FFFFFE0ULL),
		cpu_to_le64(0xFFFFFF8000ULL),
	},
	{
		cpu_to_le64(0x01FFFE0000ULL),
		cpu_to_le64(0x01FFFFFF80ULL),
		cpu_to_le64(0xFFFFFE0000ULL),
	},
	{
		cpu_to_le64(0x0FFFF00000ULL),
		cpu_to_le64(0x0FFFFFFC00ULL),
		cpu_to_le64(0x0ULL),
	},
	{
		cpu_to_le64(0x3FFFC00000ULL),
		cpu_to_le64(0x3FFFFFF000ULL),
		cpu_to_le64(0x0)
	},
	{
		cpu_to_le64(0xFFFE000000ULL),
		cpu_to_le64(0xFFFFFF8000ULL),
		cpu_to_le64(0x0)
	},
	{
		cpu_to_le64(0xFFF8000000ULL),
		cpu_to_le64(0xFFFFFE0000ULL),
		cpu_to_le64(0x0)
	},
	{
		cpu_to_le64(0xFFC0000000ULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x0ULL)
	},
};
290
/* Abstract kill-mask policies, resolved to 32-bit masks below */
enum iwl_bt_kill_msk {
	BT_KILL_MSK_DEFAULT,
	BT_KILL_MSK_NEVER,
	BT_KILL_MSK_ALWAYS,
	BT_KILL_MSK_MAX,
};

/*
 * Concrete 32-bit kill masks written into kill_ack_msk/kill_cts_msk
 * of the BT_CONFIG command, indexed by enum iwl_bt_kill_msk.
 */
static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
	[BT_KILL_MSK_DEFAULT] = 0xfffffc00,
	[BT_KILL_MSK_NEVER] = 0xffffffff,
	[BT_KILL_MSK_ALWAYS] = 0,
};
303
/*
 * CTS kill-mask policy, indexed [bt_activity_grading][lut_type]
 * (see iwl_mvm_bt_udpate_ctrl_kill_msk()).  Values are
 * enum iwl_bt_kill_msk indices into iwl_bt_ctl_kill_msk.
 */
static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
	{
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
	},
	{
		BT_KILL_MSK_NEVER,
		BT_KILL_MSK_NEVER,
		BT_KILL_MSK_NEVER,
	},
	{
		BT_KILL_MSK_NEVER,
		BT_KILL_MSK_NEVER,
		BT_KILL_MSK_NEVER,
	},
	{
		BT_KILL_MSK_DEFAULT,
		BT_KILL_MSK_NEVER,
		BT_KILL_MSK_DEFAULT,
	},
};
326
/*
 * ACK kill-mask policy, indexed [bt_activity_grading][lut_type] —
 * same layout as iwl_bt_cts_kill_msk above.
 */
static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
	{
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
	},
	{
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
	},
	{
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_ALWAYS,
	},
	{
		BT_KILL_MSK_DEFAULT,
		BT_KILL_MSK_ALWAYS,
		BT_KILL_MSK_DEFAULT,
	},
};
349
/*
 * One co-running block LUT together with the lower bound of the
 * antenna-isolation interval it applies to (see the range table below).
 */
struct corunning_block_luts {
	u8 range;				/* interval lower bound */
	__le32 lut20[BT_COEX_CORUN_LUT_SIZE];	/* 20MHz co-running LUT */
};
354
355/*
356 * Ranges for the antenna coupling calibration / co-running block LUT:
357 * LUT0: [ 0, 12[
358 * LUT1: [12, 20[
359 * LUT2: [20, 21[
360 * LUT3: [21, 23[
361 * LUT4: [23, 27[
362 * LUT5: [27, 30[
363 * LUT6: [30, 32[
364 * LUT7: [32, 33[
365 * LUT8: [33, - [
366 */
367static const struct corunning_block_luts antenna_coupling_ranges[] = {
368 {
369 .range = 0,
370 .lut20 = {
371 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
372 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
373 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
374 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
375 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
376 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
377 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
378 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
379 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
380 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
381 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
382 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
383 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
384 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
385 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
386 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
387 },
388 },
389 {
390 .range = 12,
391 .lut20 = {
392 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
393 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
394 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
395 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
396 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
397 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
398 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
399 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
400 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
401 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
402 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
403 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
404 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
405 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
406 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
407 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
408 },
409 },
410 {
411 .range = 20,
412 .lut20 = {
413 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
414 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
415 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
416 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
417 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
418 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
419 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
420 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
421 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
422 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
423 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
424 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
425 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
426 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
427 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
428 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
429 },
430 },
431 {
432 .range = 21,
433 .lut20 = {
434 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
435 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
436 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
437 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
438 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
439 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
440 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
441 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
442 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
443 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
444 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
445 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
446 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
447 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
448 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
449 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
450 },
451 },
452 {
453 .range = 23,
454 .lut20 = {
455 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
456 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
457 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
458 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
459 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
460 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
461 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
462 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
463 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
464 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
465 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
466 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
467 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
468 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
469 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
470 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
471 },
472 },
473 {
474 .range = 27,
475 .lut20 = {
476 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
477 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
478 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
479 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
480 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
481 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
482 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
483 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
484 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
485 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
486 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
487 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
488 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
489 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
490 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
491 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
492 },
493 },
494 {
495 .range = 30,
496 .lut20 = {
497 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
498 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
499 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
500 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
501 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
502 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
503 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
504 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
505 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
506 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
507 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
508 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
509 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
510 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
511 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
512 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
513 },
514 },
515 {
516 .range = 32,
517 .lut20 = {
518 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
519 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
520 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
521 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
522 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
523 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
524 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
525 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
526 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
527 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
528 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
529 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
530 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
531 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
532 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
533 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
534 },
535 },
536 {
537 .range = 33,
538 .lut20 = {
539 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
540 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
541 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
542 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
543 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
544 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
545 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
546 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
547 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
548 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
549 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
550 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
551 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
552 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
553 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
554 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
555 },
556 },
557};
558
559static enum iwl_bt_coex_lut_type
560iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
561{
562 struct ieee80211_chanctx_conf *chanctx_conf;
563 enum iwl_bt_coex_lut_type ret;
564 u16 phy_ctx_id;
565
566 /*
567 * Checking that we hold mvm->mutex is a good idea, but the rate
568 * control can't acquire the mutex since it runs in Tx path.
569 * So this is racy in that case, but in the worst case, the AMPDU
570 * size limit will be wrong for a short time which is not a big
571 * issue.
572 */
573
574 rcu_read_lock();
575
576 chanctx_conf = rcu_dereference(vif->chanctx_conf);
577
578 if (!chanctx_conf ||
579 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
580 rcu_read_unlock();
581 return BT_COEX_INVALID_LUT;
582 }
583
584 ret = BT_COEX_TX_DIS_LUT;
585
586 if (mvm->cfg->bt_shared_single_ant) {
587 rcu_read_unlock();
588 return ret;
589 }
590
591 phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
592
593 if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
594 ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
595 else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
596 ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
597 /* else - default = TX TX disallowed */
598
599 rcu_read_unlock();
600
601 return ret;
602}
603
/*
 * Send the initial BT coex configuration (legacy, pre-BT_COEX_SPLIT
 * firmware API): first the priority table, then a BT_CONFIG command.
 *
 * When the antenna mode is forced from debugfs, only the matching
 * mode flag is sent; otherwise the full configuration (decision LUTs,
 * co-running LUTs, priority boost, thresholds, feature flags) is
 * built.  The cached notification/CI state is reset in both cases so
 * stale data from a previous run is not reused.
 *
 * Returns 0 on success, negative errno on allocation or command
 * failure.  Must be called with mvm->mutex held.
 */
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
{
	struct iwl_bt_coex_cmd_old *bt_cmd;
	struct iwl_host_cmd cmd = {
		.id = BT_CONFIG,
		.len = { sizeof(*bt_cmd), },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	int ret;
	u32 flags;

	ret = iwl_send_bt_prio_tbl(mvm);
	if (ret)
		return ret;

	/* sent NOCOPY: the buffer must stay alive until the command is sent */
	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
	if (!bt_cmd)
		return -ENOMEM;
	cmd.data[0] = bt_cmd;

	lockdep_assert_held(&mvm->mutex);

	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
		/*
		 * Antenna mode forced from debugfs: send only the
		 * corresponding mode flag, skip the full config below.
		 */
		switch (mvm->bt_force_ant_mode) {
		case BT_FORCE_ANT_AUTO:
			flags = BT_COEX_AUTO_OLD;
			break;
		case BT_FORCE_ANT_BT:
			flags = BT_COEX_BT_OLD;
			break;
		case BT_FORCE_ANT_WIFI:
			flags = BT_COEX_WIFI_OLD;
			break;
		default:
			WARN_ON(1);
			flags = 0;
		}

		bt_cmd->flags = cpu_to_le32(flags);
		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
		goto send_cmd;
	}

	bt_cmd->max_kill = 5;
	bt_cmd->bt4_antenna_isolation_thr =
		IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
	bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
	bt_cmd->bt4_tx_rx_max_freq0 = 15;
	bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
	bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;

	/* coex can be disabled entirely via module parameter */
	flags = iwlwifi_mod_params.bt_coex_active ?
			BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
	bt_cmd->flags = cpu_to_le32(flags);

	/* mark every field filled in below as valid for the firmware */
	bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
					    BT_VALID_BT_PRIO_BOOST |
					    BT_VALID_MAX_KILL |
					    BT_VALID_3W_TMRS |
					    BT_VALID_KILL_ACK |
					    BT_VALID_KILL_CTS |
					    BT_VALID_REDUCED_TX_POWER |
					    BT_VALID_LUT |
					    BT_VALID_WIFI_RX_SW_PRIO_BOOST |
					    BT_VALID_WIFI_TX_SW_PRIO_BOOST |
					    BT_VALID_ANT_ISOLATION |
					    BT_VALID_ANT_ISOLATION_THRS |
					    BT_VALID_TXTX_DELTA_FREQ_THRS |
					    BT_VALID_TXRX_MAX_FREQ_0 |
					    BT_VALID_SYNC_TO_SCO |
					    BT_VALID_TTC |
					    BT_VALID_RRC);

	if (IWL_MVM_BT_COEX_SYNC2SCO)
		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);

	if (iwl_mvm_bt_is_plcr_supported(mvm)) {
		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
						     BT_VALID_CORUN_LUT_40);
		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
	}

	if (IWL_MVM_BT_COEX_MPLUT) {
		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
	}

	if (IWL_MVM_BT_COEX_TTC)
		bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);

	if (iwl_mvm_bt_is_rrc_supported(mvm))
		bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);

	if (mvm->cfg->bt_shared_single_ant)
		memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
		       sizeof(iwl_single_shared_ant));
	else
		memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
		       sizeof(iwl_combined_lookup));

	/* Take first Co-running block LUT to get started */
	memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
	       sizeof(bt_cmd->bt4_corun_lut20));
	/* 40MHz co-running LUT is seeded from the 20MHz one */
	memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
	       sizeof(bt_cmd->bt4_corun_lut40));

	memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
	       sizeof(iwl_bt_prio_boost));
	bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
	bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);

send_cmd:
	/* forget state cached from a previous configuration */
	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
	memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));

	ret = iwl_mvm_send_cmd(mvm, &cmd);

	kfree(bt_cmd);
	return ret;
}
725
726static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
727{
728 struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
729 u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
730 u32 ag = le32_to_cpu(notif->bt_activity_grading);
731 struct iwl_bt_coex_cmd_old *bt_cmd;
732 u8 ack_kill_msk, cts_kill_msk;
733 struct iwl_host_cmd cmd = {
734 .id = BT_CONFIG,
735 .data[0] = &bt_cmd,
736 .len = { sizeof(*bt_cmd), },
737 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
738 };
739 int ret = 0;
740
741 lockdep_assert_held(&mvm->mutex);
742
743 ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
744 cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
745
746 if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
747 mvm->bt_cts_kill_msk[0] == cts_kill_msk)
748 return 0;
749
750 mvm->bt_ack_kill_msk[0] = ack_kill_msk;
751 mvm->bt_cts_kill_msk[0] = cts_kill_msk;
752
753 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
754 if (!bt_cmd)
755 return -ENOMEM;
756 cmd.data[0] = bt_cmd;
757 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
758
759 bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
760 bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
761 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
762 BT_VALID_KILL_ACK |
763 BT_VALID_KILL_CTS);
764
765 ret = iwl_mvm_send_cmd(mvm, &cmd);
766
767 kfree(bt_cmd);
768 return ret;
769}
770
771static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
772 bool enable)
773{
774 struct iwl_bt_coex_cmd_old *bt_cmd;
775 /* Send ASYNC since this can be sent from an atomic context */
776 struct iwl_host_cmd cmd = {
777 .id = BT_CONFIG,
778 .len = { sizeof(*bt_cmd), },
779 .dataflags = { IWL_HCMD_DFL_DUP, },
780 .flags = CMD_ASYNC,
781 };
782 struct iwl_mvm_sta *mvmsta;
783 int ret;
784
785 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
786 if (!mvmsta)
787 return 0;
788
789 /* nothing to do */
790 if (mvmsta->bt_reduced_txpower == enable)
791 return 0;
792
793 bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
794 if (!bt_cmd)
795 return -ENOMEM;
796 cmd.data[0] = bt_cmd;
797 bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
798
799 bt_cmd->valid_bit_msk =
800 cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
801 bt_cmd->bt_reduced_tx_power = sta_id;
802
803 if (enable)
804 bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
805
806 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
807 enable ? "en" : "dis", sta_id);
808
809 mvmsta->bt_reduced_txpower = enable;
810
811 ret = iwl_mvm_send_cmd(mvm, &cmd);
812
813 kfree(bt_cmd);
814 return ret;
815}
816
/*
 * Accumulator threaded through iwl_mvm_bt_notif_iterator() while
 * walking the active interfaces.
 */
struct iwl_bt_iterator_data {
	/* last BT profile notification received from the firmware */
	struct iwl_bt_coex_profile_notif_old *notif;
	struct iwl_mvm *mvm;
	/* channel contexts elected primary/secondary for the CI command */
	struct ieee80211_chanctx_conf *primary;
	struct ieee80211_chanctx_conf *secondary;
	/* true once a low-latency vif has claimed the primary slot */
	bool primary_ll;
};
824
825static inline
826void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
827 struct ieee80211_vif *vif,
828 bool enable, int rssi)
829{
830 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
831
832 mvmvif->bf_data.last_bt_coex_event = rssi;
833 mvmvif->bf_data.bt_coex_max_thold =
834 enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
835 mvmvif->bf_data.bt_coex_min_thold =
836 enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
837}
838
/*
 * Per-interface worker for iwl_mvm_bt_coex_notif_handle(): derives the
 * SMPS request for the vif from the BT activity grading, elects the
 * primary/secondary channel contexts for the CI command, and toggles
 * reduced Tx power / RSSI monitoring for station vifs.
 *
 * must be called under rcu_read_lock
 */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_bt_iterator_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct ieee80211_chanctx_conf *chanctx_conf;
	enum ieee80211_smps_mode smps_mode;
	u32 bt_activity_grading;
	int ave_rssi;

	lockdep_assert_held(&mvm->mutex);

	/*
	 * Only station and active-AP interfaces are considered.
	 * NOTE(review): smps_mode is left uninitialized for the AP case;
	 * the STATION-only paths below always set it first, but the
	 * debug print further down reads it for APs too — confirm an AP
	 * always reaches one of the assignments before that print.
	 */
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		/* default smps_mode for BSS / P2P client is AUTOMATIC */
		smps_mode = IEEE80211_SMPS_AUTOMATIC;
		break;
	case NL80211_IFTYPE_AP:
		if (!mvmvif->ap_ibss_active)
			return;
		break;
	default:
		return;
	}

	chanctx_conf = rcu_dereference(vif->chanctx_conf);

	/* If channel context is invalid or not on 2.4GHz .. */
	if ((!chanctx_conf ||
	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
		if (vif->type == NL80211_IFTYPE_STATION) {
			/* ... relax constraints and disable rssi events */
			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
					    smps_mode);
			iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
						    false);
			iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
		}
		return;
	}

	/* tighten SMPS according to how busy BT currently is */
	bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
	if (bt_activity_grading >= BT_HIGH_TRAFFIC)
		smps_mode = IEEE80211_SMPS_STATIC;
	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
		smps_mode = vif->type == NL80211_IFTYPE_AP ?
				IEEE80211_SMPS_OFF :
				IEEE80211_SMPS_DYNAMIC;

	/* relax SMPS contraints for next association */
	if (!vif->bss_conf.assoc)
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	/* RRC enabled on this PHY context: no SMPS restriction needed */
	if (mvmvif->phy_ctxt &&
	    data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	IWL_DEBUG_COEX(data->mvm,
		       "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
		       mvmvif->id, data->notif->bt_status, bt_activity_grading,
		       smps_mode);

	if (vif->type == NL80211_IFTYPE_STATION)
		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
				    smps_mode);

	/* low latency is always primary */
	if (iwl_mvm_vif_low_latency(mvmvif)) {
		data->primary_ll = true;

		data->secondary = data->primary;
		data->primary = chanctx_conf;
	}

	if (vif->type == NL80211_IFTYPE_AP) {
		if (!mvmvif->ap_ibss_active)
			return;

		if (chanctx_conf == data->primary)
			return;

		if (!data->primary_ll) {
			/*
			 * downgrade the current primary no matter what its
			 * type is.
			 */
			data->secondary = data->primary;
			data->primary = chanctx_conf;
		} else {
			/* there is low latency vif - we will be secondary */
			data->secondary = chanctx_conf;
		}
		return;
	}

	/*
	 * STA / P2P Client, try to be primary if first vif. If we are in low
	 * latency mode, we are already in primary and just don't do much
	 */
	if (!data->primary || data->primary == chanctx_conf)
		data->primary = chanctx_conf;
	else if (!data->secondary)
		/* if secondary is not NULL, it might be a GO */
		data->secondary = chanctx_conf;

	/*
	 * don't reduce the Tx power if one of these is true:
	 *  we are in LOOSE
	 *  single share antenna product
	 *  BT is active
	 *  we are associated
	 */
	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
	    mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
	    !data->notif->bt_status) {
		iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
		return;
	}

	/* try to get the avg rssi from fw */
	ave_rssi = mvmvif->bf_data.ave_beacon_signal;

	/* if the RSSI isn't valid, fake it is very low */
	if (!ave_rssi)
		ave_rssi = -100;
	if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
	} else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
	}

	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
	iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
}
978
/*
 * React to a BT profile notification: iterate all active interfaces
 * to elect primary/secondary channel contexts, then send an updated
 * BT_COEX_CI command (only when it changed) and refresh the
 * kill-ACK/CTS masks.  Skipped entirely while the antenna mode is
 * forced from debugfs.
 */
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
{
	struct iwl_bt_iterator_data data = {
		.mvm = mvm,
		.notif = &mvm->last_bt_notif_old,
	};
	struct iwl_bt_coex_ci_cmd_old cmd = {};
	/* index into iwl_ci_mask: 0 = 20MHz, 1 = 40MHz below, 2 = 40MHz above */
	u8 ci_bw_idx;

	/* Ignore updates if we are in force mode */
	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
		return;

	rcu_read_lock();
	ieee80211_iterate_active_interfaces_atomic(
					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
					iwl_mvm_bt_notif_iterator, &data);

	if (data.primary) {
		struct ieee80211_chanctx_conf *chan = data.primary;

		if (WARN_ON(!chan->def.chan)) {
			rcu_read_unlock();
			return;
		}

		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
			ci_bw_idx = 0;
			cmd.co_run_bw_primary = 0;
		} else {
			cmd.co_run_bw_primary = 1;
			/* 40MHz: pick "above" or "below" by center freq */
			if (chan->def.center_freq1 >
			    chan->def.chan->center_freq)
				ci_bw_idx = 2;
			else
				ci_bw_idx = 1;
		}

		cmd.bt_primary_ci =
			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
		cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
	}

	if (data.secondary) {
		struct ieee80211_chanctx_conf *chan = data.secondary;

		if (WARN_ON(!data.secondary->def.chan)) {
			rcu_read_unlock();
			return;
		}

		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
			ci_bw_idx = 0;
			cmd.co_run_bw_secondary = 0;
		} else {
			cmd.co_run_bw_secondary = 1;
			if (chan->def.center_freq1 >
			    chan->def.chan->center_freq)
				ci_bw_idx = 2;
			else
				ci_bw_idx = 1;
		}

		cmd.bt_secondary_ci =
			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
		cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
	}

	rcu_read_unlock();

	/* Don't spam the fw with the same command over and over */
	if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
		if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
					 sizeof(cmd), &cmd))
			IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
		memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
	}

	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
1060
/*
 * Rx handler for the BT-coex profile notification (old firmware API):
 * dump the notification to the coex debug log, cache it in the mvm for
 * later use (RSSI handling), then re-run the coex state machine.
 */
void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
				  struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;

	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
	IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
		       notif->bt_status ? "ON" : "OFF");
	IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
		       le32_to_cpu(notif->primary_ch_lut));
	IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
		       le32_to_cpu(notif->secondary_ch_lut));
	IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
		       le32_to_cpu(notif->bt_activity_grading));
	IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
		       notif->bt_agg_traffic_load);

	/* remember this notification for future use: rssi fluctuations */
	memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));

	iwl_mvm_bt_coex_notif_handle(mvm);
}
1086
1087static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
1088 struct ieee80211_vif *vif)
1089{
1090 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1091 struct iwl_bt_iterator_data *data = _data;
1092 struct iwl_mvm *mvm = data->mvm;
1093
1094 struct ieee80211_sta *sta;
1095 struct iwl_mvm_sta *mvmsta;
1096
1097 struct ieee80211_chanctx_conf *chanctx_conf;
1098
1099 rcu_read_lock();
1100 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1101 /* If channel context is invalid or not on 2.4GHz - don't count it */
1102 if (!chanctx_conf ||
1103 chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
1104 rcu_read_unlock();
1105 return;
1106 }
1107 rcu_read_unlock();
1108
1109 if (vif->type != NL80211_IFTYPE_STATION ||
1110 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
1111 return;
1112
1113 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1114 lockdep_is_held(&mvm->mutex));
1115
1116 /* This can happen if the station has been removed right now */
1117 if (IS_ERR_OR_NULL(sta))
1118 return;
1119
1120 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1121}
1122
/*
 * Handle a beacon-filter RSSI event (old firmware API): decide whether
 * reduced Tx power towards the AP can be enabled/disabled, then refresh
 * the BT control kill mask.  Must be called with mvm->mutex held.
 */
void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       enum ieee80211_rssi_event_data rssi_event)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_bt_iterator_data data = {
		.mvm = mvm,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Ignore updates if we are in force mode */
	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
		return;

	/*
	 * Rssi update while not associated - can happen since the statistics
	 * are handled asynchronously
	 */
	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
		return;

	/* No BT - reports should be disabled */
	if (!mvm->last_bt_notif_old.bt_status)
		return;

	IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
		       rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");

	/*
	 * Check if rssi is good enough for reduced Tx power, but not in loose
	 * scheme.
	 */
	if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
	    iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
		ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
						  false);
	else
		ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);

	if (ret)
		IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");

	/* re-validate the AP station on every 2.4GHz station vif */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bt_rssi_iterator, &data);

	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
1173
/*
 * AMPDU aggregation time limits: default vs. shortened under high BT
 * activity (same unit as IWL_MVM_RS_AGG_TIME_LIMIT, i.e. usec).
 */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000)
#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT	(1200)
1176
1177u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
1178 struct ieee80211_sta *sta)
1179{
1180 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1181 enum iwl_bt_coex_lut_type lut_type;
1182
1183 if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
1184 BT_HIGH_TRAFFIC)
1185 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1186
1187 if (mvm->last_bt_notif_old.ttc_enabled)
1188 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1189
1190 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
1191
1192 if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
1193 return LINK_QUAL_AGG_TIME_LIMIT_DEF;
1194
1195 /* tight coex, high bt traffic, reduce AGG time limit */
1196 return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
1197}
1198
1199bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
1200 struct ieee80211_sta *sta)
1201{
1202 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1203 enum iwl_bt_coex_lut_type lut_type;
1204
1205 if (mvm->last_bt_notif_old.ttc_enabled)
1206 return true;
1207
1208 if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
1209 BT_HIGH_TRAFFIC)
1210 return true;
1211
1212 /*
1213 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
1214 * since BT is already killed.
1215 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
1216 * we Tx.
1217 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
1218 */
1219 lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
1220 return lut_type != BT_COEX_LOOSE_LUT;
1221}
1222
1223bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
1224{
1225 u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
1226 return ag < BT_HIGH_TRAFFIC;
1227}
1228
1229bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
1230 enum ieee80211_band band)
1231{
1232 u32 bt_activity =
1233 le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
1234
1235 if (band != IEEE80211_BAND_2GHZ)
1236 return false;
1237
1238 return bt_activity >= BT_LOW_TRAFFIC;
1239}
1240
/*
 * Called when an interface changes (added/removed/reconfigured): re-run
 * the coex state machine with the cached BT notification.
 */
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
{
	iwl_mvm_bt_coex_notif_handle(mvm);
}
1245
/*
 * Rx handler for the antenna-coupling (isolation) notification (old
 * firmware API): map the reported isolation value to a co-running LUT
 * range and, when the LUT changes, send an updated BT_CONFIG command
 * carrying the new LUT.  Must be called with mvm->mutex held.
 */
void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
				       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
	/* bounds only feed the debug print below */
	u8 __maybe_unused lower_bound, upper_bound;
	u8 lut;

	struct iwl_bt_coex_cmd_old *bt_cmd;
	struct iwl_host_cmd cmd = {
		.id = BT_CONFIG,
		.len = { sizeof(*bt_cmd), },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};

	if (!iwl_mvm_bt_is_plcr_supported(mvm))
		return;

	lockdep_assert_held(&mvm->mutex);

	/* Ignore updates if we are in force mode */
	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
		return;

	if (ant_isolation == mvm->last_ant_isol)
		return;

	/* pick the first range whose upper bound is above the isolation */
	for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
		if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
			break;

	lower_bound = antenna_coupling_ranges[lut].range;

	if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
		upper_bound = antenna_coupling_ranges[lut + 1].range;
	else
		upper_bound = antenna_coupling_ranges[lut].range;

	IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
		       ant_isolation, lower_bound, upper_bound, lut);

	mvm->last_ant_isol = ant_isolation;

	/* nothing to send if the LUT didn't change */
	if (mvm->last_corun_lut == lut)
		return;

	mvm->last_corun_lut = lut;

	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
	if (!bt_cmd)
		return;
	cmd.data[0] = bt_cmd;

	bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
	bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
					     BT_VALID_CORUN_LUT_20 |
					     BT_VALID_CORUN_LUT_40);

	/* For the moment, use the same LUT for 20MHz and 40MHz */
	memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
	       sizeof(bt_cmd->bt4_corun_lut20));

	memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
	       sizeof(bt_cmd->bt4_corun_lut40));

	if (iwl_mvm_send_cmd(mvm, &cmd))
		IWL_ERR(mvm, "failed to send BT_CONFIG command\n");

	kfree(bt_cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
new file mode 100644
index 000000000000..5c21231e195d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -0,0 +1,139 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
#ifndef __MVM_CONSTANTS_H
#define __MVM_CONSTANTS_H

#include <linux/ieee80211.h>

/*
 * Compile-time tunables for the iwlmvm driver.  Values carrying a unit
 * are annotated inline; the rest are counts, percentages or boolean
 * (0/1) feature switches.
 */

/* power save / uAPSD timeouts and thresholds */
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT	(100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT	(100 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT	(2 * 1024) /* defined in TU */
#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT	(40 * 1024) /* defined in TU */
#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE	0
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_QUEUES		(IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
					 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
					 IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
					 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS	20
#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS	8
#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS	30
#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS	20
#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT	50
#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT	50
#define IWL_MVM_PS_SNOOZE_INTERVAL		25
#define IWL_MVM_PS_SNOOZE_WINDOW		50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW		25

/* scheduler quota */
#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT	64

/* BT coexistence */
#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH	62
#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH	65
#define IWL_MVM_BT_COEX_SYNC2SCO		1
#define IWL_MVM_BT_COEX_CORUNNING		0
#define IWL_MVM_BT_COEX_MPLUT			1
#define IWL_MVM_BT_COEX_RRC			1
#define IWL_MVM_BT_COEX_TTC			1
#define IWL_MVM_BT_COEX_MPLUT_REG0		0x22002200
#define IWL_MVM_BT_COEX_MPLUT_REG1		0x11118451
#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS	30

/* frame filtering */
#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL	0
#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL	0

#define IWL_MVM_QUOTA_THRESHOLD			4

/* rate scaling (rs) */
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK	1

/* time of flight (location) */
#define IWL_MVM_TOF_IS_RESPONDER		0

#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES       3
#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES       3
#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES     2
#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES       2
#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES	1
#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES   16
#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES     3
#define IWL_MVM_RS_SECONDARY_SISO_RETRIES       1
#define IWL_MVM_RS_RATE_MIN_FAILURE_TH		3
#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH		8
#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT	5	/* Seconds */
#define IWL_MVM_RS_IDLE_TIMEOUT			5	/* Seconds */
#define IWL_MVM_RS_MISSED_RATE_MAX		15
#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT		160
#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT		480
#define IWL_MVM_RS_LEGACY_TABLE_COUNT		160
#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT	400
#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT	4500
#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT	1500
#define IWL_MVM_RS_SR_FORCE_DECREASE		15	/* percent */
#define IWL_MVM_RS_SR_NO_DECREASE		85	/* percent */
#define IWL_MVM_RS_AGG_TIME_LIMIT	        4000    /* 4 msecs. valid 100-8000 */
#define IWL_MVM_RS_AGG_DISABLE_START	        3
#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE	75	/* percent */
#define IWL_MVM_RS_TPC_SR_NO_INCREASE		85	/* percent */
#define IWL_MVM_RS_TPC_TX_POWER_STEP		3

#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
new file mode 100644
index 000000000000..85ae902df7c0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -0,0 +1,2104 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/etherdevice.h>
67#include <linux/ip.h>
68#include <linux/fs.h>
69#include <net/cfg80211.h>
70#include <net/ipv6.h>
71#include <net/tcp.h>
72#include <net/addrconf.h>
73#include "iwl-modparams.h"
74#include "fw-api.h"
75#include "mvm.h"
76
/*
 * mac80211 callback: cache the GTK rekey material (KEK, KCK and the
 * replay counter) in the vif's private data so it can be programmed
 * into the firmware when entering WoWLAN.  No-op when SW crypto is
 * forced via module parameter.
 */
void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (iwlwifi_mod_params.sw_crypto)
		return;

	mutex_lock(&mvm->mutex);

	memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
	memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
	/* replay counter arrives big-endian; store little-endian for the fw */
	mvmvif->rekey_data.replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
	mvmvif->rekey_data.valid = true;

	mutex_unlock(&mvm->mutex);
}
97
#if IS_ENABLED(CONFIG_IPV6)
/*
 * mac80211 callback: snapshot up to IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX
 * IPv6 addresses of the interface into the vif, for later use by the
 * firmware's protocol offload (e.g. NS answering) during WoWLAN.
 */
void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct inet6_dev *idev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct inet6_ifaddr *ifa;
	int idx = 0;

	/* the address list is protected by the inet6_dev lock */
	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
		idx++;
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
			break;
	}
	read_unlock_bh(&idev->lock);

	mvmvif->num_target_ipv6_addrs = idx;
}
#endif
119
/*
 * mac80211 callback: remember the default unicast (WEP TX) key index;
 * consumed by iwl_mvm_wowlan_program_keys() when uploading WEP keys.
 */
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, int idx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->tx_key_idx = idx;
}
127
128static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
129{
130 int i;
131
132 for (i = 0; i < IWL_P1K_SIZE; i++)
133 out[i] = cpu_to_le16(p1k[i]);
134}
135
/* Accumulator passed (as _data) to iwl_mvm_wowlan_program_keys() */
struct wowlan_key_data {
	/* RSC/TSC replay counters to upload to the fw */
	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
	/* TKIP phase-1 keys and MIC keys to upload to the fw */
	struct iwl_wowlan_tkip_params_cmd *tkip;
	/* error: any key failed; use_*: which commands must be sent */
	bool error, use_rsc_tsc, use_tkip;
	/* last WEP hw key offset handed out (offset 0 = TX key) */
	int wep_key_idx;
};
142
/*
 * Key iterator run at suspend time: upload WEP keys to the firmware
 * directly, collect TKIP/CCMP replay counters (and TKIP P1K/MIC keys)
 * into *_data for the caller to send with the WoWLAN configuration,
 * and finally install the key in the firmware's station key table.
 * CMAC keys are skipped; any other cipher sets data->error.
 */
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct wowlan_key_data *data = _data;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwl_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWL_P1K_SIZE];
	int ret, i;

	mutex_lock(&mvm->mutex);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
		struct {
			struct iwl_mvm_wep_key_cmd wep_key_cmd;
			struct iwl_mvm_wep_key wep_key;
		} __packed wkc = {
			.wep_key_cmd.mac_id_n_color =
				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
								mvmvif->color)),
			.wep_key_cmd.num_keys = 1,
			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
			.wep_key.key_index = key->keyidx,
			.wep_key.key_size = key->keylen,
		};

		/*
		 * This will fail -- the key functions don't set support
		 * pairwise WEP keys. However, that's better than silently
		 * failing WoWLAN. Or maybe not?
		 */
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			break;

		/* key material starts at byte 3 of the fw's WEP key struct */
		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
		if (key->keyidx == mvmvif->tx_key_idx) {
			/* TX key must be at offset 0 */
			wkc.wep_key.key_offset = 0;
		} else {
			/* others start at 1 */
			data->wep_key_idx++;
			wkc.wep_key.key_offset = data->wep_key_idx;
		}

		ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
		data->error = ret != 0;

		mvm->ptk_ivlen = key->iv_len;
		mvm->ptk_icvlen = key->icv_len;
		mvm->gtk_ivlen = key->iv_len;
		mvm->gtk_icvlen = key->icv_len;

		/* don't upload key again */
		goto out_unlock;
	}
	default:
		data->error = true;
		goto out_unlock;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		/*
		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
		 * but we also shouldn't abort suspend due to that. It does have
		 * support for the IGTK key renewal, but doesn't really use the
		 * IGTK for anything. This means we could spuriously wake up or
		 * be deauthenticated, but that was considered acceptable.
		 */
		goto out_unlock;
	case WLAN_CIPHER_SUITE_TKIP:
		/* pairwise (sta set) vs. group key: different counter slots */
		if (sta) {
			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			ieee80211_get_key_tx_seq(key, &seq);
			tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);

			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWL_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		/* pre-compute P1K for the current and the next iv32 */
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32 + 1, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWL_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			u64 pn64;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			pn64 = atomic64_read(&key->tx_pn);
			aes_tx_sc->pn = cpu_to_le64(pn64);
		} else {
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 for checking the IV in the frames.
		 */
		for (i = 0; i < IWL_NUM_RSC; i++) {
			/* pn aliases seq, which get_key_rx_seq fills below */
			u8 *pn = seq.ccmp.pn;

			ieee80211_get_key_rx_seq(key, i, &seq);
			aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
						   ((u64)pn[4] << 8) |
						   ((u64)pn[3] << 16) |
						   ((u64)pn[2] << 24) |
						   ((u64)pn[1] << 32) |
						   ((u64)pn[0] << 40));
		}
		data->use_rsc_tsc = true;
		break;
	}

	/*
	 * The D3 firmware hardcodes the key offset 0 as the key it uses
	 * to transmit packets to the AP, i.e. the PTK.
	 */
	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		key->hw_key_idx = 0;
		mvm->ptk_ivlen = key->iv_len;
		mvm->ptk_icvlen = key->icv_len;
	} else {
		/*
		 * firmware only supports TSC/RSC for a single key,
		 * so if there are multiple keep overwriting them
		 * with new ones -- this relies on mac80211 doing
		 * list_add_tail().
		 */
		key->hw_key_idx = 1;
		mvm->gtk_ivlen = key->iv_len;
		mvm->gtk_icvlen = key->icv_len;
	}

	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
	data->error = ret != 0;
out_unlock:
	mutex_unlock(&mvm->mutex);
}
332
333static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
334 struct cfg80211_wowlan *wowlan)
335{
336 struct iwl_wowlan_patterns_cmd *pattern_cmd;
337 struct iwl_host_cmd cmd = {
338 .id = WOWLAN_PATTERNS,
339 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
340 };
341 int i, err;
342
343 if (!wowlan->n_patterns)
344 return 0;
345
346 cmd.len[0] = sizeof(*pattern_cmd) +
347 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
348
349 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
350 if (!pattern_cmd)
351 return -ENOMEM;
352
353 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
354
355 for (i = 0; i < wowlan->n_patterns; i++) {
356 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
357
358 memcpy(&pattern_cmd->patterns[i].mask,
359 wowlan->patterns[i].mask, mask_len);
360 memcpy(&pattern_cmd->patterns[i].pattern,
361 wowlan->patterns[i].pattern,
362 wowlan->patterns[i].pattern_len);
363 pattern_cmd->patterns[i].mask_size = mask_len;
364 pattern_cmd->patterns[i].pattern_size =
365 wowlan->patterns[i].pattern_len;
366 }
367
368 cmd.data[0] = pattern_cmd;
369 err = iwl_mvm_send_cmd(mvm, &cmd);
370 kfree(pattern_cmd);
371 return err;
372}
373
/* Roles of the canned frames built for the WoWLAN TCP wakeup feature */
enum iwl_mvm_tcp_packet_type {
	MVM_TCP_TX_SYN,		/* TX: SYN (8 option bytes appended) */
	MVM_TCP_RX_SYNACK,	/* RX: SYN/ACK match pattern */
	MVM_TCP_TX_DATA,	/* TX: PSH/ACK carrying tcp->payload */
	MVM_TCP_RX_ACK,		/* RX: plain ACK match pattern */
	MVM_TCP_RX_WAKE,	/* RX: PSH/ACK carrying tcp->wake_data */
	MVM_TCP_TX_FIN,		/* TX: FIN/ACK to close the connection */
};
382
/*
 * Compute the TCP pseudo-header checksum for a payload of @len bytes
 * between @saddr and @daddr, and byte-swap it from network order into
 * the little-endian form stored in the remote-wake config.
 */
static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
{
	__sum16 check = tcp_v4_check(len, saddr, daddr, 0);
	return cpu_to_le16(be16_to_cpu((__force __be16)check));
}
388
389static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
390 struct cfg80211_wowlan_tcp *tcp,
391 void *_pkt, u8 *mask,
392 __le16 *pseudo_hdr_csum,
393 enum iwl_mvm_tcp_packet_type ptype)
394{
395 struct {
396 struct ethhdr eth;
397 struct iphdr ip;
398 struct tcphdr tcp;
399 u8 data[];
400 } __packed *pkt = _pkt;
401 u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
402 int i;
403
404 pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
405 pkt->ip.version = 4;
406 pkt->ip.ihl = 5;
407 pkt->ip.protocol = IPPROTO_TCP;
408
409 switch (ptype) {
410 case MVM_TCP_TX_SYN:
411 case MVM_TCP_TX_DATA:
412 case MVM_TCP_TX_FIN:
413 memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
414 memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
415 pkt->ip.ttl = 128;
416 pkt->ip.saddr = tcp->src;
417 pkt->ip.daddr = tcp->dst;
418 pkt->tcp.source = cpu_to_be16(tcp->src_port);
419 pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
420 /* overwritten for TX SYN later */
421 pkt->tcp.doff = sizeof(struct tcphdr) / 4;
422 pkt->tcp.window = cpu_to_be16(65000);
423 break;
424 case MVM_TCP_RX_SYNACK:
425 case MVM_TCP_RX_ACK:
426 case MVM_TCP_RX_WAKE:
427 memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
428 memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
429 pkt->ip.saddr = tcp->dst;
430 pkt->ip.daddr = tcp->src;
431 pkt->tcp.source = cpu_to_be16(tcp->dst_port);
432 pkt->tcp.dest = cpu_to_be16(tcp->src_port);
433 break;
434 default:
435 WARN_ON(1);
436 return;
437 }
438
439 switch (ptype) {
440 case MVM_TCP_TX_SYN:
441 /* firmware assumes 8 option bytes - 8 NOPs for now */
442 memset(pkt->data, 0x01, 8);
443 ip_tot_len += 8;
444 pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
445 pkt->tcp.syn = 1;
446 break;
447 case MVM_TCP_TX_DATA:
448 ip_tot_len += tcp->payload_len;
449 memcpy(pkt->data, tcp->payload, tcp->payload_len);
450 pkt->tcp.psh = 1;
451 pkt->tcp.ack = 1;
452 break;
453 case MVM_TCP_TX_FIN:
454 pkt->tcp.fin = 1;
455 pkt->tcp.ack = 1;
456 break;
457 case MVM_TCP_RX_SYNACK:
458 pkt->tcp.syn = 1;
459 pkt->tcp.ack = 1;
460 break;
461 case MVM_TCP_RX_ACK:
462 pkt->tcp.ack = 1;
463 break;
464 case MVM_TCP_RX_WAKE:
465 ip_tot_len += tcp->wake_len;
466 pkt->tcp.psh = 1;
467 pkt->tcp.ack = 1;
468 memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
469 break;
470 }
471
472 switch (ptype) {
473 case MVM_TCP_TX_SYN:
474 case MVM_TCP_TX_DATA:
475 case MVM_TCP_TX_FIN:
476 pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
477 pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
478 break;
479 case MVM_TCP_RX_WAKE:
480 for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
481 u8 tmp = tcp->wake_mask[i];
482 mask[i + 6] |= tmp << 6;
483 if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
484 mask[i + 7] = tmp >> 2;
485 }
486 /* fall through for ethernet/IP/TCP headers mask */
487 case MVM_TCP_RX_SYNACK:
488 case MVM_TCP_RX_ACK:
489 mask[0] = 0xff; /* match ethernet */
490 /*
491 * match ethernet, ip.version, ip.ihl
492 * the ip.ihl half byte is really masked out by firmware
493 */
494 mask[1] = 0x7f;
495 mask[2] = 0x80; /* match ip.protocol */
496 mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
497 mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
498 mask[5] = 0x80; /* match tcp flags */
499 /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
500 break;
501 };
502
503 *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
504 pkt->ip.saddr, pkt->ip.daddr);
505}
506
507static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
508 struct ieee80211_vif *vif,
509 struct cfg80211_wowlan_tcp *tcp)
510{
511 struct iwl_wowlan_remote_wake_config *cfg;
512 struct iwl_host_cmd cmd = {
513 .id = REMOTE_WAKE_CONFIG_CMD,
514 .len = { sizeof(*cfg), },
515 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
516 };
517 int ret;
518
519 if (!tcp)
520 return 0;
521
522 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
523 if (!cfg)
524 return -ENOMEM;
525 cmd.data[0] = cfg;
526
527 cfg->max_syn_retries = 10;
528 cfg->max_data_retries = 10;
529 cfg->tcp_syn_ack_timeout = 1; /* seconds */
530 cfg->tcp_ack_timeout = 1; /* seconds */
531
532 /* SYN (TX) */
533 iwl_mvm_build_tcp_packet(
534 vif, tcp, cfg->syn_tx.data, NULL,
535 &cfg->syn_tx.info.tcp_pseudo_header_checksum,
536 MVM_TCP_TX_SYN);
537 cfg->syn_tx.info.tcp_payload_length = 0;
538
539 /* SYN/ACK (RX) */
540 iwl_mvm_build_tcp_packet(
541 vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
542 &cfg->synack_rx.info.tcp_pseudo_header_checksum,
543 MVM_TCP_RX_SYNACK);
544 cfg->synack_rx.info.tcp_payload_length = 0;
545
546 /* KEEPALIVE/ACK (TX) */
547 iwl_mvm_build_tcp_packet(
548 vif, tcp, cfg->keepalive_tx.data, NULL,
549 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
550 MVM_TCP_TX_DATA);
551 cfg->keepalive_tx.info.tcp_payload_length =
552 cpu_to_le16(tcp->payload_len);
553 cfg->sequence_number_offset = tcp->payload_seq.offset;
554 /* length must be 0..4, the field is little endian */
555 cfg->sequence_number_length = tcp->payload_seq.len;
556 cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
557 cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
558 if (tcp->payload_tok.len) {
559 cfg->token_offset = tcp->payload_tok.offset;
560 cfg->token_length = tcp->payload_tok.len;
561 cfg->num_tokens =
562 cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
563 memcpy(cfg->tokens, tcp->payload_tok.token_stream,
564 tcp->tokens_size);
565 } else {
566 /* set tokens to max value to almost never run out */
567 cfg->num_tokens = cpu_to_le16(65535);
568 }
569
570 /* ACK (RX) */
571 iwl_mvm_build_tcp_packet(
572 vif, tcp, cfg->keepalive_ack_rx.data,
573 cfg->keepalive_ack_rx.rx_mask,
574 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
575 MVM_TCP_RX_ACK);
576 cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
577
578 /* WAKEUP (RX) */
579 iwl_mvm_build_tcp_packet(
580 vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
581 &cfg->wake_rx.info.tcp_pseudo_header_checksum,
582 MVM_TCP_RX_WAKE);
583 cfg->wake_rx.info.tcp_payload_length =
584 cpu_to_le16(tcp->wake_len);
585
586 /* FIN */
587 iwl_mvm_build_tcp_packet(
588 vif, tcp, cfg->fin_tx.data, NULL,
589 &cfg->fin_tx.info.tcp_pseudo_header_checksum,
590 MVM_TCP_TX_FIN);
591 cfg->fin_tx.info.tcp_payload_length = 0;
592
593 ret = iwl_mvm_send_cmd(mvm, &cmd);
594 kfree(cfg);
595
596 return ret;
597}
598
/*
 * Reprogram the freshly-loaded D3 firmware with the current association
 * state: re-add the PHY context, MAC context, binding, AP station and
 * time quota by hand, since the normal restart flow is not used here.
 *
 * Returns 0 on success or a negative error code.
 */
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct ieee80211_sta *ap_sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_chanctx_conf *ctx;
	u8 chains_static, chains_dynamic;
	struct cfg80211_chan_def chandef;
	int ret, i;
	struct iwl_binding_cmd binding_cmd = {};
	struct iwl_time_quota_cmd quota_cmd = {};
	u32 status;

	/* add back the PHY */
	if (WARN_ON(!mvmvif->phy_ctxt))
		return -EINVAL;

	/* snapshot the channel context under RCU; only a copy is needed */
	rcu_read_lock();
	ctx = rcu_dereference(vif->chanctx_conf);
	if (WARN_ON(!ctx)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	chandef = ctx->def;
	chains_static = ctx->rx_chains_static;
	chains_dynamic = ctx->rx_chains_dynamic;
	rcu_read_unlock();

	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
				   chains_static, chains_dynamic);
	if (ret)
		return ret;

	/* add back the MAC */
	mvmvif->uploaded = false;

	/* this path only makes sense for an associated station */
	if (WARN_ON(!vif->bss_conf.assoc))
		return -EINVAL;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		return ret;

	/* add back binding - XXX refactor? */
	binding_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	binding_cmd.phy =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							      mvmvif->color));
	/* only one MAC in this binding; mark the remaining slots invalid */
	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);

	status = 0;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
					  sizeof(binding_cmd), &binding_cmd,
					  &status);
	if (ret) {
		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
		return ret;
	}

	/* command was delivered but the firmware rejected it */
	if (status) {
		IWL_ERR(mvm, "Binding command failed: %u\n", status);
		return -EIO;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
	if (ret)
		return ret;
	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	if (ret)
		return ret;

	/* and some quota */
	quota_cmd.quotas[0].id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
	quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);

	for (i = 1; i < MAX_BINDINGS; i++)
		quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);

	/* quota/LAR failures are logged but don't abort the suspend */
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
				   sizeof(quota_cmd), &quota_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);

	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");

	return 0;
}
697
698static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
699 struct ieee80211_vif *vif)
700{
701 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
702 struct iwl_nonqos_seq_query_cmd query_cmd = {
703 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
704 .mac_id_n_color =
705 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
706 mvmvif->color)),
707 };
708 struct iwl_host_cmd cmd = {
709 .id = NON_QOS_TX_COUNTER_CMD,
710 .flags = CMD_WANT_SKB,
711 };
712 int err;
713 u32 size;
714
715 cmd.data[0] = &query_cmd;
716 cmd.len[0] = sizeof(query_cmd);
717
718 err = iwl_mvm_send_cmd(mvm, &cmd);
719 if (err)
720 return err;
721
722 size = iwl_rx_packet_payload_len(cmd.resp_pkt);
723 if (size < sizeof(__le16)) {
724 err = -EINVAL;
725 } else {
726 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
727 /* firmware returns next, not last-used seqno */
728 err = (u16) (err - 0x10);
729 }
730
731 iwl_free_resp(&cmd);
732 return err;
733}
734
735void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
736{
737 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
738 struct iwl_nonqos_seq_query_cmd query_cmd = {
739 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
740 .mac_id_n_color =
741 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
742 mvmvif->color)),
743 .value = cpu_to_le16(mvmvif->seqno),
744 };
745
746 /* return if called during restart, not resume from D3 */
747 if (!mvmvif->seqno_valid)
748 return;
749
750 mvmvif->seqno_valid = false;
751
752 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
753 sizeof(query_cmd), &query_cmd))
754 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
755}
756
757static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
758{
759 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
760
761 iwl_trans_stop_device(mvm->trans);
762
763 /*
764 * Set the HW restart bit -- this is mostly true as we're
765 * going to load new firmware and reprogram that, though
766 * the reprogramming is going to be manual to avoid adding
767 * all the MACs that aren't support.
768 * We don't have to clear up everything though because the
769 * reprogramming is manual. When we resume, we'll actually
770 * go through a proper restart sequence again to switch
771 * back to the runtime firmware image.
772 */
773 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
774
775 /* We reprogram keys and shouldn't allocate new key indices */
776 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
777
778 mvm->ptk_ivlen = 0;
779 mvm->ptk_icvlen = 0;
780 mvm->ptk_ivlen = 0;
781 mvm->ptk_icvlen = 0;
782
783 return iwl_mvm_load_d3_fw(mvm);
784}
785
786static int
787iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
788 struct cfg80211_wowlan *wowlan,
789 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
790 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
791 struct ieee80211_sta *ap_sta)
792{
793 int ret;
794 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
795
796 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
797
798 wowlan_config_cmd->is_11n_connection =
799 ap_sta->ht_cap.ht_supported;
800
801 /* Query the last used seqno and set it */
802 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
803 if (ret < 0)
804 return ret;
805
806 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
807
808 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
809
810 if (wowlan->disconnect)
811 wowlan_config_cmd->wakeup_filter |=
812 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
813 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
814 if (wowlan->magic_pkt)
815 wowlan_config_cmd->wakeup_filter |=
816 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
817 if (wowlan->gtk_rekey_failure)
818 wowlan_config_cmd->wakeup_filter |=
819 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
820 if (wowlan->eap_identity_req)
821 wowlan_config_cmd->wakeup_filter |=
822 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
823 if (wowlan->four_way_handshake)
824 wowlan_config_cmd->wakeup_filter |=
825 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
826 if (wowlan->n_patterns)
827 wowlan_config_cmd->wakeup_filter |=
828 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
829
830 if (wowlan->rfkill_release)
831 wowlan_config_cmd->wakeup_filter |=
832 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
833
834 if (wowlan->tcp) {
835 /*
836 * Set the "link change" (really "link lost") flag as well
837 * since that implies losing the TCP connection.
838 */
839 wowlan_config_cmd->wakeup_filter |=
840 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
841 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
842 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
843 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
844 }
845
846 return 0;
847}
848
/*
 * Switch to the D3 firmware, reprogram the association state and send
 * the full WoWLAN configuration: keys and RSC/TSC counters, TKIP and
 * KEK/KCK material, wakeup filters, patterns, protocol offloads and the
 * remote-wake TCP configuration.
 *
 * Called with mvm->mutex held; it is temporarily dropped around the key
 * iteration (see comment below).  Returns 0 or a negative error code.
 */
static int
iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
		      struct cfg80211_wowlan *wowlan,
		      struct iwl_wowlan_config_cmd *wowlan_config_cmd,
		      struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
		      struct ieee80211_sta *ap_sta)
{
	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
	struct wowlan_key_data key_data = {
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	int ret;

	ret = iwl_mvm_switch_to_d3(mvm);
	if (ret)
		return ret;

	ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
	if (ret)
		return ret;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	/* with sw_crypto the firmware doesn't handle keys, skip all of it */
	if (!iwlwifi_mod_params.sw_crypto) {
		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&mvm->mutex);
		ieee80211_iter_keys(mvm->hw, vif,
				    iwl_mvm_wowlan_program_keys,
				    &key_data);
		mutex_lock(&mvm->mutex);
		if (key_data.error) {
			ret = -EIO;
			goto out;
		}

		/* send the RX/TX sequence counters gathered above */
		if (key_data.use_rsc_tsc) {
			struct iwl_host_cmd rsc_tsc_cmd = {
				.id = WOWLAN_TSC_RSC_PARAM,
				.data[0] = key_data.rsc_tsc,
				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
				.len[0] = sizeof(*key_data.rsc_tsc),
			};

			ret = iwl_mvm_send_cmd(mvm, &rsc_tsc_cmd);
			if (ret)
				goto out;
		}

		if (key_data.use_tkip) {
			ret = iwl_mvm_send_cmd_pdu(mvm,
						   WOWLAN_TKIP_PARAM,
						   0, sizeof(tkip_cmd),
						   &tkip_cmd);
			if (ret)
				goto out;
		}

		/* rekey material lets the firmware do GTK rekeying in D3 */
		if (mvmvif->rekey_data.valid) {
			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
			memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
			       NL80211_KCK_LEN);
			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
			memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
			       NL80211_KEK_LEN);
			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
			kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;

			ret = iwl_mvm_send_cmd_pdu(mvm,
						   WOWLAN_KEK_KCK_MATERIAL, 0,
						   sizeof(kek_kck_cmd),
						   &kek_kck_cmd);
			if (ret)
				goto out;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(*wowlan_config_cmd),
				   wowlan_config_cmd);
	if (ret)
		goto out;

	ret = iwl_mvm_send_patterns(mvm, wowlan);
	if (ret)
		goto out;

	ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0);
	if (ret)
		goto out;

	ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);

out:
	kfree(key_data.rsc_tsc);
	return ret;
}
954
/*
 * Configure the firmware for net-detect (wake when a configured network
 * is found): switch to the D3 image, send a minimal wakeup filter, start
 * a scheduled scan with the netdetect configuration and save copies of
 * the match sets and channel list for wakeup reporting on resume.
 *
 * Returns 0 on success or a negative error code.
 */
static int
iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
			 struct cfg80211_wowlan *wowlan,
			 struct cfg80211_sched_scan_request *nd_config,
			 struct ieee80211_vif *vif)
{
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
	int ret;

	ret = iwl_mvm_switch_to_d3(mvm);
	if (ret)
		return ret;

	/* rfkill release can be either for wowlan or netdetect */
	if (wowlan->rfkill_release)
		wowlan_config_cmd.wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
				   sizeof(wowlan_config_cmd),
				   &wowlan_config_cmd);
	if (ret)
		return ret;

	ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
				       IWL_MVM_SCAN_NETDETECT);
	if (ret)
		return ret;

	/* leftovers from a previous cycle would indicate a bug */
	if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
		return -EBUSY;

	/* save the sched scan matchsets... */
	if (nd_config->n_match_sets) {
		mvm->nd_match_sets = kmemdup(nd_config->match_sets,
					     sizeof(*nd_config->match_sets) *
					     nd_config->n_match_sets,
					     GFP_KERNEL);
		/* allocation failure only degrades wakeup reporting */
		if (mvm->nd_match_sets)
			mvm->n_nd_match_sets = nd_config->n_match_sets;
	}

	/* ...and the sched scan channels for later reporting */
	mvm->nd_channels = kmemdup(nd_config->channels,
				   sizeof(*nd_config->channels) *
				   nd_config->n_channels,
				   GFP_KERNEL);
	if (mvm->nd_channels)
		mvm->n_nd_channels = nd_config->n_channels;

	return 0;
}
1007
1008static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
1009{
1010 kfree(mvm->nd_match_sets);
1011 mvm->nd_match_sets = NULL;
1012 mvm->n_nd_match_sets = 0;
1013 kfree(mvm->nd_channels);
1014 mvm->nd_channels = NULL;
1015 mvm->n_nd_channels = 0;
1016}
1017
/*
 * Core D3/WoWLAN suspend flow: pick the BSS vif, configure either
 * netdetect (not associated) or full WoWLAN (associated), update power
 * settings, and finally send D3_CONFIG_CMD -- which switches the
 * firmware into D3 -- before asking the transport to suspend.
 *
 * Returns 0 on success, 1 to indicate "can't suspend with wowlan" (no
 * suitable vif or configuration), or a negative error code; on negative
 * errors the hardware is restarted.
 */
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wowlan,
			     bool test)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *vif = NULL;
	struct iwl_mvm_vif *mvmvif = NULL;
	struct ieee80211_sta *ap_sta = NULL;
	struct iwl_d3_manager_config d3_cfg_cmd_data = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct iwl_host_cmd d3_cfg_cmd = {
		.id = D3_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data[0] = &d3_cfg_cmd_data,
		.len[0] = sizeof(d3_cfg_cmd_data),
	};
	int ret;
	int len __maybe_unused;

	if (!wowlan) {
		/*
		 * mac80211 shouldn't get here, but for D3 test
		 * it doesn't warrant a warning
		 */
		WARN_ON(!test);
		return -EINVAL;
	}

	mutex_lock(&mvm->mutex);

	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif)) {
		ret = 1;
		goto out_noreset;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
		/* if we're not associated, this must be netdetect */
		if (!wowlan->nd_config && !mvm->nd_config) {
			ret = 1;
			goto out_noreset;
		}

		ret = iwl_mvm_netdetect_config(
			mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif);
		if (ret)
			goto out;

		mvm->net_detect = true;
	} else {
		struct iwl_wowlan_config_cmd wowlan_config_cmd = {};

		/* the AP station must exist when we're associated */
		ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
			lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(ap_sta)) {
			ret = -EINVAL;
			goto out_noreset;
		}

		ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
						vif, mvmvif, ap_sta);
		if (ret)
			goto out_noreset;
		ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
					    vif, mvmvif, ap_sta);
		if (ret)
			goto out;

		mvm->net_detect = false;
	}

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto out;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* optionally make firmware errors in D3 wake the platform */
	if (mvm->d3_wake_sysassert)
		d3_cfg_cmd_data.wakeup_flags |=
			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif

	/* must be last -- this switches firmware state */
	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
	if (ret)
		goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* the response may carry a pointer used by the d3_test debugfs hook */
	len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
	if (len >= sizeof(u32)) {
		mvm->d3_test_pme_ptr =
			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
	}
#endif
	iwl_free_resp(&d3_cfg_cmd);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	iwl_trans_d3_suspend(mvm->trans, test);
 out:
	if (ret < 0) {
		/* recover by restarting the hardware with the runtime fw */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
		ieee80211_restart_hw(mvm->hw);
		iwl_mvm_free_nd(mvm);
	}
 out_noreset:
	mutex_unlock(&mvm->mutex);

	return ret;
}
1139
1140static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
1141{
1142 struct iwl_notification_wait wait_d3;
1143 static const u16 d3_notif[] = { D3_CONFIG_CMD };
1144 int ret;
1145
1146 iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
1147 d3_notif, ARRAY_SIZE(d3_notif),
1148 NULL, NULL);
1149
1150 ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
1151 if (ret)
1152 goto remove_notif;
1153
1154 ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
1155 WARN_ON_ONCE(ret);
1156 return ret;
1157
1158remove_notif:
1159 iwl_remove_notification(&mvm->notif_wait, &wait_d3);
1160 return ret;
1161}
1162
/*
 * mac80211 suspend handler: either enter D0i3 (when the 'any' wakeup
 * trigger is used and the transport supports D0i3-on-suspend) or run
 * the full WoWLAN/netdetect D3 flow via __iwl_mvm_suspend().
 */
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	/* make sure the d0i3 exit work is not pending */
	flush_work(&mvm->d0i3_exit_work);

	ret = iwl_trans_suspend(mvm->trans);
	if (ret)
		return ret;

	mvm->trans->wowlan_d0i3 = wowlan->any;
	if (mvm->trans->wowlan_d0i3) {
		/* 'any' trigger means d0i3 usage */
		if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
			ret = iwl_mvm_enter_d0i3_sync(mvm);

			if (ret)
				return ret;
		}

		/* defer wakeup handling until the resume flow completes */
		mutex_lock(&mvm->d0i3_suspend_mutex);
		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);

		iwl_trans_d3_suspend(mvm->trans, false);

		return 0;
	}

	return __iwl_mvm_suspend(hw, wowlan, false);
}
1196
/* converted data from the different status responses */
struct iwl_wowlan_status_data {
	u16 pattern_number;		/* index of the matched pattern */
	u16 qos_seq_ctr[8];		/* per-TID QoS sequence counters */
	u32 wakeup_reasons;		/* IWL_WOWLAN_WAKEUP_BY_* bitmap */
	u32 wake_packet_length;		/* original wake packet length */
	u32 wake_packet_bufsize;	/* bytes actually reported by fw */
	const u8 *wake_packet;		/* wake packet data (may be truncated) */
};
1206
1207static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1208 struct ieee80211_vif *vif,
1209 struct iwl_wowlan_status_data *status)
1210{
1211 struct sk_buff *pkt = NULL;
1212 struct cfg80211_wowlan_wakeup wakeup = {
1213 .pattern_idx = -1,
1214 };
1215 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1216 u32 reasons = status->wakeup_reasons;
1217
1218 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1219 wakeup_report = NULL;
1220 goto report;
1221 }
1222
1223 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
1224 wakeup.magic_pkt = true;
1225
1226 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1227 wakeup.pattern_idx =
1228 status->pattern_number;
1229
1230 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1231 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
1232 wakeup.disconnect = true;
1233
1234 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
1235 wakeup.gtk_rekey_failure = true;
1236
1237 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1238 wakeup.rfkill_release = true;
1239
1240 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
1241 wakeup.eap_identity_req = true;
1242
1243 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
1244 wakeup.four_way_handshake = true;
1245
1246 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1247 wakeup.tcp_connlost = true;
1248
1249 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1250 wakeup.tcp_nomoretokens = true;
1251
1252 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1253 wakeup.tcp_match = true;
1254
1255 if (status->wake_packet_bufsize) {
1256 int pktsize = status->wake_packet_bufsize;
1257 int pktlen = status->wake_packet_length;
1258 const u8 *pktdata = status->wake_packet;
1259 struct ieee80211_hdr *hdr = (void *)pktdata;
1260 int truncated = pktlen - pktsize;
1261
1262 /* this would be a firmware bug */
1263 if (WARN_ON_ONCE(truncated < 0))
1264 truncated = 0;
1265
1266 if (ieee80211_is_data(hdr->frame_control)) {
1267 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
1268 int ivlen = 0, icvlen = 4; /* also FCS */
1269
1270 pkt = alloc_skb(pktsize, GFP_KERNEL);
1271 if (!pkt)
1272 goto report;
1273
1274 memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
1275 pktdata += hdrlen;
1276 pktsize -= hdrlen;
1277
1278 if (ieee80211_has_protected(hdr->frame_control)) {
1279 /*
1280 * This is unlocked and using gtk_i(c)vlen,
1281 * but since everything is under RTNL still
1282 * that's not really a problem - changing
1283 * it would be difficult.
1284 */
1285 if (is_multicast_ether_addr(hdr->addr1)) {
1286 ivlen = mvm->gtk_ivlen;
1287 icvlen += mvm->gtk_icvlen;
1288 } else {
1289 ivlen = mvm->ptk_ivlen;
1290 icvlen += mvm->ptk_icvlen;
1291 }
1292 }
1293
1294 /* if truncated, FCS/ICV is (partially) gone */
1295 if (truncated >= icvlen) {
1296 icvlen = 0;
1297 truncated -= icvlen;
1298 } else {
1299 icvlen -= truncated;
1300 truncated = 0;
1301 }
1302
1303 pktsize -= ivlen + icvlen;
1304 pktdata += ivlen;
1305
1306 memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
1307
1308 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
1309 goto report;
1310 wakeup.packet = pkt->data;
1311 wakeup.packet_present_len = pkt->len;
1312 wakeup.packet_len = pkt->len - truncated;
1313 wakeup.packet_80211 = false;
1314 } else {
1315 int fcslen = 4;
1316
1317 if (truncated >= 4) {
1318 truncated -= 4;
1319 fcslen = 0;
1320 } else {
1321 fcslen -= truncated;
1322 truncated = 0;
1323 }
1324 pktsize -= fcslen;
1325 wakeup.packet = status->wake_packet;
1326 wakeup.packet_present_len = pktsize;
1327 wakeup.packet_len = pktlen - truncated;
1328 wakeup.packet_80211 = true;
1329 }
1330 }
1331
1332 report:
1333 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1334 kfree_skb(pkt);
1335}
1336
1337static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
1338 struct ieee80211_key_seq *seq)
1339{
1340 u64 pn;
1341
1342 pn = le64_to_cpu(sc->pn);
1343 seq->ccmp.pn[0] = pn >> 40;
1344 seq->ccmp.pn[1] = pn >> 32;
1345 seq->ccmp.pn[2] = pn >> 24;
1346 seq->ccmp.pn[3] = pn >> 16;
1347 seq->ccmp.pn[4] = pn >> 8;
1348 seq->ccmp.pn[5] = pn;
1349}
1350
1351static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
1352 struct ieee80211_key_seq *seq)
1353{
1354 seq->tkip.iv32 = le32_to_cpu(sc->iv32);
1355 seq->tkip.iv16 = le16_to_cpu(sc->iv16);
1356}
1357
1358static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
1359 struct ieee80211_key_conf *key)
1360{
1361 int tid;
1362
1363 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1364
1365 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1366 struct ieee80211_key_seq seq = {};
1367
1368 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1369 ieee80211_set_key_rx_seq(key, tid, &seq);
1370 }
1371}
1372
1373static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1374 struct ieee80211_key_conf *key)
1375{
1376 int tid;
1377
1378 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1379
1380 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1381 struct ieee80211_key_seq seq = {};
1382
1383 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1384 ieee80211_set_key_rx_seq(key, tid, &seq);
1385 }
1386}
1387
1388static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
1389 struct iwl_wowlan_status *status)
1390{
1391 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1392
1393 switch (key->cipher) {
1394 case WLAN_CIPHER_SUITE_CCMP:
1395 iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
1396 break;
1397 case WLAN_CIPHER_SUITE_TKIP:
1398 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1399 break;
1400 default:
1401 WARN_ON(1);
1402 }
1403}
1404
/* state shared between the two passes of iwl_mvm_d3_update_gtks() */
struct iwl_mvm_d3_gtk_iter_data {
	struct iwl_wowlan_status *status;	/* wakeup status from fw */
	void *last_gtk;		/* GTK recorded during the find phase */
	u32 cipher;		/* cipher of last_gtk */
	bool find_phase, unhandled_cipher;	/* pass selector / abort flag */
	int num_keys;		/* count of supported (non-WEP) keys seen */
};
1412
/*
 * Key-iterator callback run in two passes (selected by data->find_phase):
 *  - find phase: record the last GTK and its cipher, touch nothing;
 *  - update phase: refresh sequence counters for pairwise keys, and
 *    remove or update GTKs depending on whether the fw rekeyed.
 * Any cipher other than WEP/CCMP/TKIP aborts the keep-connection path.
 */
static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *_data)
{
	struct iwl_mvm_d3_gtk_iter_data *data = _data;

	/* an earlier key already forced a disconnect; skip the rest */
	if (data->unhandled_cipher)
		return;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* ignore WEP completely, nothing to do */
		return;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_TKIP:
		/* we support these */
		break;
	default:
		/* everything else (even CMAC for MFP) - disconnect from AP */
		data->unhandled_cipher = true;
		return;
	}

	data->num_keys++;

	/*
	 * pairwise key - update sequence counters only;
	 * note that this assumes no TDLS sessions are active
	 */
	if (sta) {
		struct ieee80211_key_seq seq = {};
		union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;

		if (data->find_phase)
			return;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
			ieee80211_set_key_tx_seq(key, &seq);
			break;
		}

		/* that's it for this key */
		return;
	}

	if (data->find_phase) {
		data->last_gtk = key;
		data->cipher = key->cipher;
		return;
	}

	/* update phase, group key: drop stale GTKs after a rekey, or
	 * refresh the counters of the key that was in use */
	if (data->status->num_of_gtk_rekeys)
		ieee80211_remove_key(key);
	else if (data->last_gtk == key)
		iwl_mvm_set_key_rx_seq(key, data->status);
}
1479
/*
 * Decide whether the connection can be kept after resume and update key
 * state accordingly: refresh TSC/RSC counters, install a rekeyed GTK if
 * the firmware rekeyed, notify mac80211 of the new replay counter, and
 * restore the non-QoS sequence number.
 *
 * Returns true if the connection state is consistent and can be kept,
 * false if the caller should disconnect from the AP.
 */
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status *status)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.status = status,
	};
	u32 disconnection_reasons =
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;

	if (!status || !vif->bss_conf.bssid)
		return false;

	/* the firmware already saw the connection drop */
	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
		return false;

	/* find last GTK that we used initially, if any */
	gtkdata.find_phase = true;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_gtks, &gtkdata);
	/* not trying to keep connections with MFP/unhandled ciphers */
	if (gtkdata.unhandled_cipher)
		return false;
	if (!gtkdata.num_keys)
		goto out;
	if (!gtkdata.last_gtk)
		return false;

	/*
	 * invalidate all other GTKs that might still exist and update
	 * the one that we used
	 */
	gtkdata.find_phase = false;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_gtks, &gtkdata);

	/* the firmware rekeyed in D3: install the new GTK in mac80211 */
	if (status->num_of_gtk_rekeys) {
		struct ieee80211_key_conf *key;
		struct {
			struct ieee80211_key_conf conf;
			u8 key[32];
		} conf = {
			.conf.cipher = gtkdata.cipher,
			.conf.keyidx = status->gtk.key_index,
		};

		switch (gtkdata.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
			memcpy(conf.conf.key, status->gtk.decrypt_key,
			       WLAN_KEY_LEN_CCMP);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
			memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
			/* leave TX MIC key zeroed, we don't use it anyway */
			memcpy(conf.conf.key +
			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
			       status->gtk.tkip_mic_key, 8);
			break;
		}

		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
		if (IS_ERR(key))
			return false;
		iwl_mvm_set_key_rx_seq(key, status);
	}

	if (status->num_of_gtk_rekeys) {
		__be64 replay_ctr =
			cpu_to_be64(le64_to_cpu(status->replay_ctr));
		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
					   (void *)&replay_ctr, GFP_KERNEL);
	}

out:
	mvmvif->seqno_valid = true;
	/* +0x10 because the set API expects next-to-use, not last-used */
	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;

	return true;
}
1564
/*
 * Query the firmware for the WoWLAN wakeup status.  First inspects the
 * error event table (a set "valid" field means the fw asserted; RF-kill
 * asserts are reported as a wakeup), then sends WOWLAN_GET_STATUSES and
 * validates the variable-length response.
 *
 * Returns a kmalloc'ed copy of the status (caller must free), an ERR_PTR
 * on failure, or NULL if the kmemdup failed.
 */
static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	u32 base = mvm->error_event_table;
	struct error_table_start {
		/* cf. struct iwl_error_event_table */
		u32 valid;
		u32 error_id;
	} err_info;
	struct iwl_host_cmd cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_WANT_SKB,
	};
	struct iwl_wowlan_status *status, *fw_status;
	int ret, len, status_size;

	iwl_trans_read_mem_bytes(mvm->trans, base,
				 &err_info, sizeof(err_info));

	if (err_info.valid) {
		IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
			 err_info.valid, err_info.error_id);
		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
			struct cfg80211_wowlan_wakeup wakeup = {
				.rfkill_release = true,
			};
			ieee80211_report_wowlan_wakeup(vif, &wakeup,
						       GFP_KERNEL);
		}
		return ERR_PTR(-EIO);
	}

	/* only for tracing for now */
	ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
	if (ret)
		IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query status (%d)\n", ret);
		return ERR_PTR(ret);
	}

	/* RF-kill already asserted again... */
	if (!cmd.resp_pkt) {
		fw_status = ERR_PTR(-ERFKILL);
		goto out_free_resp;
	}

	status_size = sizeof(*fw_status);

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (len < status_size) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		fw_status = ERR_PTR(-EIO);
		goto out_free_resp;
	}

	/* the fixed part is followed by the (4-byte aligned) wake packet */
	status = (void *)cmd.resp_pkt->data;
	if (len != (status_size +
		    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
		fw_status = ERR_PTR(-EIO);
		goto out_free_resp;
	}

	fw_status = kmemdup(status, len, GFP_KERNEL);

out_free_resp:
	iwl_free_resp(&cmd);
	return fw_status;
}
1637
/*
 * Query the D3 firmware's wakeup status and decide whether the
 * connection can be kept across resume.
 *
 * Releases the MVM mutex on every path (before calling back into
 * mac80211 to avoid locking issues).  Returns true iff the connection
 * state (keys, replay/sequence counters) was successfully restored.
 */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_wowlan_status_data status;
	struct iwl_wowlan_status *fw_status;
	int i;
	bool keep;
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
	if (IS_ERR_OR_NULL(fw_status))
		goto out_unlock;

	/* convert the firmware's little-endian status to CPU byte order */
	status.pattern_number = le16_to_cpu(fw_status->pattern_number);
	for (i = 0; i < 8; i++)
		status.qos_seq_ctr[i] =
			le16_to_cpu(fw_status->qos_seq_ctr[i]);
	status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
	status.wake_packet_length =
		le32_to_cpu(fw_status->wake_packet_length);
	status.wake_packet_bufsize =
		le32_to_cpu(fw_status->wake_packet_bufsize);
	status.wake_packet = fw_status->wake_packet;

	/* still at hard-coded place 0 for D3 image */
	ap_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[0],
			lockdep_is_held(&mvm->mutex));
	if (IS_ERR_OR_NULL(ap_sta))
		goto out_free;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = status.qos_seq_ctr[i];
		/* firmware stores last-used value, we store next value */
		seq += 0x10;
		mvm_ap_sta->tid_data[i].seq_number = seq;
	}

	/* now we have all the data we need, unlock to avoid mac80211 issues */
	mutex_unlock(&mvm->mutex);

	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);

	keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);

	kfree(fw_status);
	return keep;

out_free:
	kfree(fw_status);
out_unlock:
	mutex_unlock(&mvm->mutex);
	return false;
}
1695
/* CPU-order snapshot of the firmware's scan offload profiles query */
struct iwl_mvm_nd_query_results {
	u32 matched_profiles;	/* bitmap over the match-set indices */
	struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
};
1700
/*
 * Ask the firmware which net-detect profiles matched during D3.
 *
 * On success fills @results with the match bitmap and the per-profile
 * match details.  Returns 0 or a negative error (-ERFKILL when the
 * response was dropped because RF-kill was re-asserted).
 */
static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
				struct iwl_mvm_nd_query_results *results)
{
	struct iwl_scan_offload_profiles_query *query;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
		.flags = CMD_WANT_SKB,	/* response parsed below */
	};
	int ret, len;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
		return ret;
	}

	/* RF-kill already asserted again... */
	if (!cmd.resp_pkt) {
		ret = -ERFKILL;
		goto out_free_resp;
	}

	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (len < sizeof(*query)) {
		IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
		ret = -EIO;
		goto out_free_resp;
	}

	query = (void *)cmd.resp_pkt->data;

	/* copy out before the rx packet is freed below */
	results->matched_profiles = le32_to_cpu(query->matched_profiles);
	memcpy(results->matches, query->matches, sizeof(results->matches));

#ifdef CONFIG_IWLWIFI_DEBUGFS
	mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
#endif

out_free_resp:
	iwl_free_resp(&cmd);
	return ret;
}
1744
/*
 * Handle wakeup from a net-detect (scan offload) sleep.
 *
 * Queries the wakeup reasons and, when the device woke because of a
 * scan match, translates the firmware's match bitmap into a
 * cfg80211_wowlan_nd_info report.  Unlocks the MVM mutex before
 * reporting the wakeup to mac80211.
 */
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif)
{
	struct cfg80211_wowlan_nd_info *net_detect = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,	/* no pattern matched */
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	struct iwl_mvm_nd_query_results query;
	struct iwl_wowlan_status *fw_status;
	unsigned long matched_profiles;
	u32 reasons = 0;
	int i, j, n_matches, ret;

	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
	if (!IS_ERR_OR_NULL(fw_status)) {
		reasons = le32_to_cpu(fw_status->wakeup_reasons);
		kfree(fw_status);
	}

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	/* any wireless wakeup reason means it was not a scan match */
	if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
		goto out;

	ret = iwl_mvm_netdetect_query_results(mvm, &query);
	if (ret || !query.matched_profiles) {
		/* NULL report: wakeup happened but nothing to attribute it to */
		wakeup_report = NULL;
		goto out;
	}

	matched_profiles = query.matched_profiles;
	if (mvm->n_nd_match_sets) {
		n_matches = hweight_long(matched_profiles);
	} else {
		IWL_ERR(mvm, "no net detect match information available\n");
		n_matches = 0;
	}

	net_detect = kzalloc(sizeof(*net_detect) +
			     (n_matches * sizeof(net_detect->matches[0])),
			     GFP_KERNEL);
	if (!net_detect || !n_matches)
		goto out_report_nd;

	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
		struct iwl_scan_offload_profile_match *fw_match;
		struct cfg80211_wowlan_nd_match *match;
		int idx, n_channels = 0;

		fw_match = &query.matches[i];

		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
			n_channels += hweight8(fw_match->matching_channels[j]);

		match = kzalloc(sizeof(*match) +
				(n_channels * sizeof(*match->channels)),
				GFP_KERNEL);
		if (!match)
			goto out_report_nd;

		net_detect->matches[net_detect->n_matches++] = match;

		/* We inverted the order of the SSIDs in the scan
		 * request, so invert the index here.
		 */
		idx = mvm->n_nd_match_sets - i - 1;
		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
		       match->ssid.ssid_len);

		/* skip channel details if the firmware reported more
		 * matched channels than we know about
		 */
		if (mvm->n_nd_channels < n_channels)
			continue;

		/* NOTE(review): assumes any set bit j is < n_nd_channels;
		 * confirm the firmware cannot set bits beyond that range
		 */
		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
			if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
				match->channels[match->n_channels++] =
					mvm->nd_channels[j]->center_freq;
	}

out_report_nd:
	wakeup.net_detect = net_detect;
out:
	iwl_mvm_free_nd(mvm);

	mutex_unlock(&mvm->mutex);
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);

	/* cfg80211 copies the report, so we can free our version now */
	if (net_detect) {
		for (i = 0; i < net_detect->n_matches; i++)
			kfree(net_detect->matches[i]);
		kfree(net_detect);
	}
}
1840
/*
 * Snapshot the D3 firmware's data SRAM for debugfs inspection.
 * No-op unless the user enabled it (store_d3_resume_sram) and
 * CONFIG_IWLWIFI_DEBUGFS is set.
 */
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	const struct fw_img *wowlan_img = &mvm->fw->img[IWL_UCODE_WOWLAN];
	u32 size = wowlan_img->sec[IWL_UCODE_SECTION_DATA].len;
	u32 sram_offs = wowlan_img->sec[IWL_UCODE_SECTION_DATA].offset;

	/* snapshotting is opt-in via debugfs */
	if (!mvm->store_d3_resume_sram)
		return;

	/* lazily allocate the snapshot buffer on first use */
	if (!mvm->d3_resume_sram) {
		mvm->d3_resume_sram = kzalloc(size, GFP_KERNEL);
		if (!mvm->d3_resume_sram)
			return;
	}

	iwl_trans_read_mem_bytes(mvm->trans, sram_offs,
				 mvm->d3_resume_sram, size);
#endif
}
1860
1861static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
1862 struct ieee80211_vif *vif)
1863{
1864 /* skip the one we keep connection on */
1865 if (data == vif)
1866 return;
1867
1868 if (vif->type == NL80211_IFTYPE_STATION)
1869 ieee80211_resume_disconnect(vif);
1870}
1871
/*
 * Common resume path for real D3 and the debugfs pseudo-D3 test.
 *
 * Called with the MVM mutex NOT held; takes it and ensures it is
 * released on every path (the wakeup-reason helpers unlock it
 * themselves).  Always returns 1 so mac80211 performs a full
 * restart/reconfig of the device.
 */
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
	struct ieee80211_vif *vif = NULL;
	int ret;
	enum iwl_d3_status d3_status;
	bool keep = false;

	mutex_lock(&mvm->mutex);

	/* get the BSS vif pointer again */
	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif))
		goto err;

	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
	if (ret)
		goto err;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(mvm, "Device was reset during suspend\n");
		goto err;
	}

	/* query SRAM first in case we want event logging */
	iwl_mvm_read_d3_sram(mvm);

	/*
	 * Query the current location and source from the D3 firmware so we
	 * can play it back when we re-initialize the D0 firmware
	 */
	iwl_mvm_update_changed_regdom(mvm);

	if (mvm->net_detect) {
		iwl_mvm_query_netdetect_reasons(mvm, vif);
		/* has unlocked the mutex, so skip that */
		goto out;
	} else {
		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (keep)
			mvm->keep_vif = vif;
#endif
		/* has unlocked the mutex, so skip that */
		goto out_iterate;
	}

err:
	iwl_mvm_free_nd(mvm);
	mutex_unlock(&mvm->mutex);
	/* deliberate fall-through: disconnect everything (keep == false) */

out_iterate:
	if (!test)
		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
			IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);

out:
	/* return 1 to reconfigure the device */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
	set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);

	/* We always return 1, which causes mac80211 to do a reconfig
	 * with IEEE80211_RECONFIG_TYPE_RESTART. This type of
	 * reconfig calls iwl_mvm_restart_complete(), where we unref
	 * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
	 * reference here.
	 */
	iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
	return 1;
}
1942
1943static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
1944{
1945 iwl_trans_resume(mvm->trans);
1946
1947 return __iwl_mvm_resume(mvm, false);
1948}
1949
/*
 * Resume from d0i3 (used when the 'any' wowlan trigger was set).
 * Wakes the transport, runs any deferred d0i3-exit work, and - when
 * d0i3 was entered because of suspend - exits d0i3 again.
 */
static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
{
	bool exit_now;
	enum iwl_d3_status d3_status;

	iwl_trans_d3_resume(mvm->trans, &d3_status, false);

	/*
	 * make sure to clear D0I3_DEFER_WAKEUP before
	 * calling iwl_trans_resume(), which might wait
	 * for d0i3 exit completion.
	 */
	mutex_lock(&mvm->d0i3_suspend_mutex);
	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
					&mvm->d0i3_suspend_flags);
	mutex_unlock(&mvm->d0i3_suspend_mutex);
	if (exit_now) {
		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
		_iwl_mvm_exit_d0i3(mvm);
	}

	iwl_trans_resume(mvm->trans);

	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);

		if (ret)
			return ret;
		/*
		 * d0i3 exit will be deferred until reconfig_complete.
		 * make sure there we are out of d0i3.
		 */
	}
	return 0;
}
1986
1987int iwl_mvm_resume(struct ieee80211_hw *hw)
1988{
1989 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1990
1991 /* 'any' trigger means d0i3 was used */
1992 if (hw->wiphy->wowlan_config->any)
1993 return iwl_mvm_resume_d0i3(mvm);
1994 else
1995 return iwl_mvm_resume_d3(mvm);
1996}
1997
/* mac80211 callback: propagate the wowlan wakeup setting to the bus device */
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	device_set_wakeup_enable(mvm->trans->dev, enabled);
}
2004
2005#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * debugfs d3_test open: enter pseudo-D3.  Queues are stopped and the
 * normal suspend path runs with test=true; the device stays "suspended"
 * while the file is open and the release handler resumes it.
 */
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int err;

	/* only one pseudo-D3 test at a time */
	if (mvm->d3_test_active)
		return -EBUSY;

	file->private_data = inode->i_private;

	ieee80211_stop_queues(mvm->hw);
	synchronize_net();

	/* start pseudo D3 */
	rtnl_lock();
	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	rtnl_unlock();
	if (err > 0)
		err = -EINVAL;	/* a positive return is not an errno here */
	if (err) {
		ieee80211_wake_queues(mvm->hw);
		return err;
	}
	mvm->d3_test_active = true;
	mvm->keep_vif = NULL;
	return 0;
}
2033
2034static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
2035 size_t count, loff_t *ppos)
2036{
2037 struct iwl_mvm *mvm = file->private_data;
2038 u32 pme_asserted;
2039
2040 while (true) {
2041 /* read pme_ptr if available */
2042 if (mvm->d3_test_pme_ptr) {
2043 pme_asserted = iwl_trans_read_mem32(mvm->trans,
2044 mvm->d3_test_pme_ptr);
2045 if (pme_asserted)
2046 break;
2047 }
2048
2049 if (msleep_interruptible(100))
2050 break;
2051 }
2052
2053 return 0;
2054}
2055
2056static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
2057 struct ieee80211_vif *vif)
2058{
2059 /* skip the one we keep connection on */
2060 if (_data == vif)
2061 return;
2062
2063 if (vif->type == NL80211_IFTYPE_STATION)
2064 ieee80211_connection_loss(vif);
2065}
2066
/*
 * debugfs d3_test release: leave pseudo-D3.  Resumes the device,
 * restarts mac80211, waits (up to ~10s) for the HW restart to finish
 * and then drops connections on all station interfaces except the one
 * the wakeup logic decided to keep.
 */
static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int remaining_time = 10;	/* seconds */

	mvm->d3_test_active = false;
	rtnl_lock();
	__iwl_mvm_resume(mvm, true);
	rtnl_unlock();
	iwl_abort_notification_waits(&mvm->notif_wait);
	ieee80211_restart_hw(mvm->hw);

	/* wait for restart and disconnect all interfaces */
	while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
	       remaining_time > 0) {
		remaining_time--;
		msleep(1000);
	}

	if (remaining_time == 0)
		IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	ieee80211_wake_queues(mvm->hw);

	return 0;
}
2097
/* debugfs d3_test: open enters pseudo-D3, read blocks until "wakeup",
 * release resumes and restarts the hardware.
 */
const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
2104#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
new file mode 100644
index 000000000000..7904b41a04c6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -0,0 +1,1483 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include "mvm.h"
66#include "fw-api-tof.h"
67#include "debugfs.h"
68
/*
 * Record a debugfs power-management override for @vif: set the
 * corresponding bit in dbgfs_pm->mask and store @val in the matching
 * field.  The power code later applies the masked-in overrides.
 */
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				enum iwl_dbgfs_pm_mask param, int val)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;

	dbgfs_pm->mask |= param;

	switch (param) {
	case MVM_DEBUGFS_PM_KEEP_ALIVE: {
		/* warn when the keep-alive is shorter than three DTIM periods */
		int dtimper = vif->bss_conf.dtim_period ?: 1;
		int dtimper_msec = dtimper * vif->bss_conf.beacon_int;

		IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
		if (val * MSEC_PER_SEC < 3 * dtimper_msec)
			IWL_WARN(mvm,
				 "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
				 val * MSEC_PER_SEC, 3 * dtimper_msec);
		dbgfs_pm->keep_alive_seconds = val;
		break;
	}
	case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
		IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
				val ? "enabled" : "disabled");
		dbgfs_pm->skip_over_dtim = val;
		break;
	case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
		IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
		dbgfs_pm->skip_dtim_periods = val;
		break;
	case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
		IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
		dbgfs_pm->rx_data_timeout = val;
		break;
	case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
		IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
		dbgfs_pm->tx_data_timeout = val;
		break;
	case MVM_DEBUGFS_PM_LPRX_ENA:
		IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
		dbgfs_pm->lprx_ena = val;
		break;
	case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
		IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
		dbgfs_pm->lprx_rssi_threshold = val;
		break;
	case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
		IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
		dbgfs_pm->snooze_ena = val;
		break;
	case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
		IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
		dbgfs_pm->uapsd_misbehaving = val;
		break;
	case MVM_DEBUGFS_PM_USE_PS_POLL:
		IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val);
		dbgfs_pm->use_ps_poll = val;
		break;
	}
}
130
/*
 * Parse a "name=value" line written to the pm_params debugfs file and
 * apply the override through the power code.  The numeric offsets
 * passed to strncmp()/sscanf() are the lengths of the literal option
 * prefixes.  Returns @count on success or a negative error.
 */
static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	enum iwl_dbgfs_pm_mask param;
	int val, ret;

	if (!strncmp("keep_alive=", buf, 11)) {
		if (sscanf(buf + 11, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_KEEP_ALIVE;
	} else if (!strncmp("skip_over_dtim=", buf, 15)) {
		if (sscanf(buf + 15, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
	} else if (!strncmp("skip_dtim_periods=", buf, 18)) {
		if (sscanf(buf + 18, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
	} else if (!strncmp("rx_data_timeout=", buf, 16)) {
		if (sscanf(buf + 16, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
	} else if (!strncmp("tx_data_timeout=", buf, 16)) {
		if (sscanf(buf + 16, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
	} else if (!strncmp("lprx=", buf, 5)) {
		if (sscanf(buf + 5, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_LPRX_ENA;
	} else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
		if (sscanf(buf + 20, "%d", &val) != 1)
			return -EINVAL;
		/* the only option with an explicit range check */
		if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
		    POWER_LPRX_RSSI_THRESHOLD_MIN)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
	} else if (!strncmp("snooze_enable=", buf, 14)) {
		if (sscanf(buf + 14, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
	} else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
		if (sscanf(buf + 18, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
	} else if (!strncmp("use_ps_poll=", buf, 12)) {
		if (sscanf(buf + 12, "%d", &val) != 1)
			return -EINVAL;
		param = MVM_DEBUGFS_PM_USE_PS_POLL;
	} else {
		return -EINVAL;
	}

	/* record the override, then push an updated power command */
	mutex_lock(&mvm->mutex);
	iwl_dbgfs_update_pm(mvm, vif, param, val);
	ret = iwl_mvm_power_update_mac(mvm);
	mutex_unlock(&mvm->mutex);

	return ret ?: count;
}
193
194static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
195 char __user *user_buf,
196 size_t count, loff_t *ppos)
197{
198 struct ieee80211_vif *vif = file->private_data;
199 char buf[64];
200 int bufsz = sizeof(buf);
201 int pos;
202
203 pos = scnprintf(buf, bufsz, "bss limit = %d\n",
204 vif->bss_conf.txpower);
205
206 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
207}
208
209static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
210 char __user *user_buf,
211 size_t count, loff_t *ppos)
212{
213 struct ieee80211_vif *vif = file->private_data;
214 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
215 struct iwl_mvm *mvm = mvmvif->mvm;
216 char buf[512];
217 int bufsz = sizeof(buf);
218 int pos;
219
220 pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
221
222 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
223}
224
/*
 * Dump basic interface state to debugfs: interface type, MAC id/color,
 * BSSID, per-AC QoS parameters, AP-station info (for stations) and the
 * channel context's RX chain configuration.
 */
static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct ieee80211_vif *vif = file->private_data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	u8 ap_sta_id;
	struct ieee80211_chanctx_conf *chanctx_conf;
	char buf[512];
	int bufsz = sizeof(buf);
	int pos = 0;
	int i;

	/* mutex protects ap_sta_id and the fw_id_to_mac_id lookup below */
	mutex_lock(&mvm->mutex);

	ap_sta_id = mvmvif->ap_sta_id;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
		break;
	case NL80211_IFTYPE_STATION:
		pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
		break;
	case NL80211_IFTYPE_AP:
		pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
		pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
		break;
	case NL80211_IFTYPE_P2P_GO:
		pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
		break;
	default:
		break;
	}

	pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
			 mvmvif->id, mvmvif->color);
	pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
			 vif->bss_conf.bssid);
	pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
	for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
		pos += scnprintf(buf+pos, bufsz-pos,
				 "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
				 i, mvmvif->queue_params[i].txop,
				 mvmvif->queue_params[i].cw_min,
				 mvmvif->queue_params[i].cw_max,
				 mvmvif->queue_params[i].aifs,
				 mvmvif->queue_params[i].uapsd);

	/* for stations, also show the AP station's reduced-txpower state */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    ap_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *sta;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
						lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta)) {
			struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

			pos += scnprintf(buf+pos, bufsz-pos,
					 "ap_sta_id %d - reduced Tx power %d\n",
					 ap_sta_id,
					 mvm_sta->bt_reduced_txpower);
		}
	}

	/* chanctx_conf is RCU-protected, not mutex-protected */
	rcu_read_lock();
	chanctx_conf = rcu_dereference(vif->chanctx_conf);
	if (chanctx_conf)
		pos += scnprintf(buf+pos, bufsz-pos,
				 "idle rx chains %d, active rx chains: %d\n",
				 chanctx_conf->rx_chains_static,
				 chanctx_conf->rx_chains_dynamic);
	rcu_read_unlock();

	mutex_unlock(&mvm->mutex);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
309
/*
 * Record a debugfs beacon-filter/beacon-abort override for @vif: set
 * the corresponding bit in dbgfs_bf->mask and store @value in the
 * matching field.  The beacon-filter code later applies the overrides.
 */
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
				enum iwl_dbgfs_bf_mask param, int value)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;

	dbgfs_bf->mask |= param;

	switch (param) {
	case MVM_DEBUGFS_BF_ENERGY_DELTA:
		dbgfs_bf->bf_energy_delta = value;
		break;
	case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
		dbgfs_bf->bf_roaming_energy_delta = value;
		break;
	case MVM_DEBUGFS_BF_ROAMING_STATE:
		dbgfs_bf->bf_roaming_state = value;
		break;
	case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
		dbgfs_bf->bf_temp_threshold = value;
		break;
	case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
		dbgfs_bf->bf_temp_fast_filter = value;
		break;
	case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
		dbgfs_bf->bf_temp_slow_filter = value;
		break;
	case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
		dbgfs_bf->bf_enable_beacon_filter = value;
		break;
	case MVM_DEBUGFS_BF_DEBUG_FLAG:
		dbgfs_bf->bf_debug_flag = value;
		break;
	case MVM_DEBUGFS_BF_ESCAPE_TIMER:
		dbgfs_bf->bf_escape_timer = value;
		break;
	case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
		dbgfs_bf->ba_enable_beacon_abort = value;
		break;
	case MVM_DEBUGFS_BA_ESCAPE_TIMER:
		dbgfs_bf->ba_escape_timer = value;
		break;
	}
}
354
/*
 * Parse a "name=value" line written to the bf_params debugfs file,
 * range-check the value, record the override and re-send the beacon
 * filter command (or disable filtering when bf_enable_beacon_filter=0).
 * The numeric offsets are the lengths of the literal option prefixes.
 * Returns @count on success or a negative error.
 */
static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	enum iwl_dbgfs_bf_mask param;
	int value, ret = 0;

	if (!strncmp("bf_energy_delta=", buf, 16)) {
		if (sscanf(buf+16, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BF_ENERGY_DELTA_MIN ||
		    value > IWL_BF_ENERGY_DELTA_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_ENERGY_DELTA;
	} else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
		if (sscanf(buf+24, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
		    value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
	} else if (!strncmp("bf_roaming_state=", buf, 17)) {
		if (sscanf(buf+17, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BF_ROAMING_STATE_MIN ||
		    value > IWL_BF_ROAMING_STATE_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_ROAMING_STATE;
	} else if (!strncmp("bf_temp_threshold=", buf, 18)) {
		if (sscanf(buf+18, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
		    value > IWL_BF_TEMP_THRESHOLD_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
	} else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
		if (sscanf(buf+20, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
		    value > IWL_BF_TEMP_FAST_FILTER_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
	} else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
		if (sscanf(buf+20, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
		    value > IWL_BF_TEMP_SLOW_FILTER_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
	} else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
		if (sscanf(buf+24, "%d", &value) != 1)
			return -EINVAL;
		if (value < 0 || value > 1)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
	} else if (!strncmp("bf_debug_flag=", buf, 14)) {
		if (sscanf(buf+14, "%d", &value) != 1)
			return -EINVAL;
		if (value < 0 || value > 1)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_DEBUG_FLAG;
	} else if (!strncmp("bf_escape_timer=", buf, 16)) {
		if (sscanf(buf+16, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BF_ESCAPE_TIMER_MIN ||
		    value > IWL_BF_ESCAPE_TIMER_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
	} else if (!strncmp("ba_escape_timer=", buf, 16)) {
		if (sscanf(buf+16, "%d", &value) != 1)
			return -EINVAL;
		if (value < IWL_BA_ESCAPE_TIMER_MIN ||
		    value > IWL_BA_ESCAPE_TIMER_MAX)
			return -EINVAL;
		param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
	} else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
		if (sscanf(buf+23, "%d", &value) != 1)
			return -EINVAL;
		if (value < 0 || value > 1)
			return -EINVAL;
		param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
	} else {
		return -EINVAL;
	}

	mutex_lock(&mvm->mutex);
	iwl_dbgfs_update_bf(vif, param, value);
	/* explicit disable turns filtering off; any other write re-applies */
	if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
		ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	else
		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
	mutex_unlock(&mvm->mutex);

	return ret ?: count;
}
451
452static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
453 char __user *user_buf,
454 size_t count, loff_t *ppos)
455{
456 struct ieee80211_vif *vif = file->private_data;
457 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
458 char buf[256];
459 int pos = 0;
460 const size_t bufsz = sizeof(buf);
461 struct iwl_beacon_filter_cmd cmd = {
462 IWL_BF_CMD_CONFIG_DEFAULTS,
463 .bf_enable_beacon_filter =
464 cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
465 .ba_enable_beacon_abort =
466 cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
467 };
468
469 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
470 if (mvmvif->bf_data.bf_enabled)
471 cmd.bf_enable_beacon_filter = cpu_to_le32(1);
472 else
473 cmd.bf_enable_beacon_filter = 0;
474
475 pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
476 le32_to_cpu(cmd.bf_energy_delta));
477 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
478 le32_to_cpu(cmd.bf_roaming_energy_delta));
479 pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
480 le32_to_cpu(cmd.bf_roaming_state));
481 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
482 le32_to_cpu(cmd.bf_temp_threshold));
483 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
484 le32_to_cpu(cmd.bf_temp_fast_filter));
485 pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
486 le32_to_cpu(cmd.bf_temp_slow_filter));
487 pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
488 le32_to_cpu(cmd.bf_enable_beacon_filter));
489 pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
490 le32_to_cpu(cmd.bf_debug_flag));
491 pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
492 le32_to_cpu(cmd.bf_escape_timer));
493 pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
494 le32_to_cpu(cmd.ba_escape_timer));
495 pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
496 le32_to_cpu(cmd.ba_enable_beacon_abort));
497
498 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
499}
500
/*
 * If buf starts with the literal prefix "name", return a pointer to the
 * first character after the prefix (i.e. the value part of "key=value");
 * otherwise return NULL.
 */
static inline char *iwl_dbgfs_is_match(char *name, char *buf)
{
	size_t prefix_len = strlen(name);

	if (strncmp(name, buf, prefix_len) != 0)
		return NULL;

	return buf + prefix_len;
}
507
/*
 * Parse one "key=value" pair written to the tof_enable debugfs file and
 * update the cached ToF (time-of-flight / 802.11mc FTM) configuration;
 * writing "send_tof_cfg=1" pushes the whole cached configuration to the
 * firmware.
 *
 * Returns count on success, a negative errno on a parse/command failure
 * or when no known key matched.
 */
static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
					  char *buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	u32 value;
	int ret = -EINVAL;	/* unmatched input falls through to -EINVAL */
	char *data;

	mutex_lock(&mvm->mutex);

	data = iwl_dbgfs_is_match("tof_disabled=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.tof_cfg.tof_disabled = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.tof_cfg.one_sided_disabled = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("is_debug_mode=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.tof_cfg.is_debug_mode = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("is_buf=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.tof_cfg.is_buf_required = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0 && value) {
			ret = iwl_mvm_tof_config_cmd(mvm);
			goto out;
		}
		/* "send_tof_cfg=0" falls through: accepted as a no-op */
	}

out:
	mutex_unlock(&mvm->mutex);

	return ret ?: count;
}
566
567static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
568 char __user *user_buf,
569 size_t count, loff_t *ppos)
570{
571 struct ieee80211_vif *vif = file->private_data;
572 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
573 struct iwl_mvm *mvm = mvmvif->mvm;
574 char buf[256];
575 int pos = 0;
576 const size_t bufsz = sizeof(buf);
577 struct iwl_tof_config_cmd *cmd;
578
579 cmd = &mvm->tof_data.tof_cfg;
580
581 mutex_lock(&mvm->mutex);
582
583 pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
584 cmd->tof_disabled);
585 pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
586 cmd->one_sided_disabled);
587 pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
588 cmd->is_debug_mode);
589 pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
590 cmd->is_buf_required);
591
592 mutex_unlock(&mvm->mutex);
593
594 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
595}
596
597static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
598 char *buf,
599 size_t count, loff_t *ppos)
600{
601 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
602 struct iwl_mvm *mvm = mvmvif->mvm;
603 u32 value;
604 int ret = 0;
605 char *data;
606
607 mutex_lock(&mvm->mutex);
608
609 data = iwl_dbgfs_is_match("burst_period=", buf);
610 if (data) {
611 ret = kstrtou32(data, 10, &value);
612 if (!ret)
613 mvm->tof_data.responder_cfg.burst_period =
614 cpu_to_le16(value);
615 goto out;
616 }
617
618 data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
619 if (data) {
620 ret = kstrtou32(data, 10, &value);
621 if (ret == 0)
622 mvm->tof_data.responder_cfg.min_delta_ftm = value;
623 goto out;
624 }
625
626 data = iwl_dbgfs_is_match("burst_duration=", buf);
627 if (data) {
628 ret = kstrtou32(data, 10, &value);
629 if (ret == 0)
630 mvm->tof_data.responder_cfg.burst_duration = value;
631 goto out;
632 }
633
634 data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
635 if (data) {
636 ret = kstrtou32(data, 10, &value);
637 if (ret == 0)
638 mvm->tof_data.responder_cfg.num_of_burst_exp = value;
639 goto out;
640 }
641
642 data = iwl_dbgfs_is_match("abort_responder=", buf);
643 if (data) {
644 ret = kstrtou32(data, 10, &value);
645 if (ret == 0)
646 mvm->tof_data.responder_cfg.abort_responder = value;
647 goto out;
648 }
649
650 data = iwl_dbgfs_is_match("get_ch_est=", buf);
651 if (data) {
652 ret = kstrtou32(data, 10, &value);
653 if (ret == 0)
654 mvm->tof_data.responder_cfg.get_ch_est = value;
655 goto out;
656 }
657
658 data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
659 if (data) {
660 ret = kstrtou32(data, 10, &value);
661 if (ret == 0)
662 mvm->tof_data.responder_cfg.recv_sta_req_params = value;
663 goto out;
664 }
665
666 data = iwl_dbgfs_is_match("channel_num=", buf);
667 if (data) {
668 ret = kstrtou32(data, 10, &value);
669 if (ret == 0)
670 mvm->tof_data.responder_cfg.channel_num = value;
671 goto out;
672 }
673
674 data = iwl_dbgfs_is_match("bandwidth=", buf);
675 if (data) {
676 ret = kstrtou32(data, 10, &value);
677 if (ret == 0)
678 mvm->tof_data.responder_cfg.bandwidth = value;
679 goto out;
680 }
681
682 data = iwl_dbgfs_is_match("rate=", buf);
683 if (data) {
684 ret = kstrtou32(data, 10, &value);
685 if (ret == 0)
686 mvm->tof_data.responder_cfg.rate = value;
687 goto out;
688 }
689
690 data = iwl_dbgfs_is_match("bssid=", buf);
691 if (data) {
692 u8 *mac = mvm->tof_data.responder_cfg.bssid;
693
694 if (!mac_pton(data, mac)) {
695 ret = -EINVAL;
696 goto out;
697 }
698 }
699
700 data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
701 if (data) {
702 ret = kstrtou32(data, 10, &value);
703 if (ret == 0)
704 mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
705 cpu_to_le16(value);
706 goto out;
707 }
708
709 data = iwl_dbgfs_is_match("toa_offset=", buf);
710 if (data) {
711 ret = kstrtou32(data, 10, &value);
712 if (ret == 0)
713 mvm->tof_data.responder_cfg.toa_offset =
714 cpu_to_le16(value);
715 goto out;
716 }
717
718 data = iwl_dbgfs_is_match("center_freq=", buf);
719 if (data) {
720 struct iwl_tof_responder_config_cmd *cmd =
721 &mvm->tof_data.responder_cfg;
722
723 ret = kstrtou32(data, 10, &value);
724 if (ret == 0 && value) {
725 enum ieee80211_band band = (cmd->channel_num <= 14) ?
726 IEEE80211_BAND_2GHZ :
727 IEEE80211_BAND_5GHZ;
728 struct ieee80211_channel chn = {
729 .band = band,
730 .center_freq = ieee80211_channel_to_frequency(
731 cmd->channel_num, band),
732 };
733 struct cfg80211_chan_def chandef = {
734 .chan = &chn,
735 .center_freq1 =
736 ieee80211_channel_to_frequency(value,
737 band),
738 };
739
740 cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
741 }
742 goto out;
743 }
744
745 data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
746 if (data) {
747 ret = kstrtou32(data, 10, &value);
748 if (ret == 0)
749 mvm->tof_data.responder_cfg.ftm_per_burst = value;
750 goto out;
751 }
752
753 data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
754 if (data) {
755 ret = kstrtou32(data, 10, &value);
756 if (ret == 0)
757 mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
758 goto out;
759 }
760
761 data = iwl_dbgfs_is_match("asap_mode=", buf);
762 if (data) {
763 ret = kstrtou32(data, 10, &value);
764 if (ret == 0)
765 mvm->tof_data.responder_cfg.asap_mode = value;
766 goto out;
767 }
768
769 data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
770 if (data) {
771 ret = kstrtou32(data, 10, &value);
772 if (ret == 0 && value) {
773 ret = iwl_mvm_tof_responder_cmd(mvm, vif);
774 goto out;
775 }
776 }
777
778out:
779 mutex_unlock(&mvm->mutex);
780
781 return ret ?: count;
782}
783
/*
 * Dump the cached ToF responder configuration, one "field = value" line
 * per field.  The multi-byte fields (burst_period, tsf_timer_offset_msecs,
 * toa_offset) are stored little-endian for the firmware, so they are
 * converted back to CPU byte order for display.
 */
static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
						   char __user *user_buf,
						   size_t count, loff_t *ppos)
{
	struct ieee80211_vif *vif = file->private_data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);
	struct iwl_tof_responder_config_cmd *cmd;

	cmd = &mvm->tof_data.responder_cfg;

	/* serialize against concurrent debugfs writers */
	mutex_lock(&mvm->mutex);

	pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
			 le16_to_cpu(cmd->burst_period));
	pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
			 cmd->burst_duration);
	pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
			 cmd->bandwidth);
	pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
			 cmd->channel_num);
	pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
			 cmd->ctrl_ch_position);
	pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
			 cmd->bssid);
	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
			 cmd->min_delta_ftm);
	pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
			 cmd->num_of_burst_exp);
	pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
	pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
			 cmd->abort_responder);
	pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
			 cmd->get_ch_est);
	pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
			 cmd->recv_sta_req_params);
	pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
			 cmd->ftm_per_burst);
	pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
			 cmd->ftm_resp_ts_avail);
	pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
			 cmd->asap_mode);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "tsf_timer_offset_msecs = %d\n",
			 le16_to_cpu(cmd->tsf_timer_offset_msecs));
	pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
			 le16_to_cpu(cmd->toa_offset));

	mutex_unlock(&mvm->mutex);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
839
/*
 * Parse one "key=value" pair written to the tof_range_request debugfs
 * file and update the cached ToF range request.  The "ap=" key fills one
 * complete AP entry (indexed by its first field) and
 * "send_range_request=1" sends the cached request to the firmware.
 *
 * Returns count on success, a negative errno on a parse/command failure
 * or when no known key matched.
 */
static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
						 char *buf, size_t count,
						 loff_t *ppos)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	u32 value;
	int ret = 0;
	char *data;

	mutex_lock(&mvm->mutex);

	data = iwl_dbgfs_is_match("request_id=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req.request_id = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("initiator=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req.initiator = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req.one_sided_los_disable = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("req_timeout=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req.req_timeout = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("report_policy=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req.report_policy = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("macaddr_random=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req.macaddr_random = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("num_of_ap=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req.num_of_ap = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("macaddr_template=", buf);
	if (data) {
		u8 mac[ETH_ALEN];

		if (!mac_pton(data, mac)) {
			ret = -EINVAL;
			goto out;
		}
		memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
		goto out;
	}

	data = iwl_dbgfs_is_match("macaddr_mask=", buf);
	if (data) {
		u8 mac[ETH_ALEN];

		if (!mac_pton(data, mac)) {
			ret = -EINVAL;
			goto out;
		}
		memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
		goto out;
	}

	data = iwl_dbgfs_is_match("ap=", buf);
	if (data) {
		struct iwl_tof_range_req_ap_entry ap = {};
		int size = sizeof(struct iwl_tof_range_req_ap_entry);
		u16 burst_period;
		u8 *mac = ap.bssid;
		unsigned int i;

		/*
		 * One full AP entry per write.  The adjacent format-string
		 * fragments rely on sscanf's numeric conversions skipping
		 * leading whitespace, so fields are expected whitespace
		 * separated; the MAC uses literal ':' separators.
		 */
		if (sscanf(data, "%u %hhd %hhd %hhd"
			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
			   "%hhd %hhd %hd"
			   "%hhd %hhd %d"
			   "%hhx %hhd %hhd %hhd",
			   &i, &ap.channel_num, &ap.bandwidth,
			   &ap.ctrl_ch_position,
			   mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
			   &ap.measure_type, &ap.num_of_bursts,
			   &burst_period,
			   &ap.samples_per_burst, &ap.retries_per_sample,
			   &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
			   &ap.enable_dyn_ack, &ap.rssi) != 20) {
			ret = -EINVAL;
			goto out;
		}
		/* bound-check the index before writing into the AP array */
		if (i >= IWL_MVM_TOF_MAX_APS) {
			IWL_ERR(mvm, "Invalid AP index %d\n", i);
			ret = -EINVAL;
			goto out;
		}

		/* burst_period is little-endian in the firmware command */
		ap.burst_period = cpu_to_le16(burst_period);

		memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
		goto out;
	}

	data = iwl_dbgfs_is_match("send_range_request=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0 && value)
			ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
		goto out;
	}

	/* no known key matched */
	ret = -EINVAL;
out:
	mutex_unlock(&mvm->mutex);
	return ret ?: count;
}
981
/*
 * Dump the cached ToF range request, including one line per configured
 * AP entry.  scnprintf() clamps at bufsz, so a large num_of_ap can only
 * truncate the dump, never overflow buf.
 */
static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct ieee80211_vif *vif = file->private_data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	char buf[512];
	int pos = 0;
	const size_t bufsz = sizeof(buf);
	struct iwl_tof_range_req_cmd *cmd;
	int i;

	cmd = &mvm->tof_data.range_req;

	/* serialize against concurrent debugfs writers */
	mutex_lock(&mvm->mutex);

	pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
			 cmd->request_id);
	pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
			 cmd->initiator);
	pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
			 cmd->one_sided_los_disable);
	pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
			 cmd->req_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
			 cmd->report_policy);
	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
			 cmd->macaddr_random);
	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
			 cmd->macaddr_template);
	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
			 cmd->macaddr_mask);
	pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
			 cmd->num_of_ap);
	for (i = 0; i < cmd->num_of_ap; i++) {
		struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];

		pos += scnprintf(buf + pos, bufsz - pos,
				"ap %.2d: channel_num=%hhd bw=%hhd"
				" control=%hhd bssid=%pM type=%hhd"
				" num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
				" retries=%hhd tsf_delta=%d"
				" tsf_delta_direction=%hhd location_req=0x%hhx "
				" asap=%hhd enable=%hhd rssi=%hhd\n",
				i, ap->channel_num, ap->bandwidth,
				ap->ctrl_ch_position, ap->bssid,
				ap->measure_type, ap->num_of_bursts,
				ap->burst_period, ap->samples_per_burst,
				ap->retries_per_sample, ap->tsf_delta,
				ap->tsf_delta_direction,
				ap->location_req, ap->asap_mode,
				ap->enable_dyn_ack, ap->rssi);
	}

	mutex_unlock(&mvm->mutex);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
1041
/*
 * Parse one "key=value" pair written to the tof_range_req_ext debugfs
 * file and update the cached ToF range request extended parameters;
 * writing "send_range_req_ext=1" sends them to the firmware.
 *
 * Returns count on success, a negative errno on a parse/command failure
 * or when no known key matched.
 */
static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
						 char *buf,
						 size_t count, loff_t *ppos)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	u32 value;
	int ret = 0;
	char *data;

	mutex_lock(&mvm->mutex);

	data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			/* stored little-endian for the firmware */
			mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
				cpu_to_le16(value);
		goto out;
	}

	data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req_ext.min_delta_ftm = value;
		goto out;
	}

	data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
				value;
		goto out;
	}

	data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
				value;
		goto out;
	}

	data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0)
			mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
				value;
		goto out;
	}

	data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
	if (data) {
		ret = kstrtou32(data, 10, &value);
		if (ret == 0 && value)
			ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
		goto out;
	}

	/* no known key matched */
	ret = -EINVAL;
out:
	mutex_unlock(&mvm->mutex);
	return ret ?: count;
}
1111
1112static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
1113 char __user *user_buf,
1114 size_t count, loff_t *ppos)
1115{
1116 struct ieee80211_vif *vif = file->private_data;
1117 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1118 struct iwl_mvm *mvm = mvmvif->mvm;
1119 char buf[256];
1120 int pos = 0;
1121 const size_t bufsz = sizeof(buf);
1122 struct iwl_tof_range_req_ext_cmd *cmd;
1123
1124 cmd = &mvm->tof_data.range_req_ext;
1125
1126 mutex_lock(&mvm->mutex);
1127
1128 pos += scnprintf(buf + pos, bufsz - pos,
1129 "tsf_timer_offset_msec = %hd\n",
1130 cmd->tsf_timer_offset_msec);
1131 pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
1132 cmd->min_delta_ftm);
1133 pos += scnprintf(buf + pos, bufsz - pos,
1134 "ftm_format_and_bw20M = %hhd\n",
1135 cmd->ftm_format_and_bw20M);
1136 pos += scnprintf(buf + pos, bufsz - pos,
1137 "ftm_format_and_bw40M = %hhd\n",
1138 cmd->ftm_format_and_bw40M);
1139 pos += scnprintf(buf + pos, bufsz - pos,
1140 "ftm_format_and_bw80M = %hhd\n",
1141 cmd->ftm_format_and_bw80M);
1142
1143 mutex_unlock(&mvm->mutex);
1144 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1145}
1146
1147static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
1148 char *buf,
1149 size_t count, loff_t *ppos)
1150{
1151 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1152 struct iwl_mvm *mvm = mvmvif->mvm;
1153 u32 value;
1154 int abort_id, ret = 0;
1155 char *data;
1156
1157 mutex_lock(&mvm->mutex);
1158
1159 data = iwl_dbgfs_is_match("abort_id=", buf);
1160 if (data) {
1161 ret = kstrtou32(data, 10, &value);
1162 if (ret == 0)
1163 mvm->tof_data.last_abort_id = value;
1164 goto out;
1165 }
1166
1167 data = iwl_dbgfs_is_match("send_range_abort=", buf);
1168 if (data) {
1169 ret = kstrtou32(data, 10, &value);
1170 if (ret == 0 && value) {
1171 abort_id = mvm->tof_data.last_abort_id;
1172 ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
1173 goto out;
1174 }
1175 }
1176
1177out:
1178 mutex_unlock(&mvm->mutex);
1179 return ret ?: count;
1180}
1181
1182static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
1183 char __user *user_buf,
1184 size_t count, loff_t *ppos)
1185{
1186 struct ieee80211_vif *vif = file->private_data;
1187 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1188 struct iwl_mvm *mvm = mvmvif->mvm;
1189 char buf[32];
1190 int pos = 0;
1191 const size_t bufsz = sizeof(buf);
1192 int last_abort_id;
1193
1194 mutex_lock(&mvm->mutex);
1195 last_abort_id = mvm->tof_data.last_abort_id;
1196 mutex_unlock(&mvm->mutex);
1197
1198 pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
1199 last_abort_id);
1200 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1201}
1202
/*
 * Dump the last ToF range response notification received from the
 * firmware, one line per reported AP.  The buffer is heap allocated
 * since the full response structure plus per-AP lines can be larger
 * than is comfortable on the kernel stack; scnprintf() clamps at bufsz
 * so a large num_of_aps can only truncate the dump.
 */
static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
						 char __user *user_buf,
						 size_t count, loff_t *ppos)
{
	struct ieee80211_vif *vif = file->private_data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	char *buf;
	int pos = 0;
	const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
	struct iwl_tof_range_rsp_ntfy *cmd;
	int i, ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* hold the mutex so the response isn't updated while dumping */
	mutex_lock(&mvm->mutex);
	cmd = &mvm->tof_data.range_resp;

	pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
			 cmd->request_id);
	pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
			 cmd->request_status);
	pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
			 cmd->last_in_batch);
	pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
			 cmd->num_of_aps);
	for (i = 0; i < cmd->num_of_aps; i++) {
		struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];

		pos += scnprintf(buf + pos, bufsz - pos,
				"ap %.2d: bssid=%pM status=%hhd bw=%hhd"
				" rtt=%d rtt_var=%d rtt_spread=%d"
				" rssi=%hhd rssi_spread=%hhd"
				" range=%d range_var=%d"
				" time_stamp=%d\n",
				i, ap->bssid, ap->measure_status,
				ap->measure_bw,
				ap->rtt, ap->rtt_variance, ap->rtt_spread,
				ap->rssi, ap->rssi_spread, ap->range,
				ap->range_variance, ap->timestamp);
	}
	mutex_unlock(&mvm->mutex);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1252
1253static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
1254 size_t count, loff_t *ppos)
1255{
1256 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1257 struct iwl_mvm *mvm = mvmvif->mvm;
1258 u8 value;
1259 int ret;
1260
1261 ret = kstrtou8(buf, 0, &value);
1262 if (ret)
1263 return ret;
1264 if (value > 1)
1265 return -EINVAL;
1266
1267 mutex_lock(&mvm->mutex);
1268 iwl_mvm_update_low_latency(mvm, vif, value);
1269 mutex_unlock(&mvm->mutex);
1270
1271 return count;
1272}
1273
1274static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
1275 char __user *user_buf,
1276 size_t count, loff_t *ppos)
1277{
1278 struct ieee80211_vif *vif = file->private_data;
1279 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1280 char buf[2];
1281
1282 buf[0] = mvmvif->low_latency ? '1' : '0';
1283 buf[1] = '\n';
1284 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
1285}
1286
1287static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
1288 char __user *user_buf,
1289 size_t count, loff_t *ppos)
1290{
1291 struct ieee80211_vif *vif = file->private_data;
1292 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1293 char buf[20];
1294 int len;
1295
1296 len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
1297 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1298}
1299
1300static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
1301 char *buf, size_t count,
1302 loff_t *ppos)
1303{
1304 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1305 struct iwl_mvm *mvm = mvmvif->mvm;
1306 bool ret;
1307
1308 mutex_lock(&mvm->mutex);
1309 ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
1310 mutex_unlock(&mvm->mutex);
1311
1312 return ret ? count : -EINVAL;
1313}
1314
1315static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
1316 size_t count, loff_t *ppos)
1317{
1318 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1319 struct iwl_mvm *mvm = mvmvif->mvm;
1320 struct ieee80211_chanctx_conf *chanctx_conf;
1321 struct iwl_mvm_phy_ctxt *phy_ctxt;
1322 u16 value;
1323 int ret;
1324
1325 ret = kstrtou16(buf, 0, &value);
1326 if (ret)
1327 return ret;
1328
1329 mutex_lock(&mvm->mutex);
1330 rcu_read_lock();
1331
1332 chanctx_conf = rcu_dereference(vif->chanctx_conf);
1333 /* make sure the channel context is assigned */
1334 if (!chanctx_conf) {
1335 rcu_read_unlock();
1336 mutex_unlock(&mvm->mutex);
1337 return -EINVAL;
1338 }
1339
1340 phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
1341 rcu_read_unlock();
1342
1343 mvm->dbgfs_rx_phyinfo = value;
1344
1345 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
1346 chanctx_conf->rx_chains_static,
1347 chanctx_conf->rx_chains_dynamic);
1348 mutex_unlock(&mvm->mutex);
1349
1350 return ret ?: count;
1351}
1352
1353static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
1354 char __user *user_buf,
1355 size_t count, loff_t *ppos)
1356{
1357 struct ieee80211_vif *vif = file->private_data;
1358 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1359 char buf[8];
1360
1361 snprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
1362
1363 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
1364}
1365
/*
 * Per-vif debugfs plumbing: bind the generic _MVM_DEBUGFS_*_FILE_OPS
 * generators to files whose private_data is a struct ieee80211_vif.
 * The bufsz argument bounds the kernel copy of a userspace write.
 */
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
/*
 * Create one debugfs file under "parent"; on failure, bail to the
 * caller's "err" label (used only inside iwl_mvm_vif_dbgfs_register()).
 */
#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do {		\
		if (!debugfs_create_file(#name, mode, parent, vif,	\
					 &iwl_dbgfs_##name##_ops))	\
			goto err;					\
	} while (0)

/* Instantiate the file_operations for every per-vif debugfs entry. */
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
1389
/*
 * Create the per-vif "iwlmvm" debugfs directory and its files, plus a
 * convenience symlink from the device's debugfs directory.  Which files
 * are created depends on the interface type, module parameters and
 * firmware capabilities.  All failures are logged and ignored - debugfs
 * is best-effort.
 */
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct dentry *dbgfs_dir = vif->debugfs_dir;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	char buf[100];

	/*
	 * Check if debugfs directory already exist before creating it.
	 * This may happen when, for example, resetting hw or suspend-resume
	 */
	if (!dbgfs_dir || mvmvif->dbgfs_dir)
		return;

	mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);

	if (!mvmvif->dbgfs_dir) {
		IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
			dbgfs_dir->d_name.name);
		return;
	}

	/*
	 * pm_params only applies when power management is active: plain
	 * station, or P2P client if the firmware supports BSS/P2P power
	 * save coexistence (DCM).
	 */
	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
	    ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
	     (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
	      mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
		MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
					 S_IRUSR);

	MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
				 S_IRUSR | S_IWUSR);
	MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
				 S_IRUSR | S_IWUSR);
	MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
				 S_IRUSR | S_IWUSR);

	/* beacon filtering is only exposed on the vif it is allowed on */
	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
	    mvmvif == mvm->bf_allowed_vif)
		MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
					 S_IRUSR | S_IWUSR);

	/* ToF (802.11mc FTM) files, only when the firmware supports it */
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
	    !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
		if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
			MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
						 mvmvif->dbgfs_dir,
						 S_IRUSR | S_IWUSR);

		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
					 S_IRUSR | S_IWUSR);
		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
					 S_IRUSR | S_IWUSR);
		MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
					 S_IRUSR | S_IWUSR);
		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
					 S_IRUSR | S_IWUSR);
		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
					 S_IRUSR);
	}

	/*
	 * Create symlink for convenience pointing to interface specific
	 * debugfs entries for the driver. For example, under
	 * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
	 * find
	 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
	 */
	snprintf(buf, 100, "../../../%s/%s/%s/%s",
		 dbgfs_dir->d_parent->d_parent->d_name.name,
		 dbgfs_dir->d_parent->d_name.name,
		 dbgfs_dir->d_name.name,
		 mvmvif->dbgfs_dir->d_name.name);

	mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
						     mvm->debugfs_dir, buf);
	if (!mvmvif->dbgfs_slink)
		IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
			dbgfs_dir->d_name.name);
	return;
err:
	IWL_ERR(mvm, "Can't create debugfs entity\n");
}
1473
/*
 * Tear down the per-vif debugfs entries created by
 * iwl_mvm_vif_dbgfs_register(): remove the convenience symlink and the
 * whole "iwlmvm" directory tree, and clear the cached dentries so a
 * later re-registration starts clean.
 */
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	debugfs_remove(mvmvif->dbgfs_slink);
	mvmvif->dbgfs_slink = NULL;

	debugfs_remove_recursive(mvmvif->dbgfs_dir);
	mvmvif->dbgfs_dir = NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
new file mode 100644
index 000000000000..05928fb4021d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -0,0 +1,1516 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/vmalloc.h>
66
67#include "mvm.h"
68#include "sta.h"
69#include "iwl-io.h"
70#include "debugfs.h"
71#include "iwl-fw-error-dump.h"
72
73static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
74 size_t count, loff_t *ppos)
75{
76 int ret;
77 u32 scd_q_msk;
78
79 if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
80 return -EIO;
81
82 if (sscanf(buf, "%x", &scd_q_msk) != 1)
83 return -EINVAL;
84
85 IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
86
87 mutex_lock(&mvm->mutex);
88 ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
89 mutex_unlock(&mvm->mutex);
90
91 return ret;
92}
93
94static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
95 size_t count, loff_t *ppos)
96{
97 struct iwl_mvm_sta *mvmsta;
98 int sta_id, drain, ret;
99
100 if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
101 return -EIO;
102
103 if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
104 return -EINVAL;
105 if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
106 return -EINVAL;
107 if (drain < 0 || drain > 1)
108 return -EINVAL;
109
110 mutex_lock(&mvm->mutex);
111
112 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
113
114 if (!mvmsta)
115 ret = -ENOENT;
116 else
117 ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
118
119 mutex_unlock(&mvm->mutex);
120
121 return ret;
122}
123
124static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
125 size_t count, loff_t *ppos)
126{
127 struct iwl_mvm *mvm = file->private_data;
128 const struct fw_img *img;
129 unsigned int ofs, len;
130 size_t ret;
131 u8 *ptr;
132
133 if (!mvm->ucode_loaded)
134 return -EINVAL;
135
136 /* default is to dump the entire data segment */
137 img = &mvm->fw->img[mvm->cur_ucode];
138 ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
139 len = img->sec[IWL_UCODE_SECTION_DATA].len;
140
141 if (mvm->dbgfs_sram_len) {
142 ofs = mvm->dbgfs_sram_offset;
143 len = mvm->dbgfs_sram_len;
144 }
145
146 ptr = kzalloc(len, GFP_KERNEL);
147 if (!ptr)
148 return -ENOMEM;
149
150 iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
151
152 ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
153
154 kfree(ptr);
155
156 return ret;
157}
158
159static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
160 size_t count, loff_t *ppos)
161{
162 const struct fw_img *img;
163 u32 offset, len;
164 u32 img_offset, img_len;
165
166 if (!mvm->ucode_loaded)
167 return -EINVAL;
168
169 img = &mvm->fw->img[mvm->cur_ucode];
170 img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
171 img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
172
173 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
174 if ((offset & 0x3) || (len & 0x3))
175 return -EINVAL;
176
177 if (offset + len > img_offset + img_len)
178 return -EINVAL;
179
180 mvm->dbgfs_sram_offset = offset;
181 mvm->dbgfs_sram_len = len;
182 } else {
183 mvm->dbgfs_sram_offset = 0;
184 mvm->dbgfs_sram_len = 0;
185 }
186
187 return count;
188}
189
190static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file,
191 char __user *user_buf,
192 size_t count, loff_t *ppos)
193{
194 struct iwl_mvm *mvm = file->private_data;
195 char buf[16];
196 int pos;
197
198 if (!mvm->temperature_test)
199 pos = scnprintf(buf , sizeof(buf), "disabled\n");
200 else
201 pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature);
202
203 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
204}
205
206/*
207 * Set NIC Temperature
208 * Cause the driver to ignore the actual NIC temperature reported by the FW
209 * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -
210 * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX
211 * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE
212 */
213static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
214 char *buf, size_t count,
215 loff_t *ppos)
216{
217 int temperature;
218
219 if (!mvm->ucode_loaded && !mvm->temperature_test)
220 return -EIO;
221
222 if (kstrtoint(buf, 10, &temperature))
223 return -EINVAL;
224 /* not a legal temperature */
225 if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX &&
226 temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) ||
227 temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN)
228 return -EINVAL;
229
230 mutex_lock(&mvm->mutex);
231 if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) {
232 if (!mvm->temperature_test)
233 goto out;
234
235 mvm->temperature_test = false;
236 /* Since we can't read the temp while awake, just set
237 * it to zero until we get the next RX stats from the
238 * firmware.
239 */
240 mvm->temperature = 0;
241 } else {
242 mvm->temperature_test = true;
243 mvm->temperature = temperature;
244 }
245 IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n",
246 mvm->temperature_test ? "En" : "Dis" ,
247 mvm->temperature);
248 /* handle the temperature change */
249 iwl_mvm_tt_handler(mvm);
250
251out:
252 mutex_unlock(&mvm->mutex);
253
254 return count;
255}
256
257static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
258 char __user *user_buf,
259 size_t count, loff_t *ppos)
260{
261 struct iwl_mvm *mvm = file->private_data;
262 char buf[16];
263 int pos, temp;
264
265 if (!mvm->ucode_loaded)
266 return -EIO;
267
268 mutex_lock(&mvm->mutex);
269 temp = iwl_mvm_get_temp(mvm);
270 mutex_unlock(&mvm->mutex);
271
272 if (temp < 0)
273 return temp;
274
275 pos = scnprintf(buf , sizeof(buf), "%d\n", temp);
276
277 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
278}
279
280static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
281 size_t count, loff_t *ppos)
282{
283 struct iwl_mvm *mvm = file->private_data;
284 struct ieee80211_sta *sta;
285 char buf[400];
286 int i, pos = 0, bufsz = sizeof(buf);
287
288 mutex_lock(&mvm->mutex);
289
290 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
291 pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
292 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
293 lockdep_is_held(&mvm->mutex));
294 if (!sta)
295 pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
296 else if (IS_ERR(sta))
297 pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
298 PTR_ERR(sta));
299 else
300 pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
301 sta->addr);
302 }
303
304 mutex_unlock(&mvm->mutex);
305
306 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
307}
308
309static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
310 char __user *user_buf,
311 size_t count, loff_t *ppos)
312{
313 struct iwl_mvm *mvm = file->private_data;
314 char buf[64];
315 int bufsz = sizeof(buf);
316 int pos = 0;
317
318 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
319 mvm->disable_power_off);
320 pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
321 mvm->disable_power_off_d3);
322
323 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
324}
325
326static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
327 size_t count, loff_t *ppos)
328{
329 int ret, val;
330
331 if (!mvm->ucode_loaded)
332 return -EIO;
333
334 if (!strncmp("disable_power_off_d0=", buf, 21)) {
335 if (sscanf(buf + 21, "%d", &val) != 1)
336 return -EINVAL;
337 mvm->disable_power_off = val;
338 } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
339 if (sscanf(buf + 21, "%d", &val) != 1)
340 return -EINVAL;
341 mvm->disable_power_off_d3 = val;
342 } else {
343 return -EINVAL;
344 }
345
346 mutex_lock(&mvm->mutex);
347 ret = iwl_mvm_power_update_device(mvm);
348 mutex_unlock(&mvm->mutex);
349
350 return ret ?: count;
351}
352
/*
 * Extract one named field from a BT coex mailbox dword, using the
 * BT_MBOX<dw>_<field> mask and its matching _POS shift constant.
 */
#define BT_MBOX_MSG(_notif, _num, _field)				     \
	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
	 >> BT_MBOX##_num##_##_field##_POS)

/*
 * Print one mailbox field into the caller's local buf/pos/bufsz.
 * Fix: the expansion no longer ends in ';', so call sites ("...);") no
 * longer create a stray empty statement and the macro is safe in
 * un-braced if/else bodies.
 * NOTE(review): the _end argument is ignored - "true ? ..." always
 * selects "\n", so every field gets its own line; confirm intended.
 */
#define BT_MBOX_PRINT(_num, _field, _end)				    \
	pos += scnprintf(buf + pos, bufsz - pos,			    \
			 "\t%s: %d%s",					    \
			 #_field,					    \
			 BT_MBOX_MSG(notif, _num, _field),		    \
			 true ? "\n" : ", ")
364
/*
 * Decode all four BT coex mailbox dwords from a (new API) profile
 * notification into buf starting at pos; returns the updated pos.
 * BT_MBOX_PRINT expands against the local names notif/buf/pos/bufsz.
 */
static
int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
			   int pos, int bufsz)
{
	/* dw0: BLE/LE profile and activity bits */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");

	BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
	BT_MBOX_PRINT(0, LE_PROF1, false);
	BT_MBOX_PRINT(0, LE_PROF2, false);
	BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
	BT_MBOX_PRINT(0, CHL_SEQ_N, false);
	BT_MBOX_PRINT(0, INBAND_S, false);
	BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
	BT_MBOX_PRINT(0, LE_SCAN, false);
	BT_MBOX_PRINT(0, LE_ADV, false);
	BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
	BT_MBOX_PRINT(0, OPEN_CON_1, true);

	/* dw1: BR/EDR power and traffic indications */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");

	BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
	BT_MBOX_PRINT(1, IP_SR, false);
	BT_MBOX_PRINT(1, LE_MSTR, false);
	BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
	BT_MBOX_PRINT(1, MSG_TYPE, false);
	BT_MBOX_PRINT(1, SSN, true);

	/* dw2: sniff/page/inquiry and SCO activity */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");

	BT_MBOX_PRINT(2, SNIFF_ACT, false);
	BT_MBOX_PRINT(2, PAG, false);
	BT_MBOX_PRINT(2, INQUIRY, false);
	BT_MBOX_PRINT(2, CONN, false);
	BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
	BT_MBOX_PRINT(2, DISC, false);
	BT_MBOX_PRINT(2, SCO_TX_ACT, false);
	BT_MBOX_PRINT(2, SCO_RX_ACT, false);
	BT_MBOX_PRINT(2, ESCO_RE_TX, false);
	BT_MBOX_PRINT(2, SCO_DURATION, true);

	/* dw3: link-state machine snapshot */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");

	BT_MBOX_PRINT(3, SCO_STATE, false);
	BT_MBOX_PRINT(3, SNIFF_STATE, false);
	BT_MBOX_PRINT(3, A2DP_STATE, false);
	BT_MBOX_PRINT(3, ACL_STATE, false);
	BT_MBOX_PRINT(3, MSTR_STATE, false);
	BT_MBOX_PRINT(3, OBX_STATE, false);
	BT_MBOX_PRINT(3, OPEN_CON_2, false);
	BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
	BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
	BT_MBOX_PRINT(3, INBAND_P, false);
	BT_MBOX_PRINT(3, MSG_TYPE_2, false);
	BT_MBOX_PRINT(3, SSN_2, false);
	BT_MBOX_PRINT(3, UPDATE_REQUEST, true);

	return pos;
}
423
/*
 * Same mailbox decode as iwl_mvm_coex_dump_mbox(), but for the old
 * (pre BT_COEX_SPLIT API) notification layout; returns the updated pos.
 */
static
int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
			       char *buf, int pos, int bufsz)
{
	/* dw0: BLE/LE profile and activity bits */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");

	BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
	BT_MBOX_PRINT(0, LE_PROF1, false);
	BT_MBOX_PRINT(0, LE_PROF2, false);
	BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
	BT_MBOX_PRINT(0, CHL_SEQ_N, false);
	BT_MBOX_PRINT(0, INBAND_S, false);
	BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
	BT_MBOX_PRINT(0, LE_SCAN, false);
	BT_MBOX_PRINT(0, LE_ADV, false);
	BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
	BT_MBOX_PRINT(0, OPEN_CON_1, true);

	/* dw1: BR/EDR power and traffic indications */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");

	BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
	BT_MBOX_PRINT(1, IP_SR, false);
	BT_MBOX_PRINT(1, LE_MSTR, false);
	BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
	BT_MBOX_PRINT(1, MSG_TYPE, false);
	BT_MBOX_PRINT(1, SSN, true);

	/* dw2: sniff/page/inquiry and SCO activity */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");

	BT_MBOX_PRINT(2, SNIFF_ACT, false);
	BT_MBOX_PRINT(2, PAG, false);
	BT_MBOX_PRINT(2, INQUIRY, false);
	BT_MBOX_PRINT(2, CONN, false);
	BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
	BT_MBOX_PRINT(2, DISC, false);
	BT_MBOX_PRINT(2, SCO_TX_ACT, false);
	BT_MBOX_PRINT(2, SCO_RX_ACT, false);
	BT_MBOX_PRINT(2, ESCO_RE_TX, false);
	BT_MBOX_PRINT(2, SCO_DURATION, true);

	/* dw3: link-state machine snapshot */
	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");

	BT_MBOX_PRINT(3, SCO_STATE, false);
	BT_MBOX_PRINT(3, SNIFF_STATE, false);
	BT_MBOX_PRINT(3, A2DP_STATE, false);
	BT_MBOX_PRINT(3, ACL_STATE, false);
	BT_MBOX_PRINT(3, MSTR_STATE, false);
	BT_MBOX_PRINT(3, OBX_STATE, false);
	BT_MBOX_PRINT(3, OPEN_CON_2, false);
	BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
	BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
	BT_MBOX_PRINT(3, INBAND_P, false);
	BT_MBOX_PRINT(3, MSG_TYPE_2, false);
	BT_MBOX_PRINT(3, SSN_2, false);
	BT_MBOX_PRINT(3, UPDATE_REQUEST, true);

	return pos;
}
482
/*
 * debugfs read: dump the last BT coex profile notification.  The two
 * branches are structurally identical but operate on different struct
 * layouts, selected by the BT_COEX_SPLIT firmware API capability.
 */
static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	char *buf;
	int ret, pos = 0, bufsz = sizeof(char) * 1024;

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* mvm->mutex protects last_bt_notif / last_bt_notif_old */
	mutex_lock(&mvm->mutex);

	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
		/* old firmware API: old notification layout */
		struct iwl_bt_coex_profile_notif_old *notif =
			&mvm->last_bt_notif_old;

		pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);

		pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
				 notif->bt_ci_compliance);
		pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
				 le32_to_cpu(notif->primary_ch_lut));
		pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
				 le32_to_cpu(notif->secondary_ch_lut));
		pos += scnprintf(buf+pos,
				 bufsz-pos, "bt_activity_grading = %d\n",
				 le32_to_cpu(notif->bt_activity_grading));
		pos += scnprintf(buf+pos, bufsz-pos,
				 "antenna isolation = %d CORUN LUT index = %d\n",
				 mvm->last_ant_isol, mvm->last_corun_lut);
	} else {
		/* new firmware API: current notification layout */
		struct iwl_bt_coex_profile_notif *notif =
			&mvm->last_bt_notif;

		pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);

		pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
				 notif->bt_ci_compliance);
		pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
				 le32_to_cpu(notif->primary_ch_lut));
		pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
				 le32_to_cpu(notif->secondary_ch_lut));
		pos += scnprintf(buf+pos,
				 bufsz-pos, "bt_activity_grading = %d\n",
				 le32_to_cpu(notif->bt_activity_grading));
		pos += scnprintf(buf+pos, bufsz-pos,
				 "antenna isolation = %d CORUN LUT index = %d\n",
				 mvm->last_ant_isol, mvm->last_corun_lut);
	}

	mutex_unlock(&mvm->mutex);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}
#undef BT_MBOX_PRINT
543
544static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
545 size_t count, loff_t *ppos)
546{
547 struct iwl_mvm *mvm = file->private_data;
548 char buf[256];
549 int bufsz = sizeof(buf);
550 int pos = 0;
551
552 mutex_lock(&mvm->mutex);
553
554 if (!fw_has_api(&mvm->fw->ucode_capa,
555 IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
556 struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
557
558 pos += scnprintf(buf+pos, bufsz-pos,
559 "Channel inhibition CMD\n");
560 pos += scnprintf(buf+pos, bufsz-pos,
561 "\tPrimary Channel Bitmap 0x%016llx\n",
562 le64_to_cpu(cmd->bt_primary_ci));
563 pos += scnprintf(buf+pos, bufsz-pos,
564 "\tSecondary Channel Bitmap 0x%016llx\n",
565 le64_to_cpu(cmd->bt_secondary_ci));
566
567 pos += scnprintf(buf+pos, bufsz-pos,
568 "BT Configuration CMD - 0=default, 1=never, 2=always\n");
569 pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
570 mvm->bt_ack_kill_msk[0]);
571 pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
572 mvm->bt_cts_kill_msk[0]);
573
574 } else {
575 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
576
577 pos += scnprintf(buf+pos, bufsz-pos,
578 "Channel inhibition CMD\n");
579 pos += scnprintf(buf+pos, bufsz-pos,
580 "\tPrimary Channel Bitmap 0x%016llx\n",
581 le64_to_cpu(cmd->bt_primary_ci));
582 pos += scnprintf(buf+pos, bufsz-pos,
583 "\tSecondary Channel Bitmap 0x%016llx\n",
584 le64_to_cpu(cmd->bt_secondary_ci));
585 }
586
587 mutex_unlock(&mvm->mutex);
588
589 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
590}
591
592static ssize_t
593iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
594 size_t count, loff_t *ppos)
595{
596 u32 bt_tx_prio;
597
598 if (sscanf(buf, "%u", &bt_tx_prio) != 1)
599 return -EINVAL;
600 if (bt_tx_prio > 4)
601 return -EINVAL;
602
603 mvm->bt_tx_prio = bt_tx_prio;
604
605 return count;
606}
607
608static ssize_t
609iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
610 size_t count, loff_t *ppos)
611{
612 static const char * const modes_str[BT_FORCE_ANT_MAX] = {
613 [BT_FORCE_ANT_DIS] = "dis",
614 [BT_FORCE_ANT_AUTO] = "auto",
615 [BT_FORCE_ANT_BT] = "bt",
616 [BT_FORCE_ANT_WIFI] = "wifi",
617 };
618 int ret, bt_force_ant_mode;
619
620 for (bt_force_ant_mode = 0;
621 bt_force_ant_mode < ARRAY_SIZE(modes_str);
622 bt_force_ant_mode++) {
623 if (!strcmp(buf, modes_str[bt_force_ant_mode]))
624 break;
625 }
626
627 if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
628 return -EINVAL;
629
630 ret = 0;
631 mutex_lock(&mvm->mutex);
632 if (mvm->bt_force_ant_mode == bt_force_ant_mode)
633 goto out;
634
635 mvm->bt_force_ant_mode = bt_force_ant_mode;
636 IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
637 modes_str[mvm->bt_force_ant_mode]);
638 ret = iwl_send_bt_init_conf(mvm);
639
640out:
641 mutex_unlock(&mvm->mutex);
642 return ret ?: count;
643}
644
645#define PRINT_STATS_LE32(_struct, _memb) \
646 pos += scnprintf(buf + pos, bufsz - pos, \
647 fmt_table, #_memb, \
648 le32_to_cpu(_struct->_memb))
649
650static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
651 char __user *user_buf, size_t count,
652 loff_t *ppos)
653{
654 struct iwl_mvm *mvm = file->private_data;
655 static const char *fmt_table = "\t%-30s %10u\n";
656 static const char *fmt_header = "%-32s\n";
657 int pos = 0;
658 char *buf;
659 int ret;
660 /* 43 is the size of each data line, 33 is the size of each header */
661 size_t bufsz =
662 ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
663 (4 * 33) + 1;
664
665 struct mvm_statistics_rx_phy *ofdm;
666 struct mvm_statistics_rx_phy *cck;
667 struct mvm_statistics_rx_non_phy *general;
668 struct mvm_statistics_rx_ht_phy *ht;
669
670 buf = kzalloc(bufsz, GFP_KERNEL);
671 if (!buf)
672 return -ENOMEM;
673
674 mutex_lock(&mvm->mutex);
675
676 ofdm = &mvm->rx_stats.ofdm;
677 cck = &mvm->rx_stats.cck;
678 general = &mvm->rx_stats.general;
679 ht = &mvm->rx_stats.ofdm_ht;
680
681 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
682 "Statistics_Rx - OFDM");
683 PRINT_STATS_LE32(ofdm, ina_cnt);
684 PRINT_STATS_LE32(ofdm, fina_cnt);
685 PRINT_STATS_LE32(ofdm, plcp_err);
686 PRINT_STATS_LE32(ofdm, crc32_err);
687 PRINT_STATS_LE32(ofdm, overrun_err);
688 PRINT_STATS_LE32(ofdm, early_overrun_err);
689 PRINT_STATS_LE32(ofdm, crc32_good);
690 PRINT_STATS_LE32(ofdm, false_alarm_cnt);
691 PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
692 PRINT_STATS_LE32(ofdm, sfd_timeout);
693 PRINT_STATS_LE32(ofdm, fina_timeout);
694 PRINT_STATS_LE32(ofdm, unresponded_rts);
695 PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
696 PRINT_STATS_LE32(ofdm, sent_ack_cnt);
697 PRINT_STATS_LE32(ofdm, sent_cts_cnt);
698 PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
699 PRINT_STATS_LE32(ofdm, dsp_self_kill);
700 PRINT_STATS_LE32(ofdm, mh_format_err);
701 PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
702 PRINT_STATS_LE32(ofdm, reserved);
703
704 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
705 "Statistics_Rx - CCK");
706 PRINT_STATS_LE32(cck, ina_cnt);
707 PRINT_STATS_LE32(cck, fina_cnt);
708 PRINT_STATS_LE32(cck, plcp_err);
709 PRINT_STATS_LE32(cck, crc32_err);
710 PRINT_STATS_LE32(cck, overrun_err);
711 PRINT_STATS_LE32(cck, early_overrun_err);
712 PRINT_STATS_LE32(cck, crc32_good);
713 PRINT_STATS_LE32(cck, false_alarm_cnt);
714 PRINT_STATS_LE32(cck, fina_sync_err_cnt);
715 PRINT_STATS_LE32(cck, sfd_timeout);
716 PRINT_STATS_LE32(cck, fina_timeout);
717 PRINT_STATS_LE32(cck, unresponded_rts);
718 PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
719 PRINT_STATS_LE32(cck, sent_ack_cnt);
720 PRINT_STATS_LE32(cck, sent_cts_cnt);
721 PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
722 PRINT_STATS_LE32(cck, dsp_self_kill);
723 PRINT_STATS_LE32(cck, mh_format_err);
724 PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
725 PRINT_STATS_LE32(cck, reserved);
726
727 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
728 "Statistics_Rx - GENERAL");
729 PRINT_STATS_LE32(general, bogus_cts);
730 PRINT_STATS_LE32(general, bogus_ack);
731 PRINT_STATS_LE32(general, non_bssid_frames);
732 PRINT_STATS_LE32(general, filtered_frames);
733 PRINT_STATS_LE32(general, non_channel_beacons);
734 PRINT_STATS_LE32(general, channel_beacons);
735 PRINT_STATS_LE32(general, num_missed_bcon);
736 PRINT_STATS_LE32(general, adc_rx_saturation_time);
737 PRINT_STATS_LE32(general, ina_detection_search_time);
738 PRINT_STATS_LE32(general, beacon_silence_rssi_a);
739 PRINT_STATS_LE32(general, beacon_silence_rssi_b);
740 PRINT_STATS_LE32(general, beacon_silence_rssi_c);
741 PRINT_STATS_LE32(general, interference_data_flag);
742 PRINT_STATS_LE32(general, channel_load);
743 PRINT_STATS_LE32(general, dsp_false_alarms);
744 PRINT_STATS_LE32(general, beacon_rssi_a);
745 PRINT_STATS_LE32(general, beacon_rssi_b);
746 PRINT_STATS_LE32(general, beacon_rssi_c);
747 PRINT_STATS_LE32(general, beacon_energy_a);
748 PRINT_STATS_LE32(general, beacon_energy_b);
749 PRINT_STATS_LE32(general, beacon_energy_c);
750 PRINT_STATS_LE32(general, num_bt_kills);
751 PRINT_STATS_LE32(general, mac_id);
752 PRINT_STATS_LE32(general, directed_data_mpdu);
753
754 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
755 "Statistics_Rx - HT");
756 PRINT_STATS_LE32(ht, plcp_err);
757 PRINT_STATS_LE32(ht, overrun_err);
758 PRINT_STATS_LE32(ht, early_overrun_err);
759 PRINT_STATS_LE32(ht, crc32_good);
760 PRINT_STATS_LE32(ht, crc32_err);
761 PRINT_STATS_LE32(ht, mh_format_err);
762 PRINT_STATS_LE32(ht, agg_crc32_good);
763 PRINT_STATS_LE32(ht, agg_mpdu_cnt);
764 PRINT_STATS_LE32(ht, agg_cnt);
765 PRINT_STATS_LE32(ht, unsupport_mcs);
766
767 mutex_unlock(&mvm->mutex);
768
769 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
770 kfree(buf);
771
772 return ret;
773}
774#undef PRINT_STAT_LE32
775
776static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
777 char __user *user_buf, size_t count,
778 loff_t *ppos,
779 struct iwl_mvm_frame_stats *stats)
780{
781 char *buff, *pos, *endpos;
782 int idx, i;
783 int ret;
784 static const size_t bufsz = 1024;
785
786 buff = kmalloc(bufsz, GFP_KERNEL);
787 if (!buff)
788 return -ENOMEM;
789
790 spin_lock_bh(&mvm->drv_stats_lock);
791
792 pos = buff;
793 endpos = pos + bufsz;
794
795 pos += scnprintf(pos, endpos - pos,
796 "Legacy/HT/VHT\t:\t%d/%d/%d\n",
797 stats->legacy_frames,
798 stats->ht_frames,
799 stats->vht_frames);
800 pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
801 stats->bw_20_frames,
802 stats->bw_40_frames,
803 stats->bw_80_frames);
804 pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
805 stats->ngi_frames,
806 stats->sgi_frames);
807 pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
808 stats->siso_frames,
809 stats->mimo2_frames);
810 pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
811 stats->fail_frames,
812 stats->success_frames);
813 pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
814 stats->agg_frames);
815 pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
816 stats->ampdu_count);
817 pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
818 stats->ampdu_count > 0 ?
819 (stats->agg_frames / stats->ampdu_count) : 0);
820
821 pos += scnprintf(pos, endpos - pos, "Last Rates\n");
822
823 idx = stats->last_frame_idx - 1;
824 for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
825 idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
826 if (stats->last_rates[idx] == 0)
827 continue;
828 pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
829 (int)(ARRAY_SIZE(stats->last_rates) - i));
830 pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
831 }
832 spin_unlock_bh(&mvm->drv_stats_lock);
833
834 ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
835 kfree(buff);
836
837 return ret;
838}
839
840static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
841 char __user *user_buf, size_t count,
842 loff_t *ppos)
843{
844 struct iwl_mvm *mvm = file->private_data;
845
846 return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
847 &mvm->drv_rx_stats);
848}
849
850static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
851 size_t count, loff_t *ppos)
852{
853 int ret;
854
855 mutex_lock(&mvm->mutex);
856
857 /* allow one more restart that we're provoking here */
858 if (mvm->restart_fw >= 0)
859 mvm->restart_fw++;
860
861 /* take the return value to make compiler happy - it will fail anyway */
862 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
863
864 mutex_unlock(&mvm->mutex);
865
866 return count;
867}
868
869static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
870 size_t count, loff_t *ppos)
871{
872 int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
873 if (ret)
874 return ret;
875
876 iwl_force_nmi(mvm->trans);
877
878 iwl_mvm_unref(mvm, IWL_MVM_REF_NMI);
879
880 return count;
881}
882
883static ssize_t
884iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
885 char __user *user_buf,
886 size_t count, loff_t *ppos)
887{
888 struct iwl_mvm *mvm = file->private_data;
889 int pos = 0;
890 char buf[32];
891 const size_t bufsz = sizeof(buf);
892
893 /* print which antennas were set for the scan command by the user */
894 pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
895 if (mvm->scan_rx_ant & ANT_A)
896 pos += scnprintf(buf + pos, bufsz - pos, "A");
897 if (mvm->scan_rx_ant & ANT_B)
898 pos += scnprintf(buf + pos, bufsz - pos, "B");
899 if (mvm->scan_rx_ant & ANT_C)
900 pos += scnprintf(buf + pos, bufsz - pos, "C");
901 pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
902
903 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
904}
905
906static ssize_t
907iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
908 size_t count, loff_t *ppos)
909{
910 u8 scan_rx_ant;
911
912 if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
913 return -EINVAL;
914 if (scan_rx_ant > ANT_ABC)
915 return -EINVAL;
916 if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm)))
917 return -EINVAL;
918
919 if (mvm->scan_rx_ant != scan_rx_ant) {
920 mvm->scan_rx_ant = scan_rx_ant;
921 if (fw_has_capa(&mvm->fw->ucode_capa,
922 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
923 iwl_mvm_config_scan(mvm);
924 }
925
926 return count;
927}
928
929static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
930 char __user *user_buf,
931 size_t count, loff_t *ppos)
932{
933 struct iwl_mvm *mvm = file->private_data;
934 int conf;
935 char buf[8];
936 const size_t bufsz = sizeof(buf);
937 int pos = 0;
938
939 mutex_lock(&mvm->mutex);
940 conf = mvm->fw_dbg_conf;
941 mutex_unlock(&mvm->mutex);
942
943 pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
944
945 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
946}
947
948static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
949 char *buf, size_t count,
950 loff_t *ppos)
951{
952 unsigned int conf_id;
953 int ret;
954
955 ret = kstrtouint(buf, 0, &conf_id);
956 if (ret)
957 return ret;
958
959 if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
960 return -EINVAL;
961
962 mutex_lock(&mvm->mutex);
963 ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id);
964 mutex_unlock(&mvm->mutex);
965
966 return ret ?: count;
967}
968
969static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
970 char *buf, size_t count,
971 loff_t *ppos)
972{
973 int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
974
975 if (ret)
976 return ret;
977
978 iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
979
980 iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
981
982 return count;
983}
984
985#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
986#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
987static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
988 char __user *user_buf,
989 size_t count, loff_t *ppos)
990{
991 struct iwl_mvm *mvm = file->private_data;
992 struct iwl_bcast_filter_cmd cmd;
993 const struct iwl_fw_bcast_filter *filter;
994 char *buf;
995 int bufsz = 1024;
996 int i, j, pos = 0;
997 ssize_t ret;
998
999 buf = kzalloc(bufsz, GFP_KERNEL);
1000 if (!buf)
1001 return -ENOMEM;
1002
1003 mutex_lock(&mvm->mutex);
1004 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
1005 ADD_TEXT("None\n");
1006 mutex_unlock(&mvm->mutex);
1007 goto out;
1008 }
1009 mutex_unlock(&mvm->mutex);
1010
1011 for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
1012 filter = &cmd.filters[i];
1013
1014 ADD_TEXT("Filter [%d]:\n", i);
1015 ADD_TEXT("\tDiscard=%d\n", filter->discard);
1016 ADD_TEXT("\tFrame Type: %s\n",
1017 filter->frame_type ? "IPv4" : "Generic");
1018
1019 for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
1020 const struct iwl_fw_bcast_filter_attr *attr;
1021
1022 attr = &filter->attrs[j];
1023 if (!attr->mask)
1024 break;
1025
1026 ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
1027 j, attr->offset,
1028 attr->offset_type ? "IP End" :
1029 "Payload Start",
1030 be32_to_cpu(attr->mask),
1031 be32_to_cpu(attr->val),
1032 le16_to_cpu(attr->reserved1));
1033 }
1034 }
1035out:
1036 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1037 kfree(buf);
1038 return ret;
1039}
1040
1041static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
1042 size_t count, loff_t *ppos)
1043{
1044 int pos, next_pos;
1045 struct iwl_fw_bcast_filter filter = {};
1046 struct iwl_bcast_filter_cmd cmd;
1047 u32 filter_id, attr_id, mask, value;
1048 int err = 0;
1049
1050 if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
1051 &filter.frame_type, &pos) != 3)
1052 return -EINVAL;
1053
1054 if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
1055 filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
1056 return -EINVAL;
1057
1058 for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
1059 attr_id++) {
1060 struct iwl_fw_bcast_filter_attr *attr =
1061 &filter.attrs[attr_id];
1062
1063 if (pos >= count)
1064 break;
1065
1066 if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
1067 &attr->offset, &attr->offset_type,
1068 &mask, &value, &next_pos) != 4)
1069 return -EINVAL;
1070
1071 attr->mask = cpu_to_be32(mask);
1072 attr->val = cpu_to_be32(value);
1073 if (mask)
1074 filter.num_attrs++;
1075
1076 pos += next_pos;
1077 }
1078
1079 mutex_lock(&mvm->mutex);
1080 memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
1081 &filter, sizeof(filter));
1082
1083 /* send updated bcast filtering configuration */
1084 if (mvm->dbgfs_bcast_filtering.override &&
1085 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1086 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1087 sizeof(cmd), &cmd);
1088 mutex_unlock(&mvm->mutex);
1089
1090 return err ?: count;
1091}
1092
1093static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
1094 char __user *user_buf,
1095 size_t count, loff_t *ppos)
1096{
1097 struct iwl_mvm *mvm = file->private_data;
1098 struct iwl_bcast_filter_cmd cmd;
1099 char *buf;
1100 int bufsz = 1024;
1101 int i, pos = 0;
1102 ssize_t ret;
1103
1104 buf = kzalloc(bufsz, GFP_KERNEL);
1105 if (!buf)
1106 return -ENOMEM;
1107
1108 mutex_lock(&mvm->mutex);
1109 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
1110 ADD_TEXT("None\n");
1111 mutex_unlock(&mvm->mutex);
1112 goto out;
1113 }
1114 mutex_unlock(&mvm->mutex);
1115
1116 for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
1117 const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
1118
1119 ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
1120 i, mac->default_discard, mac->attached_filters);
1121 }
1122out:
1123 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1124 kfree(buf);
1125 return ret;
1126}
1127
1128static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
1129 char *buf, size_t count,
1130 loff_t *ppos)
1131{
1132 struct iwl_bcast_filter_cmd cmd;
1133 struct iwl_fw_bcast_mac mac = {};
1134 u32 mac_id, attached_filters;
1135 int err = 0;
1136
1137 if (!mvm->bcast_filters)
1138 return -ENOENT;
1139
1140 if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
1141 &attached_filters) != 3)
1142 return -EINVAL;
1143
1144 if (mac_id >= ARRAY_SIZE(cmd.macs) ||
1145 mac.default_discard > 1 ||
1146 attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
1147 return -EINVAL;
1148
1149 mac.attached_filters = cpu_to_le16(attached_filters);
1150
1151 mutex_lock(&mvm->mutex);
1152 memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
1153 &mac, sizeof(mac));
1154
1155 /* send updated bcast filtering configuration */
1156 if (mvm->dbgfs_bcast_filtering.override &&
1157 iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1158 err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1159 sizeof(cmd), &cmd);
1160 mutex_unlock(&mvm->mutex);
1161
1162 return err ?: count;
1163}
1164#endif
1165
1166#ifdef CONFIG_PM_SLEEP
1167static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
1168 size_t count, loff_t *ppos)
1169{
1170 int store;
1171
1172 if (sscanf(buf, "%d", &store) != 1)
1173 return -EINVAL;
1174
1175 mvm->store_d3_resume_sram = store;
1176
1177 return count;
1178}
1179
1180static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
1181 size_t count, loff_t *ppos)
1182{
1183 struct iwl_mvm *mvm = file->private_data;
1184 const struct fw_img *img;
1185 int ofs, len, pos = 0;
1186 size_t bufsz, ret;
1187 char *buf;
1188 u8 *ptr = mvm->d3_resume_sram;
1189
1190 img = &mvm->fw->img[IWL_UCODE_WOWLAN];
1191 len = img->sec[IWL_UCODE_SECTION_DATA].len;
1192
1193 bufsz = len * 4 + 256;
1194 buf = kzalloc(bufsz, GFP_KERNEL);
1195 if (!buf)
1196 return -ENOMEM;
1197
1198 pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
1199 mvm->store_d3_resume_sram ? "en" : "dis");
1200
1201 if (ptr) {
1202 for (ofs = 0; ofs < len; ofs += 16) {
1203 pos += scnprintf(buf + pos, bufsz - pos,
1204 "0x%.4x %16ph\n", ofs, ptr + ofs);
1205 }
1206 } else {
1207 pos += scnprintf(buf + pos, bufsz - pos,
1208 "(no data captured)\n");
1209 }
1210
1211 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1212
1213 kfree(buf);
1214
1215 return ret;
1216}
1217#endif
1218
/* Append a "(bit): count name" line for 'ref' to the local buf/pos/bufsz,
 * but only when that reference is currently held (non-zero count). */
#define PRINT_MVM_REF(ref) do {					\
	if (mvm->refs[ref])					\
		pos += scnprintf(buf + pos, bufsz - pos,	\
				 "\t(0x%lx): %d %s\n",		\
				 BIT(ref), mvm->refs[ref], #ref); \
} while (0)
1225
/*
 * Report which D0i3 (runtime power-save) references are currently taken:
 * first a bitmask of all held refs, then one line per held reference with
 * its bit, count and name.
 */
static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	int i, pos = 0;
	char buf[256];
	const size_t bufsz = sizeof(buf);
	u32 refs = 0;

	/* collect the held references into a single bitmask */
	for (i = 0; i < IWL_MVM_REF_COUNT; i++)
		if (mvm->refs[i])
			refs |= BIT(i);

	pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n",
			 refs);

	/* one line per held ref; PRINT_MVM_REF skips refs with count 0 */
	PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
	PRINT_MVM_REF(IWL_MVM_REF_SCAN);
	PRINT_MVM_REF(IWL_MVM_REF_ROC);
	PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
	PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
	PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
	PRINT_MVM_REF(IWL_MVM_REF_USER);
	PRINT_MVM_REF(IWL_MVM_REF_TX);
	PRINT_MVM_REF(IWL_MVM_REF_TX_AGG);
	PRINT_MVM_REF(IWL_MVM_REF_ADD_IF);
	PRINT_MVM_REF(IWL_MVM_REF_START_AP);
	PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED);
	PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX);
	PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS);
	PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL);
	PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ);
	PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE);
	PRINT_MVM_REF(IWL_MVM_REF_NMI);
	PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
	PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
	PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
	PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
1268
1269static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
1270 size_t count, loff_t *ppos)
1271{
1272 unsigned long value;
1273 int ret;
1274 bool taken;
1275
1276 ret = kstrtoul(buf, 10, &value);
1277 if (ret < 0)
1278 return ret;
1279
1280 mutex_lock(&mvm->mutex);
1281
1282 taken = mvm->refs[IWL_MVM_REF_USER];
1283 if (value == 1 && !taken)
1284 iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
1285 else if (value == 0 && taken)
1286 iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
1287 else
1288 ret = -EINVAL;
1289
1290 mutex_unlock(&mvm->mutex);
1291
1292 if (ret < 0)
1293 return ret;
1294 return count;
1295}
1296
/* Convenience wrappers binding the generic _MVM_DEBUGFS_*_FILE_OPS macros
 * (from debugfs.h) to struct iwl_mvm as the private-data type. */
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
/* Create a debugfs file named 'alias' backed by iwl_dbgfs_<name>_ops;
 * jumps to the enclosing function's 'err' label on failure. */
#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do {	\
		if (!debugfs_create_file(alias, mode, parent, mvm,	\
					 &iwl_dbgfs_##name##_ops))	\
			goto err;					\
	} while (0)
/* Common case: the file name equals the ops name. */
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
	MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
1308
1309static ssize_t
1310iwl_dbgfs_prph_reg_read(struct file *file,
1311 char __user *user_buf,
1312 size_t count, loff_t *ppos)
1313{
1314 struct iwl_mvm *mvm = file->private_data;
1315 int pos = 0;
1316 char buf[32];
1317 const size_t bufsz = sizeof(buf);
1318 int ret;
1319
1320 if (!mvm->dbgfs_prph_reg_addr)
1321 return -EINVAL;
1322
1323 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ);
1324 if (ret)
1325 return ret;
1326
1327 pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
1328 mvm->dbgfs_prph_reg_addr,
1329 iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
1330
1331 iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ);
1332
1333 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1334}
1335
1336static ssize_t
1337iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
1338 size_t count, loff_t *ppos)
1339{
1340 u8 args;
1341 u32 value;
1342 int ret;
1343
1344 args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
1345 /* if we only want to set the reg address - nothing more to do */
1346 if (args == 1)
1347 goto out;
1348
1349 /* otherwise, make sure we have both address and value */
1350 if (args != 2)
1351 return -EINVAL;
1352
1353 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
1354 if (ret)
1355 return ret;
1356
1357 iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
1358
1359 iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
1360out:
1361 return count;
1362}
1363
1364static ssize_t
1365iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
1366 size_t count, loff_t *ppos)
1367{
1368 int ret;
1369
1370 mutex_lock(&mvm->mutex);
1371 ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
1372 mutex_unlock(&mvm->mutex);
1373
1374 return ret ?: count;
1375}
1376
/* Instantiate the file_operations for each debugfs entry.  The second
 * argument of the WRITE/READ_WRITE variants is the size of the on-stack
 * copy buffer used by the generated write wrapper (see debugfs.h). */
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);

/* Device wide debugfs entries */
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8);

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
#endif

#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
#endif
1409
/*
 * iwl_mvm_dbgfs_register - create all device-wide mvm debugfs entries
 * @mvm: the op-mode instance
 * @dbgfs_dir: debugfs directory to populate
 *
 * Creates every mvm debugfs file, the optional bcast_filtering and
 * PM-sleep entries, the NVM blobs, and a "iwlwifi" symlink from the
 * mac80211 wiphy directory back to this one.
 *
 * Returns 0 on success, -ENOMEM if any entry could not be created
 * (entries created before the failure are not rolled back here).
 */
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
	struct dentry *bcast_dir __maybe_unused;
	char buf[100];

	spin_lock_init(&mvm->drv_stats_lock);

	mvm->debugfs_dir = dbgfs_dir;

	/* MVM_DEBUGFS_ADD_FILE jumps to 'err' on creation failure */
	MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir,
			     S_IWUSR | S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
			     S_IRUSR | S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
			     S_IWUSR | S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
	MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
	if (!debugfs_create_bool("enable_scan_iteration_notif",
				 S_IRUSR | S_IWUSR,
				 mvm->debugfs_dir,
				 &mvm->scan_iter_notif_enabled))
		goto err;

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
	/* bcast filtering files only exist when the firmware supports it */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
		bcast_dir = debugfs_create_dir("bcast_filtering",
					       mvm->debugfs_dir);
		if (!bcast_dir)
			goto err;

		if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
				bcast_dir,
				&mvm->dbgfs_bcast_filtering.override))
			goto err;

		MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
					   bcast_dir, S_IWUSR | S_IRUSR);
		MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
					   bcast_dir, S_IWUSR | S_IRUSR);
	}
#endif

#ifdef CONFIG_PM_SLEEP
	MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
	MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
	if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
				 mvm->debugfs_dir, &mvm->d3_wake_sysassert))
		goto err;
	if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR,
				mvm->debugfs_dir, &mvm->last_netdetect_scans))
		goto err;
#endif

	if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR,
			       mvm->debugfs_dir,
			       &mvm->low_latency_agg_frame_limit))
		goto err;
	if (!debugfs_create_u8("ps_disabled", S_IRUSR,
			       mvm->debugfs_dir, &mvm->ps_disabled))
		goto err;
	/* raw NVM section dumps exposed as read-only blobs */
	if (!debugfs_create_blob("nvm_hw", S_IRUSR,
				 mvm->debugfs_dir, &mvm->nvm_hw_blob))
		goto err;
	if (!debugfs_create_blob("nvm_sw", S_IRUSR,
				 mvm->debugfs_dir, &mvm->nvm_sw_blob))
		goto err;
	if (!debugfs_create_blob("nvm_calib", S_IRUSR,
				 mvm->debugfs_dir, &mvm->nvm_calib_blob))
		goto err;
	if (!debugfs_create_blob("nvm_prod", S_IRUSR,
				 mvm->debugfs_dir, &mvm->nvm_prod_blob))
		goto err;
	if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR,
				 mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
		goto err;

	/*
	 * Create a symlink with mac80211. It will be removed when mac80211
	 * exists (before the opmode exists which removes the target.)
	 */
	snprintf(buf, 100, "../../%s/%s",
		 dbgfs_dir->d_parent->d_parent->d_name.name,
		 dbgfs_dir->d_parent->d_name.name);
	if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
		goto err;

	return 0;
err:
	IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
	return -ENOMEM;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
new file mode 100644
index 000000000000..8c4190e7e027
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
@@ -0,0 +1,103 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
/* Generate a read-only file_operations named iwl_dbgfs_<name>_ops whose
 * .read is the hand-written iwl_dbgfs_<name>_read handler. */
#define MVM_DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
}
72
/*
 * Generate a debugfs .write entry point that copies at most buflen-1 bytes
 * of the user buffer into a zero-initialized on-stack buffer (so the
 * result is always NUL-terminated) and forwards it to the hand-written
 * iwl_dbgfs_<name>_write(arg, buf, len, ppos) helper, where 'arg' is the
 * file's private_data cast to 'argtype'.
 */
#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)		\
static ssize_t _iwl_dbgfs_##name##_write(struct file *file,		\
					 const char __user *user_buf,	\
					 size_t count, loff_t *ppos)	\
{									\
	argtype *arg = file->private_data;				\
	char buf[buflen] = {};						\
	size_t buf_size = min(count, sizeof(buf) -  1);			\
									\
	if (copy_from_user(buf, user_buf, buf_size))			\
		return -EFAULT;						\
									\
	return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos);	\
}									\
/* Read+write variant: generates the write wrapper above plus a
 * file_operations combining it with the hand-written read handler. */
#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype)		\
MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)			\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = _iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

/* Write-only variant: same wrapper, no .read member. */
#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)		\
MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)			\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = _iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
new file mode 100644
index 000000000000..d398a6102805
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
@@ -0,0 +1,476 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
#ifndef __fw_api_bt_coex_h__
#define __fw_api_bt_coex_h__

#include <linux/types.h>
#include <linux/bitops.h>

/* Contiguous mask of the 'nb' lowest bits, e.g. BITS(3) == 0x7 */
#define BITS(nb) (BIT(nb) - 1)
72
/**
 * enum iwl_bt_coex_flags - flags for BT_COEX command
 * @BT_COEX_MODE_POS: bit position of the 3-bit coex mode field
 * @BT_COEX_MODE_MSK: mask of the coex mode field
 * @BT_COEX_DISABLE_OLD: coex disabled (legacy API)
 * @BT_COEX_2W_OLD: 2-wire coex (legacy API)
 * @BT_COEX_3W_OLD: 3-wire coex (legacy API)
 * @BT_COEX_NW_OLD: n-wire coex (legacy API)
 * @BT_COEX_AUTO_OLD: automatic mode (legacy API)
 * @BT_COEX_BT_OLD: Antenna is for BT (manufacturing tests)
 * @BT_COEX_WIFI_OLD: Antenna is for WiFi (manufacturing tests)
 * @BT_COEX_SYNC2SCO: sync to SCO
 * @BT_COEX_CORUNNING: co-running enabled
 * @BT_COEX_MPLUT: multi-priority LUT enabled
 * @BT_COEX_TTC: transmit traffic control
 * @BT_COEX_RRC: receive rate control
 *
 * The COEX_MODE must be set for each command. Even if it is not changed.
 */
enum iwl_bt_coex_flags {
	BT_COEX_MODE_POS		= 3,
	BT_COEX_MODE_MSK		= BITS(3) << BT_COEX_MODE_POS,
	BT_COEX_DISABLE_OLD		= 0x0 << BT_COEX_MODE_POS,
	BT_COEX_2W_OLD			= 0x1 << BT_COEX_MODE_POS,
	BT_COEX_3W_OLD			= 0x2 << BT_COEX_MODE_POS,
	BT_COEX_NW_OLD			= 0x3 << BT_COEX_MODE_POS,
	BT_COEX_AUTO_OLD		= 0x5 << BT_COEX_MODE_POS,
	BT_COEX_BT_OLD			= 0x6 << BT_COEX_MODE_POS,
	BT_COEX_WIFI_OLD		= 0x7 << BT_COEX_MODE_POS,
	BT_COEX_SYNC2SCO		= BIT(7),
	BT_COEX_CORUNNING		= BIT(8),
	BT_COEX_MPLUT			= BIT(9),
	BT_COEX_TTC			= BIT(20),
	BT_COEX_RRC			= BIT(21),
};
108
/*
 * indicates what has changed in the BT_COEX command.
 * BT_VALID_ENABLE must be set for each command. Commands without this bit
 * will be discarded by the firmware
 */
enum iwl_bt_coex_valid_bit_msk {
	BT_VALID_ENABLE			= BIT(0),
	BT_VALID_BT_PRIO_BOOST		= BIT(1),
	BT_VALID_MAX_KILL		= BIT(2),
	BT_VALID_3W_TMRS		= BIT(3),
	BT_VALID_KILL_ACK		= BIT(4),
	BT_VALID_KILL_CTS		= BIT(5),
	BT_VALID_REDUCED_TX_POWER	= BIT(6),
	BT_VALID_LUT			= BIT(7),
	BT_VALID_WIFI_RX_SW_PRIO_BOOST	= BIT(8),
	BT_VALID_WIFI_TX_SW_PRIO_BOOST	= BIT(9),
	BT_VALID_MULTI_PRIO_LUT		= BIT(10),
	BT_VALID_TRM_KICK_FILTER	= BIT(11),
	BT_VALID_CORUN_LUT_20		= BIT(12),
	BT_VALID_CORUN_LUT_40		= BIT(13),
	BT_VALID_ANT_ISOLATION		= BIT(14),
	BT_VALID_ANT_ISOLATION_THRS	= BIT(15),
	BT_VALID_TXTX_DELTA_FREQ_THRS	= BIT(16),
	BT_VALID_TXRX_MAX_FREQ_0	= BIT(17),
	BT_VALID_SYNC_TO_SCO		= BIT(18),
	/* NOTE(review): BIT(19) is unused here — confirm against the
	 * firmware API definition */
	BT_VALID_TTC			= BIT(20),
	BT_VALID_RRC			= BIT(21),
};
137
/**
 * enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames.
 * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
 * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
 *
 * This mechanism allows to have BT and WiFi run concurrently. Since WiFi
 * reduces its Tx power, it can work along with BT, hence reducing the amount
 * of WiFi frames being killed by BT.
 */
enum iwl_bt_reduced_tx_power {
	BT_REDUCED_TX_POWER_CTL		= BIT(0),
	BT_REDUCED_TX_POWER_DATA	= BIT(1),
};

/* PTA decision LUT selector; INVALID means "keep the current LUT" per the
 * iwl_bt_coex_cmd_old kdoc below. */
enum iwl_bt_coex_lut_type {
	BT_COEX_TIGHT_LUT = 0,
	BT_COEX_LOOSE_LUT,
	BT_COEX_TX_DIS_LUT,

	BT_COEX_MAX_LUT,
	BT_COEX_INVALID_LUT = 0xff,
}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */

/* Fixed array sizes of the LUT/boost fields in the coex commands below */
#define BT_COEX_LUT_SIZE (12)
#define BT_COEX_CORUN_LUT_SIZE (32)
#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
#define BT_COEX_BOOST_SIZE (4)
#define BT_REDUCED_TX_POWER_BIT BIT(7)
166
/**
 * struct iwl_bt_coex_cmd_old - bt coex configuration command
 * @flags: &enum iwl_bt_coex_flags
 * @max_kill: maximum kill count
 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
 * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
 *	should be set by default
 * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
 *	should be set by default
 * @bt4_antenna_isolation: antenna isolation
 * @bt4_antenna_isolation_thr: antenna threshold value
 * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
 * @bt4_tx_rx_max_freq0: TxRx max frequency
 * @bt_prio_boost: BT priority boost registers
 * @wifi_tx_prio_boost: SW boost of wifi tx priority
 * @wifi_rx_prio_boost: SW boost of wifi rx priority
 * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
 * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
 * @decision_lut: PTA decision LUT, per Prio-Ch
 * @bt4_multiprio_lut: multi priority LUT configuration
 * @bt4_corun_lut20: co-running 20 MHz LUT configuration
 * @bt4_corun_lut40: co-running 40 MHz LUT configuration
 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
 *
 * The structure is used for the BT_COEX command.  Field order and widths
 * are firmware ABI (BT_COEX_CMD_API_S_VER_5) — do not reorder.
 */
struct iwl_bt_coex_cmd_old {
	__le32 flags;
	u8 max_kill;
	u8 bt_reduced_tx_power;
	u8 override_primary_lut;
	u8 override_secondary_lut;

	u8 bt4_antenna_isolation;
	u8 bt4_antenna_isolation_thr;
	u8 bt4_tx_tx_delta_freq_thr;
	u8 bt4_tx_rx_max_freq0;

	__le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
	__le32 wifi_tx_prio_boost;
	__le32 wifi_rx_prio_boost;
	__le32 kill_ack_msk;
	__le32 kill_cts_msk;

	__le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
	__le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
	__le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
	__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];

	__le32 valid_bit_msk;
} __packed; /* BT_COEX_CMD_API_S_VER_5 */
218
/* Coex operating mode for the current (non-_old) BT_COEX API */
enum iwl_bt_coex_mode {
	BT_COEX_DISABLE			= 0x0,
	BT_COEX_NW			= 0x1,
	BT_COEX_BT			= 0x2,
	BT_COEX_WIFI			= 0x3,
}; /* BT_COEX_MODES_E */

/* Bitmask of coex sub-modules to enable */
enum iwl_bt_coex_enabled_modules {
	BT_COEX_MPLUT_ENABLED		= BIT(0),
	BT_COEX_MPLUT_BOOST_ENABLED	= BIT(1),
	BT_COEX_SYNC2SCO_ENABLED	= BIT(2),
	BT_COEX_CORUN_ENABLED		= BIT(3),
	BT_COEX_HIGH_BAND_RET		= BIT(4),
}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */

/**
 * struct iwl_bt_coex_cmd - bt coex configuration command
 * @mode: enum %iwl_bt_coex_mode
 * @enabled_modules: enum %iwl_bt_coex_enabled_modules
 *
 * The structure is used for the BT_COEX command.
 */
struct iwl_bt_coex_cmd {
	__le32 mode;
	__le32 enabled_modules;
} __packed; /* BT_COEX_CMD_API_S_VER_6 */
245
/**
 * struct iwl_bt_coex_corun_lut_update_cmd - bt coex update the corun lut
 * @corun_lut20: co-running 20 MHz LUT configuration
 * @corun_lut40: co-running 40 MHz LUT configuration
 *
 * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command.
 */
struct iwl_bt_coex_corun_lut_update_cmd {
	__le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE];
	__le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE];
} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */

/**
 * struct iwl_bt_coex_reduced_txp_update_cmd - reduced Tx power update
 * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the
 *	bits are the sta_id (value)
 */
struct iwl_bt_coex_reduced_txp_update_cmd {
	__le32 reduced_txp;
} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */

/**
 * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
 * @bt_primary_ci: inhibition data for the primary channel
 * @primary_ch_phy_id: PHY id of the primary channel
 * @bt_secondary_ci: inhibition data for the secondary channel
 * @secondary_ch_phy_id: PHY id of the secondary channel
 *
 * Used for BT_COEX_CI command
 */
struct iwl_bt_coex_ci_cmd {
	__le64 bt_primary_ci;
	__le32 primary_ch_phy_id;

	__le64 bt_secondary_ci;
	__le32 secondary_ch_phy_id;
} __packed; /* BT_CI_MSG_API_S_VER_2 */
283
/* Expand to two enumerators per mailbox field: BT_MBOX<dw>_<msg>_POS (the
 * bit offset within the dword) and BT_MBOX<dw>_<msg> (the field's mask). */
#define BT_MBOX(n_dw, _msg, _pos, _nbits)	\
	BT_MBOX##n_dw##_##_msg##_POS	= (_pos),	\
	BT_MBOX##n_dw##_##_msg		= BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS

/* Fields of mbox_msg[0] in the BT coex profile notification */
enum iwl_bt_mxbox_dw0 {
	BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
	BT_MBOX(0, LE_PROF1, 3, 1),
	BT_MBOX(0, LE_PROF2, 4, 1),
	BT_MBOX(0, LE_PROF_OTHER, 5, 1),
	BT_MBOX(0, CHL_SEQ_N, 8, 4),
	BT_MBOX(0, INBAND_S, 13, 1),
	BT_MBOX(0, LE_MIN_RSSI, 16, 4),
	BT_MBOX(0, LE_SCAN, 20, 1),
	BT_MBOX(0, LE_ADV, 21, 1),
	BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
	BT_MBOX(0, OPEN_CON_1, 28, 2),
};

/* Fields of mbox_msg[1] */
enum iwl_bt_mxbox_dw1 {
	BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
	BT_MBOX(1, IP_SR, 4, 1),
	BT_MBOX(1, LE_MSTR, 5, 1),
	BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
	BT_MBOX(1, MSG_TYPE, 16, 3),
	BT_MBOX(1, SSN, 19, 2),
};

/* Fields of mbox_msg[2] */
enum iwl_bt_mxbox_dw2 {
	BT_MBOX(2, SNIFF_ACT, 0, 3),
	BT_MBOX(2, PAG, 3, 1),
	BT_MBOX(2, INQUIRY, 4, 1),
	BT_MBOX(2, CONN, 5, 1),
	BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
	BT_MBOX(2, DISC, 13, 1),
	BT_MBOX(2, SCO_TX_ACT, 16, 2),
	BT_MBOX(2, SCO_RX_ACT, 18, 2),
	BT_MBOX(2, ESCO_RE_TX, 20, 2),
	BT_MBOX(2, SCO_DURATION, 24, 6),
};

/* Fields of mbox_msg[3] */
enum iwl_bt_mxbox_dw3 {
	BT_MBOX(3, SCO_STATE, 0, 1),
	BT_MBOX(3, SNIFF_STATE, 1, 1),
	BT_MBOX(3, A2DP_STATE, 2, 1),
	BT_MBOX(3, ACL_STATE, 3, 1),
	BT_MBOX(3, MSTR_STATE, 4, 1),
	BT_MBOX(3, OBX_STATE, 5, 1),
	BT_MBOX(3, OPEN_CON_2, 8, 2),
	BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
	BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
	BT_MBOX(3, INBAND_P, 13, 1),
	BT_MBOX(3, MSG_TYPE_2, 16, 3),
	BT_MBOX(3, SSN_2, 19, 2),
	BT_MBOX(3, UPDATE_REQUEST, 21, 1),
};

/* Extract field _field of mailbox dword _num from notification _notif */
#define BT_MBOX_MSG(_notif, _num, _field)				     \
	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
	>> BT_MBOX##_num##_##_field##_POS)
343
/* coarse grading of current BT activity, reported in the profile notif */
enum iwl_bt_activity_grading {
	BT_OFF = 0,
	BT_ON_NO_CONNECTION = 1,
	BT_LOW_TRAFFIC = 2,
	BT_HIGH_TRAFFIC = 3,

	BT_MAX_AG,
}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */

/* which channel(s) the channel-inhibition data applies to (per the names) */
enum iwl_bt_ci_compliance {
	BT_CI_COMPLIANCE_NONE = 0,
	BT_CI_COMPLIANCE_PRIMARY = 1,
	BT_CI_COMPLIANCE_SECONDARY = 2,
	BT_CI_COMPLIANCE_BOTH = 3,
}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */

/*
 * In the ttc_rrc_status byte, TTC bits live in the low nibble and RRC
 * bits in the high nibble - one bit per PHY in each.
 */
#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id)	\
	(_ttc_rrc_status & BIT(_phy_id))

#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id)	\
	((_ttc_rrc_status >> 4) & BIT(_phy_id))
365
/**
 * struct iwl_bt_coex_profile_notif - notification about BT coex
 * @mbox_msg: message from BT to WiFi
 * @msg_idx: the index of the message
 * @bt_ci_compliance: enum %iwl_bt_ci_compliance
 * @primary_ch_lut: LUT used for primary channel enum %iwl_bt_coex_lut_type
 * @secondary_ch_lut: LUT used for secondary channel enum %iwl_bt_coex_lut_type
 * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
 * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY
 * @reserved: reserved (pads the struct to a dword boundary)
 */
struct iwl_bt_coex_profile_notif {
	__le32 mbox_msg[4];
	__le32 msg_idx;
	__le32 bt_ci_compliance;

	__le32 primary_ch_lut;
	__le32 secondary_ch_lut;
	__le32 bt_activity_grading;
	u8 ttc_rrc_status;
	u8 reserved[3];
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
387
/* events that get an entry in the BT coex priority table */
enum iwl_bt_coex_prio_table_event {
	BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
	BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3,
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
	BT_COEX_PRIO_TBL_EVT_DTIM = 6,
	BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
	BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
	BT_COEX_PRIO_TBL_EVT_IDLE = 9,
	BT_COEX_PRIO_TBL_EVT_MAX = 16,
}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */

/* priority levels assignable to a priority-table event */
enum iwl_bt_coex_prio_table_prio {
	BT_COEX_PRIO_TBL_DISABLED = 0,
	BT_COEX_PRIO_TBL_PRIO_LOW = 1,
	BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
	BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
	BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
	BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
	BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
	BT_COEX_PRIO_TBL_MAX = 8,
}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */

/* bit positions inside each prio_tbl entry byte */
#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0)
#define BT_COEX_PRIO_TBL_PRIO_POS (1)
#define BT_COEX_PRIO_TBL_RESERVED_POS (4)

/**
 * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
 * @prio_tbl: one entry per &enum iwl_bt_coex_prio_table_event; each byte
 *	presumably packs a shared-antenna flag and a priority via the
 *	*_POS defines above - confirm against firmware docs
 */
struct iwl_bt_coex_prio_tbl_cmd {
	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
} __packed;

/**
 * struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command
 * @bt_primary_ci: BT channel-inhibition data for the primary channel
 * @bt_secondary_ci: BT channel-inhibition data for the secondary channel
 * @co_run_bw_primary: co-running bandwidth for the primary channel
 *	(field name only - confirm semantics)
 * @co_run_bw_secondary: co-running bandwidth for the secondary channel
 *	(field name only - confirm semantics)
 * @primary_ch_phy_id: PHY id of the primary channel
 * @secondary_ch_phy_id: PHY id of the secondary channel
 *
 * Used for BT_COEX_CI command
 */
struct iwl_bt_coex_ci_cmd_old {
	__le64 bt_primary_ci;
	__le64 bt_secondary_ci;

	u8 co_run_bw_primary;
	u8 co_run_bw_secondary;
	u8 primary_ch_phy_id;
	u8 secondary_ch_phy_id;
} __packed; /* BT_CI_MSG_API_S_VER_1 */
445
/**
 * struct iwl_bt_coex_profile_notif_old - notification about BT coex
 * @mbox_msg: message from BT to WiFi
 * @msg_idx: the index of the message
 * @bt_status: 0 - off, 1 - on
 * @bt_open_conn: number of BT connections open
 * @bt_traffic_load: load of BT traffic
 * @bt_agg_traffic_load: aggregated load of BT traffic
 * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
 * @ttc_enabled: whether TTC is enabled (field name only - confirm semantics)
 * @rrc_enabled: whether RRC is enabled (field name only - confirm semantics)
 * @reserved: reserved (alignment padding)
 * @primary_ch_lut: LUT used for primary channel
 * @secondary_ch_lut: LUT used for secondary channel
 * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
 */
struct iwl_bt_coex_profile_notif_old {
	__le32 mbox_msg[4];
	__le32 msg_idx;
	u8 bt_status;
	u8 bt_open_conn;
	u8 bt_traffic_load;
	u8 bt_agg_traffic_load;
	u8 bt_ci_compliance;
	u8 ttc_enabled;
	u8 rrc_enabled;
	u8 reserved;

	__le32 primary_ch_lut;
	__le32 secondary_ch_lut;
	__le32 bt_activity_grading;
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
475
476#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
new file mode 100644
index 000000000000..20521bebb0b1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
@@ -0,0 +1,425 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __fw_api_d3_h__
66#define __fw_api_d3_h__
67
/**
 * enum iwl_d3_wakeup_flags - D3 manager wakeup flags
 * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
 */
enum iwl_d3_wakeup_flags {
	IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */

/**
 * struct iwl_d3_manager_config - D3 manager configuration command
 * @min_sleep_time: minimum sleep time (in usec)
 * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
 * @wakeup_host_timer: force wakeup after this many seconds
 *
 * The structure is used for the D3_CONFIG_CMD command.
 */
struct iwl_d3_manager_config {
	__le32 min_sleep_time;
	__le32 wakeup_flags;
	__le32 wakeup_host_timer;
} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
89
90
/* TODO: OFFLOADS_QUERY_API_S_VER_1 */

/**
 * enum iwl_proto_offloads - enabled protocol offloads
 * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
 * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
 */
enum iwl_proto_offloads {
	IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
	IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
};

/*
 * Number of IPv6 addresses each command version can carry; V3 exists in
 * a "large" (V3L) and a "small" (V3S) layout, see the v3 structs below.
 */
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4
#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12

#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4
#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2
111
/**
 * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
 * @enabled: enable flags, see &enum iwl_proto_offloads
 * @remote_ipv4_addr: remote address to answer to (or zero if all)
 * @host_ipv4_addr: our IPv4 address to respond to queries for
 * @arp_mac_addr: our MAC address for ARP responses
 * @reserved: unused
 */
struct iwl_proto_offload_cmd_common {
	__le32 enabled;
	__be32 remote_ipv4_addr;
	__be32 host_ipv4_addr;
	u8 arp_mac_addr[ETH_ALEN];
	__le16 reserved;
} __packed;

/**
 * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
 * @common: common/IPv4 configuration
 * @remote_ipv6_addr: remote address to answer to (or zero if all)
 * @solicited_node_ipv6_addr: broken -- solicited node address exists
 *	for each target address
 * @target_ipv6_addr: our target addresses
 * @ndp_mac_addr: neighbor solicitation response MAC address
 * @reserved2: reserved
 */
struct iwl_proto_offload_cmd_v1 {
	struct iwl_proto_offload_cmd_common common;
	u8 remote_ipv6_addr[16];
	u8 solicited_node_ipv6_addr[16];
	u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
	u8 ndp_mac_addr[ETH_ALEN];
	__le16 reserved2;
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */

/**
 * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
 * @common: common/IPv4 configuration
 * @remote_ipv6_addr: remote address to answer to (or zero if all)
 * @solicited_node_ipv6_addr: broken -- solicited node address exists
 *	for each target address
 * @target_ipv6_addr: our target addresses
 * @ndp_mac_addr: neighbor solicitation response MAC address
 * @numValidIPv6Addresses: number of valid entries in @target_ipv6_addr
 * @reserved2: reserved
 */
struct iwl_proto_offload_cmd_v2 {
	struct iwl_proto_offload_cmd_common common;
	u8 remote_ipv6_addr[16];
	u8 solicited_node_ipv6_addr[16];
	u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
	u8 ndp_mac_addr[ETH_ALEN];
	u8 numValidIPv6Addresses;
	u8 reserved2[3];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
164
/* a single NS (neighbor solicitation) offload configuration */
struct iwl_ns_config {
	struct in6_addr source_ipv6_addr;
	struct in6_addr dest_ipv6_addr;
	u8 target_mac_addr[ETH_ALEN];
	__le16 reserved;
} __packed; /* NS_OFFLOAD_CONFIG */

/* one target IPv6 address; config_num is presumably the index of the
 * matching ns_config entry - confirm against firmware docs
 */
struct iwl_targ_addr {
	struct in6_addr addr;
	__le32 config_num;
} __packed; /* TARGET_IPV6_ADDRESS */

/**
 * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
 * @common: common/IPv4 configuration
 * @num_valid_ipv6_addrs: number of valid entries in @targ_addrs
 * @targ_addrs: target IPv6 addresses
 * @ns_config: NS offload configurations
 */
struct iwl_proto_offload_cmd_v3_small {
	struct iwl_proto_offload_cmd_common common;
	__le32 num_valid_ipv6_addrs;
	struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
	struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */

/**
 * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
 * @common: common/IPv4 configuration
 * @num_valid_ipv6_addrs: number of valid entries in @targ_addrs
 * @targ_addrs: target IPv6 addresses
 * @ns_config: NS offload configurations
 */
struct iwl_proto_offload_cmd_v3_large {
	struct iwl_proto_offload_cmd_common common;
	__le32 num_valid_ipv6_addrs;
	struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
	struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
202
/*
 * WOWLAN_PATTERNS
 */
#define IWL_WOWLAN_MIN_PATTERN_LEN 16
#define IWL_WOWLAN_MAX_PATTERN_LEN 128

/* one wakeup pattern: per-byte match bitmap plus the pattern bytes */
struct iwl_wowlan_pattern {
	u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
	u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
	u8 mask_size;
	u8 pattern_size;
	__le16 reserved;
} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */

#define IWL_WOWLAN_MAX_PATTERNS 20

/* n_patterns entries follow in the flexible array */
struct iwl_wowlan_patterns_cmd {
	__le32 n_patterns;
	struct iwl_wowlan_pattern patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */

/* events allowed to wake the host from WoWLAN */
enum iwl_wowlan_wakeup_filters {
	IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
	IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
	IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
	IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
	IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
	IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
	IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
	IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7),
	IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
	IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
	IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
	IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
	IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
	IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
	IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
	IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
	IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */

/* WoWLAN configuration; wakeup_filter takes bits from
 * enum iwl_wowlan_wakeup_filters
 */
struct iwl_wowlan_config_cmd {
	__le32 wakeup_filter;
	__le16 non_qos_seq;
	__le16 qos_seq[8];
	u8 wowlan_ba_teardown_tids;
	u8 is_11n_connection;
	u8 offloading_tid;
	u8 reserved[3];
} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
253
/*
 * WOWLAN_TSC_RSC_PARAMS
 */
#define IWL_NUM_RSC 16

/* TKIP sequence counter, split into its 16-bit and 32-bit IV halves */
struct tkip_sc {
	__le16 iv16;
	__le16 pad;
	__le32 iv32;
} __packed; /* TKIP_SC_API_U_VER_1 */

/* TKIP receive counters (per unicast/multicast key) and the TX counter */
struct iwl_tkip_rsc_tsc {
	struct tkip_sc unicast_rsc[IWL_NUM_RSC];
	struct tkip_sc multicast_rsc[IWL_NUM_RSC];
	struct tkip_sc tsc;
} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */

/* AES (CCMP) packet number */
struct aes_sc {
	__le64 pn;
} __packed; /* TKIP_AES_SC_API_U_VER_1 */

/* AES receive counters (per unicast/multicast key) and the TX counter */
struct iwl_aes_rsc_tsc {
	struct aes_sc unicast_rsc[IWL_NUM_RSC];
	struct aes_sc multicast_rsc[IWL_NUM_RSC];
	struct aes_sc tsc;
} __packed; /* AES_TSC_RSC_API_S_VER_1 */

union iwl_all_tsc_rsc {
	struct iwl_tkip_rsc_tsc tkip;
	struct iwl_aes_rsc_tsc aes;
}; /* ALL_TSC_RSC_API_S_VER_2 */

struct iwl_wowlan_rsc_tsc_params_cmd {
	union iwl_all_tsc_rsc all_tsc_rsc;
} __packed; /* ALL_TSC_RSC_API_S_VER_2 */

#define IWL_MIC_KEY_SIZE 8
/* TKIP MIC keys for TX and for unicast/multicast RX */
struct iwl_mic_keys {
	u8 tx[IWL_MIC_KEY_SIZE];
	u8 rx_unicast[IWL_MIC_KEY_SIZE];
	u8 rx_mcast[IWL_MIC_KEY_SIZE];
} __packed; /* MIC_KEYS_API_S_VER_1 */

#define IWL_P1K_SIZE 5
/* cached TKIP phase-1 key */
struct iwl_p1k_cache {
	__le16 p1k[IWL_P1K_SIZE];
} __packed;

#define IWL_NUM_RX_P1K_CACHE 2

/* TKIP key material handed to the firmware for WoWLAN offload */
struct iwl_wowlan_tkip_params_cmd {
	struct iwl_mic_keys mic_keys;
	struct iwl_p1k_cache tx;
	struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
	struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */

#define IWL_KCK_MAX_SIZE 32
#define IWL_KEK_MAX_SIZE 32

/* KCK/KEK material so the firmware can handle GTK rekeying in WoWLAN */
struct iwl_wowlan_kek_kck_material_cmd {
	u8 kck[IWL_KCK_MAX_SIZE];
	u8 kek[IWL_KEK_MAX_SIZE];
	__le16 kck_len;
	__le16 kek_len;
	__le64 replay_ctr;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
321
#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87

enum iwl_wowlan_rekey_status {
	IWL_WOWLAN_REKEY_POST_REKEY = 0,
	IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */

/* bitmap of reasons for the wakeup; 0 means a non-wireless wakeup */
enum iwl_wowlan_wakeup_reason {
	IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0,
	IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0),
	IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1),
	IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2),
	IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3),
	IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4),
	IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5),
	IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6),
	IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7),
	IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
	IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13),
	IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
	IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
	IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),

}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */

/* GTK state reported back as part of the WoWLAN status */
struct iwl_wowlan_gtk_status {
	u8 key_index;
	u8 reserved[3];
	u8 decrypt_key[16];
	u8 tkip_mic_key[8];
	struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed;

/* status reported by the firmware when the host resumes from WoWLAN */
struct iwl_wowlan_status {
	struct iwl_wowlan_gtk_status gtk;
	__le64 replay_ctr;
	__le16 pattern_number;
	__le16 non_qos_seq_ctr;
	__le16 qos_seq_ctr[8];
	__le32 wakeup_reasons; /* bits from enum iwl_wowlan_wakeup_reason */
	__le32 num_of_gtk_rekeys;
	__le32 transmitted_ndps;
	__le32 received_beacons;
	__le32 wake_packet_length;
	__le32 wake_packet_bufsize;
	u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */

#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048

/* info the firmware needs to build/match a TCP packet */
struct iwl_tcp_packet_info {
	__le16 tcp_pseudo_header_checksum;
	__le16 tcp_payload_length;
} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */

/* a TCP packet template with an RX match mask */
struct iwl_tcp_packet {
	struct iwl_tcp_packet_info info;
	u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
	u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */

/* like iwl_tcp_packet, but with the larger remote-wake payload buffer */
struct iwl_remote_wake_packet {
	struct iwl_tcp_packet_info info;
	u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
	u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */

/* full configuration for the TCP "remote wake" WoWLAN feature */
struct iwl_wowlan_remote_wake_config {
	__le32 connection_max_time; /* unused */
	/* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
	u8 max_syn_retries;
	u8 max_data_retries;
	u8 tcp_syn_ack_timeout;
	u8 tcp_ack_timeout;

	struct iwl_tcp_packet syn_tx;
	struct iwl_tcp_packet synack_rx;
	struct iwl_tcp_packet keepalive_ack_rx;
	struct iwl_tcp_packet fin_tx;

	struct iwl_remote_wake_packet keepalive_tx;
	struct iwl_remote_wake_packet wake_rx;

	/* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
	u8 sequence_number_offset;
	u8 sequence_number_length;
	u8 token_offset;
	u8 token_length;
	/* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
	__le32 initial_sequence_number;
	__le16 keepalive_interval;
	__le16 num_tokens;
	u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
422
423/* TODO: NetDetect API */
424
425#endif /* __fw_api_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
new file mode 100644
index 000000000000..f3f3ee0a766b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
@@ -0,0 +1,387 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_api_mac_h__
64#define __fw_api_mac_h__
65
/*
 * The first MAC indices (starting from 0)
 * are available to the driver, AUX follows
 */
#define MAC_INDEX_AUX 4
#define MAC_INDEX_MIN_DRIVER 0
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)

/* EDCA access categories (background/best-effort/video/voice) */
enum iwl_ac {
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VO,
	AC_NUM,
};

/**
 * enum iwl_mac_protection_flags - MAC context flags
 * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames,
 *	this will require CCK RTS/CTS2self.
 *	RTS/CTS will protect full burst time.
 * @MAC_PROT_FLG_HT_PROT: enable HT protection
 * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
 * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
 */
enum iwl_mac_protection_flags {
	MAC_PROT_FLG_TGG_PROTECT = BIT(3),
	MAC_PROT_FLG_HT_PROT = BIT(23),
	MAC_PROT_FLG_FAT_PROT = BIT(24),
	MAC_PROT_FLG_SELF_CTS_EN = BIT(30),
};

#define MAC_FLG_SHORT_SLOT BIT(4)
#define MAC_FLG_SHORT_PREAMBLE BIT(5)

/**
 * enum iwl_mac_types - Supported MAC types
 * @FW_MAC_TYPE_FIRST: lowest supported MAC type
 * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
 * @FW_MAC_TYPE_LISTENER: monitor MAC type (?)
 * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS
 * @FW_MAC_TYPE_IBSS: IBSS
 * @FW_MAC_TYPE_BSS_STA: BSS (managed) station
 * @FW_MAC_TYPE_P2P_DEVICE: P2P Device
 * @FW_MAC_TYPE_P2P_STA: P2P client
 * @FW_MAC_TYPE_GO: P2P GO
 * @FW_MAC_TYPE_TEST: ?
 * @FW_MAC_TYPE_MAX: highest support MAC type
 */
enum iwl_mac_types {
	FW_MAC_TYPE_FIRST = 1,
	FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST,
	FW_MAC_TYPE_LISTENER,
	FW_MAC_TYPE_PIBSS,
	FW_MAC_TYPE_IBSS,
	FW_MAC_TYPE_BSS_STA,
	FW_MAC_TYPE_P2P_DEVICE,
	FW_MAC_TYPE_P2P_STA,
	FW_MAC_TYPE_GO,
	FW_MAC_TYPE_TEST,
	FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */

/**
 * enum iwl_tsf_id - TSF hw timer ID
 * @TSF_ID_A: use TSF A
 * @TSF_ID_B: use TSF B
 * @TSF_ID_C: use TSF C
 * @TSF_ID_D: use TSF D
 * @NUM_TSF_IDS: number of TSF timers available
 */
enum iwl_tsf_id {
	TSF_ID_A = 0,
	TSF_ID_B = 1,
	TSF_ID_C = 2,
	TSF_ID_D = 3,
	NUM_TSF_IDS = 4,
}; /* TSF_ID_API_E_VER_1 */
145
/**
 * struct iwl_mac_data_ap - configuration data for AP MAC context
 * @beacon_time: beacon transmit time in system time
 * @beacon_tsf: beacon transmit time in TSF
 * @bi: beacon interval in TU
 * @bi_reciprocal: 2^32 / bi
 * @dtim_interval: dtim transmit time in TU
 * @dtim_reciprocal: 2^32 / dtim_interval
 * @mcast_qid: queue ID for multicast traffic
 * @beacon_template: beacon template ID
 */
struct iwl_mac_data_ap {
	__le32 beacon_time;
	__le64 beacon_tsf;
	__le32 bi;
	__le32 bi_reciprocal;
	__le32 dtim_interval;
	__le32 dtim_reciprocal;
	__le32 mcast_qid;
	__le32 beacon_template;
} __packed; /* AP_MAC_DATA_API_S_VER_1 */

/**
 * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
 * @beacon_time: beacon transmit time in system time
 * @beacon_tsf: beacon transmit time in TSF
 * @bi: beacon interval in TU
 * @bi_reciprocal: 2^32 / bi
 * @beacon_template: beacon template ID
 */
struct iwl_mac_data_ibss {
	__le32 beacon_time;
	__le64 beacon_tsf;
	__le32 bi;
	__le32 bi_reciprocal;
	__le32 beacon_template;
} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */

/**
 * struct iwl_mac_data_sta - configuration data for station MAC context
 * @is_assoc: 1 for associated state, 0 otherwise
 * @dtim_time: DTIM arrival time in system time
 * @dtim_tsf: DTIM arrival time in TSF
 * @bi: beacon interval in TU, applicable only when associated
 * @bi_reciprocal: 2^32 / bi , applicable only when associated
 * @dtim_interval: DTIM interval in TU, applicable only when associated
 * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
 * @listen_interval: in beacon intervals, applicable only when associated
 * @assoc_id: unique ID assigned by the AP during association
 * @assoc_beacon_arrive_time: arrival time of the association beacon
 *	(per the field name - confirm units/timebase against firmware docs)
 */
struct iwl_mac_data_sta {
	__le32 is_assoc;
	__le32 dtim_time;
	__le64 dtim_tsf;
	__le32 bi;
	__le32 bi_reciprocal;
	__le32 dtim_interval;
	__le32 dtim_reciprocal;
	__le32 listen_interval;
	__le32 assoc_id;
	__le32 assoc_beacon_arrive_time;
} __packed; /* STA_MAC_DATA_API_S_VER_1 */

/**
 * struct iwl_mac_data_go - configuration data for P2P GO MAC context
 * @ap: iwl_mac_data_ap struct with most config data
 * @ctwin: client traffic window in TU (period after TBTT when GO is present).
 *	0 indicates that there is no CT window.
 * @opp_ps_enabled: indicate that opportunistic PS allowed
 */
struct iwl_mac_data_go {
	struct iwl_mac_data_ap ap;
	__le32 ctwin;
	__le32 opp_ps_enabled;
} __packed; /* GO_MAC_DATA_API_S_VER_1 */

/**
 * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context
 * @sta: iwl_mac_data_sta struct with most config data
 * @ctwin: client traffic window in TU (period after TBTT when GO is present).
 *	0 indicates that there is no CT window.
 */
struct iwl_mac_data_p2p_sta {
	struct iwl_mac_data_sta sta;
	__le32 ctwin;
} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */

/**
 * struct iwl_mac_data_pibss - Pseudo IBSS config data
 * @stats_interval: interval in TU between statistics notifications to host.
 */
struct iwl_mac_data_pibss {
	__le32 stats_interval;
} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */

/**
 * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC
 * context.
 * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
 *	other channels as well. This should be to true only in case that the
 *	device is discoverable and there is an active GO. Note that setting this
 *	field when not needed, will increase the number of interrupts and have
 *	effect on the platform power, as this setting opens the Rx filters on
 *	all macs.
 */
struct iwl_mac_data_p2p_dev {
	__le32 is_disc_extended;
} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
254
/**
 * enum iwl_mac_filter_flags - MAC context filter flags
 * @MAC_FILTER_IN_PROMISC: accept all data frames
 * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
 *	control frames to the host
 * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
 * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
 * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
 * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
 *	(in station mode when associated)
 * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames
 * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames
 * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
 */
enum iwl_mac_filter_flags {
	MAC_FILTER_IN_PROMISC = BIT(0),
	MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1),
	MAC_FILTER_ACCEPT_GRP = BIT(2),
	MAC_FILTER_DIS_DECRYPT = BIT(3),
	MAC_FILTER_DIS_GRP_DECRYPT = BIT(4),
	MAC_FILTER_IN_BEACON = BIT(6),
	MAC_FILTER_OUT_BCAST = BIT(8),
	MAC_FILTER_IN_CRC32 = BIT(11),
	MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
};

/**
 * enum iwl_mac_qos_flags - QoS flags
 * @MAC_QOS_FLG_UPDATE_EDCA: ?
 * @MAC_QOS_FLG_TGN: HT is enabled
 * @MAC_QOS_FLG_TXOP_TYPE: ?
 *
 */
enum iwl_mac_qos_flags {
	MAC_QOS_FLG_UPDATE_EDCA = BIT(0),
	MAC_QOS_FLG_TGN = BIT(1),
	MAC_QOS_FLG_TXOP_TYPE = BIT(4),
};

/**
 * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD
 * @cw_min: Contention window, start value in numbers of slots.
 *	Should be a power-of-2, minus 1. Device's default is 0x0f.
 * @cw_max: Contention window, max value in numbers of slots.
 *	Should be a power-of-2, minus 1. Device's default is 0x3f.
 * @aifsn: Number of slots in Arbitration Interframe Space (before
 *	performing random backoff timing prior to Tx). Device default 1.
 * @fifos_mask: FIFOs used by this MAC for this AC
 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
 *
 * One instance of this config struct for each of 4 EDCA access categories
 * in struct iwl_qosparam_cmd.
 *
 * Device will automatically increase contention window by (2*CW) + 1 for each
 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
 * value, to cap the CW value.
 */
struct iwl_ac_qos {
	__le16 cw_min;
	__le16 cw_max;
	u8 aifsn;
	u8 fifos_mask;
	__le16 edca_txop;
} __packed; /* AC_QOS_API_S_VER_2 */
319
/**
 * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
 * ( MAC_CONTEXT_CMD = 0x28 )
 * @id_and_color: ID and color of the MAC
 * @action: action to perform, one of FW_CTXT_ACTION_*
 * @mac_type: one of FW_MAC_TYPE_*
 * @tsf_id: TSF HW timer, one of TSF_ID_*
 * @node_addr: MAC address
 * @reserved_for_node_addr: alignment padding after @node_addr
 * @bssid_addr: BSSID
 * @reserved_for_bssid_addr: alignment padding after @bssid_addr
 * @cck_rates: basic rates available for CCK
 * @ofdm_rates: basic rates available for OFDM
 * @protection_flags: combination of MAC_PROT_FLG_FLAG_*
 * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
 * @short_slot: 0x10 for enabling short slots, 0 otherwise
 * @filter_flags: combination of MAC_FILTER_*
 * @qos_flags: from MAC_QOS_FLG_*
 * @ac: one iwl_mac_qos configuration for each AC
 * @mac_specific: one of struct iwl_mac_data_*, according to mac_type
 */
struct iwl_mac_ctx_cmd {
	/* COMMON_INDEX_HDR_API_S_VER_1 */
	__le32 id_and_color;
	__le32 action;
	/* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
	__le32 mac_type;
	__le32 tsf_id;
	u8 node_addr[6];
	__le16 reserved_for_node_addr;
	u8 bssid_addr[6];
	__le16 reserved_for_bssid_addr;
	__le32 cck_rates;
	__le32 ofdm_rates;
	__le32 protection_flags;
	__le32 cck_short_preamble;
	__le32 short_slot;
	__le32 filter_flags;
	/* MAC_QOS_PARAM_API_S_VER_1 */
	__le32 qos_flags;
	struct iwl_ac_qos ac[AC_NUM+1];
	/* MAC_CONTEXT_COMMON_DATA_API_S */
	union {
		struct iwl_mac_data_ap ap;
		struct iwl_mac_data_go go;
		struct iwl_mac_data_sta sta;
		struct iwl_mac_data_p2p_sta p2p_sta;
		struct iwl_mac_data_p2p_dev p2p_dev;
		struct iwl_mac_data_pibss pibss;
		struct iwl_mac_data_ibss ibss;
	};
} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
370
371static inline u32 iwl_mvm_reciprocal(u32 v)
372{
373 if (!v)
374 return 0;
375 return 0xFFFFFFFF / v;
376}
377
/* get_set_flag values for struct iwl_nonqos_seq_query_cmd */
#define IWL_NONQOS_SEQ_GET 0x1
#define IWL_NONQOS_SEQ_SET 0x2

/**
 * struct iwl_nonqos_seq_query_cmd - get/set the non-QoS TX sequence counter
 * @get_set_flag: %IWL_NONQOS_SEQ_GET or %IWL_NONQOS_SEQ_SET
 * @mac_id_n_color: id and color of the MAC to operate on
 * @value: the sequence counter value (input for set; presumably output
 *	for get - confirm against the firmware response handling)
 * @reserved: reserved
 */
struct iwl_nonqos_seq_query_cmd {
	__le32 get_set_flag;
	__le32 mac_id_n_color;
	__le16 value;
	__le16 reserved;
} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
386
387#endif /* __fw_api_mac_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
new file mode 100644
index 000000000000..c8f3e2536cbb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -0,0 +1,467 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#ifndef __fw_api_power_h__
69#define __fw_api_power_h__
70
71/* Power Management Commands, Responses, Notifications */
72
/**
 * enum iwl_ltr_config_flags - masks for LTR config command flags
 * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
 * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
 *	memory access
 * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
 *	reg change
 * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
 *	D0 to D3
 * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
 * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
 * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
 *	(NOTE(review): the flag name reads "deny" but the description says
 *	"allow" — confirm the polarity against the firmware spec)
 */
enum iwl_ltr_config_flags {
	LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
	LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
	LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
	LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
	LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
	LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
	LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
};
95
/**
 * struct iwl_ltr_config_cmd_v1 - configures the LTR
 * @flags: See %enum iwl_ltr_config_flags
 * @static_long: static long LTR value — presumably applied when
 *	%LTR_CFG_FLAG_SW_SET_LONG is set; confirm with the firmware docs
 * @static_short: static short LTR value — presumably applied when
 *	%LTR_CFG_FLAG_SW_SET_SHORT is set; confirm with the firmware docs
 */
struct iwl_ltr_config_cmd_v1 {
	__le32 flags;
	__le32 static_long;
	__le32 static_short;
} __packed; /* LTR_CAPABLE_API_S_VER_1 */
105
#define LTR_VALID_STATES_NUM 4

/**
 * struct iwl_ltr_config_cmd - configures the LTR
 * @flags: See %enum iwl_ltr_config_flags
 * @static_long: static long LTR value (as in &struct iwl_ltr_config_cmd_v1)
 * @static_short: static short LTR value (as in &struct iwl_ltr_config_cmd_v1)
 * @ltr_cfg_values: one LTR value per valid state — per-state semantics are
 *	not visible here; confirm with the firmware API documentation
 * @ltr_short_idle_timeout: short LTR idle timeout — units are not visible
 *	here; confirm with the firmware API documentation
 */
struct iwl_ltr_config_cmd {
	__le32 flags;
	__le32 static_long;
	__le32 static_short;
	__le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
	__le32 ltr_short_idle_timeout;
} __packed; /* LTR_CAPABLE_API_S_VER_2 */
123
/* Radio LP RX Energy Threshold measured in dBm */
#define POWER_LPRX_RSSI_THRESHOLD	75
#define POWER_LPRX_RSSI_THRESHOLD_MAX	94
#define POWER_LPRX_RSSI_THRESHOLD_MIN	30

/**
 * enum iwl_power_flags - masks for power table command flags
 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
 *	receiver and transmitter. '0' - does not allow.
 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
 *	'1' Driver enables PM (use rest of parameters)
 * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM,
 *	'1' PM could sleep over DTIM till listen Interval.
 * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
 *	access categories are both delivery and trigger enabled.
 * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
 *	PBW Snoozing enabled
 * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
 * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
 * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
 *	detection enablement
 */
enum iwl_power_flags {
	POWER_FLAGS_POWER_SAVE_ENA_MSK		= BIT(0),
	POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK	= BIT(1),
	POWER_FLAGS_SKIP_OVER_DTIM_MSK		= BIT(2),
	POWER_FLAGS_SNOOZE_ENA_MSK		= BIT(5),
	POWER_FLAGS_BT_SCO_ENA			= BIT(8),
	POWER_FLAGS_ADVANCE_PM_ENA_MSK		= BIT(9),
	POWER_FLAGS_LPRX_ENA_MSK		= BIT(11),
	POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK	= BIT(12),
};
156
#define IWL_POWER_VEC_SIZE 5

/**
 * struct iwl_powertable_cmd - legacy power command. Beside old API support this
 *	is used also with a new power API for device wide power settings.
 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
 *
 * @flags: Power table command flags from POWER_FLAGS_*
 * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
 *	Minimum allowed:- 3 * DTIM. Keep alive period must be
 *	set regardless of power scheme or current power state.
 *	FW use this value also when PM is disabled.
 * @debug_flags: debug flags — semantics are not visible here; confirm with
 *	the firmware API documentation
 * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
 *	PSM transition - legacy PM
 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
 *	PSM transition - legacy PM
 * @sleep_interval: not in use
 * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
 *	is set. For example, if it is required to skip over
 *	one DTIM, this value need to be set to 2 (DTIM periods).
 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
 *	Default: 80dbm
 */
struct iwl_powertable_cmd {
	/* PM_POWER_TABLE_CMD_API_S_VER_6 */
	__le16 flags;
	u8 keep_alive_seconds;
	u8 debug_flags;
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
	__le32 skip_dtim_periods;
	__le32 lprx_rssi_threshold;
} __packed;
191
/**
 * enum iwl_device_power_flags - masks for device power command flags
 * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning
 *	off receiver and transmitter. '0' - does not allow.
 */
enum iwl_device_power_flags {
	DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK	= BIT(0),
};
200
/**
 * struct iwl_device_power_cmd - device wide power command.
 * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
 *
 * @flags: Power table command flags from DEVICE_POWER_FLAGS_*
 * @reserved: reserved for alignment
 */
struct iwl_device_power_cmd {
	/* PM_POWER_TABLE_CMD_API_S_VER_6 */
	__le16 flags;
	__le16 reserved;
} __packed;
212
/**
 * struct iwl_mac_power_cmd - New power command containing uAPSD support
 * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
 * @id_and_color: MAC contex identifier
 * @flags: Power table command flags from POWER_FLAGS_*
 * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
 *	Minimum allowed:- 3 * DTIM. Keep alive period must be
 *	set regardless of power scheme or current power state.
 *	FW use this value also when PM is disabled.
 * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
 *	PSM transition - legacy PM
 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
 *	PSM transition - legacy PM
 * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
 *	PSM transition - uAPSD
 * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
 *	PSM transition - uAPSD
 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
 *	Default: 80dbm
 * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
 *	is set. For example, if it is required to skip over
 *	one DTIM, this value need to be set to 2 (DTIM periods).
 * @snooze_interval: Maximum time between attempts to retrieve buffered data
 *	from the AP [msec]
 * @snooze_window: A window of time in which PBW snoozing insures that all
 *	packets received. It is also the minimum time from last
 *	received unicast RX packet, before client stops snoozing
 *	for data. [msec]
 * @snooze_step: TBD
 * @qndp_tid: TID client shall use for uAPSD QNDP triggers
 * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
 *	each corresponding AC.
 *	Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
 * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
 *	values.
 * @heavy_tx_thld_packets: TX threshold measured in number of packets
 * @heavy_rx_thld_packets: RX threshold measured in number of packets
 * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
 * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
 * @limited_ps_threshold: threshold for limited power save — semantics not
 *	visible here; confirm with the firmware API documentation
 * @reserved: reserved for alignment
 */
struct iwl_mac_power_cmd {
	/* CONTEXT_DESC_API_T_VER_1 */
	__le32 id_and_color;

	/* CLIENT_PM_POWER_TABLE_S_VER_1 */
	__le16 flags;
	__le16 keep_alive_seconds;
	__le32 rx_data_timeout;
	__le32 tx_data_timeout;
	__le32 rx_data_timeout_uapsd;
	__le32 tx_data_timeout_uapsd;
	u8 lprx_rssi_threshold;
	u8 skip_dtim_periods;
	__le16 snooze_interval;
	__le16 snooze_window;
	u8 snooze_step;
	u8 qndp_tid;
	u8 uapsd_ac_flags;
	u8 uapsd_max_sp;
	u8 heavy_tx_thld_packets;
	u8 heavy_rx_thld_packets;
	u8 heavy_tx_thld_percentage;
	u8 heavy_rx_thld_percentage;
	u8 limited_ps_threshold;
	u8 reserved;
} __packed;
282
/**
 * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
 * associated AP is identified as improperly implementing uAPSD protocol.
 * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
 * @sta_id: index of station in uCode's station table - associated AP ID in
 *	this context.
 * @mac_id: MAC context id — presumably the MAC on which this AP is the
 *	peer; confirm against the driver's notification handler
 * @reserved: reserved for alignment
 */
struct iwl_uapsd_misbehaving_ap_notif {
	__le32 sta_id;
	u8 mac_id;
	u8 reserved[3];
} __packed;
295
/**
 * struct iwl_reduce_tx_power_cmd - TX power reduction command
 * REDUCE_TX_POWER_CMD = 0x9f
 * @flags: (reserved for future implementation)
 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
 * @pwr_restriction: TX power restriction in dBms.
 */
struct iwl_reduce_tx_power_cmd {
	u8 flags;
	u8 mac_context_id;
	__le16 pwr_restriction;
} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
308
/**
 * enum iwl_dev_tx_power_cmd_mode - selector for the TX power command payload
 * @IWL_TX_POWER_MODE_SET_MAC: restrict TX power for a single MAC context
 * @IWL_TX_POWER_MODE_SET_DEVICE: restrict TX power device-wide
 * @IWL_TX_POWER_MODE_SET_CHAINS: restrict TX power per chain
 */
enum iwl_dev_tx_power_cmd_mode {
	IWL_TX_POWER_MODE_SET_MAC = 0,
	IWL_TX_POWER_MODE_SET_DEVICE = 1,
	IWL_TX_POWER_MODE_SET_CHAINS = 2,
}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */
314
/**
 * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
 * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
 * @pwr_restriction: TX power restriction in 1/8 dBms.
 * @dev_24: device TX power restriction in 1/8 dBms
 * @dev_52_low: device TX power restriction upper band - low
 * @dev_52_high: device TX power restriction upper band - high
 */
struct iwl_dev_tx_power_cmd_v2 {
	__le32 set_mode;
	__le32 mac_context_id;
	__le16 pwr_restriction;
	__le16 dev_24;
	__le16 dev_52_low;
	__le16 dev_52_high;
} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
332
#define IWL_NUM_CHAIN_LIMITS	2
#define IWL_NUM_SUB_BANDS	5

/**
 * struct iwl_dev_tx_power_cmd - TX power reduction command
 * @v2: version 2 of the command, embedded here for easier software handling
 * @per_chain_restriction: per chain restrictions
 */
struct iwl_dev_tx_power_cmd {
	/* v3 is just an extension of v2 - keep this here */
	struct iwl_dev_tx_power_cmd_v2 v2;
	__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */

/* Largest representable restriction (the power fields are 16 bits wide) */
#define IWL_DEV_MAX_TX_POWER 0x7FFF
348
/**
 * struct iwl_beacon_filter_cmd
 * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
 * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
 *	to driver if delta in Energy values calculated for this and last
 *	passed beacon is greater than this threshold. Zero value means that
 *	the Energy change is ignored for beacon filtering, and beacon will
 *	not be forced to be sent to driver regardless of this delta. Typical
 *	energy delta 5dB.
 * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
 *	Send beacon to driver if delta in Energy values calculated for this
 *	and last passed beacon is greater than this threshold. Zero value
 *	means that the Energy change is ignored for beacon filtering while in
 *	Roaming state, typical energy delta 1dB.
 * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
 *	calculated for current beacon is less than the threshold, use
 *	Roaming Energy Delta Threshold, otherwise use normal Energy Delta
 *	Threshold. Typical energy threshold is -72dBm.
 * @bf_temp_threshold: This threshold determines the type of temperature
 *	filtering (Slow or Fast) that is selected (Units are in Celsius):
 *	If the current temperature is above this threshold - Fast filter
 *	will be used, If the current temperature is below this threshold -
 *	Slow filter will be used.
 * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
 *	calculated for this and the last passed beacon is greater than this
 *	threshold. Zero value means that the temperature change is ignored for
 *	beacon filtering; beacons will not be forced to be sent to driver
 *	regardless of whether its temperature has been changed.
 * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
 *	calculated for this and the last passed beacon is greater than this
 *	threshold. Zero value means that the temperature change is ignored for
 *	beacon filtering; beacons will not be forced to be sent to driver
 *	regardless of whether its temperature has been changed.
 * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
 * @bf_debug_flag: debug flags — semantics not visible here; confirm with
 *	the firmware API documentation
 * @bf_escape_timer: Send beacons to driver if no beacons were passed
 *	for a specific period of time. Units: Beacons.
 * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
 *	for a longer period of time then this escape-timeout. Units: Beacons.
 * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
 */
struct iwl_beacon_filter_cmd {
	__le32 bf_energy_delta;
	__le32 bf_roaming_energy_delta;
	__le32 bf_roaming_state;
	__le32 bf_temp_threshold;
	__le32 bf_temp_fast_filter;
	__le32 bf_temp_slow_filter;
	__le32 bf_enable_beacon_filter;
	__le32 bf_debug_flag;
	__le32 bf_escape_timer;
	__le32 ba_escape_timer;
	__le32 ba_enable_beacon_abort;
} __packed;
403
/* Beacon filtering and beacon abort */
/*
 * Per-field constants for struct iwl_beacon_filter_cmd. Each field has a
 * _DEFAULT and _D0I3 value (consumed by IWL_BF_CMD_CONFIG() below) plus
 * _MAX/_MIN bounds.
 */
#define IWL_BF_ENERGY_DELTA_DEFAULT 5
#define IWL_BF_ENERGY_DELTA_D0I3 20
#define IWL_BF_ENERGY_DELTA_MAX 255
#define IWL_BF_ENERGY_DELTA_MIN 0

#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0

#define IWL_BF_ROAMING_STATE_DEFAULT 72
#define IWL_BF_ROAMING_STATE_D0I3 72
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0

#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
#define IWL_BF_TEMP_THRESHOLD_D0I3 112
#define IWL_BF_TEMP_THRESHOLD_MAX 255
#define IWL_BF_TEMP_THRESHOLD_MIN 0

#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
#define IWL_BF_TEMP_FAST_FILTER_MAX 255
#define IWL_BF_TEMP_FAST_FILTER_MIN 0

#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0

#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1

#define IWL_BF_DEBUG_FLAG_DEFAULT 0
#define IWL_BF_DEBUG_FLAG_D0I3 0

#define IWL_BF_ESCAPE_TIMER_DEFAULT 0
#define IWL_BF_ESCAPE_TIMER_D0I3 0
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0

#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
#define IWL_BA_ESCAPE_TIMER_D0I3 6
#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0

#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1

/*
 * Designated-initializer fragment that fills a struct iwl_beacon_filter_cmd
 * with the IWL_BF_*<mode> constants above; mode is the suffix token,
 * either _DEFAULT or _D0I3 (see the two wrappers below).
 */
#define IWL_BF_CMD_CONFIG(mode)					             \
	.bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode),	      \
	.bf_roaming_energy_delta =					      \
		cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode),	      \
	.bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode),	      \
	.bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode),      \
	.bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode),  \
	.bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode),  \
	.bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode),	      \
	.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode),	      \
	.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)

#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
467#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
new file mode 100644
index 000000000000..0f1ea80a55ef
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -0,0 +1,389 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_api_rs_h__
64#define __fw_api_rs_h__
65
66#include "fw-api-mac.h"
67
/*
 * These serve as indexes into
 * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
 * TODO: avoid overlap between legacy and HT rates
 *
 * The IWL_RATE_MCS_*_INDEX aliases overlay the HT/VHT MCS numbering on the
 * legacy OFDM indexes (MCS0 shares the 6M slot, MCS1 the 12M slot, etc.).
 */
enum {
	IWL_RATE_1M_INDEX = 0,
	IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
	IWL_RATE_2M_INDEX,
	IWL_RATE_5M_INDEX,
	IWL_RATE_11M_INDEX,
	IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
	IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
	IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
	IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
	IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX,
	IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
	IWL_RATE_18M_INDEX,
	IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX,
	IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
	IWL_RATE_36M_INDEX,
	IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX,
	IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
	IWL_RATE_54M_INDEX,
	IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
	IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
	IWL_RATE_60M_INDEX,
	IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
	IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
	IWL_RATE_MCS_8_INDEX,
	IWL_RATE_MCS_9_INDEX,
	IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
	IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
	IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
};
108
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)

/* fw API values for legacy bit rates, both OFDM and CCK */
/* Note: the CCK values equal the rate in 100 kbps units (10 = 1 Mbps,
 * 55 = 5.5 Mbps, ...); the OFDM values are the 802.11 PLCP rate codes.
 */
enum {
	IWL_RATE_6M_PLCP  = 13,
	IWL_RATE_9M_PLCP  = 15,
	IWL_RATE_12M_PLCP = 5,
	IWL_RATE_18M_PLCP = 7,
	IWL_RATE_24M_PLCP = 9,
	IWL_RATE_36M_PLCP = 11,
	IWL_RATE_48M_PLCP = 1,
	IWL_RATE_54M_PLCP = 3,
	IWL_RATE_1M_PLCP  = 10,
	IWL_RATE_2M_PLCP  = 20,
	IWL_RATE_5M_PLCP  = 55,
	IWL_RATE_11M_PLCP = 110,
	IWL_RATE_INVM_PLCP = -1,
};
127
/*
 * rate_n_flags bit fields
 *
 * The 32-bit value has different layouts in the low 8 bits depending on the
 * format. There are three formats, HT, VHT and legacy (11abg, with subformats
 * for CCK and OFDM).
 *
 * High-throughput (HT) rate format
 *	bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
 * Very High-throughput (VHT) rate format
 *	bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
 * Legacy OFDM rate format for bits 7:0
 *	bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
 * Legacy CCK rate format for bits 7:0:
 *	bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
 */

/* Bit 8: (1) HT format, (0) legacy or VHT format */
#define RATE_MCS_HT_POS 8
#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS)

/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
#define RATE_MCS_CCK_POS 9
#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS)

/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
#define RATE_MCS_VHT_POS 26
#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS)


/*
 * High-throughput (HT) rate format for bits 7:0
 *
 *	2-0:  MCS rate base
 *		0)  6 Mbps
 *		1) 12 Mbps
 *		2) 18 Mbps
 *		3) 24 Mbps
 *		4) 36 Mbps
 *		5) 48 Mbps
 *		6) 54 Mbps
 *		7) 60 Mbps
 *	4-3:  0) Single stream (SISO)
 *	      1) Dual stream (MIMO)
 *	      2) Triple stream (MIMO)
 *	5:    Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
 *	(bits 7-6 are zero)
 *
 * Together the low 5 bits work out to the MCS index because we don't
 * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
 * streams and 16-23 have three streams. We could also support MCS 32
 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
 */
#define RATE_HT_MCS_RATE_CODE_MSK	0x7
#define RATE_HT_MCS_NSS_POS             3
#define RATE_HT_MCS_NSS_MSK             (3 << RATE_HT_MCS_NSS_POS)

/* Bit 10: (1) Use Green Field preamble */
#define RATE_HT_MCS_GF_POS		10
#define RATE_HT_MCS_GF_MSK		(1 << RATE_HT_MCS_GF_POS)

#define RATE_HT_MCS_INDEX_MSK		0x3f

/*
 * Very High-throughput (VHT) rate format for bits 7:0
 *
 *	3-0:  VHT MCS (0-9)
 *	5-4:  number of streams - 1:
 *		0)  Single stream (SISO)
 *		1)  Dual stream (MIMO)
 *		2)  Triple stream (MIMO)
 */

/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
#define RATE_VHT_MCS_RATE_CODE_MSK	0xf
#define RATE_VHT_MCS_NSS_POS		4
#define RATE_VHT_MCS_NSS_MSK		(3 << RATE_VHT_MCS_NSS_POS)

/*
 * Legacy OFDM rate format for bits 7:0
 *
 *	3-0:  0xD)   6 Mbps
 *	      0xF)   9 Mbps
 *	      0x5)  12 Mbps
 *	      0x7)  18 Mbps
 *	      0x9)  24 Mbps
 *	      0xB)  36 Mbps
 *	      0x1)  48 Mbps
 *	      0x3)  54 Mbps
 * (bits 7-4 are 0)
 *
 * Legacy CCK rate format for bits 7:0:
 * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
 *
 *	6-0:   10)  1 Mbps
 *	       20)  2 Mbps
 *	       55)  5.5 Mbps
 *	      110)  11 Mbps
 * (bit 7 is 0)
 */
#define RATE_LEGACY_RATE_MSK 0xff


/*
 * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
 * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
 */
#define RATE_MCS_CHAN_WIDTH_POS		11
#define RATE_MCS_CHAN_WIDTH_MSK		(3 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_20		(0 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_40		(1 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_80		(2 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_160		(3 << RATE_MCS_CHAN_WIDTH_POS)

/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
#define RATE_MCS_SGI_POS		13
#define RATE_MCS_SGI_MSK		(1 << RATE_MCS_SGI_POS)

/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
#define RATE_MCS_ANT_POS		14
#define RATE_MCS_ANT_A_MSK		(1 << RATE_MCS_ANT_POS)
#define RATE_MCS_ANT_B_MSK		(2 << RATE_MCS_ANT_POS)
#define RATE_MCS_ANT_C_MSK		(4 << RATE_MCS_ANT_POS)
#define RATE_MCS_ANT_AB_MSK		(RATE_MCS_ANT_A_MSK | \
					 RATE_MCS_ANT_B_MSK)
#define RATE_MCS_ANT_ABC_MSK		(RATE_MCS_ANT_AB_MSK | \
					 RATE_MCS_ANT_C_MSK)
#define RATE_MCS_ANT_MSK		RATE_MCS_ANT_ABC_MSK
#define RATE_MCS_ANT_NUM 3

/* Bit 17-18: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS		17
#define RATE_MCS_HT_STBC_MSK		(3 << RATE_MCS_STBC_POS)
#define RATE_MCS_VHT_STBC_MSK		(1 << RATE_MCS_STBC_POS)

/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
#define RATE_MCS_BF_POS			19
#define RATE_MCS_BF_MSK			(1 << RATE_MCS_BF_POS)

/* Bit 20: (0) ZLF is off, (1) ZLF is on */
#define RATE_MCS_ZLF_POS		20
#define RATE_MCS_ZLF_MSK		(1 << RATE_MCS_ZLF_POS)

/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
#define RATE_MCS_DUP_POS		24
#define RATE_MCS_DUP_MSK		(3 << RATE_MCS_DUP_POS)

/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
#define RATE_MCS_LDPC_POS		27
#define RATE_MCS_LDPC_MSK		(1 << RATE_MCS_LDPC_POS)
278
279
/* Link Quality definitions */

/* # entries in rate scale table to support Tx retries */
#define  LQ_MAX_RETRY_NUM 16

/* Link quality command flags bit fields */

/* Bit 0: (0) Don't use RTS (1) Use RTS */
#define LQ_FLAG_USE_RTS_POS             0
#define LQ_FLAG_USE_RTS_MSK	        (1 << LQ_FLAG_USE_RTS_POS)

/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
#define LQ_FLAG_COLOR_POS               1
#define LQ_FLAG_COLOR_MSK               (7 << LQ_FLAG_COLOR_POS)

/* Bit 4-5: Tx RTS BW Signalling
 * (0) No RTS BW signalling
 * (1) Static BW signalling
 * (2) Dynamic BW signalling
 */
#define LQ_FLAG_RTS_BW_SIG_POS          4
#define LQ_FLAG_RTS_BW_SIG_NONE         (0 << LQ_FLAG_RTS_BW_SIG_POS)
#define LQ_FLAG_RTS_BW_SIG_STATIC       (1 << LQ_FLAG_RTS_BW_SIG_POS)
#define LQ_FLAG_RTS_BW_SIG_DYNAMIC      (2 << LQ_FLAG_RTS_BW_SIG_POS)

/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
 * Dynamic BW selection allows Tx with narrower BW than requested in rates
 */
#define LQ_FLAG_DYNAMIC_BW_POS          6
#define LQ_FLAG_DYNAMIC_BW_MSK          (1 << LQ_FLAG_DYNAMIC_BW_POS)
310
/* Single Stream Tx Parameters (lq_cmd->ss_params)
 * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
 * used for single stream Tx.
 */

/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
 * (0) - No STBC allowed
 * (1) - 2x1 STBC allowed (HT/VHT)
 * (2) - 4x2 STBC allowed (HT/VHT)
 * (3) - 3x2 STBC allowed (HT only)
 * All our chips are at most 2 antennas so only (1) is valid for now.
 */
#define LQ_SS_STBC_ALLOWED_POS          0
/*
 * Fix: shift by the bit position, not by the mask itself. The original
 * self-referential definition (3 << LQ_SS_STBC_ALLOWED_MSK) left the inner
 * token unexpanded, so any use of this macro failed to compile with an
 * undeclared identifier.
 */
#define LQ_SS_STBC_ALLOWED_MSK          (3 << LQ_SS_STBC_ALLOWED_POS)

/* 2x1 STBC is allowed */
#define LQ_SS_STBC_1SS_ALLOWED          (1 << LQ_SS_STBC_ALLOWED_POS)

/* Bit 2: Beamformer (VHT only) is allowed */
#define LQ_SS_BFER_ALLOWED_POS          2
#define LQ_SS_BFER_ALLOWED              (1 << LQ_SS_BFER_ALLOWED_POS)

/* Bit 3: Force BFER or STBC for testing
 * If this is set:
 * If BFER is allowed then force the ucode to choose BFER else
 * If STBC is allowed then force the ucode to choose STBC over SISO
 */
#define LQ_SS_FORCE_POS                 3
#define LQ_SS_FORCE                     (1 << LQ_SS_FORCE_POS)

/* Bit 31: ss_params field is valid. Used for FW backward compatibility
 * with other drivers which don't support the ss_params API yet
 */
#define LQ_SS_PARAMS_VALID_POS          31
#define LQ_SS_PARAMS_VALID              (1 << LQ_SS_PARAMS_VALID_POS)
346
/**
 * struct iwl_lq_cmd - link quality command
 * @sta_id: station to update
 * @reduced_tpc: reduced transmit power control value — semantics not
 *	visible here; confirm with the rate-scaling code
 * @control: not used
 * @flags: combination of LQ_FLAG_*
 * @mimo_delim: the first SISO index in rs_table, which separates MIMO
 *	and SISO rates
 * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
 *	Should be ANT_[ABC]
 * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
 * @initial_rate_index: first index from rs_table per AC category
 * @agg_time_limit: aggregation max time threshold in usec/100, meaning
 *	value of 100 is one usec. Range is 100 to 8000
 * @agg_disable_start_th: try-count threshold for starting aggregation.
 *	If a frame has higher try-count, it should not be selected for
 *	starting an aggregation sequence.
 * @agg_frame_cnt_limit: max frame count in an aggregation.
 *	0: no limit
 *	1: no aggregation (one frame per aggregation)
 *	2 - 0x3f: maximal number of frames (up to 3f == 63)
 * @reserved2: reserved
 * @rs_table: array of rates for each TX try, each is rate_n_flags,
 *	meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
 * @ss_params: single stream features. declare whether STBC or BFER are allowed.
 */
struct iwl_lq_cmd {
	u8 sta_id;
	u8 reduced_tpc;
	u16 control;
	/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
	u8 flags;
	u8 mimo_delim;
	u8 single_stream_ant_msk;
	u8 dual_stream_ant_msk;
	u8 initial_rate_index[AC_NUM];
	/* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
	__le16 agg_time_limit;
	u8 agg_disable_start_th;
	u8 agg_frame_cnt_limit;
	__le32 reserved2;
	__le32 rs_table[LQ_MAX_RETRY_NUM];
	__le32 ss_params;
}; /* LINK_QUALITY_CMD_API_S_VER_1 */
389#endif /* __fw_api_rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
new file mode 100644
index 000000000000..9b7e49d4620f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -0,0 +1,238 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#ifndef __fw_api_rx_h__
69#define __fw_api_rx_h__
70
71#define IWL_RX_INFO_PHY_CNT 8
72#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
73#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
74#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
75#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
76#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
77#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
78#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
79
80/**
81 * struct iwl_rx_phy_info - phy info
82 * (REPLY_RX_PHY_CMD = 0xc0)
83 * @non_cfg_phy_cnt: non configurable DSP phy data byte count
84 * @cfg_phy_cnt: configurable DSP phy data byte count
85 * @stat_id: configurable DSP phy data set ID
86 * @reserved1:
87 * @system_timestamp: GP2 at on air rise
88 * @timestamp: TSF at on air rise
89 * @beacon_time_stamp: beacon at on-air rise
90 * @phy_flags: general phy flags: band, modulation, ...
91 * @channel: channel number
92 * @non_cfg_phy_buf: for various implementations of non_cfg_phy
93 * @rate_n_flags: RATE_MCS_*
94 * @byte_count: frame's byte-count
95 * @frame_time: frame's time on the air, based on byte count and frame rate
96 * calculation
97 * @mac_active_msk: what MACs were active when the frame was received
98 *
99 * Before each Rx, the device sends this data. It contains PHY information
100 * about the reception of the packet.
101 */
102struct iwl_rx_phy_info {
103 u8 non_cfg_phy_cnt;
104 u8 cfg_phy_cnt;
105 u8 stat_id;
106 u8 reserved1;
107 __le32 system_timestamp;
108 __le64 timestamp;
109 __le32 beacon_time_stamp;
110 __le16 phy_flags;
111 __le16 channel;
112 __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
113 __le32 rate_n_flags;
114 __le32 byte_count;
115 __le16 mac_active_msk;
116 __le16 frame_time;
117} __packed;
118
119/*
120 * TCP offload Rx assist info
121 *
122 * bits 0:3 - reserved
123 * bits 4:7 - MIC CRC length
124 * bits 8:12 - MAC header length
125 * bit 13 - Padding indication
126 * bit 14 - A-AMSDU indication
127 * bit 15 - Offload enabled
128 */
129enum iwl_csum_rx_assist_info {
130 CSUM_RXA_RESERVED_MASK = 0x000f,
131 CSUM_RXA_MICSIZE_MASK = 0x00f0,
132 CSUM_RXA_HEADERLEN_MASK = 0x1f00,
133 CSUM_RXA_PADD = BIT(13),
134 CSUM_RXA_AMSDU = BIT(14),
135 CSUM_RXA_ENA = BIT(15)
136};
137
138/**
139 * struct iwl_rx_mpdu_res_start - phy info
140 * @assist: see CSUM_RX_ASSIST_ above
141 */
142struct iwl_rx_mpdu_res_start {
143 __le16 byte_count;
144 __le16 assist;
145} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
146
147/**
148 * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
149 * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
150 * @RX_RES_PHY_FLAGS_MOD_CCK:
151 * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
152 * @RX_RES_PHY_FLAGS_NARROW_BAND:
153 * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
154 * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
155 * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
156 * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
157 * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
158 */
159enum iwl_rx_phy_flags {
160 RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
161 RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
162 RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
163 RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
164 RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
165 RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
166 RX_RES_PHY_FLAGS_AGG = BIT(7),
167 RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
168 RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
169 RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
170};
171
172/**
173 * enum iwl_mvm_rx_status - written by fw for each Rx packet
174 * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
175 * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
176 * @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
177 * @RX_MPDU_RES_STATUS_KEY_VALID:
178 * @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
179 * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
180 * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
181 * in the driver.
182 * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
183 * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
184 * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
185 * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
186 * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
187 * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
188 * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
189 * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
190 * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
191 * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
192 * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
193 * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
194 * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
195 * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
196 * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
197 * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
198 * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
199 * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
200 * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
201 * @RX_MPDU_RES_STATUS_STA_ID_MSK:
202 * @RX_MPDU_RES_STATUS_RRF_KILL:
203 * @RX_MPDU_RES_STATUS_FILTERING_MSK:
204 * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
205 */
206enum iwl_mvm_rx_status {
207 RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
208 RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
209 RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
210 RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
211 RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
212 RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
213 RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
214 RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
215 RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
216 RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
217 RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
218 RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
219 RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
220 RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
221 RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
222 RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
223 RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
224 RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
225 RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12),
226 RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
227 RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
228 RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
229 RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
230 RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
231 RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
232 RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
233 RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
234 RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
235 RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
236};
237
238#endif /* __fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
new file mode 100644
index 000000000000..3a657e4b60ac
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -0,0 +1,730 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __fw_api_scan_h__
67#define __fw_api_scan_h__
68
69#include "fw-api.h"
70
71/* Scan Commands, Responses, Notifications */
72
73/* Max number of IEs for direct SSID scans in a command */
74#define PROBE_OPTION_MAX 20
75
76/**
77 * struct iwl_ssid_ie - directed scan network information element
78 *
79 * Up to 20 of these may appear in REPLY_SCAN_CMD,
80 * selected by "type" bit field in struct iwl_scan_channel;
81 * each channel may select different ssids from among the 20 entries.
82 * SSID IEs get transmitted in reverse order of entry.
83 */
84struct iwl_ssid_ie {
85 u8 id;
86 u8 len;
87 u8 ssid[IEEE80211_MAX_SSID_LEN];
88} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
89
90/* scan offload */
91#define IWL_SCAN_MAX_BLACKLIST_LEN 64
92#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
93#define IWL_SCAN_MAX_PROFILES 11
94#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
95
96/* Default watchdog (in MS) for scheduled scan iteration */
97#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
98
99#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
100#define CAN_ABORT_STATUS 1
101
102#define IWL_FULL_SCAN_MULTIPLIER 5
103#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
104#define IWL_MAX_SCHED_SCAN_PLANS 2
105
106enum scan_framework_client {
107 SCAN_CLIENT_SCHED_SCAN = BIT(0),
108 SCAN_CLIENT_NETDETECT = BIT(1),
109 SCAN_CLIENT_ASSET_TRACKING = BIT(2),
110};
111
112/**
113 * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
114 * @ssid: MAC address to filter out
115 * @reported_rssi: AP rssi reported to the host
116 * @client_bitmap: clients ignore this entry - enum scan_framework_client
117 */
118struct iwl_scan_offload_blacklist {
119 u8 ssid[ETH_ALEN];
120 u8 reported_rssi;
121 u8 client_bitmap;
122} __packed;
123
/* Network types a scan-offload profile may match against */
enum iwl_scan_offload_network_type {
	IWL_NETWORK_TYPE_BSS	= 1,
	IWL_NETWORK_TYPE_IBSS	= 2,
	IWL_NETWORK_TYPE_ANY	= 3,
};

/* Band selection for a scan-offload profile */
enum iwl_scan_offload_band_selection {
	IWL_SCAN_OFFLOAD_SELECT_2_4	= 0x4,
	IWL_SCAN_OFFLOAD_SELECT_5_2	= 0x8,
	IWL_SCAN_OFFLOAD_SELECT_ANY	= 0xc,
};
135
136/**
137 * iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
138 * @ssid_index: index to ssid list in fixed part
139 * @unicast_cipher: encryption algorithm to match - bitmap
140 * @aut_alg: authentication algorithm to match - bitmap
141 * @network_type: enum iwl_scan_offload_network_type
142 * @band_selection: enum iwl_scan_offload_band_selection
143 * @client_bitmap: clients waiting for match - enum scan_framework_client
144 */
145struct iwl_scan_offload_profile {
146 u8 ssid_index;
147 u8 unicast_cipher;
148 u8 auth_alg;
149 u8 network_type;
150 u8 band_selection;
151 u8 client_bitmap;
152 u8 reserved[2];
153} __packed;
154
155/**
156 * iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
157 * @blaclist: AP list to filter off from scan results
158 * @profiles: profiles to search for match
159 * @blacklist_len: length of blacklist
160 * @num_profiles: num of profiles in the list
161 * @match_notify: clients waiting for match found notification
162 * @pass_match: clients waiting for the results
163 * @active_clients: active clients bitmap - enum scan_framework_client
164 * @any_beacon_notify: clients waiting for match notification without match
165 */
166struct iwl_scan_offload_profile_cfg {
167 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
168 u8 blacklist_len;
169 u8 num_profiles;
170 u8 match_notify;
171 u8 pass_match;
172 u8 active_clients;
173 u8 any_beacon_notify;
174 u8 reserved[2];
175} __packed;
176
177/**
178 * iwl_scan_schedule_lmac - schedule of scan offload
179 * @delay: delay between iterations, in seconds.
180 * @iterations: num of scan iterations
181 * @full_scan_mul: number of partial scans before each full scan
182 */
183struct iwl_scan_schedule_lmac {
184 __le16 delay;
185 u8 iterations;
186 u8 full_scan_mul;
187} __packed; /* SCAN_SCHEDULE_API_S */
188
/* How an offloaded scan ended */
enum iwl_scan_offload_complete_status {
	IWL_SCAN_OFFLOAD_COMPLETED	= 1,
	IWL_SCAN_OFFLOAD_ABORTED	= 2,
};

/* Energy-Based Scan (EBS) result codes */
enum iwl_scan_ebs_status {
	IWL_SCAN_EBS_SUCCESS,
	IWL_SCAN_EBS_FAILED,
	IWL_SCAN_EBS_CHAN_NOT_FOUND,
	IWL_SCAN_EBS_INACTIVE,
};
200
201/**
202 * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
203 * @tx_flags: combination of TX_CMD_FLG_*
204 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
205 * cleared. Combination of RATE_MCS_*
206 * @sta_id: index of destination station in FW station table
207 * @reserved: for alignment and future use
208 */
209struct iwl_scan_req_tx_cmd {
210 __le32 tx_flags;
211 __le32 rate_n_flags;
212 u8 sta_id;
213 u8 reserved[3];
214} __packed;
215
216enum iwl_scan_channel_flags_lmac {
217 IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27),
218 IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28),
219};
220
221/**
222 * iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
223 * @flags: bits 1-20: directed scan to i'th ssid
224 * other bits &enum iwl_scan_channel_flags_lmac
225 * @channel_number: channel number 1-13 etc
226 * @iter_count: scan iteration on this channel
227 * @iter_interval: interval in seconds between iterations on one channel
228 */
229struct iwl_scan_channel_cfg_lmac {
230 __le32 flags;
231 __le16 channel_num;
232 __le16 iter_count;
233 __le32 iter_interval;
234} __packed;
235
236/*
237 * iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
238 * @offset: offset in the data block
239 * @len: length of the segment
240 */
241struct iwl_scan_probe_segment {
242 __le16 offset;
243 __le16 len;
244} __packed;
245
246/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
247 * @mac_header: first (and common) part of the probe
248 * @band_data: band specific data
249 * @common_data: last (and common) part of the probe
250 * @buf: raw data block
251 */
252struct iwl_scan_probe_req {
253 struct iwl_scan_probe_segment mac_header;
254 struct iwl_scan_probe_segment band_data[2];
255 struct iwl_scan_probe_segment common_data;
256 u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
257} __packed;
258
259enum iwl_scan_channel_flags {
260 IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0),
261 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
262 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
263};
264
265/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
266 * @flags: enum iwl_scan_channel_flags
267 * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
268 * involved.
269 * 1 - EBS is disabled.
270 * 2 - every second scan will be full scan(and so on).
271 */
272struct iwl_scan_channel_opt {
273 __le16 flags;
274 __le16 non_ebs_ratio;
275} __packed;
276
277/**
278 * iwl_mvm_lmac_scan_flags
279 * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
280 * without filtering.
281 * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
282 * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan
283 * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification
284 * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS multiple SSID matching
285 * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
286 * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
287 * and DS parameter set IEs into probe requests.
288 * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
289 */
290enum iwl_mvm_lmac_scan_flags {
291 IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
292 IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1),
293 IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2),
294 IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3),
295 IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
296 IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
297 IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
298 IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
299};
300
/* Coarse scan priority levels */
enum iwl_scan_priority {
	IWL_SCAN_PRIORITY_LOW,
	IWL_SCAN_PRIORITY_MEDIUM,
	IWL_SCAN_PRIORITY_HIGH,
};

/* Extended (8-level) scan priority scale, lowest to highest */
enum iwl_scan_priority_ext {
	IWL_SCAN_PRIORITY_EXT_0_LOWEST,
	IWL_SCAN_PRIORITY_EXT_1,
	IWL_SCAN_PRIORITY_EXT_2,
	IWL_SCAN_PRIORITY_EXT_3,
	IWL_SCAN_PRIORITY_EXT_4,
	IWL_SCAN_PRIORITY_EXT_5,
	IWL_SCAN_PRIORITY_EXT_6,
	IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
};
317
318/**
319 * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
320 * @reserved1: for alignment and future use
321 * @channel_num: num of channels to scan
322 * @active-dwell: dwell time for active channels
323 * @passive-dwell: dwell time for passive channels
324 * @fragmented-dwell: dwell time for fragmented passive scan
325 * @reserved2: for alignment and future use
326 * @rx_chain_selct: PHY_RX_CHAIN_* flags
327 * @scan_flags: &enum iwl_mvm_lmac_scan_flags
328 * @max_out_time: max time (in TU) to be out of associated channel
329 * @suspend_time: pause scan this long (TUs) when returning to service channel
330 * @flags: RXON flags
331 * @filter_flags: RXON filter
332 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz
333 * @direct_scan: list of SSIDs for directed active scan
334 * @scan_prio: enum iwl_scan_priority
335 * @iter_num: number of scan iterations
336 * @delay: delay in seconds before first iteration
337 * @schedule: two scheduling plans. The first one is finite, the second one can
338 * be infinite.
339 * @channel_opt: channel optimization options, for full and partial scan
340 * @data: channel configuration and probe request packet.
341 */
342struct iwl_scan_req_lmac {
343 /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
344 __le32 reserved1;
345 u8 n_channels;
346 u8 active_dwell;
347 u8 passive_dwell;
348 u8 fragmented_dwell;
349 __le16 reserved2;
350 __le16 rx_chain_select;
351 __le32 scan_flags;
352 __le32 max_out_time;
353 __le32 suspend_time;
354 /* RX_ON_FLAGS_API_S_VER_1 */
355 __le32 flags;
356 __le32 filter_flags;
357 struct iwl_scan_req_tx_cmd tx_cmd[2];
358 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
359 __le32 scan_prio;
360 /* SCAN_REQ_PERIODIC_PARAMS_API_S */
361 __le32 iter_num;
362 __le32 delay;
363 struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
364 struct iwl_scan_channel_opt channel_opt[2];
365 u8 data[];
366} __packed;
367
368/**
369 * struct iwl_scan_results_notif - scan results for one channel -
370 * SCAN_RESULT_NTF_API_S_VER_3
371 * @channel: which channel the results are from
372 * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
373 * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
374 * @num_probe_not_sent: # of request that weren't sent due to not enough time
375 * @duration: duration spent in channel, in usecs
376 */
377struct iwl_scan_results_notif {
378 u8 channel;
379 u8 band;
380 u8 probe_status;
381 u8 num_probe_not_sent;
382 __le32 duration;
383} __packed;
384
385/**
386 * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels)
387 * SCAN_COMPLETE_NTF_API_S_VER_3
388 * @scanned_channels: number of channels scanned (and number of valid results)
389 * @status: one of SCAN_COMP_STATUS_*
390 * @bt_status: BT on/off status
391 * @last_channel: last channel that was scanned
392 * @tsf_low: TSF timer (lower half) in usecs
393 * @tsf_high: TSF timer (higher half) in usecs
394 * @results: an array of scan results, only "scanned_channels" of them are valid
395 */
396struct iwl_lmac_scan_complete_notif {
397 u8 scanned_channels;
398 u8 status;
399 u8 bt_status;
400 u8 last_channel;
401 __le32 tsf_low;
402 __le32 tsf_high;
403 struct iwl_scan_results_notif results[];
404} __packed;
405
406/**
407 * iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
408 * @last_schedule_line: last schedule line executed (fast or regular)
409 * @last_schedule_iteration: last scan iteration executed before scan abort
410 * @status: enum iwl_scan_offload_complete_status
411 * @ebs_status: EBS success status &enum iwl_scan_ebs_status
412 * @time_after_last_iter; time in seconds elapsed after last iteration
413 */
414struct iwl_periodic_scan_complete {
415 u8 last_schedule_line;
416 u8 last_schedule_iteration;
417 u8 status;
418 u8 ebs_status;
419 __le32 time_after_last_iter;
420 __le32 reserved;
421} __packed;
422
423/* UMAC Scan API */
424
425/* The maximum of either of these cannot exceed 8, because we use an
426 * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
427 */
428#define IWL_MVM_MAX_UMAC_SCANS 8
429#define IWL_MVM_MAX_LMAC_SCANS 1
430
431enum scan_config_flags {
432 SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
433 SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
434 SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
435 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
436 SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
437 SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
438 SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
439 SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
440 SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
441 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
442 SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
443 SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
444 SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
445 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
446 SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
447 SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
448 SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
449 SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
450
451 /* Bits 26-31 are for num of channels in channel_array */
452#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
453};
454
455enum scan_config_rates {
456 /* OFDM basic rates */
457 SCAN_CONFIG_RATE_6M = BIT(0),
458 SCAN_CONFIG_RATE_9M = BIT(1),
459 SCAN_CONFIG_RATE_12M = BIT(2),
460 SCAN_CONFIG_RATE_18M = BIT(3),
461 SCAN_CONFIG_RATE_24M = BIT(4),
462 SCAN_CONFIG_RATE_36M = BIT(5),
463 SCAN_CONFIG_RATE_48M = BIT(6),
464 SCAN_CONFIG_RATE_54M = BIT(7),
465 /* CCK basic rates */
466 SCAN_CONFIG_RATE_1M = BIT(8),
467 SCAN_CONFIG_RATE_2M = BIT(9),
468 SCAN_CONFIG_RATE_5M = BIT(10),
469 SCAN_CONFIG_RATE_11M = BIT(11),
470
471 /* Bits 16-27 are for supported rates */
472#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
473};
474
475enum iwl_channel_flags {
476 IWL_CHANNEL_FLAG_EBS = BIT(0),
477 IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
478 IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
479 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
480};
481
482/**
483 * struct iwl_scan_config
484 * @flags: enum scan_config_flags
485 * @tx_chains: valid_tx antenna - ANT_* definitions
486 * @rx_chains: valid_rx antenna - ANT_* definitions
487 * @legacy_rates: default legacy rates - enum scan_config_rates
488 * @out_of_channel_time: default max out of serving channel time
489 * @suspend_time: default max suspend time
490 * @dwell_active: default dwell time for active scan
491 * @dwell_passive: default dwell time for passive scan
492 * @dwell_fragmented: default dwell time for fragmented scan
493 * @reserved: for future use and alignment
494 * @mac_addr: default mac address to be used in probes
495 * @bcast_sta_id: the index of the station in the fw
496 * @channel_flags: default channel flags - enum iwl_channel_flags
497 * scan_config_channel_flag
498 * @channel_array: default supported channels
499 */
500struct iwl_scan_config {
501 __le32 flags;
502 __le32 tx_chains;
503 __le32 rx_chains;
504 __le32 legacy_rates;
505 __le32 out_of_channel_time;
506 __le32 suspend_time;
507 u8 dwell_active;
508 u8 dwell_passive;
509 u8 dwell_fragmented;
510 u8 reserved;
511 u8 mac_addr[ETH_ALEN];
512 u8 bcast_sta_id;
513 u8 channel_flags;
514 u8 channel_array[];
515} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
516
517/**
518 * iwl_umac_scan_flags
519 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
520 * can be preempted by other scan requests with higher priority.
521 * The low priority scan will be resumed when the higher proirity scan is
522 * completed.
523 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
524 * when scan starts.
525 */
526enum iwl_umac_scan_flags {
527 IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
528 IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
529};
530
531enum iwl_umac_scan_uid_offsets {
532 IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
533 IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
534};
535
536enum iwl_umac_scan_general_flags {
537 IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
538 IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
539 IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
540 IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
541 IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
542 IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
543 IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
544 IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
545 IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
546 IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9)
547};
548
549/**
550 * struct iwl_scan_channel_cfg_umac
551 * @flags: bitmap - 0-19: directed scan to i'th ssid.
552 * @channel_num: channel number 1-13 etc.
553 * @iter_count: repetition count for the channel.
554 * @iter_interval: interval between two scan iterations on one channel.
555 */
556struct iwl_scan_channel_cfg_umac {
557 __le32 flags;
558 u8 channel_num;
559 u8 iter_count;
560 __le16 iter_interval;
561} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
562
563/**
564 * struct iwl_scan_umac_schedule
565 * @interval: interval in seconds between scan iterations
566 * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
567 * @reserved: for alignment and future use
568 */
569struct iwl_scan_umac_schedule {
570 __le16 interval;
571 u8 iter_count;
572 u8 reserved;
573} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
574
575/**
576 * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
577 * parameters following channels configuration array.
578 * @schedule: two scheduling plans.
579 * @delay: delay in TUs before starting the first scan iteration
580 * @reserved: for future use and alignment
581 * @preq: probe request with IEs blocks
582 * @direct_scan: list of SSIDs for directed active scan
583 */
584struct iwl_scan_req_umac_tail {
585 /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
586 struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
587 __le16 delay;
588 __le16 reserved;
589 /* SCAN_PROBE_PARAMS_API_S_VER_1 */
590 struct iwl_scan_probe_req preq;
591 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
592} __packed;
593
594/**
595 * struct iwl_scan_req_umac
596 * @flags: &enum iwl_umac_scan_flags
597 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
598 * @ooc_priority: out of channel priority - &enum iwl_scan_priority
599 * @general_flags: &enum iwl_umac_scan_general_flags
600 * @reserved1: for future use and alignment
601 * @active_dwell: dwell time for active scan
602 * @passive_dwell: dwell time for passive scan
603 * @fragmented_dwell: dwell time for fragmented passive scan
604 * @max_out_time: max out of serving channel time
605 * @suspend_time: max suspend time
606 * @scan_priority: scan internal prioritization &enum iwl_scan_priority
607 * @channel_flags: &enum iwl_scan_channel_flags
608 * @n_channels: num of channels in scan request
609 * @reserved2: for future use and alignment
610 * @data: &struct iwl_scan_channel_cfg_umac and
611 * &struct iwl_scan_req_umac_tail
612 */
613struct iwl_scan_req_umac {
614 __le32 flags;
615 __le32 uid;
616 __le32 ooc_priority;
617 /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
618 __le32 general_flags;
619 u8 reserved1;
620 u8 active_dwell;
621 u8 passive_dwell;
622 u8 fragmented_dwell;
623 __le32 max_out_time;
624 __le32 suspend_time;
625 __le32 scan_priority;
626 /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
627 u8 channel_flags;
628 u8 n_channels;
629 __le16 reserved2;
630 u8 data[];
631} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
632
633/**
634 * struct iwl_umac_scan_abort
635 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
636 * @flags: reserved
637 */
638struct iwl_umac_scan_abort {
639 __le32 uid;
640 __le32 flags;
641} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
642
643/**
644 * struct iwl_umac_scan_complete
645 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
646 * @last_schedule: last scheduling line
647 * @last_iter: last scan iteration number
648 * @scan status: &enum iwl_scan_offload_complete_status
649 * @ebs_status: &enum iwl_scan_ebs_status
650 * @time_from_last_iter: time elapsed from last iteration
651 * @reserved: for future use
652 */
653struct iwl_umac_scan_complete {
654 __le32 uid;
655 u8 last_schedule;
656 u8 last_iter;
657 u8 status;
658 u8 ebs_status;
659 __le32 time_from_last_iter;
660 __le32 reserved;
661} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
662
663#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
664/**
665 * struct iwl_scan_offload_profile_match - match information
666 * @bssid: matched bssid
667 * @channel: channel where the match occurred
668 * @energy:
669 * @matching_feature:
670 * @matching_channels: bitmap of channels that matched, referencing
671 * the channels passed in tue scan offload request
672 */
673struct iwl_scan_offload_profile_match {
674 u8 bssid[ETH_ALEN];
675 __le16 reserved;
676 u8 channel;
677 u8 energy;
678 u8 matching_feature;
679 u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
680} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
681
682/**
683 * struct iwl_scan_offload_profiles_query - match results query response
684 * @matched_profiles: bitmap of matched profiles, referencing the
685 * matches passed in the scan offload request
686 * @last_scan_age: age of the last offloaded scan
687 * @n_scans_done: number of offloaded scans done
688 * @gp2_d0u: GP2 when D0U occurred
689 * @gp2_invoked: GP2 when scan offload was invoked
690 * @resume_while_scanning: not used
691 * @self_recovery: obsolete
692 * @reserved: reserved
693 * @matches: array of match information, one for each match
694 */
695struct iwl_scan_offload_profiles_query {
696 __le32 matched_profiles;
697 __le32 last_scan_age;
698 __le32 n_scans_done;
699 __le32 gp2_d0u;
700 __le32 gp2_invoked;
701 u8 resume_while_scanning;
702 u8 self_recovery;
703 __le16 reserved;
704 struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
705} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
706
707/**
708 * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
709 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
710 * @scanned_channels: number of channels scanned and number of valid elements in
711 * results array
712 * @status: one of SCAN_COMP_STATUS_*
713 * @bt_status: BT on/off status
714 * @last_channel: last channel that was scanned
715 * @tsf_low: TSF timer (lower half) in usecs
716 * @tsf_high: TSF timer (higher half) in usecs
717 * @results: array of scan results, only "scanned_channels" of them are valid
718 */
719struct iwl_umac_scan_iter_complete_notif {
720 __le32 uid;
721 u8 scanned_channels;
722 u8 status;
723 u8 bt_status;
724 u8 last_channel;
725 __le32 tsf_low;
726 __le32 tsf_high;
727 struct iwl_scan_results_notif results[];
728} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
729
730#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
new file mode 100644
index 000000000000..493a8bdfbc9e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -0,0 +1,414 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64
65#ifndef __fw_api_sta_h__
66#define __fw_api_sta_h__
67
68/**
69 * enum iwl_sta_flags - flags for the ADD_STA host command
70 * @STA_FLG_REDUCED_TX_PWR_CTRL:
71 * @STA_FLG_REDUCED_TX_PWR_DATA:
72 * @STA_FLG_DISABLE_TX: set if TX should be disabled
73 * @STA_FLG_PS: set if STA is in Power Save
74 * @STA_FLG_INVALID: set if STA is invalid
75 * @STA_FLG_DLP_EN: Direct Link Protocol is enabled
76 * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
77 * @STA_FLG_DRAIN_FLOW: drain flow
78 * @STA_FLG_PAN: STA is for PAN interface
79 * @STA_FLG_CLASS_AUTH:
80 * @STA_FLG_CLASS_ASSOC:
81 * @STA_FLG_CLASS_MIMO_PROT:
82 * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
83 * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
84 * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
85 * initialised by driver and can be updated by fw upon reception of
86 * action frames that can change the channel width. When cleared the fw
87 * will send all the frames in 20MHz even when FAT channel is requested.
88 * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
89 * driver and can be updated by fw upon reception of action frames.
90 * @STA_FLG_MFP_EN: Management Frame Protection
91 */
92enum iwl_sta_flags {
93 STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3),
94 STA_FLG_REDUCED_TX_PWR_DATA = BIT(6),
95
96 STA_FLG_DISABLE_TX = BIT(4),
97
98 STA_FLG_PS = BIT(8),
99 STA_FLG_DRAIN_FLOW = BIT(12),
100 STA_FLG_PAN = BIT(13),
101 STA_FLG_CLASS_AUTH = BIT(14),
102 STA_FLG_CLASS_ASSOC = BIT(15),
103 STA_FLG_RTS_MIMO_PROT = BIT(17),
104
105 STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
106 STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
107 STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
108 STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
109 STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
110 STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
111 STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
112 STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
113 STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
114 STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
115
116 STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
117 STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
118 STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
119 STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
120 STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
121 STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
122
123 STA_FLG_FAT_EN_20MHZ = (0 << 26),
124 STA_FLG_FAT_EN_40MHZ = (1 << 26),
125 STA_FLG_FAT_EN_80MHZ = (2 << 26),
126 STA_FLG_FAT_EN_160MHZ = (3 << 26),
127 STA_FLG_FAT_EN_MSK = (3 << 26),
128
129 STA_FLG_MIMO_EN_SISO = (0 << 28),
130 STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
131 STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
132 STA_FLG_MIMO_EN_MSK = (3 << 28),
133};
134
135/**
136 * enum iwl_sta_key_flag - key flags for the ADD_STA host command
137 * @STA_KEY_FLG_NO_ENC: no encryption
138 * @STA_KEY_FLG_WEP: WEP encryption algorithm
139 * @STA_KEY_FLG_CCM: CCMP encryption algorithm
140 * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
141 * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
142 * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
143 * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
144 * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value
145 * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
146 * station info array (1 - n 1X mode)
147 * @STA_KEY_FLG_KEYID_MSK: the index of the key
148 * @STA_KEY_NOT_VALID: key is invalid
149 * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
150 * @STA_KEY_MULTICAST: set for multical key
151 * @STA_KEY_MFP: key is used for Management Frame Protection
152 */
153enum iwl_sta_key_flag {
154 STA_KEY_FLG_NO_ENC = (0 << 0),
155 STA_KEY_FLG_WEP = (1 << 0),
156 STA_KEY_FLG_CCM = (2 << 0),
157 STA_KEY_FLG_TKIP = (3 << 0),
158 STA_KEY_FLG_EXT = (4 << 0),
159 STA_KEY_FLG_CMAC = (6 << 0),
160 STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
161 STA_KEY_FLG_EN_MSK = (7 << 0),
162
163 STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
164 STA_KEY_FLG_KEYID_POS = 8,
165 STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
166 STA_KEY_NOT_VALID = BIT(11),
167 STA_KEY_FLG_WEP_13BYTES = BIT(12),
168 STA_KEY_MULTICAST = BIT(14),
169 STA_KEY_MFP = BIT(15),
170};
171
172/**
173 * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
174 * @STA_MODIFY_KEY: this command modifies %key
175 * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
176 * @STA_MODIFY_TX_RATE: unused
177 * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
178 * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
179 * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
180 * @STA_MODIFY_PROT_TH:
181 * @STA_MODIFY_QUEUES: modify the queues used by this station
182 */
183enum iwl_sta_modify_flag {
184 STA_MODIFY_KEY = BIT(0),
185 STA_MODIFY_TID_DISABLE_TX = BIT(1),
186 STA_MODIFY_TX_RATE = BIT(2),
187 STA_MODIFY_ADD_BA_TID = BIT(3),
188 STA_MODIFY_REMOVE_BA_TID = BIT(4),
189 STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
190 STA_MODIFY_PROT_TH = BIT(6),
191 STA_MODIFY_QUEUES = BIT(7),
192};
193
194#define STA_MODE_MODIFY 1
195
196/**
197 * enum iwl_sta_sleep_flag - type of sleep of the station
198 * @STA_SLEEP_STATE_AWAKE:
199 * @STA_SLEEP_STATE_PS_POLL:
200 * @STA_SLEEP_STATE_UAPSD:
201 * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
202 * (last) released frame
203 */
204enum iwl_sta_sleep_flag {
205 STA_SLEEP_STATE_AWAKE = 0,
206 STA_SLEEP_STATE_PS_POLL = BIT(0),
207 STA_SLEEP_STATE_UAPSD = BIT(1),
208 STA_SLEEP_STATE_MOREDATA = BIT(2),
209};
210
/* STA ID and color bits definitions */
#define STA_ID_SEED		(0x0f)
#define STA_ID_POS		(0)
#define STA_ID_MSK		(STA_ID_SEED << STA_ID_POS)

#define STA_COLOR_SEED		(0x7)
#define STA_COLOR_POS		(4)
#define STA_COLOR_MSK		(STA_COLOR_SEED << STA_COLOR_POS)

/* extract the 3-bit color / 4-bit station id from an id_n_color word */
#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
	(((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
#define STA_ID_N_COLOR_GET_ID(id_n_color)    \
	(((id_n_color) & STA_ID_MSK) >> STA_ID_POS)

#define STA_KEY_MAX_NUM (16)
#define STA_KEY_IDX_INVALID (0xff)
#define STA_KEY_MAX_DATA_KEY_NUM (4)
#define IWL_MAX_GLOBAL_KEYS (4)
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)
231
232/**
233 * struct iwl_mvm_keyinfo - key information
234 * @key_flags: type %iwl_sta_key_flag
235 * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
236 * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
237 * @key_offset: key offset in the fw's key table
238 * @key: 16-byte unicast decryption key
239 * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
240 * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
241 * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
242 */
243struct iwl_mvm_keyinfo {
244 __le16 key_flags;
245 u8 tkip_rx_tsc_byte2;
246 u8 reserved1;
247 __le16 tkip_rx_ttak[5];
248 u8 key_offset;
249 u8 reserved2;
250 u8 key[16];
251 __le64 tx_secur_seq_cnt;
252 __le64 hw_tkip_mic_rx_key;
253 __le64 hw_tkip_mic_tx_key;
254} __packed;
255
256/**
257 * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
258 * ( REPLY_ADD_STA = 0x18 )
259 * @add_modify: 1: modify existing, 0: add new station
260 * @awake_acs:
261 * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
262 * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
263 * @mac_id_n_color: the Mac context this station belongs to
264 * @addr[ETH_ALEN]: station's MAC address
265 * @sta_id: index of station in uCode's station table
266 * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
267 * alone. 1 - modify, 0 - don't change.
268 * @station_flags: look at %iwl_sta_flags
269 * @station_flags_msk: what of %station_flags have changed
270 * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
271 * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
272 * add_immediate_ba_ssn.
273 * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
274 * Set %STA_MODIFY_REMOVE_BA_TID to use this field
275 * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
276 * add_immediate_ba_tid.
277 * @sleep_tx_count: number of packets to transmit to station even though it is
278 * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
279 * keeps track of STA sleep state.
280 * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
281 * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
282 * mac-addr.
283 * @beamform_flags: beam forming controls
284 * @tfd_queue_msk: tfd queues used by this station
285 *
286 * The device contains an internal table of per-station information, with info
287 * on security keys, aggregation parameters, and Tx rates for initial Tx
288 * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
289 *
290 * ADD_STA sets up the table entry for one station, either creating a new
291 * entry, or modifying a pre-existing one.
292 */
293struct iwl_mvm_add_sta_cmd {
294 u8 add_modify;
295 u8 awake_acs;
296 __le16 tid_disable_tx;
297 __le32 mac_id_n_color;
298 u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
299 __le16 reserved2;
300 u8 sta_id;
301 u8 modify_mask;
302 __le16 reserved3;
303 __le32 station_flags;
304 __le32 station_flags_msk;
305 u8 add_immediate_ba_tid;
306 u8 remove_immediate_ba_tid;
307 __le16 add_immediate_ba_ssn;
308 __le16 sleep_tx_count;
309 __le16 sleep_state_flags;
310 __le16 assoc_id;
311 __le16 beamform_flags;
312 __le32 tfd_queue_msk;
313} __packed; /* ADD_STA_CMD_API_S_VER_7 */
314
315/**
316 * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
317 * ( REPLY_ADD_STA_KEY = 0x17 )
318 * @sta_id: index of station in uCode's station table
319 * @key_offset: key offset in key storage
320 * @key_flags: type %iwl_sta_key_flag
321 * @key: key material data
322 * @key2: key material data
323 * @rx_secur_seq_cnt: RX security sequence counter for the key
324 * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
325 * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
326 */
327struct iwl_mvm_add_sta_key_cmd {
328 u8 sta_id;
329 u8 key_offset;
330 __le16 key_flags;
331 u8 key[16];
332 u8 key2[16];
333 u8 rx_secur_seq_cnt[16];
334 u8 tkip_rx_tsc_byte2;
335 u8 reserved;
336 __le16 tkip_rx_ttak[5];
337} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
338
/**
 * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
 * @ADD_STA_SUCCESS: operation was executed successfully
 * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
 * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
 * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that
 *	doesn't exist.
 */
enum iwl_mvm_add_sta_rsp_status {
	ADD_STA_SUCCESS			= 0x1,
	ADD_STA_STATIONS_OVERLOAD	= 0x2,
	ADD_STA_IMMEDIATE_BA_FAILURE	= 0x4,
	ADD_STA_MODIFY_NON_EXISTING_STA	= 0x8,
};
353
354/**
355 * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table
356 * ( REMOVE_STA = 0x19 )
357 * @sta_id: the station id of the station to be removed
358 */
359struct iwl_mvm_rm_sta_cmd {
360 u8 sta_id;
361 u8 reserved[3];
362} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
363
364/**
365 * struct iwl_mvm_mgmt_mcast_key_cmd
366 * ( MGMT_MCAST_KEY = 0x1f )
367 * @ctrl_flags: %iwl_sta_key_flag
368 * @IGTK:
369 * @K1: unused
370 * @K2: unused
371 * @sta_id: station ID that support IGTK
372 * @key_id:
373 * @receive_seq_cnt: initial RSC/PN needed for replay check
374 */
375struct iwl_mvm_mgmt_mcast_key_cmd {
376 __le32 ctrl_flags;
377 u8 IGTK[16];
378 u8 K1[16];
379 u8 K2[16];
380 __le32 key_id;
381 __le32 sta_id;
382 __le64 receive_seq_cnt;
383} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
384
385struct iwl_mvm_wep_key {
386 u8 key_index;
387 u8 key_offset;
388 __le16 reserved1;
389 u8 key_size;
390 u8 reserved2[3];
391 u8 key[16];
392} __packed;
393
394struct iwl_mvm_wep_key_cmd {
395 __le32 mac_id_n_color;
396 u8 num_keys;
397 u8 decryption_type;
398 u8 flags;
399 u8 reserved;
400 struct iwl_mvm_wep_key wep_key[0];
401} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
402
403/**
404 * struct iwl_mvm_eosp_notification - EOSP notification from firmware
405 * @remain_frame_count: # of frames remaining, non-zero if SP was cut
406 * short by GO absence
407 * @sta_id: station ID
408 */
409struct iwl_mvm_eosp_notification {
410 __le32 remain_frame_count;
411 __le32 sta_id;
412} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
413
414#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
new file mode 100644
index 000000000000..0c321f63ee42
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
@@ -0,0 +1,284 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __fw_api_stats_h__
67#define __fw_api_stats_h__
68#include "fw-api-mac.h"
69
70struct mvm_statistics_dbg {
71 __le32 burst_check;
72 __le32 burst_count;
73 __le32 wait_for_silence_timeout_cnt;
74 __le32 reserved[3];
75} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
76
77struct mvm_statistics_div {
78 __le32 tx_on_a;
79 __le32 tx_on_b;
80 __le32 exec_time;
81 __le32 probe_time;
82 __le32 rssi_ant;
83 __le32 reserved2;
84} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
85
86struct mvm_statistics_rx_non_phy {
87 __le32 bogus_cts; /* CTS received when not expecting CTS */
88 __le32 bogus_ack; /* ACK received when not expecting ACK */
89 __le32 non_bssid_frames; /* number of frames with BSSID that
90 * doesn't belong to the STA BSSID */
91 __le32 filtered_frames; /* count frames that were dumped in the
92 * filtering process */
93 __le32 non_channel_beacons; /* beacons with our bss id but not on
94 * our serving channel */
95 __le32 channel_beacons; /* beacons with our bss id and in our
96 * serving channel */
97 __le32 num_missed_bcon; /* number of missed beacons */
98 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
99 * ADC was in saturation */
100 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
101 * for INA */
102 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
103 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
104 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
105 __le32 interference_data_flag; /* flag for interference data
106 * availability. 1 when data is
107 * available. */
108 __le32 channel_load; /* counts RX Enable time in uSec */
109 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
110 * and CCK) counter */
111 __le32 beacon_rssi_a;
112 __le32 beacon_rssi_b;
113 __le32 beacon_rssi_c;
114 __le32 beacon_energy_a;
115 __le32 beacon_energy_b;
116 __le32 beacon_energy_c;
117 __le32 num_bt_kills;
118 __le32 mac_id;
119 __le32 directed_data_mpdu;
120} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
121
122struct mvm_statistics_rx_phy {
123 __le32 ina_cnt;
124 __le32 fina_cnt;
125 __le32 plcp_err;
126 __le32 crc32_err;
127 __le32 overrun_err;
128 __le32 early_overrun_err;
129 __le32 crc32_good;
130 __le32 false_alarm_cnt;
131 __le32 fina_sync_err_cnt;
132 __le32 sfd_timeout;
133 __le32 fina_timeout;
134 __le32 unresponded_rts;
135 __le32 rxe_frame_lmt_overrun;
136 __le32 sent_ack_cnt;
137 __le32 sent_cts_cnt;
138 __le32 sent_ba_rsp_cnt;
139 __le32 dsp_self_kill;
140 __le32 mh_format_err;
141 __le32 re_acq_main_rssi_sum;
142 __le32 reserved;
143} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
144
145struct mvm_statistics_rx_ht_phy {
146 __le32 plcp_err;
147 __le32 overrun_err;
148 __le32 early_overrun_err;
149 __le32 crc32_good;
150 __le32 crc32_err;
151 __le32 mh_format_err;
152 __le32 agg_crc32_good;
153 __le32 agg_mpdu_cnt;
154 __le32 agg_cnt;
155 __le32 unsupport_mcs;
156} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
157
158struct mvm_statistics_tx_non_phy {
159 __le32 preamble_cnt;
160 __le32 rx_detected_cnt;
161 __le32 bt_prio_defer_cnt;
162 __le32 bt_prio_kill_cnt;
163 __le32 few_bytes_cnt;
164 __le32 cts_timeout;
165 __le32 ack_timeout;
166 __le32 expected_ack_cnt;
167 __le32 actual_ack_cnt;
168 __le32 dump_msdu_cnt;
169 __le32 burst_abort_next_frame_mismatch_cnt;
170 __le32 burst_abort_missing_next_frame_cnt;
171 __le32 cts_timeout_collision;
172 __le32 ack_or_ba_timeout_collision;
173} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
174
175#define MAX_CHAINS 3
176
177struct mvm_statistics_tx_non_phy_agg {
178 __le32 ba_timeout;
179 __le32 ba_reschedule_frames;
180 __le32 scd_query_agg_frame_cnt;
181 __le32 scd_query_no_agg;
182 __le32 scd_query_agg;
183 __le32 scd_query_mismatch;
184 __le32 frame_not_ready;
185 __le32 underrun;
186 __le32 bt_prio_kill;
187 __le32 rx_ba_rsp_cnt;
188 __s8 txpower[MAX_CHAINS];
189 __s8 reserved;
190 __le32 reserved2;
191} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
192
193struct mvm_statistics_tx_channel_width {
194 __le32 ext_cca_narrow_ch20[1];
195 __le32 ext_cca_narrow_ch40[2];
196 __le32 ext_cca_narrow_ch80[3];
197 __le32 ext_cca_narrow_ch160[4];
198 __le32 last_tx_ch_width_indx;
199 __le32 rx_detected_per_ch_width[4];
200 __le32 success_per_ch_width[4];
201 __le32 fail_per_ch_width[4];
202}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
203
204struct mvm_statistics_tx {
205 struct mvm_statistics_tx_non_phy general;
206 struct mvm_statistics_tx_non_phy_agg agg;
207 struct mvm_statistics_tx_channel_width channel_width;
208} __packed; /* STATISTICS_TX_API_S_VER_4 */
209
210
211struct mvm_statistics_bt_activity {
212 __le32 hi_priority_tx_req_cnt;
213 __le32 hi_priority_tx_denied_cnt;
214 __le32 lo_priority_tx_req_cnt;
215 __le32 lo_priority_tx_denied_cnt;
216 __le32 hi_priority_rx_req_cnt;
217 __le32 hi_priority_rx_denied_cnt;
218 __le32 lo_priority_rx_req_cnt;
219 __le32 lo_priority_rx_denied_cnt;
220} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
221
222struct mvm_statistics_general_v8 {
223 __le32 radio_temperature;
224 __le32 radio_voltage;
225 struct mvm_statistics_dbg dbg;
226 __le32 sleep_time;
227 __le32 slots_out;
228 __le32 slots_idle;
229 __le32 ttl_timestamp;
230 struct mvm_statistics_div slow_div;
231 __le32 rx_enable_counter;
232 /*
233 * num_of_sos_states:
234 * count the number of times we have to re-tune
235 * in order to get out of bad PHY status
236 */
237 __le32 num_of_sos_states;
238 __le32 beacon_filtered;
239 __le32 missed_beacons;
240 u8 beacon_filter_average_energy;
241 u8 beacon_filter_reason;
242 u8 beacon_filter_current_energy;
243 u8 beacon_filter_reserved;
244 __le32 beacon_filter_delta_time;
245 struct mvm_statistics_bt_activity bt_activity;
246 __le64 rx_time;
247 __le64 on_time_rf;
248 __le64 on_time_scan;
249 __le64 tx_time;
250 __le32 beacon_counter[NUM_MAC_INDEX];
251 u8 beacon_average_energy[NUM_MAC_INDEX];
252 u8 reserved[4 - (NUM_MAC_INDEX % 4)];
253} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
254
255struct mvm_statistics_rx {
256 struct mvm_statistics_rx_phy ofdm;
257 struct mvm_statistics_rx_phy cck;
258 struct mvm_statistics_rx_non_phy general;
259 struct mvm_statistics_rx_ht_phy ofdm_ht;
260} __packed; /* STATISTICS_RX_API_S_VER_3 */
261
262/*
263 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
264 *
265 * By default, uCode issues this notification after receiving a beacon
266 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
267 * STATISTICS_CMD (0x9c), below.
268 */
269
270struct iwl_notif_statistics_v10 {
271 __le32 flag;
272 struct mvm_statistics_rx rx;
273 struct mvm_statistics_tx tx;
274 struct mvm_statistics_general_v8 general;
275} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
276
277#define IWL_STATISTICS_FLG_CLEAR 0x1
278#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2
279
280struct iwl_statistics_cmd {
281 __le32 flags;
282} __packed; /* STATISTICS_CMD_API_S_VER_1 */
283
284#endif /* __fw_api_stats_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h
new file mode 100644
index 000000000000..eed6271d01a3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h
@@ -0,0 +1,386 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Deutschland GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2015 Intel Deutschland GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __fw_api_tof_h__
64#define __fw_api_tof_h__
65
66#include "fw-api.h"
67
68/* ToF sub-group command IDs */
69enum iwl_mvm_tof_sub_grp_ids {
70	TOF_RANGE_REQ_CMD = 0x1,		/* start a range measurement session */
71	TOF_CONFIG_CMD = 0x2,			/* global ToF configuration */
72	TOF_RANGE_ABORT_CMD = 0x3,		/* abort an ongoing range request */
73	TOF_RANGE_REQ_EXT_CMD = 0x4,		/* extended (WLS) range request params */
74	TOF_RESPONDER_CONFIG_CMD = 0x5,		/* AP/responder mode config (debug) */
75	TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
76	TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
77	/* notifications from fw to host use the high ID range */
78	TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
79	TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
80	TOF_RANGE_RESPONSE_NOTIF = 0xFE,
81	TOF_MCSI_DEBUG_NOTIF = 0xFB,
82};
82
83/**
84 * struct iwl_tof_config_cmd - ToF configuration
85 * @tof_disabled: 0 enabled, 1 - disabled
86 * @one_sided_disabled: 0 enabled, 1 - disabled
87 * @is_debug_mode: 1 debug mode, 0 - otherwise
88 * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise
89 */
90struct iwl_tof_config_cmd {
91	__le32 sub_grp_cmd_id;	/* one of enum iwl_mvm_tof_sub_grp_ids (presumably TOF_CONFIG_CMD) */
92	u8 tof_disabled;
93	u8 one_sided_disabled;
94	u8 is_debug_mode;
95	u8 is_buf_required;
96} __packed;
97
98/**
99 * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
100 * @burst_period: future use: (currently hard coded in the LMAC)
101 * The interval between two sequential bursts.
102 * @min_delta_ftm: future use: (currently hard coded in the LMAC)
103 * The minimum delay between two sequential FTM Responses
104 * in the same burst.
105 * @burst_duration: future use: (currently hard coded in the LMAC)
106 * The total time for all FTMs handshake in the same burst.
107 * Affect the time events duration in the LMAC.
108 * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
109 * The number of bursts for the current ToF request. Affect
110 * the number of events allocations in the current iteration.
111 * @get_ch_est: for xVT only, NA for driver
112 * @abort_responder: when set to '1' - Responder will terminate its activity
113 * (all other fields in the command are ignored)
114 * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
115 *	params and use the recommended Initiator params.
116 * 0 - otherwise
117 * @channel_num: current AP Channel
118 * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
119 * @rate: current AP rate
120 * @ctrl_ch_position: coding of the control channel position relative to
121 * the center frequency.
122 * 40MHz 0 below center, 1 above center
123 * 80MHz bits [0..1]: 0 the near 20MHz to the center,
124 * 1 the far 20MHz to the center
125 * bit[2] as above 40MHz
126 * @ftm_per_burst: FTMs per Burst
127 * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
128 * '1' - we measure over the Initial FTM Response
129 * @asap_mode: ASAP / Non ASAP mode for the current WLS station
130 * @sta_id: index of the AP STA when in AP mode
131 * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
132 * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
133 * purposes, simulating station movement by adding various values
134 * to this field
135 * @bssid: Current AP BSSID
136 */
137struct iwl_tof_responder_config_cmd {	/* field semantics in the kerneldoc above */
138	__le32 sub_grp_cmd_id;	/* presumably TOF_RESPONDER_CONFIG_CMD */
139	__le16 burst_period;
140	u8 min_delta_ftm;
141	u8 burst_duration;
142	u8 num_of_burst_exp;
143	u8 get_ch_est;
144	u8 abort_responder;
145	u8 recv_sta_req_params;
146	u8 channel_num;
147	u8 bandwidth;
148	u8 rate;
149	u8 ctrl_ch_position;
150	u8 ftm_per_burst;
151	u8 ftm_resp_ts_avail;
152	u8 asap_mode;
153	u8 sta_id;
154	__le16 tsf_timer_offset_msecs;
155	__le16 toa_offset;	/* debug-only artificial ToA offset, 0.1 nsec units */
156	u8 bssid[ETH_ALEN];
157} __packed;
158
159/**
160 * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
161 * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
162 * @min_delta_ftm: Minimal time between two consecutive measurements,
163 * in units of 100us. 0 means no preference by station
164 * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
165 * value be sent to the AP
166 * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
167 * value to be sent to the AP
168 * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
169 * value to be sent to the AP
170 */
171struct iwl_tof_range_req_ext_cmd {
172	__le32 sub_grp_cmd_id;	/* presumably TOF_RANGE_REQ_EXT_CMD */
173	__le16 tsf_timer_offset_msec;
174	__le16 reserved;	/* padding for alignment — TODO confirm */
175	u8 min_delta_ftm;	/* units of 100us; 0 = no preference (see kerneldoc) */
176	u8 ftm_format_and_bw20M;
177	u8 ftm_format_and_bw40M;
178	u8 ftm_format_and_bw80M;
179} __packed;
180
181#define IWL_MVM_TOF_MAX_APS 21
182
183/**
184 * struct iwl_tof_range_req_ap_entry - AP configuration parameters
185 * @channel_num: Current AP Channel
186 * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
187 * @tsf_delta_direction: TSF relatively to the subject AP
188 * @ctrl_ch_position: Coding of the control channel position relative to the
189 * center frequency.
190 * 40MHz 0 below center, 1 above center
191 * 80MHz bits [0..1]: 0 the near 20MHz to the center,
192 * 1 the far 20MHz to the center
193 * bit[2] as above 40MHz
194 * @bssid: AP's bss id
195 * @measure_type: Measurement type: 0 - two sided, 1 - One sided
196 * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
197 * number of measurement iterations (min 2^0 = 1, max 2^14)
198 * @burst_period: Recommended value to be sent to the AP. Measurement
199 * periodicity In units of 100ms. ignored if num_of_bursts = 0
200 * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
201 * 1-sided: how many rts/cts pairs should be used per burst.
202 * @retries_per_sample: Max number of retries that the LMAC should send
203 * in case of no replies by the AP.
204 * @tsf_delta: TSF Delta in units of microseconds.
205 * The difference between the AP TSF and the device local clock.
206 * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
207 * Bit[1] Civic should be sent in the FTMR
208 * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
209 * @enable_dyn_ack: Enable Dynamic ACK BW.
210 * 0 Initiator interact with regular AP
211 * 1 Initiator interact with Responder machine: need to send the
212 * Initiator Acks with HT 40MHz / 80MHz, since the Responder should
213 * use it for its ch est measurement (this flag will be set when we
214 * configure the opposite machine to be Responder).
215 * @rssi: Last received value
216 *	legal values: -128..0 (0x7f); values above 0x0 indicate an invalid value.
217 */
218struct iwl_tof_range_req_ap_entry {	/* field semantics in the kerneldoc above */
219	u8 channel_num;
220	u8 bandwidth;
221	u8 tsf_delta_direction;
222	u8 ctrl_ch_position;
223	u8 bssid[ETH_ALEN];
224	u8 measure_type;	/* 0 - two sided, 1 - one sided */
225	u8 num_of_bursts;	/* 2s exponent: actual bursts = 2^num_of_bursts */
226	__le16 burst_period;	/* units of 100ms; ignored if num_of_bursts = 0 */
227	u8 samples_per_burst;
228	u8 retries_per_sample;
229	__le32 tsf_delta;	/* usec; AP TSF minus local clock (see kerneldoc) */
230	u8 location_req;	/* bit0 LCI, bit1 Civic in the FTMR */
231	u8 asap_mode;
232	u8 enable_dyn_ack;
233	s8 rssi;	/* valid range -128..0; values > 0 indicate invalid */
234} __packed;
235
236/**
237 * enum iwl_tof_response_mode
238 * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
239 * possible (not supported for this release)
240 * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
241 * timeout expiration
242 * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
243 * earlier of: measurements completion / timeout
244 * expiration.
245 */
246enum iwl_tof_response_mode {
247	IWL_MVM_TOF_RESPOSE_ASAP = 1,	/* note: values start at 1, not 0 */
248	IWL_MVM_TOF_RESPOSE_TIMEOUT,	/* = 2 */
249	IWL_MVM_TOF_RESPOSE_COMPLETE,	/* = 3 */
250};
251
252/**
253 * struct iwl_tof_range_req_cmd - start measurement cmd
254 * @request_id: A Token incremented per request. The same Token will be
255 * sent back in the range response
256 * @initiator: 0- NW initiated, 1 - Client Initiated
257 * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
258 * '1' - run ML-Algo for ToF only
259 * @req_timeout: Requested timeout of the response in units of 100ms.
260 * This is equivalent to the session time configured to the
261 * LMAC in Initiator Request
262 * @report_policy: Supported partially for this release: For current release -
263 * the range report will be uploaded as a batch when ready or
264 * when the session is done (successfully / partially).
265 * one of iwl_tof_response_mode.
266 * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
267 * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
268 * '1' Use MAC Address randomization according to the below
269 * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
270 * Bits set to 1 shall be randomized by the UMAC
271 */
272struct iwl_tof_range_req_cmd {
273	__le32 sub_grp_cmd_id;	/* presumably TOF_RANGE_REQ_CMD */
274	u8 request_id;	/* token echoed back in iwl_tof_range_rsp_ntfy */
275	u8 initiator;
276	u8 one_sided_los_disable;
277	u8 req_timeout;	/* units of 100ms */
278	u8 report_policy;	/* one of enum iwl_tof_response_mode */
279	u8 los_det_disable;	/* NOTE(review): not in the kerneldoc above; presumably disables LOS detection — confirm */
280	u8 num_of_ap;	/* number of valid entries in ap[]; must be <= IWL_MVM_TOF_MAX_APS */
281	u8 macaddr_random;
282	u8 macaddr_template[ETH_ALEN];
283	u8 macaddr_mask[ETH_ALEN];	/* 1-bits randomized by UMAC, 0-bits from template */
284	struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
285} __packed;
286
287/**
288 * struct iwl_tof_gen_resp_cmd - generic ToF response
289 */
290struct iwl_tof_gen_resp_cmd {
291	__le32 sub_grp_cmd_id;	/* identifies which ToF response follows */
292	u8 data[];	/* flexible array: payload format depends on sub_grp_cmd_id */
293} __packed;
294
295/**
296 * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
297 * @measure_status: current APs measurement status
298 * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
299 * @rtt: The Round Trip Time that took for the last measurement for
300 * current AP [nSec]
301 * @rtt_variance: The Variance of the RTT values measured for current AP
302 * @rtt_spread: The Difference between the maximum and the minimum RTT
303 * values measured for current AP in the current session [nsec]
304 * @rssi: RSSI as uploaded in the Channel Estimation notification
305 * @rssi_spread: The Difference between the maximum and the minimum RSSI values
306 * measured for current AP in the current session
307 * @range: Measured range [cm]
308 * @range_variance: Measured range variance [cm]
309 * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
310 * uploaded by the LMAC
311 */
312struct iwl_tof_range_rsp_ap_entry_ntfy {
313	u8 bssid[ETH_ALEN];	/* measured AP's BSSID (not listed in the kerneldoc above) */
314	u8 measure_status;
315	u8 measure_bw;	/* 0 20MHz, 1 40MHz, 2 80MHz */
316	__le32 rtt;	/* nsec */
317	__le32 rtt_variance;
318	__le32 rtt_spread;	/* max-min RTT in this session, nsec */
319	s8 rssi;
320	u8 rssi_spread;
321	__le16 reserved;	/* padding for alignment — TODO confirm */
322	__le32 range;	/* cm */
323	__le32 range_variance;	/* cm */
324	__le32 timestamp;	/* GP2 clock, usec */
325} __packed;
326
327/**
328 * struct iwl_tof_range_rsp_ntfy -
329 * @request_id: A Token ID of the corresponding Range request
330 * @request_status: status of current measurement session
331 * @last_in_batch: report policy (when not all responses are uploaded at once)
332 * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
333 */
334struct iwl_tof_range_rsp_ntfy {
335	u8 request_id;	/* token from the matching iwl_tof_range_req_cmd */
336	u8 request_status;
337	u8 last_in_batch;
338	u8 num_of_aps;	/* number of valid entries in ap[] below */
339	struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
340} __packed;
341
342#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
343/**
344 * struct iwl_tof_mcsi_notif - used for debug
345 * @token: token ID for the current session
346 * @role: '0' - initiator, '1' - responder
347 * @initiator_bssid: initiator machine
348 * @responder_bssid: responder machine
349 * @mcsi_buffer: debug data
350 */
351struct iwl_tof_mcsi_notif {
352	u8 token;
353	u8 role;	/* 0 - initiator, 1 - responder */
354	__le16 reserved;	/* padding for alignment — TODO confirm */
355	u8 initiator_bssid[ETH_ALEN];
356	u8 responder_bssid[ETH_ALEN];
357	u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];	/* 245 * 4 = 980 bytes of debug data */
358} __packed;
359
360/**
361 * struct iwl_tof_neighbor_report
362 * @bssid: BSSID of the AP which sent the report
363 * @request_token: same token as the corresponding request
364 * @status:
365 * @report_ie_len: the length of the response frame starting from the Element ID
366 * @data: the IEs
367 */
368struct iwl_tof_neighbor_report {
369	u8 bssid[ETH_ALEN];
370	u8 request_token;	/* same token as the corresponding request */
371	u8 status;
372	__le16 report_ie_len;	/* length of the frame starting from the Element ID */
373	u8 data[];	/* flexible array: the IEs, report_ie_len bytes */
374} __packed;
375
376/**
377 * struct iwl_tof_range_abort_cmd
378 * @request_id: corresponds to a range request
379 */
380struct iwl_tof_range_abort_cmd {
381	__le32 sub_grp_cmd_id;	/* presumably TOF_RANGE_ABORT_CMD */
382	u8 request_id;	/* token of the range request to abort */
383	u8 reserved[3];	/* pad struct to a 4-byte multiple */
384} __packed;
385
386#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
new file mode 100644
index 000000000000..853698ab8b05
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -0,0 +1,646 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_api_tx_h__
64#define __fw_api_tx_h__
65
66/**
67 * enum iwl_tx_flags - bitmasks for tx_flags in TX command
68 * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
69 * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame
70 * @TX_CMD_FLG_ACK: expect ACK from receiving station
71 * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
72 * Otherwise, use rate_n_flags from the TX command
73 * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
74 * Must set TX_CMD_FLG_ACK with this flag.
75 * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
76 * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
77 * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
78 * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
79 * on old firmwares).
80 * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
81 * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
82 * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
83 * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
84 * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
85 * Should be set for beacons and probe responses
86 * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
87 * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
88 * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
89 * Should be set for 26/30 length MAC headers
90 * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
91 * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
92 * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
93 * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
94 * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
95 * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
96 * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
97 * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
98 */
 99enum iwl_tx_flags {
100	TX_CMD_FLG_PROT_REQUIRE = BIT(0),
101	TX_CMD_FLG_WRITE_TX_POWER = BIT(1),
102	TX_CMD_FLG_ACK = BIT(3),
103	TX_CMD_FLG_STA_RATE = BIT(4),
104	TX_CMD_FLG_BAR = BIT(6),
105	TX_CMD_FLG_TXOP_PROT = BIT(7),	/* NOTE(review): not in the kerneldoc above; presumably TXOP protection — confirm */
106	TX_CMD_FLG_VHT_NDPA = BIT(8),
107	TX_CMD_FLG_HT_NDPA = BIT(9),
108	TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
109	TX_CMD_FLG_BT_PRIO_POS = 11,	/* bit position, not a mask (unlike the others) */
110	TX_CMD_FLG_BT_DIS = BIT(12),
111	TX_CMD_FLG_SEQ_CTL = BIT(13),
112	TX_CMD_FLG_MORE_FRAG = BIT(14),
113	TX_CMD_FLG_TSF = BIT(16),
114	TX_CMD_FLG_CALIB = BIT(17),
115	TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
116	TX_CMD_FLG_MH_PAD = BIT(20),
117	TX_CMD_FLG_RESP_TO_DRV = BIT(21),
118	TX_CMD_FLG_CCMP_AGG = BIT(22),
119	TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
120	TX_CMD_FLG_DUR = BIT(25),
121	TX_CMD_FLG_FW_DROP = BIT(26),
122	TX_CMD_FLG_EXEC_PAPD = BIT(27),
123	TX_CMD_FLG_PAPD_TYPE = BIT(28),
124	TX_CMD_FLG_HCCA_CHUNK = BIT(31)
125}; /* TX_FLAGS_BITS_API_S_VER_1 */
126
127/**
128 * enum iwl_tx_pm_timeouts - pm timeout values in TX command
129 * @PM_FRAME_NONE: no need to suspend sleep mode
130 * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
131 * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
132 */
133enum iwl_tx_pm_timeouts {
134	PM_FRAME_NONE = 0,	/* no need to suspend sleep mode */
135	PM_FRAME_MGMT = 2,	/* fw suspends sleep mode for 100TU; value 1 is not defined here */
136	PM_FRAME_ASSOC = 3,	/* fw suspends sleep mode for 10sec */
137};
138
139/*
140 * TX command security control
141 */
142#define TX_CMD_SEC_WEP 0x01	/* cipher type: WEP */
143#define TX_CMD_SEC_CCM 0x02	/* cipher type: CCMP */
144#define TX_CMD_SEC_TKIP 0x03	/* cipher type: TKIP */
145#define TX_CMD_SEC_EXT 0x04	/* cipher type: external */
146#define TX_CMD_SEC_MSK 0x07	/* mask covering the cipher-type values above */
147#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
148#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0	/* WEP key index lives in bits 7:6 */
149#define TX_CMD_SEC_KEY128 0x08	/* NOTE(review): presumably selects 128-bit WEP key — confirm */
150
151/* TODO: how are these values OK with only a 16 bit variable??? */
152/*
153 * TX command next frame info
154 *
155 * bits 0:2 - security control (TX_CMD_SEC_*)
156 * bit 3 - immediate ACK required
157 * bit 4 - rate is taken from STA table
158 * bit 5 - frame belongs to BA stream
159 * bit 6 - immediate BA response expected
160 * bit 7 - unused
161 * bits 8:15 - Station ID
162 * bits 16:31 - rate
163 */
164#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8)		/* bit 3 of the layout above */
165#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)	/* bit 4 */
166#define TX_CMD_NEXT_FRAME_BA_MSK (0x20)		/* bit 5 */
167#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)	/* bit 6 */
168#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)	/* bits 3:7 combined */
169#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)	/* bits 8:15 */
170#define TX_CMD_NEXT_FRAME_STA_ID_POS (8)
171#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)	/* bits 16:31 */
172#define TX_CMD_NEXT_FRAME_RATE_POS (16)
173
174/*
175 * TX command Frame life time in us - to be written in pm_frame_timeout
176 */
177#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF	/* frame never expires */
178#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/
179#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
180#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0	/* drop immediately */
181
182/*
183 * TID for non QoS frames - to be written in tid_tspec
184 */
185#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
186
187/*
188 * Limits on the retransmissions - to be written in {data,rts}_retry_limit
189 */
190#define IWL_DEFAULT_TX_RETRY 15
191#define IWL_MGMT_DFAULT_RETRY_LIMIT 3	/* NOTE(review): 'DFAULT' spelling is part of the identifier; callers use it as-is */
192#define IWL_RTS_DFAULT_RETRY_LIMIT 60
193#define IWL_BAR_DFAULT_RETRY_LIMIT 60
194#define IWL_LOW_RETRY_LIMIT 7
195
196/* TODO: complete documentation for try_cnt and btkill_cnt */
197/**
198 * struct iwl_tx_cmd - TX command struct to FW
199 * ( TX_CMD = 0x1c )
200 * @len: in bytes of the payload, see below for details
201 * @tx_flags: combination of TX_CMD_FLG_*
202 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
203 * cleared. Combination of RATE_MCS_*
204 * @sta_id: index of destination station in FW station table
205 * @sec_ctl: security control, TX_CMD_SEC_*
206 * @initial_rate_index: index into the rate table for initial TX attempt.
207 * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
208 * @key: security key
209 * @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
210 * @life_time: frame life time (usecs??)
211 * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
212 * btkill_cnd + reserved), first 32 bits. "0" disables usage.
213 * @dram_msb_ptr: upper bits of the scratch physical address
214 * @rts_retry_limit: max attempts for RTS
215 * @data_retry_limit: max attempts to send the data packet
216 * @tid_spec: TID/tspec
217 * @pm_frame_timeout: PM TX frame timeout
218 *
219 * The byte count (both len and next_frame_len) includes MAC header
220 * (24/26/30/32 bytes)
221 * + 2 bytes pad if 26/30 header size
222 * + 8 byte IV for CCM or TKIP (not used for WEP)
223 * + Data payload
224 * + 8-byte MIC (not used for CCM/WEP)
225 * It does not include post-MAC padding, i.e.,
226 * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
227 * Range of len: 14-2342 bytes.
228 *
229 * After the struct fields the MAC header is placed, plus any padding,
230 * and then the actual payload.
231 */
232struct iwl_tx_cmd {	/* field semantics in the kerneldoc above */
233	__le16 len;
234	__le16 next_frame_len;
235	__le32 tx_flags;	/* combination of TX_CMD_FLG_* */
236	struct {
237		u8 try_cnt;
238		u8 btkill_cnt;
239		__le16 reserved;
240	} scratch; /* DRAM_SCRATCH_API_U_VER_1 */
241	__le32 rate_n_flags;
242	u8 sta_id;
243	u8 sec_ctl;	/* TX_CMD_SEC_* */
244	u8 initial_rate_index;
245	u8 reserved2;
246	u8 key[16];
247	__le32 reserved3;
248	__le32 life_time;	/* TX_CMD_LIFE_TIME_* */
249	__le32 dram_lsb_ptr;	/* 0 disables scratch usage (see kerneldoc) */
250	u8 dram_msb_ptr;
251	u8 rts_retry_limit;
252	u8 data_retry_limit;
253	u8 tid_tspec;
254	__le16 pm_frame_timeout;	/* enum iwl_tx_pm_timeouts */
255	__le16 reserved4;
256	u8 payload[0];	/* zero-length overlay: frame bytes follow the struct */
257	struct ieee80211_hdr hdr[0];	/* same offset as payload: MAC-header view of those bytes */
258} __packed; /* TX_CMD_API_S_VER_3 */
259
260/*
261 * TX response related data
262 */
263
264/*
265 * enum iwl_tx_status - status that is returned by the fw after attempts to Tx
266 * @TX_STATUS_SUCCESS:
267 * @TX_STATUS_DIRECT_DONE:
268 * @TX_STATUS_POSTPONE_DELAY:
269 * @TX_STATUS_POSTPONE_FEW_BYTES:
270 * @TX_STATUS_POSTPONE_BT_PRIO:
271 * @TX_STATUS_POSTPONE_QUIET_PERIOD:
272 * @TX_STATUS_POSTPONE_CALC_TTAK:
273 * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
274 * @TX_STATUS_FAIL_SHORT_LIMIT:
275 * @TX_STATUS_FAIL_LONG_LIMIT:
276 * @TX_STATUS_FAIL_UNDERRUN:
277 * @TX_STATUS_FAIL_DRAIN_FLOW:
278 * @TX_STATUS_FAIL_RFKILL_FLUSH:
279 * @TX_STATUS_FAIL_LIFE_EXPIRE:
280 * @TX_STATUS_FAIL_DEST_PS:
281 * @TX_STATUS_FAIL_HOST_ABORTED:
282 * @TX_STATUS_FAIL_BT_RETRY:
283 * @TX_STATUS_FAIL_STA_INVALID:
284 * @TX_STATUS_FAIL_FRAG_DROPPED:
285 * @TX_STATUS_FAIL_TID_DISABLE:
286 * @TX_STATUS_FAIL_FIFO_FLUSHED:
287 * @TX_STATUS_FAIL_SMALL_CF_POLL:
288 * @TX_STATUS_FAIL_FW_DROP:
289 * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
290 * STA table
291 * @TX_STATUS_INTERNAL_ABORT:
292 * @TX_MODE_MSK:
293 * @TX_MODE_NO_BURST:
294 * @TX_MODE_IN_BURST_SEQ:
295 * @TX_MODE_FIRST_IN_BURST:
296 * @TX_QUEUE_NUM_MSK:
297 *
298 * Valid only if frame_count =1
299 * TODO: complete documentation
300 */
301enum iwl_tx_status {
302	TX_STATUS_MSK = 0x000000ff,	/* status lives in the low byte */
303	TX_STATUS_SUCCESS = 0x01,
304	TX_STATUS_DIRECT_DONE = 0x02,
305	/* postpone TX */
306	TX_STATUS_POSTPONE_DELAY = 0x40,
307	TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
308	TX_STATUS_POSTPONE_BT_PRIO = 0x42,
309	TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
310	TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
311	/* abort TX */
312	TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
313	TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
314	TX_STATUS_FAIL_LONG_LIMIT = 0x83,
315	TX_STATUS_FAIL_UNDERRUN = 0x84,
316	TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
317	TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
318	TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
319	TX_STATUS_FAIL_DEST_PS = 0x88,
320	TX_STATUS_FAIL_HOST_ABORTED = 0x89,
321	TX_STATUS_FAIL_BT_RETRY = 0x8a,
322	TX_STATUS_FAIL_STA_INVALID = 0x8b,
323	TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
324	TX_STATUS_FAIL_TID_DISABLE = 0x8d,
325	TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
326	TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
327	TX_STATUS_FAIL_FW_DROP = 0x90,
328	TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
329	TX_STATUS_INTERNAL_ABORT = 0x92,
330	TX_MODE_MSK = 0x00000f00,
331	TX_MODE_NO_BURST = 0x00000000,
332	TX_MODE_IN_BURST_SEQ = 0x00000100,
333	TX_MODE_FIRST_IN_BURST = 0x00000200,
334	TX_QUEUE_NUM_MSK = 0x0001f000,
335	/* NOTE(review): TX_NARROW_BW_* are not in the kerneldoc above; presumably fractional-bandwidth TX indication — confirm */
336	TX_NARROW_BW_MSK = 0x00060000,
337	TX_NARROW_BW_1DIV2 = 0x00020000,
338	TX_NARROW_BW_1DIV4 = 0x00040000,
339	TX_NARROW_BW_1DIV8 = 0x00060000,
340};
340
341/*
342 * enum iwl_tx_agg_status - TX aggregation status
343 * @AGG_TX_STATE_STATUS_MSK:
344 * @AGG_TX_STATE_TRANSMITTED:
345 * @AGG_TX_STATE_UNDERRUN:
346 * @AGG_TX_STATE_BT_PRIO:
347 * @AGG_TX_STATE_FEW_BYTES:
348 * @AGG_TX_STATE_ABORT:
349 * @AGG_TX_STATE_LAST_SENT_TTL:
350 * @AGG_TX_STATE_LAST_SENT_TRY_CNT:
351 * @AGG_TX_STATE_LAST_SENT_BT_KILL:
352 * @AGG_TX_STATE_SCD_QUERY:
353 * @AGG_TX_STATE_TEST_BAD_CRC32:
354 * @AGG_TX_STATE_RESPONSE:
355 * @AGG_TX_STATE_DUMP_TX:
356 * @AGG_TX_STATE_DELAY_TX:
357 * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
358 * occur if tx failed for this frame when it was a member of a previous
359 * aggregation block). If rate scaling is used, retry count indicates the
360 * rate table entry used for all frames in the new agg.
361 * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
362 * this frame
363 *
364 * TODO: complete documentation
365 */
366enum iwl_tx_agg_status {
367	AGG_TX_STATE_STATUS_MSK = 0x00fff,	/* status lives in the low 12 bits */
368	AGG_TX_STATE_TRANSMITTED = 0x000,
369	AGG_TX_STATE_UNDERRUN = 0x001,
370	AGG_TX_STATE_BT_PRIO = 0x002,
371	AGG_TX_STATE_FEW_BYTES = 0x004,
372	AGG_TX_STATE_ABORT = 0x008,
373	AGG_TX_STATE_LAST_SENT_TTL = 0x010,
374	AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
375	AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
376	AGG_TX_STATE_SCD_QUERY = 0x080,
377	AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
378	AGG_TX_STATE_RESPONSE = 0x1ff,
379	AGG_TX_STATE_DUMP_TX = 0x200,
380	AGG_TX_STATE_DELAY_TX = 0x400,
381	AGG_TX_STATE_TRY_CNT_POS = 12,	/* bit position, not a mask */
382	AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
383};
384
385#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \
386 AGG_TX_STATE_LAST_SENT_TRY_CNT| \
387 AGG_TX_STATE_LAST_SENT_BT_KILL)
388
389/*
390 * The mask below describes a status where we are absolutely sure that the MPDU
391 * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've
392 * written the bytes to the TXE, but we know nothing about what the DSP did.
393 */
394#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \
395 AGG_TX_STATE_ABORT | \
396 AGG_TX_STATE_SCD_QUERY)
397
398/*
399 * REPLY_TX = 0x1c (response)
400 *
401 * This response may be in one of two slightly different formats, indicated
402 * by the frame_count field:
403 *
404 * 1) No aggregation (frame_count == 1). This reports Tx results for a single
405 * frame. Multiple attempts, at various bit rates, may have been made for
406 * this frame.
407 *
408 * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
409 * frames that used block-acknowledge. All frames were transmitted at
410 * same rate. Rate scaling may have been used if first frame in this new
411 * agg block failed in previous agg block(s).
412 *
413 * Note that, for aggregation, ACK (block-ack) status is not delivered
414 * here; block-ack has not been received by the time the device records
415 * this status.
416 * This status relates to reasons the tx might have been blocked or aborted
417 * within the device, rather than whether it was received successfully by
418 * the destination station.
419 */
420
421/**
422 * struct agg_tx_status - per packet TX aggregation status
423 * @status: enum iwl_tx_agg_status
424 * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
425 */
426struct agg_tx_status {
427	__le16 status;	/* enum iwl_tx_agg_status bits */
428	__le16 sequence;	/* sequence # of this frame's Tx cmd, not the 802.11 SSN */
429} __packed;
430
431/*
432 * definitions for initial rate index field
433 * bits [3:0] initial rate index
434 * bits [6:4] rate table color, used for the initial rate
435 * bit-7 invalid rate indication
436 */
437#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
438#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
439#define TX_RES_INV_RATE_INDEX_MSK 0x80
440
441#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
442#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
443
/**
 * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
 * ( REPLY_TX = 0x1c )
 * @frame_count: 1 no aggregation, >1 aggregation
 * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
 * @failure_rts: num of failures due to unsuccessful RTS
 * @failure_frame: num failures due to no ACK (unused for agg)
 * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
 *	Tx of all the batch. RATE_MCS_*
 * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
 *	for agg: RTS + CTS + aggregation tx time + block-ack time.
 *	in usec.
 * @pa_status: tx power info
 * @pa_integ_res_a: tx power info
 * @pa_integ_res_b: tx power info
 * @pa_integ_res_c: tx power info
 * @measurement_req_id: tx power info
 * @reduced_tpc: NOTE(review): undocumented in the original kerneldoc;
 *	presumably the transmit power reduction that was used — confirm
 *	against the firmware API
 * @reserved: reserved for alignment
 * @tfd_info: TFD information set by the FH
 * @seq_ctl: sequence control from the Tx cmd
 * @byte_cnt: byte count from the Tx cmd
 * @tlc_info: TLC rate info
 * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
 * @frame_ctrl: frame control
 * @status: for non-agg: frame status TX_STATUS_*
 *	for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
 *	follow this one, up to frame_count.
 *
 * After the array of statuses comes the SSN of the SCD. Look at
 * %iwl_mvm_get_scd_ssn for more details.
 */
struct iwl_mvm_tx_resp {
	u8 frame_count;
	u8 bt_kill_count;
	u8 failure_rts;
	u8 failure_frame;
	__le32 initial_rate;
	__le16 wireless_media_time;

	u8 pa_status;
	u8 pa_integ_res_a[3];
	u8 pa_integ_res_b[3];
	u8 pa_integ_res_c[3];
	__le16 measurement_req_id;
	u8 reduced_tpc;
	u8 reserved;

	__le32 tfd_info;
	__le16 seq_ctl;
	__le16 byte_cnt;
	u8 tlc_info;
	u8 ra_tid;
	__le16 frame_ctrl;

	struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_3 */
499
/**
 * struct iwl_mvm_ba_notif - notifies about reception of BA
 * ( BA_NOTIF = 0xc5 )
 * @sta_addr_lo32: lower 32 bits of the MAC address
 * @sta_addr_hi16: upper 16 bits of the MAC address
 * @reserved: reserved for alignment
 * @sta_id: Index of recipient (BA-sending) station in fw's station table
 * @tid: tid of the session
 * @seq_ctl: NOTE(review): left undescribed in the original kerneldoc;
 *	presumably the sequence control of the BA — confirm against the
 *	firmware API
 * @bitmap: the bitmap of the BA notification as seen in the air
 * @scd_flow: the tx queue this BA relates to
 * @scd_ssn: the index of the last contiguously sent packet
 * @txed: number of Txed frames in this batch
 * @txed_2_done: number of Acked frames in this batch
 * @reserved1: reserved for alignment
 */
struct iwl_mvm_ba_notif {
	__le32 sta_addr_lo32;
	__le16 sta_addr_hi16;
	__le16 reserved;

	u8 sta_id;
	u8 tid;
	__le16 seq_ctl;
	__le64 bitmap;
	__le16 scd_flow;
	__le16 scd_ssn;
	u8 txed;
	u8 txed_2_done;
	__le16 reserved1;
} __packed;
529
/**
 * struct iwl_mac_beacon_cmd - beacon template command
 * @tx: the tx commands associated with the beacon frame
 * @template_id: currently equal to the mac context id of the corresponding
 *	mac.
 * @tim_idx: the offset of the tim IE in the beacon
 * @tim_size: the length of the tim IE
 * @frame: the template of the beacon frame
 */
struct iwl_mac_beacon_cmd {
	struct iwl_tx_cmd tx;
	__le32 template_id;
	__le32 tim_idx;
	__le32 tim_size;
	/* variable-length beacon template follows the fixed fields */
	struct ieee80211_hdr frame[0];
} __packed;
546
/**
 * struct iwl_beacon_notif - beacon notification (without GP2 timestamp)
 * @beacon_notify_hdr: Tx response for the transmitted beacon
 * @tsf: last beacon tsf — NOTE(review): descriptions inferred from the
 *	sibling struct iwl_extended_beacon_notif; confirm
 * @ibss_mgr_status: whether IBSS is manager
 */
struct iwl_beacon_notif {
	struct iwl_mvm_tx_resp beacon_notify_hdr;
	__le64 tsf;
	__le32 ibss_mgr_status;
} __packed;
552
/**
 * struct iwl_extended_beacon_notif - notifies about beacon transmission
 * @beacon_notify_hdr: tx response command associated with the beacon
 * @tsf: last beacon tsf
 * @ibss_mgr_status: whether IBSS is manager
 * @gp2: last beacon time in gp2
 */
struct iwl_extended_beacon_notif {
	struct iwl_mvm_tx_resp beacon_notify_hdr;
	__le64 tsf;
	__le32 ibss_mgr_status;
	__le32 gp2;
} __packed; /* BEACON_NTFY_API_S_VER_5 */
566
/**
 * enum iwl_dump_control - dump (flush) control flags
 * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
 *	and the TFD queues are empty.
 *
 * NOTE(review): the first flag is BIT(1), not BIT(0) — BIT(0) appears to be
 * unused/reserved by the firmware API; confirm before adding new flags.
 */
enum iwl_dump_control {
	DUMP_TX_FIFO_FLUSH = BIT(1),
};
575
/**
 * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
 * @queues_ctl: bitmap of queues to flush
 * @flush_ctl: control flags (see enum iwl_dump_control)
 * @reserved: reserved
 */
struct iwl_tx_path_flush_cmd {
	__le32 queues_ctl;
	__le16 flush_ctl;
	__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
587
588/**
589 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
590 * @tx_resp: the Tx response from the fw (agg or non-agg)
591 *
592 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
593 * it can't know that everything will go well until the end of the AMPDU, it
594 * can't know in advance the number of MPDUs that will be sent in the current
595 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
596 * Hence, it can't know in advance what the SSN of the SCD will be at the end
597 * of the batch. This is why the SSN of the SCD is written at the end of the
598 * whole struct at a variable offset. This function knows how to cope with the
599 * variable offset and returns the SSN of the SCD.
600 */
601static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
602{
603 return le32_to_cpup((__le32 *)&tx_resp->status +
604 tx_resp->frame_count) & 0xfff;
605}
606
/**
 * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
 * @token: NOTE(review): undescribed in the original kerneldoc; echoed back
 *	in struct iwl_scd_txq_cfg_rsp — confirm exact semantics
 * @sta_id: station id
 * @tid: traffic identifier — NOTE(review): original left this undescribed
 * @scd_queue: scheduler queue to config
 * @enable: 1 queue enable, 0 queue disable
 * @aggregate: 1 aggregated queue, 0 otherwise
 * @tx_fifo: %enum iwl_mvm_tx_fifo
 * @window: BA window size
 * @ssn: SSN for the BA agreement
 * @reserved: reserved for alignment
 */
struct iwl_scd_txq_cfg_cmd {
	u8 token;
	u8 sta_id;
	u8 tid;
	u8 scd_queue;
	u8 enable;
	u8 aggregate;
	u8 tx_fifo;
	u8 window;
	__le16 ssn;
	__le16 reserved;
} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
631
/**
 * struct iwl_scd_txq_cfg_rsp - response to struct iwl_scd_txq_cfg_cmd
 * @token: taken from the command
 * @sta_id: station id from the command
 * @tid: tid from the command
 * @scd_queue: scd_queue from the command
 */
struct iwl_scd_txq_cfg_rsp {
	u8 token;
	u8 sta_id;
	u8 tid;
	u8 scd_queue;
} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
645
646#endif /* __fw_api_tx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
new file mode 100644
index 000000000000..181590fbd3b3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -0,0 +1,1773 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __fw_api_h__
67#define __fw_api_h__
68
69#include "fw-api-rs.h"
70#include "fw-api-rx.h"
71#include "fw-api-tx.h"
72#include "fw-api-sta.h"
73#include "fw-api-mac.h"
74#include "fw-api-power.h"
75#include "fw-api-d3.h"
76#include "fw-api-coex.h"
77#include "fw-api-scan.h"
78#include "fw-api-stats.h"
79#include "fw-api-tof.h"
80
/* Tx queue numbers: fixed queue assignments used by the driver */
enum {
	IWL_MVM_OFFCHANNEL_QUEUE = 8,
	IWL_MVM_CMD_QUEUE = 9,
};
86
/* Tx FIFO numbers; the values are firmware ABI — the gaps at 4 and 6 are
 * presumably reserved by the firmware (NOTE(review): confirm). */
enum iwl_mvm_tx_fifo {
	IWL_MVM_TX_FIFO_BK = 0,
	IWL_MVM_TX_FIFO_BE,
	IWL_MVM_TX_FIFO_VI,
	IWL_MVM_TX_FIFO_VO,
	IWL_MVM_TX_FIFO_MCAST = 5,
	IWL_MVM_TX_FIFO_CMD = 7,
};
95
96#define IWL_MVM_STATION_COUNT 16
97
98#define IWL_MVM_TDLS_STA_COUNT 4
99
/* commands
 *
 * Host command and notification IDs. The numeric values are firmware ABI and
 * must not change; entries are grouped by function, not sorted numerically.
 */
enum {
	MVM_ALIVE = 0x1,
	REPLY_ERROR = 0x2,
	ECHO_CMD = 0x3,

	INIT_COMPLETE_NOTIF = 0x4,

	/* PHY context commands */
	PHY_CONTEXT_CMD = 0x8,
	DBG_CFG = 0x9,
	ANTENNA_COUPLING_NOTIFICATION = 0xa,

	/* UMAC scan commands */
	SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
	SCAN_CFG_CMD = 0xc,
	SCAN_REQ_UMAC = 0xd,
	SCAN_ABORT_UMAC = 0xe,
	SCAN_COMPLETE_UMAC = 0xf,

	/* station table */
	ADD_STA_KEY = 0x17,
	ADD_STA = 0x18,
	REMOVE_STA = 0x19,

	/* paging get item */
	FW_GET_ITEM_CMD = 0x1a,

	/* TX */
	TX_CMD = 0x1c,
	TXPATH_FLUSH = 0x1e,
	MGMT_MCAST_KEY = 0x1f,

	/* scheduler config */
	SCD_QUEUE_CFG = 0x1d,

	/* global key */
	WEP_KEY = 0x20,

	/* Memory */
	SHARED_MEM_CFG = 0x25,

	/* TDLS */
	TDLS_CHANNEL_SWITCH_CMD = 0x27,
	TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
	TDLS_CONFIG_CMD = 0xa7,

	/* MAC and Binding commands */
	MAC_CONTEXT_CMD = 0x28,
	TIME_EVENT_CMD = 0x29, /* both CMD and response */
	TIME_EVENT_NOTIFICATION = 0x2a,
	BINDING_CONTEXT_CMD = 0x2b,
	TIME_QUOTA_CMD = 0x2c,
	NON_QOS_TX_COUNTER_CMD = 0x2d,

	LQ_CMD = 0x4e,

	/* paging block to FW cpu2 */
	FW_PAGING_BLOCK_CMD = 0x4f,

	/* Scan offload */
	SCAN_OFFLOAD_REQUEST_CMD = 0x51,
	SCAN_OFFLOAD_ABORT_CMD = 0x52,
	HOT_SPOT_CMD = 0x53,
	SCAN_OFFLOAD_COMPLETE = 0x6D,
	SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
	SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
	MATCH_FOUND_NOTIFICATION = 0xd9,
	SCAN_ITERATION_COMPLETE = 0xe7,

	/* Phy */
	PHY_CONFIGURATION_CMD = 0x6a,
	CALIB_RES_NOTIF_PHY_DB = 0x6b,
	/* PHY_DB_CMD = 0x6c, */

	/* ToF - 802.11mc FTM */
	TOF_CMD = 0x10,
	TOF_NOTIFICATION = 0x11,

	/* Power - legacy power table command */
	POWER_TABLE_CMD = 0x77,
	PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
	LTR_CONFIG = 0xee,

	/* Thermal Throttling */
	REPLY_THERMAL_MNG_BACKOFF = 0x7e,

	/* Set/Get DC2DC frequency tune */
	DC2DC_CONFIG_CMD = 0x83,

	/* NVM */
	NVM_ACCESS_CMD = 0x88,

	SET_CALIB_DEFAULT_CMD = 0x8e,

	BEACON_NOTIFICATION = 0x90,
	BEACON_TEMPLATE_CMD = 0x91,
	TX_ANT_CONFIGURATION_CMD = 0x98,
	STATISTICS_CMD = 0x9c,
	STATISTICS_NOTIFICATION = 0x9d,
	EOSP_NOTIFICATION = 0x9e,
	REDUCE_TX_POWER_CMD = 0x9f,

	/* RF-KILL commands and notifications */
	CARD_STATE_CMD = 0xa0,
	CARD_STATE_NOTIFICATION = 0xa1,

	MISSED_BEACONS_NOTIFICATION = 0xa2,

	/* Power - new power table command */
	MAC_PM_POWER_TABLE = 0xa9,

	MFUART_LOAD_NOTIFICATION = 0xb1,

	REPLY_RX_PHY_CMD = 0xc0,
	REPLY_RX_MPDU_CMD = 0xc1,
	BA_NOTIF = 0xc5,

	/* Location Aware Regulatory */
	MCC_UPDATE_CMD = 0xc8,
	MCC_CHUB_UPDATE_CMD = 0xc9,

	MARKER_CMD = 0xcb,

	/* BT Coex */
	BT_COEX_PRIO_TABLE = 0xcc,
	BT_COEX_PROT_ENV = 0xcd,
	BT_PROFILE_NOTIFICATION = 0xce,
	BT_CONFIG = 0x9b,
	BT_COEX_UPDATE_SW_BOOST = 0x5a,
	BT_COEX_UPDATE_CORUN_LUT = 0x5b,
	BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
	BT_COEX_CI = 0x5d,

	REPLY_SF_CFG_CMD = 0xd1,
	REPLY_BEACON_FILTERING_CMD = 0xd2,

	/* DTS measurements */
	CMD_DTS_MEASUREMENT_TRIGGER = 0xdc,
	DTS_MEASUREMENT_NOTIFICATION = 0xdd,

	REPLY_DEBUG_CMD = 0xf0,
	DEBUG_LOG_MSG = 0xf7,

	BCAST_FILTER_CMD = 0xcf,
	MCAST_FILTER_CMD = 0xd0,

	/* D3 commands/notifications */
	D3_CONFIG_CMD = 0xd3,
	PROT_OFFLOAD_CONFIG_CMD = 0xd4,
	OFFLOADS_QUERY_CMD = 0xd5,
	REMOTE_WAKE_CONFIG_CMD = 0xd6,
	D0I3_END_CMD = 0xed,

	/* for WoWLAN in particular */
	WOWLAN_PATTERNS = 0xe0,
	WOWLAN_CONFIGURATION = 0xe1,
	WOWLAN_TSC_RSC_PARAM = 0xe2,
	WOWLAN_TKIP_PARAM = 0xe3,
	WOWLAN_KEK_KCK_MATERIAL = 0xe4,
	WOWLAN_GET_STATUSES = 0xe5,
	WOWLAN_TX_POWER_PER_DB = 0xe6,

	/* and for NetDetect */
	SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
	SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58,
	SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59,

	REPLY_MAX = 0xff,
};
270
/* sub-command ids for the PHY_OPS_GROUP command group (see below) */
enum iwl_phy_ops_subcmd_ids {
	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
275
/* command groups */
enum {
	PHY_OPS_GROUP = 0x4,
};
280
/**
 * struct iwl_cmd_response - generic response struct for most commands
 * @status: status of the command asked, changes for each one
 *
 * Single __le32 member, so the layout is unambiguous without __packed.
 */
struct iwl_cmd_response {
	__le32 status;
};
288
/**
 * struct iwl_tx_ant_cfg_cmd - TX antenna configuration
 * @valid: valid antenna configuration
 */
struct iwl_tx_ant_cfg_cmd {
	__le32 valid;
} __packed;
296
/**
 * struct iwl_calib_ctrl - calibration control
 *
 * Sent as part of the phy configuration command.
 *
 * @flow_trigger: bitmap for which calibrations to perform according to
 *	flow triggers (see enum iwl_calib_cfg)
 * @event_trigger: bitmap for which calibrations to perform according to
 *	event triggers (see enum iwl_calib_cfg)
 */
struct iwl_calib_ctrl {
	__le32 flow_trigger;
	__le32 event_trigger;
} __packed;
309
/* This enum defines the bitmap of various calibrations to enable in both
 * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
 */
enum iwl_calib_cfg {
	IWL_CALIB_CFG_XTAL_IDX = BIT(0),
	IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
	IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
	IWL_CALIB_CFG_PAPD_IDX = BIT(3),
	IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
	IWL_CALIB_CFG_DC_IDX = BIT(5),
	IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
	IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
	IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
	IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
	IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
	IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
	IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
	IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
	IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
	IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
	IWL_CALIB_CFG_DAC_IDX = BIT(16),
	IWL_CALIB_CFG_ABS_IDX = BIT(17),
	IWL_CALIB_CFG_AGC_IDX = BIT(18),
};
334
/**
 * struct iwl_phy_cfg_cmd - Phy configuration command
 * @phy_cfg: PHY configuration value (see the PHY_CFG_* masks below)
 * @calib_control: calibration control (flow/event trigger bitmaps)
 */
struct iwl_phy_cfg_cmd {
	__le32 phy_cfg;
	struct iwl_calib_ctrl calib_control;
} __packed;
342
343#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1))
344#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3))
345#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5))
346#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7))
347#define PHY_CFG_TX_CHAIN_A BIT(8)
348#define PHY_CFG_TX_CHAIN_B BIT(9)
349#define PHY_CFG_TX_CHAIN_C BIT(10)
350#define PHY_CFG_RX_CHAIN_A BIT(12)
351#define PHY_CFG_RX_CHAIN_B BIT(13)
352#define PHY_CFG_RX_CHAIN_C BIT(14)
353
354
/* Target of the NVM_ACCESS_CMD */
enum {
	NVM_ACCESS_TARGET_CACHE = 0,
	NVM_ACCESS_TARGET_OTP = 1,
	NVM_ACCESS_TARGET_EEPROM = 2,
};
361
/* Section types for NVM_ACCESS_CMD */
enum {
	NVM_SECTION_TYPE_SW = 1,
	NVM_SECTION_TYPE_REGULATORY = 3,
	NVM_SECTION_TYPE_CALIBRATION = 4,
	NVM_SECTION_TYPE_PRODUCTION = 5,
	NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
	NVM_SECTION_TYPE_PHY_SKU = 12,
	NVM_MAX_NUM_SECTIONS = 13,
};
372
/**
 * struct iwl_nvm_access_cmd - Request the device to send an NVM section
 * @op_code: 0 - read, 1 - write
 * @target: NVM_ACCESS_TARGET_*
 * @type: NVM_SECTION_TYPE_*
 * @offset: offset in bytes into the section
 * @length: in bytes, to read/write
 * @data: if write operation, the data to write. On read its empty
 */
struct iwl_nvm_access_cmd {
	u8 op_code;
	u8 target;
	__le16 type;
	__le16 offset;
	__le16 length;
	u8 data[];
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
390
391#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
392
/**
 * struct iwl_fw_paging_cmd - paging layout
 *
 * (FW_PAGING_BLOCK_CMD = 0x4f)
 *
 * Send to FW the paging layout in the driver.
 *
 * @flags: various flags for the command
 * @block_size: the block size in powers of 2
 * @block_num: number of blocks specified in the command.
 * @device_phy_addr: addresses of the paging blocks — NOTE(review): the
 *	original comment says "virtual addresses from device side" although
 *	the field name says "phy"; confirm which address space this is
 */
struct iwl_fw_paging_cmd {
	__le32 flags;
	__le32 block_size;
	__le32 block_num;
	__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
411
/*
 * Fw items ID's
 *
 * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will download —
 *	NOTE(review): original wording was garbled ("upload download");
 *	confirm the intended transfer direction
 */
enum iwl_fw_item_id {
	IWL_FW_ITEM_ID_PAGING = 3,
};
421
/**
 * struct iwl_fw_get_item_cmd - get an item from the fw
 * @item_id: one of enum iwl_fw_item_id
 */
struct iwl_fw_get_item_cmd {
	__le32 item_id;
} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
428
/* response to FW_GET_ITEM_CMD: echoes the item id, its size and value */
struct iwl_fw_get_item_resp {
	__le32 item_id;
	__le32 item_byte_cnt;
	__le32 item_val;
} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
434
/**
 * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
 * @offset: offset in bytes into the section
 * @length: in bytes, either how much was written or read
 * @type: NVM_SECTION_TYPE_*
 * @status: 0 for success, fail otherwise
 * @data: if read operation, the data returned. Empty on write.
 */
struct iwl_nvm_access_resp {
	__le16 offset;
	__le16 length;
	__le16 type;
	__le16 status;
	u8 data[];
} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
450
451/* MVM_ALIVE 0x1 */
452
453/* alive response is_valid values */
454#define ALIVE_RESP_UCODE_OK BIT(0)
455#define ALIVE_RESP_RFKILL BIT(1)
456
/* alive response ver_type values */
enum {
	FW_TYPE_HW = 0,
	FW_TYPE_PROT = 1,
	FW_TYPE_AP = 2,
	FW_TYPE_WOWLAN = 3,
	FW_TYPE_TIMING = 4,
	FW_TYPE_WIPAN = 5
};
466
/* alive response ver_subtype values */
enum {
	FW_SUBTYPE_FULL_FEATURE = 0,
	FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
	FW_SUBTYPE_REDUCED = 2,
	FW_SUBTYPE_ALIVE_ONLY = 3,
	FW_SUBTYPE_WOWLAN = 4,
	FW_SUBTYPE_AP_SUBTYPE = 5,
	FW_SUBTYPE_WIPAN = 6,
	FW_SUBTYPE_INITIALIZE = 9
};
478
479#define IWL_ALIVE_STATUS_ERR 0xDEAD
480#define IWL_ALIVE_STATUS_OK 0xCAFE
481
482#define IWL_ALIVE_FLG_RFKILL BIT(0)
483
/* alive response, firmware API version 1 */
struct mvm_alive_resp_ver1 {
	__le16 status;
	__le16 flags;
	u8 ucode_minor;
	u8 ucode_major;
	__le16 id;
	u8 api_minor;
	u8 api_major;
	u8 ver_subtype;
	u8 ver_type;
	u8 mac;
	u8 opt;
	__le16 reserved2;
	__le32 timestamp;
	__le32 error_event_table_ptr;	/* SRAM address for error log */
	__le32 log_event_table_ptr;	/* SRAM address for event log */
	__le32 cpu_register_ptr;
	__le32 dbgm_config_ptr;
	__le32 alive_counter_ptr;
	__le32 scd_base_ptr;		/* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */
505
/* alive response, firmware API version 2: adds store-and-forward info and
 * UMAC version/error-log fields to version 1 */
struct mvm_alive_resp_ver2 {
	__le16 status;
	__le16 flags;
	u8 ucode_minor;
	u8 ucode_major;
	__le16 id;
	u8 api_minor;
	u8 api_major;
	u8 ver_subtype;
	u8 ver_type;
	u8 mac;
	u8 opt;
	__le16 reserved2;
	__le32 timestamp;
	__le32 error_event_table_ptr;	/* SRAM address for error log */
	__le32 log_event_table_ptr;	/* SRAM address for LMAC event log */
	__le32 cpu_register_ptr;
	__le32 dbgm_config_ptr;
	__le32 alive_counter_ptr;
	__le32 scd_base_ptr;		/* SRAM address for SCD */
	__le32 st_fwrd_addr;		/* pointer to Store and forward */
	__le32 st_fwrd_size;
	u8 umac_minor;			/* UMAC version: minor */
	u8 umac_major;			/* UMAC version: major */
	__le16 umac_id;			/* UMAC version: id */
	__le32 error_info_addr;		/* SRAM address for UMAC error log */
	__le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_2 */
534
/* alive response, current firmware API (version 3): version fields widened
 * to 32 bits compared to version 2 */
struct mvm_alive_resp {
	__le16 status;
	__le16 flags;
	__le32 ucode_minor;
	__le32 ucode_major;
	u8 ver_subtype;
	u8 ver_type;
	u8 mac;
	u8 opt;
	__le32 timestamp;
	__le32 error_event_table_ptr;	/* SRAM address for error log */
	__le32 log_event_table_ptr;	/* SRAM address for LMAC event log */
	__le32 cpu_register_ptr;
	__le32 dbgm_config_ptr;
	__le32 alive_counter_ptr;
	__le32 scd_base_ptr;		/* SRAM address for SCD */
	__le32 st_fwrd_addr;		/* pointer to Store and forward */
	__le32 st_fwrd_size;
	__le32 umac_minor;		/* UMAC version: minor */
	__le32 umac_major;		/* UMAC version: major */
	__le32 error_info_addr;		/* SRAM address for UMAC error log */
	__le32 dbg_print_buff_addr;
} __packed; /* ALIVE_RES_API_S_VER_3 */
558
/* Error response/notification: error_type values for struct iwl_error_resp */
enum {
	FW_ERR_UNKNOWN_CMD = 0x0,
	FW_ERR_INVALID_CMD_PARAM = 0x1,
	FW_ERR_SERVICE = 0x2,
	FW_ERR_ARC_MEMORY = 0x3,
	FW_ERR_ARC_CODE = 0x4,
	FW_ERR_WATCH_DOG = 0x5,
	FW_ERR_WEP_GRP_KEY_INDX = 0x10,
	FW_ERR_WEP_KEY_SIZE = 0x11,
	FW_ERR_OBSOLETE_FUNC = 0x12,
	FW_ERR_UNEXPECTED = 0xFE,
	FW_ERR_FATAL = 0xFF
};
573
/**
 * struct iwl_error_resp - FW error indication
 * ( REPLY_ERROR = 0x2 )
 * @error_type: one of FW_ERR_*
 * @cmd_id: the command ID for which the error occurred
 * @reserved1: reserved for alignment
 * @bad_cmd_seq_num: sequence number of the erroneous command
 * @error_service: which service created the error, applicable only if
 *	error_type = 2, otherwise 0
 * @timestamp: TSF in usecs.
 */
struct iwl_error_resp {
	__le32 error_type;
	u8 cmd_id;
	u8 reserved1;
	__le16 bad_cmd_seq_num;
	__le32 error_service;
	__le64 timestamp;
} __packed;
592
593
594/* Common PHY, MAC and Bindings definitions */
595
596#define MAX_MACS_IN_BINDING (3)
597#define MAX_BINDINGS (4)
598#define AUX_BINDING_INDEX (3)
599#define MAX_PHYS (4)
600
601/* Used to extract ID and color from the context dword */
602#define FW_CTXT_ID_POS (0)
603#define FW_CTXT_ID_MSK (0xff << FW_CTXT_ID_POS)
604#define FW_CTXT_COLOR_POS (8)
605#define FW_CTXT_COLOR_MSK (0xff << FW_CTXT_COLOR_POS)
606#define FW_CTXT_INVALID (0xffffffff)
607
608#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\
609 (_color << FW_CTXT_COLOR_POS))
610
/* Possible actions on PHYs, MACs and Bindings */
enum {
	FW_CTXT_ACTION_STUB = 0,
	FW_CTXT_ACTION_ADD,
	FW_CTXT_ACTION_MODIFY,
	FW_CTXT_ACTION_REMOVE,
	FW_CTXT_ACTION_NUM
}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
619
620/* Time Events */
621
/* Time Event types, according to MAC type */
enum iwl_time_event_type {
	/* BSS Station Events */
	TE_BSS_STA_AGGRESSIVE_ASSOC,
	TE_BSS_STA_ASSOC,
	TE_BSS_EAP_DHCP_PROT,
	TE_BSS_QUIET_PERIOD,

	/* P2P Device Events */
	TE_P2P_DEVICE_DISCOVERABLE,
	TE_P2P_DEVICE_LISTEN,
	TE_P2P_DEVICE_ACTION_SCAN,
	TE_P2P_DEVICE_FULL_SCAN,

	/* P2P Client Events */
	TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
	TE_P2P_CLIENT_ASSOC,
	TE_P2P_CLIENT_QUIET_PERIOD,

	/* P2P GO Events */
	TE_P2P_GO_ASSOC_PROT,
	TE_P2P_GO_REPETITIVE_NOA,
	TE_P2P_GO_CT_WINDOW,

	/* WiDi Sync Events */
	TE_WIDI_TX_SYNC,

	/* Channel Switch NoA */
	TE_CHANNEL_SWITCH_PERIOD,

	TE_MAX
}; /* MAC_EVENT_TYPE_API_E_VER_1 */
654
655
656
657/* Time event - defines for command API v1 */
658
/*
 * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
 * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
 *	the first fragment is scheduled.
 * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
 *	the first 2 fragments are scheduled.
 * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
 *	number of fragments are valid.
 *
 * Other than the constants defined above, specifying a fragmentation value 'x'
 * means that the event can be fragmented but only the first 'x' will be
 * scheduled.
 */
enum {
	TE_V1_FRAG_NONE = 0,
	TE_V1_FRAG_SINGLE = 1,
	TE_V1_FRAG_DUAL = 2,
	TE_V1_FRAG_ENDLESS = 0xffffffff
};
678
679/* If a Time Event can be fragmented, this is the max number of fragments */
680#define TE_V1_FRAG_MAX_MSK 0x0fffffff
681/* Repeat the time event endlessly (until removed) */
682#define TE_V1_REPEAT_ENDLESS 0xffffffff
683/* If a Time Event has bounded repetitions, this is the maximal value */
684#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
685
/* Time Event dependencies: none, on another TE, or in a specific time */
enum {
	TE_V1_INDEPENDENT = 0,
	TE_V1_DEP_OTHER = BIT(0),
	TE_V1_DEP_TSF = BIT(1),
	TE_V1_EVENT_SOCIOPATHIC = BIT(2),
}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
693
/*
 * @TE_V1_NOTIF_NONE: no notifications
 * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
 * @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
 * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
 * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
 * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
 * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
 * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
 * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
 *
 * Supported Time event notifications configuration.
 * A notification (both event and fragment) includes a status indicating
 * whether the FW was able to schedule the event or not. For fragment
 * start/end notification the status is always success. There is no start/end
 * fragment notification for monolithic events.
 */
enum {
	TE_V1_NOTIF_NONE = 0,
	TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
	TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
	TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
	TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
	TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
	TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
	TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
	TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
722
723/* Time event - defines for command API */
724
/*
 * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
 * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
 *	the first fragment is scheduled.
 * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
 *	the first 2 fragments are scheduled.
 * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
 *	number of fragments are valid.
 *
 * Other than the constants defined above, specifying a fragmentation value 'x'
 * means that the event can be fragmented but only the first 'x' will be
 * scheduled.
 */
enum {
	TE_V2_FRAG_NONE = 0,
	TE_V2_FRAG_SINGLE = 1,
	TE_V2_FRAG_DUAL = 2,
	TE_V2_FRAG_MAX = 0xfe,
	TE_V2_FRAG_ENDLESS = 0xff
};
745
746/* Repeat the time event endlessly (until removed) */
747#define TE_V2_REPEAT_ENDLESS 0xff
748/* If a Time Event has bounded repetitions, this is the maximal value */
749#define TE_V2_REPEAT_MAX 0xfe
750
751#define TE_V2_PLACEMENT_POS 12
752#define TE_V2_ABSENCE_POS 15
753
/* Time event policy values
 * A notification (both event and fragment) includes a status indicating
 * whether the FW was able to schedule the event or not. For fragment
 * start/end notification the status is always success. There is no start/end
 * fragment notification for monolithic events.
 *
 * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
 * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
 * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
 * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
 * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
 * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
 * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
 * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
 * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
 * @TE_V2_DEP_OTHER: depends on another time event
 * @TE_V2_DEP_TSF: depends on a specific time
 * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
 * @TE_V2_ABSENCE: are we present or absent during the Time Event.
 */
enum {
	TE_V2_DEFAULT_POLICY = 0x0,

	/* notifications (event start/stop, fragment start/stop) */
	TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
	TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
	TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
	TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),

	TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
	TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
	TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
	TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
	/* NOTE(review): "T2_V2" looks like a typo for "TE_V2", but the
	 * identifier is referenced elsewhere in the driver — renaming it
	 * here would break callers, so it is left as-is. */
	T2_V2_START_IMMEDIATELY = BIT(11),

	TE_V2_NOTIF_MSK = 0xff,

	/* placement characteristics */
	TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
	TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
	TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),

	/* are we present or absent during the Time Event. */
	TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
};
799
/**
 * struct iwl_time_event_cmd - configuring Time Events
 * ( TIME_EVENT_CMD = 0x29 )
 *
 * Uses MAC_TIME_EVENT_DATA_API_S_VER_2; version 1 also exists and the one in
 * use is determined by IWL_UCODE_TLV_FLAGS.
 *
 * @id_and_color: ID and color of the relevant MAC
 * @action: action to perform, one of FW_CTXT_ACTION_*
 * @id: this field has two meanings, depending on the action:
 *	If the action is ADD, then it means the type of event to add.
 *	For all other actions it is the unique event ID assigned when the
 *	event was added by the FW.
 * @apply_time: When to start the Time Event (in GP2)
 * @max_delay: maximum delay to event's start (apply time), in TU
 * @depends_on: the unique ID of the event we depend on (if any)
 * @interval: interval between repetitions, in TU
 * @duration: duration of event in TU
 * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
 * @max_frags: maximal number of fragments the Time Event can be divided to
 * @policy: defines whether uCode shall notify the host or other uCode modules
 *	on event and/or fragment start and/or end
 *	using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
 *	TE_EVENT_SOCIOPATHIC
 *	using TE_ABSENCE and using TE_NOTIF_*
 */
struct iwl_time_event_cmd {
	/* COMMON_INDEX_HDR_API_S_VER_1 */
	__le32 id_and_color;
	__le32 action;
	__le32 id;
	/* MAC_TIME_EVENT_DATA_API_S_VER_2 */
	__le32 apply_time;
	__le32 max_delay;
	__le32 depends_on;
	__le32 interval;
	__le32 duration;
	u8 repeat;
	u8 max_frags;
	__le16 policy;
} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
839
840/**
841 * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
842 * @status: bit 0 indicates success, all others specify errors
843 * @id: the Time Event type
844 * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
845 * @id_and_color: ID and color of the relevant MAC
846 */
847struct iwl_time_event_resp {
848 __le32 status;
849 __le32 id;
850 __le32 unique_id;
851 __le32 id_and_color;
852} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
853
854/**
855 * struct iwl_time_event_notif - notifications of time event start/stop
856 * ( TIME_EVENT_NOTIFICATION = 0x2a )
857 * @timestamp: action timestamp in GP2
858 * @session_id: session's unique id
859 * @unique_id: unique id of the Time Event itself
860 * @id_and_color: ID and color of the relevant MAC
861 * @action: one of TE_NOTIF_START or TE_NOTIF_END
862 * @status: true if scheduled, false otherwise (not executed)
863 */
864struct iwl_time_event_notif {
865 __le32 timestamp;
866 __le32 session_id;
867 __le32 unique_id;
868 __le32 id_and_color;
869 __le32 action;
870 __le32 status;
871} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
872
873
874/* Bindings and Time Quota */
875
876/**
877 * struct iwl_binding_cmd - configuring bindings
878 * ( BINDING_CONTEXT_CMD = 0x2b )
879 * @id_and_color: ID and color of the relevant Binding
880 * @action: action to perform, one of FW_CTXT_ACTION_*
881 * @macs: array of MAC id and colors which belong to the binding
882 * @phy: PHY id and color which belongs to the binding
883 */
884struct iwl_binding_cmd {
885 /* COMMON_INDEX_HDR_API_S_VER_1 */
886 __le32 id_and_color;
887 __le32 action;
888 /* BINDING_DATA_API_S_VER_1 */
889 __le32 macs[MAX_MACS_IN_BINDING];
890 __le32 phy;
891} __packed; /* BINDING_CMD_API_S_VER_1 */
892
893/* The maximal number of fragments in the FW's schedule session */
894#define IWL_MVM_MAX_QUOTA 128
895
896/**
897 * struct iwl_time_quota_data - configuration of time quota per binding
898 * @id_and_color: ID and color of the relevant Binding
899 * @quota: absolute time quota in TU. The scheduler will try to divide the
900 * remainig quota (after Time Events) according to this quota.
901 * @max_duration: max uninterrupted context duration in TU
902 */
903struct iwl_time_quota_data {
904 __le32 id_and_color;
905 __le32 quota;
906 __le32 max_duration;
907} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
908
909/**
910 * struct iwl_time_quota_cmd - configuration of time quota between bindings
911 * ( TIME_QUOTA_CMD = 0x2c )
912 * @quotas: allocations per binding
913 */
914struct iwl_time_quota_cmd {
915 struct iwl_time_quota_data quotas[MAX_BINDINGS];
916} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
917
918
919/* PHY context */
920
921/* Supported bands */
922#define PHY_BAND_5 (0)
923#define PHY_BAND_24 (1)
924
925/* Supported channel width, vary if there is VHT support */
926#define PHY_VHT_CHANNEL_MODE20 (0x0)
927#define PHY_VHT_CHANNEL_MODE40 (0x1)
928#define PHY_VHT_CHANNEL_MODE80 (0x2)
929#define PHY_VHT_CHANNEL_MODE160 (0x3)
930
931/*
932 * Control channel position:
933 * For legacy set bit means upper channel, otherwise lower.
934 * For VHT - bit-2 marks if the control is lower/upper relative to center-freq
935 * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
936 * center_freq
937 * |
938 * 40Mhz |_______|_______|
939 * 80Mhz |_______|_______|_______|_______|
940 * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
941 * code 011 010 001 000 | 100 101 110 111
942 */
943#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
944#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
945#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
946#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
947#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
948#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
949#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
950#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
951
952/*
953 * @band: PHY_BAND_*
954 * @channel: channel number
955 * @width: PHY_[VHT|LEGACY]_CHANNEL_*
956 * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
957 */
958struct iwl_fw_channel_info {
959 u8 band;
960 u8 channel;
961 u8 width;
962 u8 ctrl_pos;
963} __packed;
964
965#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
966#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
967 (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
968#define PHY_RX_CHAIN_VALID_POS (1)
969#define PHY_RX_CHAIN_VALID_MSK \
970 (0x7 << PHY_RX_CHAIN_VALID_POS)
971#define PHY_RX_CHAIN_FORCE_SEL_POS (4)
972#define PHY_RX_CHAIN_FORCE_SEL_MSK \
973 (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
974#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
975#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
976 (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
977#define PHY_RX_CHAIN_CNT_POS (10)
978#define PHY_RX_CHAIN_CNT_MSK \
979 (0x3 << PHY_RX_CHAIN_CNT_POS)
980#define PHY_RX_CHAIN_MIMO_CNT_POS (12)
981#define PHY_RX_CHAIN_MIMO_CNT_MSK \
982 (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
983#define PHY_RX_CHAIN_MIMO_FORCE_POS (14)
984#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
985 (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
986
987/* TODO: fix the value, make it depend on firmware at runtime? */
988#define NUM_PHY_CTX 3
989
990/* TODO: complete missing documentation */
991/**
992 * struct iwl_phy_context_cmd - config of the PHY context
993 * ( PHY_CONTEXT_CMD = 0x8 )
994 * @id_and_color: ID and color of the relevant Binding
995 * @action: action to perform, one of FW_CTXT_ACTION_*
996 * @apply_time: 0 means immediate apply and context switch.
997 * other value means apply new params after X usecs
998 * @tx_param_color: ???
999 * @channel_info:
1000 * @txchain_info: ???
1001 * @rxchain_info: ???
1002 * @acquisition_data: ???
1003 * @dsp_cfg_flags: set to 0
1004 */
1005struct iwl_phy_context_cmd {
1006 /* COMMON_INDEX_HDR_API_S_VER_1 */
1007 __le32 id_and_color;
1008 __le32 action;
1009 /* PHY_CONTEXT_DATA_API_S_VER_1 */
1010 __le32 apply_time;
1011 __le32 tx_param_color;
1012 struct iwl_fw_channel_info ci;
1013 __le32 txchain_info;
1014 __le32 rxchain_info;
1015 __le32 acquisition_data;
1016 __le32 dsp_cfg_flags;
1017} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
1018
1019/*
1020 * Aux ROC command
1021 *
1022 * Command requests the firmware to create a time event for a certain duration
1023 * and remain on the given channel. This is done by using the Aux framework in
1024 * the FW.
1025 * The command was first used for Hot Spot issues - but can be used regardless
1026 * to Hot Spot.
1027 *
1028 * ( HOT_SPOT_CMD 0x53 )
1029 *
1030 * @id_and_color: ID and color of the MAC
1031 * @action: action to perform, one of FW_CTXT_ACTION_*
1032 * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the
1033 * event_unique_id should be the id of the time event assigned by ucode.
1034 * Otherwise ignore the event_unique_id.
1035 * @sta_id_and_color: station id and color, resumed during "Remain On Channel"
1036 * activity.
1037 * @channel_info: channel info
1038 * @node_addr: Our MAC Address
1039 * @reserved: reserved for alignment
1040 * @apply_time: GP2 value to start (should always be the current GP2 value)
1041 * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
1042 * time by which start of the event is allowed to be postponed.
1043 * @duration: event duration in TU To calculate event duration:
1044 * timeEventDuration = min(duration, remainingQuota)
1045 */
1046struct iwl_hs20_roc_req {
1047 /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
1048 __le32 id_and_color;
1049 __le32 action;
1050 __le32 event_unique_id;
1051 __le32 sta_id_and_color;
1052 struct iwl_fw_channel_info channel_info;
1053 u8 node_addr[ETH_ALEN];
1054 __le16 reserved;
1055 __le32 apply_time;
1056 __le32 apply_time_max_delay;
1057 __le32 duration;
1058} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
1059
1060/*
1061 * values for AUX ROC result values
1062 */
1063enum iwl_mvm_hot_spot {
1064 HOT_SPOT_RSP_STATUS_OK,
1065 HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
1066 HOT_SPOT_MAX_NUM_OF_SESSIONS,
1067};
1068
1069/*
1070 * Aux ROC command response
1071 *
1072 * In response to iwl_hs20_roc_req the FW sends this command to notify the
1073 * driver the uid of the timevent.
1074 *
1075 * ( HOT_SPOT_CMD 0x53 )
1076 *
1077 * @event_unique_id: Unique ID of time event assigned by ucode
1078 * @status: Return status 0 is success, all the rest used for specific errors
1079 */
1080struct iwl_hs20_roc_res {
1081 __le32 event_unique_id;
1082 __le32 status;
1083} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
1084
1085/**
1086 * struct iwl_radio_version_notif - information on the radio version
1087 * ( RADIO_VERSION_NOTIFICATION = 0x68 )
1088 * @radio_flavor:
1089 * @radio_step:
1090 * @radio_dash:
1091 */
1092struct iwl_radio_version_notif {
1093 __le32 radio_flavor;
1094 __le32 radio_step;
1095 __le32 radio_dash;
1096} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
1097
1098enum iwl_card_state_flags {
1099 CARD_ENABLED = 0x00,
1100 HW_CARD_DISABLED = 0x01,
1101 SW_CARD_DISABLED = 0x02,
1102 CT_KILL_CARD_DISABLED = 0x04,
1103 HALT_CARD_DISABLED = 0x08,
1104 CARD_DISABLED_MSK = 0x0f,
1105 CARD_IS_RX_ON = 0x10,
1106};
1107
1108/**
1109 * struct iwl_radio_version_notif - information on the radio version
1110 * ( CARD_STATE_NOTIFICATION = 0xa1 )
1111 * @flags: %iwl_card_state_flags
1112 */
1113struct iwl_card_state_notif {
1114 __le32 flags;
1115} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
1116
1117/**
1118 * struct iwl_missed_beacons_notif - information on missed beacons
1119 * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
1120 * @mac_id: interface ID
1121 * @consec_missed_beacons_since_last_rx: number of consecutive missed
1122 * beacons since last RX.
1123 * @consec_missed_beacons: number of consecutive missed beacons
1124 * @num_expected_beacons:
1125 * @num_recvd_beacons:
1126 */
1127struct iwl_missed_beacons_notif {
1128 __le32 mac_id;
1129 __le32 consec_missed_beacons_since_last_rx;
1130 __le32 consec_missed_beacons;
1131 __le32 num_expected_beacons;
1132 __le32 num_recvd_beacons;
1133} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
1134
1135/**
1136 * struct iwl_mfuart_load_notif - mfuart image version & status
1137 * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
1138 * @installed_ver: installed image version
1139 * @external_ver: external image version
1140 * @status: MFUART loading status
1141 * @duration: MFUART loading time
1142*/
1143struct iwl_mfuart_load_notif {
1144 __le32 installed_ver;
1145 __le32 external_ver;
1146 __le32 status;
1147 __le32 duration;
1148} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
1149
1150/**
1151 * struct iwl_set_calib_default_cmd - set default value for calibration.
1152 * ( SET_CALIB_DEFAULT_CMD = 0x8e )
1153 * @calib_index: the calibration to set value for
1154 * @length: of data
1155 * @data: the value to set for the calibration result
1156 */
1157struct iwl_set_calib_default_cmd {
1158 __le16 calib_index;
1159 __le16 length;
1160 u8 data[0];
1161} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
1162
1163#define MAX_PORT_ID_NUM 2
1164#define MAX_MCAST_FILTERING_ADDRESSES 256
1165
1166/**
1167 * struct iwl_mcast_filter_cmd - configure multicast filter.
1168 * @filter_own: Set 1 to filter out multicast packets sent by station itself
1169 * @port_id: Multicast MAC addresses array specifier. This is a strange way
1170 * to identify network interface adopted in host-device IF.
1171 * It is used by FW as index in array of addresses. This array has
1172 * MAX_PORT_ID_NUM members.
1173 * @count: Number of MAC addresses in the array
1174 * @pass_all: Set 1 to pass all multicast packets.
1175 * @bssid: current association BSSID.
1176 * @addr_list: Place holder for array of MAC addresses.
1177 * IMPORTANT: add padding if necessary to ensure DWORD alignment.
1178 */
1179struct iwl_mcast_filter_cmd {
1180 u8 filter_own;
1181 u8 port_id;
1182 u8 count;
1183 u8 pass_all;
1184 u8 bssid[6];
1185 u8 reserved[2];
1186 u8 addr_list[0];
1187} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
1188
1189#define MAX_BCAST_FILTERS 8
1190#define MAX_BCAST_FILTER_ATTRS 2
1191
1192/**
1193 * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
1194 * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
1195 * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
1196 * start of ip payload).
1197 */
1198enum iwl_mvm_bcast_filter_attr_offset {
1199 BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
1200 BCAST_FILTER_OFFSET_IP_END = 1,
1201};
1202
1203/**
1204 * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
1205 * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
1206 * @offset: starting offset of this pattern.
1207 * @val: value to match - big endian (MSB is the first
1208 * byte to match from offset pos).
1209 * @mask: mask to match (big endian).
1210 */
1211struct iwl_fw_bcast_filter_attr {
1212 u8 offset_type;
1213 u8 offset;
1214 __le16 reserved1;
1215 __be32 val;
1216 __be32 mask;
1217} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
1218
1219/**
1220 * enum iwl_mvm_bcast_filter_frame_type - filter frame type
1221 * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
1222 * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
1223 */
1224enum iwl_mvm_bcast_filter_frame_type {
1225 BCAST_FILTER_FRAME_TYPE_ALL = 0,
1226 BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
1227};
1228
1229/**
1230 * struct iwl_fw_bcast_filter - broadcast filter
1231 * @discard: discard frame (1) or let it pass (0).
1232 * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
1233 * @num_attrs: number of valid attributes in this filter.
1234 * @attrs: attributes of this filter. a filter is considered matched
1235 * only when all its attributes are matched (i.e. AND relationship)
1236 */
1237struct iwl_fw_bcast_filter {
1238 u8 discard;
1239 u8 frame_type;
1240 u8 num_attrs;
1241 u8 reserved1;
1242 struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
1243} __packed; /* BCAST_FILTER_S_VER_1 */
1244
1245/**
1246 * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
1247 * @default_discard: default action for this mac (discard (1) / pass (0)).
1248 * @attached_filters: bitmap of relevant filters for this mac.
1249 */
1250struct iwl_fw_bcast_mac {
1251 u8 default_discard;
1252 u8 reserved1;
1253 __le16 attached_filters;
1254} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
1255
1256/**
1257 * struct iwl_bcast_filter_cmd - broadcast filtering configuration
1258 * @disable: enable (0) / disable (1)
1259 * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
1260 * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
1261 * @filters: broadcast filters
1262 * @macs: broadcast filtering configuration per-mac
1263 */
1264struct iwl_bcast_filter_cmd {
1265 u8 disable;
1266 u8 max_bcast_filters;
1267 u8 max_macs;
1268 u8 reserved1;
1269 struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
1270 struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
1271} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
1272
1273/*
1274 * enum iwl_mvm_marker_id - maker ids
1275 *
1276 * The ids for different type of markers to insert into the usniffer logs
1277 */
1278enum iwl_mvm_marker_id {
1279 MARKER_ID_TX_FRAME_LATENCY = 1,
1280}; /* MARKER_ID_API_E_VER_1 */
1281
1282/**
1283 * struct iwl_mvm_marker - mark info into the usniffer logs
1284 *
1285 * (MARKER_CMD = 0xcb)
1286 *
1287 * Mark the UTC time stamp into the usniffer logs together with additional
1288 * metadata, so the usniffer output can be parsed.
1289 * In the command response the ucode will return the GP2 time.
1290 *
1291 * @dw_len: The amount of dwords following this byte including this byte.
1292 * @marker_id: A unique marker id (iwl_mvm_marker_id).
1293 * @reserved: reserved.
1294 * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC
1295 * @metadata: additional meta data that will be written to the unsiffer log
1296 */
1297struct iwl_mvm_marker {
1298 u8 dwLen;
1299 u8 markerId;
1300 __le16 reserved;
1301 __le64 timestamp;
1302 __le32 metadata[0];
1303} __packed; /* MARKER_API_S_VER_1 */
1304
1305/*
1306 * enum iwl_dc2dc_config_id - flag ids
1307 *
1308 * Ids of dc2dc configuration flags
1309 */
1310enum iwl_dc2dc_config_id {
1311 DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
1312 DCDC_FREQ_TUNE_SET = 0x2,
1313}; /* MARKER_ID_API_E_VER_1 */
1314
1315/**
1316 * struct iwl_dc2dc_config_cmd - configure dc2dc values
1317 *
1318 * (DC2DC_CONFIG_CMD = 0x83)
1319 *
1320 * Set/Get & configure dc2dc values.
1321 * The command always returns the current dc2dc values.
1322 *
1323 * @flags: set/get dc2dc
1324 * @enable_low_power_mode: not used.
1325 * @dc2dc_freq_tune0: frequency divider - digital domain
1326 * @dc2dc_freq_tune1: frequency divider - analog domain
1327 */
1328struct iwl_dc2dc_config_cmd {
1329 __le32 flags;
1330 __le32 enable_low_power_mode; /* not used */
1331 __le32 dc2dc_freq_tune0;
1332 __le32 dc2dc_freq_tune1;
1333} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
1334
1335/**
1336 * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
1337 *
1338 * Current dc2dc values returned by the FW.
1339 *
1340 * @dc2dc_freq_tune0: frequency divider - digital domain
1341 * @dc2dc_freq_tune1: frequency divider - analog domain
1342 */
1343struct iwl_dc2dc_config_resp {
1344 __le32 dc2dc_freq_tune0;
1345 __le32 dc2dc_freq_tune1;
1346} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
1347
1348/***********************************
1349 * Smart Fifo API
1350 ***********************************/
1351/* Smart Fifo state */
1352enum iwl_sf_state {
1353 SF_LONG_DELAY_ON = 0, /* should never be called by driver */
1354 SF_FULL_ON,
1355 SF_UNINIT,
1356 SF_INIT_OFF,
1357 SF_HW_NUM_STATES
1358};
1359
1360/* Smart Fifo possible scenario */
1361enum iwl_sf_scenario {
1362 SF_SCENARIO_SINGLE_UNICAST,
1363 SF_SCENARIO_AGG_UNICAST,
1364 SF_SCENARIO_MULTICAST,
1365 SF_SCENARIO_BA_RESP,
1366 SF_SCENARIO_TX_RESP,
1367 SF_NUM_SCENARIO
1368};
1369
1370#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
1371#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
1372
1373/* smart FIFO default values */
1374#define SF_W_MARK_SISO 6144
1375#define SF_W_MARK_MIMO2 8192
1376#define SF_W_MARK_MIMO3 6144
1377#define SF_W_MARK_LEGACY 4096
1378#define SF_W_MARK_SCAN 4096
1379
1380/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
1381#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
1382#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1383#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
1384#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1385#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */
1386#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1387#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
1388#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
1389#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
1390#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
1391
1392/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
1393#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
1394#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
1395#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
1396#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
1397#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
1398#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
1399#define SF_BA_IDLE_TIMER 320 /* 300 uSec */
1400#define SF_BA_AGING_TIMER 2016 /* 2 mSec */
1401#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
1402#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
1403
1404#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
1405
1406#define SF_CFG_DUMMY_NOTIF_OFF BIT(16)
1407
1408/**
1409 * Smart Fifo configuration command.
1410 * @state: smart fifo state, types listed in enum %iwl_sf_sate.
1411 * @watermark: Minimum allowed availabe free space in RXF for transient state.
1412 * @long_delay_timeouts: aging and idle timer values for each scenario
1413 * in long delay state.
1414 * @full_on_timeouts: timer values for each scenario in full on state.
1415 */
1416struct iwl_sf_cfg_cmd {
1417 __le32 state;
1418 __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
1419 __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
1420 __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
1421} __packed; /* SF_CFG_API_S_VER_2 */
1422
1423/***********************************
1424 * Location Aware Regulatory (LAR) API - MCC updates
1425 ***********************************/
1426
1427/**
1428 * struct iwl_mcc_update_cmd - Request the device to update geographic
1429 * regulatory profile according to the given MCC (Mobile Country Code).
1430 * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
1431 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
1432 * MCC in the cmd response will be the relevant MCC in the NVM.
1433 * @mcc: given mobile country code
1434 * @source_id: the source from where we got the MCC, see iwl_mcc_source
1435 * @reserved: reserved for alignment
1436 */
1437struct iwl_mcc_update_cmd {
1438 __le16 mcc;
1439 u8 source_id;
1440 u8 reserved;
1441} __packed; /* LAR_UPDATE_MCC_CMD_API_S */
1442
1443/**
1444 * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
1445 * Contains the new channel control profile map, if changed, and the new MCC
1446 * (mobile country code).
1447 * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
1448 * @status: see &enum iwl_mcc_update_status
1449 * @mcc: the new applied MCC
1450 * @cap: capabilities for all channels which matches the MCC
1451 * @source_id: the MCC source, see iwl_mcc_source
1452 * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
1453 * channels, depending on platform)
1454 * @channels: channel control data map, DWORD for each channel. Only the first
1455 * 16bits are used.
1456 */
1457struct iwl_mcc_update_resp {
1458 __le32 status;
1459 __le16 mcc;
1460 u8 cap;
1461 u8 source_id;
1462 __le32 n_channels;
1463 __le32 channels[0];
1464} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */
1465
1466/**
1467 * struct iwl_mcc_chub_notif - chub notifies of mcc change
1468 * (MCC_CHUB_UPDATE_CMD = 0xc9)
1469 * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
1470 * the cellular and connectivity cores that gets updates of the mcc, and
1471 * notifies the ucode directly of any mcc change.
1472 * The ucode requests the driver to request the device to update geographic
1473 * regulatory profile according to the given MCC (Mobile Country Code).
1474 * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
1475 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
1476 * MCC in the cmd response will be the relevant MCC in the NVM.
1477 * @mcc: given mobile country code
1478 * @source_id: identity of the change originator, see iwl_mcc_source
1479 * @reserved1: reserved for alignment
1480 */
1481struct iwl_mcc_chub_notif {
1482 u16 mcc;
1483 u8 source_id;
1484 u8 reserved1;
1485} __packed; /* LAR_MCC_NOTIFY_S */
1486
1487enum iwl_mcc_update_status {
1488 MCC_RESP_NEW_CHAN_PROFILE,
1489 MCC_RESP_SAME_CHAN_PROFILE,
1490 MCC_RESP_INVALID,
1491 MCC_RESP_NVM_DISABLED,
1492 MCC_RESP_ILLEGAL,
1493 MCC_RESP_LOW_PRIORITY,
1494};
1495
1496enum iwl_mcc_source {
1497 MCC_SOURCE_OLD_FW = 0,
1498 MCC_SOURCE_ME = 1,
1499 MCC_SOURCE_BIOS = 2,
1500 MCC_SOURCE_3G_LTE_HOST = 3,
1501 MCC_SOURCE_3G_LTE_DEVICE = 4,
1502 MCC_SOURCE_WIFI = 5,
1503 MCC_SOURCE_RESERVED = 6,
1504 MCC_SOURCE_DEFAULT = 7,
1505 MCC_SOURCE_UNINITIALIZED = 8,
1506 MCC_SOURCE_GET_CURRENT = 0x10
1507};
1508
1509/* DTS measurements */
1510
1511enum iwl_dts_measurement_flags {
1512 DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0),
1513 DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1),
1514};
1515
1516/**
1517 * iwl_dts_measurement_cmd - request DTS temperature and/or voltage measurements
1518 *
1519 * @flags: indicates which measurements we want as specified in &enum
1520 * iwl_dts_measurement_flags
1521 */
1522struct iwl_dts_measurement_cmd {
1523 __le32 flags;
1524} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
1525
1526/**
1527* enum iwl_dts_control_measurement_mode - DTS measurement type
1528* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read
1529* back (latest value. Not waiting for new value). Use automatic
1530* SW DTS configuration.
1531* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings,
1532* trigger DTS reading and provide read back temperature read
1533* when available.
1534* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read
1535* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result,
1536* without measurement trigger.
1537*/
1538enum iwl_dts_control_measurement_mode {
1539 DTS_AUTOMATIC = 0,
1540 DTS_REQUEST_READ = 1,
1541 DTS_OVER_WRITE = 2,
1542 DTS_DIRECT_WITHOUT_MEASURE = 3,
1543};
1544
1545/**
1546* enum iwl_dts_used - DTS to use or used for measurement in the DTS request
1547* @DTS_USE_TOP: Top
1548* @DTS_USE_CHAIN_A: chain A
1549* @DTS_USE_CHAIN_B: chain B
1550* @DTS_USE_CHAIN_C: chain C
1551* @XTAL_TEMPERATURE - read temperature from xtal
1552*/
1553enum iwl_dts_used {
1554 DTS_USE_TOP = 0,
1555 DTS_USE_CHAIN_A = 1,
1556 DTS_USE_CHAIN_B = 2,
1557 DTS_USE_CHAIN_C = 3,
1558 XTAL_TEMPERATURE = 4,
1559};
1560
1561/**
1562* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode
1563* @DTS_BIT6_MODE: bit 6 mode
1564* @DTS_BIT8_MODE: bit 8 mode
1565*/
1566enum iwl_dts_bit_mode {
1567 DTS_BIT6_MODE = 0,
1568 DTS_BIT8_MODE = 1,
1569};
1570
1571/**
1572 * iwl_ext_dts_measurement_cmd - request extended DTS temperature measurements
1573 * @control_mode: see &enum iwl_dts_control_measurement_mode
1574 * @temperature: used when over write DTS mode is selected
1575 * @sensor: set temperature sensor to use. See &enum iwl_dts_used
1576 * @avg_factor: average factor to DTS in request DTS read mode
1577 * @bit_mode: value defines the DTS bit mode to use. See &enum iwl_dts_bit_mode
1578 * @step_duration: step duration for the DTS
1579 */
1580struct iwl_ext_dts_measurement_cmd {
1581 __le32 control_mode;
1582 __le32 temperature;
1583 __le32 sensor;
1584 __le32 avg_factor;
1585 __le32 bit_mode;
1586 __le32 step_duration;
1587} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
1588
1589/**
1590 * iwl_dts_measurement_notif - notification received with the measurements
1591 *
1592 * @temp: the measured temperature
1593 * @voltage: the measured voltage
1594 */
1595struct iwl_dts_measurement_notif {
1596 __le32 temp;
1597 __le32 voltage;
1598} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
1599
1600/***********************************
1601 * TDLS API
1602 ***********************************/
1603
1604/* Type of TDLS request */
1605enum iwl_tdls_channel_switch_type {
1606 TDLS_SEND_CHAN_SW_REQ = 0,
1607 TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
1608 TDLS_MOVE_CH,
1609}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
1610
1611/**
1612 * Switch timing sub-element in a TDLS channel-switch command
1613 * @frame_timestamp: GP2 timestamp of channel-switch request/response packet
1614 * received from peer
1615 * @max_offchan_duration: What amount of microseconds out of a DTIM is given
1616 * to the TDLS off-channel communication. For instance if the DTIM is
1617 * 200TU and the TDLS peer is to be given 25% of the time, the value
1618 * given will be 50TU, or 50 * 1024 if translated into microseconds.
1619 * @switch_time: switch time the peer sent in its channel switch timing IE
1620 * @switch_timout: switch timeout the peer sent in its channel switch timing IE
1621 */
1622struct iwl_tdls_channel_switch_timing {
1623 __le32 frame_timestamp; /* GP2 time of peer packet Rx */
1624 __le32 max_offchan_duration; /* given in micro-seconds */
1625 __le32 switch_time; /* given in micro-seconds */
1626 __le32 switch_timeout; /* given in micro-seconds */
1627} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
1628
1629#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
1630
1631/**
1632 * TDLS channel switch frame template
1633 *
1634 * A template representing a TDLS channel-switch request or response frame
1635 *
1636 * @switch_time_offset: offset to the channel switch timing IE in the template
1637 * @tx_cmd: Tx parameters for the frame
1638 * @data: frame data
1639 */
1640struct iwl_tdls_channel_switch_frame {
1641 __le32 switch_time_offset;
1642 struct iwl_tx_cmd tx_cmd;
1643 u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
1644} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
1645
1646/**
1647 * TDLS channel switch command
1648 *
1649 * The command is sent to initiate a channel switch and also in response to
1650 * incoming TDLS channel-switch request/response packets from remote peers.
1651 *
1652 * @switch_type: see &enum iwl_tdls_channel_switch_type
1653 * @peer_sta_id: station id of TDLS peer
1654 * @ci: channel we switch to
1655 * @timing: timing related data for command
1656 * @frame: channel-switch request/response template, depending to switch_type
1657 */
1658struct iwl_tdls_channel_switch_cmd {
1659 u8 switch_type;
1660 __le32 peer_sta_id;
1661 struct iwl_fw_channel_info ci;
1662 struct iwl_tdls_channel_switch_timing timing;
1663 struct iwl_tdls_channel_switch_frame frame;
1664} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
1665
1666/**
1667 * TDLS channel switch start notification
1668 *
1669 * @status: non-zero on success
1670 * @offchannel_duration: duration given in microseconds
1671 * @sta_id: peer currently performing the channel-switch with
1672 */
1673struct iwl_tdls_channel_switch_notif {
1674 __le32 status;
1675 __le32 offchannel_duration;
1676 __le32 sta_id;
1677} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
1678
1679/**
1680 * TDLS station info
1681 *
1682 * @sta_id: station id of the TDLS peer
1683 * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
1684 * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
1685 * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
1686 */
1687struct iwl_tdls_sta_info {
1688 u8 sta_id;
1689 u8 tx_to_peer_tid;
1690 __le16 tx_to_peer_ssn;
1691 __le32 is_initiator;
1692} __packed; /* TDLS_STA_INFO_VER_1 */
1693
1694/**
1695 * TDLS basic config command
1696 *
1697 * @id_and_color: MAC id and color being configured
1698 * @tdls_peer_count: amount of currently connected TDLS peers
1699 * @tx_to_ap_tid: TID reverved vs. the AP for FW based Tx
1700 * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
1701 * @sta_info: per-station info. Only the first tdls_peer_count entries are set
1702 * @pti_req_data_offset: offset of network-level data for the PTI template
1703 * @pti_req_tx_cmd: Tx parameters for PTI request template
1704 * @pti_req_template: PTI request template data
1705 */
1706struct iwl_tdls_config_cmd {
1707 __le32 id_and_color; /* mac id and color */
1708 u8 tdls_peer_count;
1709 u8 tx_to_ap_tid;
1710 __le16 tx_to_ap_ssn;
1711 struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
1712
1713 __le32 pti_req_data_offset;
1714 struct iwl_tx_cmd pti_req_tx_cmd;
1715 u8 pti_req_template[0];
1716} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
1717
1718/**
1719 * TDLS per-station config information from FW
1720 *
1721 * @sta_id: station id of the TDLS peer
1722 * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
1723 * the peer
1724 */
1725struct iwl_tdls_config_sta_info_res {
1726 __le16 sta_id;
1727 __le16 tx_to_peer_last_seq;
1728} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
1729
1730/**
1731 * TDLS config information from FW
1732 *
1733 * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
1734 * @sta_info: per-station TDLS config information
1735 */
1736struct iwl_tdls_config_res {
1737 __le32 tx_to_ap_last_seq;
1738 struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
1739} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
1740
1741#define TX_FIFO_MAX_NUM 8
1742#define RX_FIFO_MAX_NUM 2
1743
1744/**
1745 * Shared memory configuration information from the FW
1746 *
1747 * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
1748 * accessible)
1749 * @shared_mem_size: shared memory size
1750 * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
1751 * 0x0 as accessible only via DBGM RDAT)
1752 * @sample_buff_size: internal sample buff size
1753 * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
1754 * 8000 HW set to 0x0 as not accessible)
1755 * @txfifo_size: size of TXF0 ... TXF7
1756 * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
1757 * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
1758 * when paging is not supported this should be 0
1759 * @page_buff_size: size of %page_buff_addr
1760 */
1761struct iwl_shared_mem_cfg {
1762 __le32 shared_mem_addr;
1763 __le32 shared_mem_size;
1764 __le32 sample_buff_addr;
1765 __le32 sample_buff_size;
1766 __le32 txfifo_addr;
1767 __le32 txfifo_size[TX_FIFO_MAX_NUM];
1768 __le32 rxfifo_size[RX_FIFO_MAX_NUM];
1769 __le32 page_buff_addr;
1770 __le32 page_buff_size;
1771} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
1772
1773#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
new file mode 100644
index 000000000000..d906fa13ba97
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -0,0 +1,1166 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <net/mac80211.h>
66
67#include "iwl-trans.h"
68#include "iwl-op-mode.h"
69#include "iwl-fw.h"
70#include "iwl-debug.h"
71#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
72#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
73#include "iwl-prph.h"
74#include "iwl-eeprom-parse.h"
75
76#include "mvm.h"
77#include "iwl-phy-db.h"
78
79#define MVM_UCODE_ALIVE_TIMEOUT HZ
80#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
81
82#define UCODE_VALID_OK cpu_to_le32(0x1)
83
/*
 * Result of waiting for the firmware ALIVE notification, filled in by
 * iwl_alive_fn() and consumed by iwl_mvm_load_ucode_wait_alive().
 */
struct iwl_mvm_alive_data {
	bool valid;		/* true when ucode status == IWL_ALIVE_STATUS_OK */
	u32 scd_base_addr;	/* scheduler base pointer reported by the FW */
};
88
89static inline const struct fw_img *
90iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
91{
92 if (ucode_type >= IWL_UCODE_TYPE_MAX)
93 return NULL;
94
95 return &mvm->fw->img[ucode_type];
96}
97
98static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
99{
100 struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
101 .valid = cpu_to_le32(valid_tx_ant),
102 };
103
104 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
105 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
106 sizeof(tx_ant_cmd), &tx_ant_cmd);
107}
108
109static void iwl_free_fw_paging(struct iwl_mvm *mvm)
110{
111 int i;
112
113 if (!mvm->fw_paging_db[0].fw_paging_block)
114 return;
115
116 for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
117 if (!mvm->fw_paging_db[i].fw_paging_block) {
118 IWL_DEBUG_FW(mvm,
119 "Paging: block %d already freed, continue to next page\n",
120 i);
121
122 continue;
123 }
124
125 __free_pages(mvm->fw_paging_db[i].fw_paging_block,
126 get_order(mvm->fw_paging_db[i].fw_paging_size));
127 }
128 kfree(mvm->trans->paging_download_buf);
129 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
130}
131
/*
 * Copy the CPU2 paging image from the firmware file into the DRAM blocks
 * previously allocated by iwl_alloc_fw_paging_mem().
 *
 * Returns 0 on success, or -EINVAL when no PAGING_SEPARATOR_SECTION is
 * found in the image (the paging memory is freed again in that case).
 */
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * find where is the paging image start point:
	 * if CPU2 exist and it's in paging format, then the image looks like:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
	 * non paged to CPU2 paging sec
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			/* first paging section follows the separator */
			sec_idx++;
			break;
		}
	}

	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
		IWL_ERR(mvm, "driver didn't find paging image\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	/*
	 * NOTE(review): the section data is assumed to be at least
	 * fw_paging_size bytes; sizes are not validated here - confirm
	 * against the firmware file parser.
	 */
	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * loop index start from 1 since that CSS block already copied to dram
	 * and CSS index is 0.
	 * loop stop at num_of_paging_blk since that last block is not full.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block (may hold fewer pages than a full one) */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}
207
/*
 * Allocate the DRAM blocks used for firmware paging: one 4KB block for the
 * paging CSS plus num_of_paging_blk blocks of PAGING_BLOCK_SIZE (32KB).
 *
 * When the device is DMA capable each block is also DMA-mapped and the bus
 * address is stored in fw_paging_phys; otherwise a pseudo address
 * (PAGING_ADDR_SIG | block index) is stored so the transport can recognize
 * it and copy pages by hand.
 *
 * Returns 0 on success (or when the blocks were already allocated by a
 * previous load), -ENOMEM on failure; on failure everything allocated so
 * far is freed again.
 */
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	/* already allocated on a previous (re)load - reuse it */
	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	/* round the page count up to full groups; the last group may be
	 * partially filled (tracked in num_of_pages_in_last_blk) */
	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
		/* pseudo address understood by the transport's copy path */
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

	/*
	 * allocate blocks in dram.
	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}
314
/*
 * Allocate the paging DRAM blocks (no-op if already allocated) and copy
 * the CPU2 paging image into them. Returns 0 or a negative errno.
 */
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int err = iwl_alloc_fw_paging_mem(mvm, fw);

	return err ? err : iwl_fill_paging_mem(mvm, fw);
}
326
/*
 * Send the FW_PAGING_BLOCK_CMD to the firmware in case CPU2 has a paging
 * image: tells it the block size, block count, number of valid pages in
 * the last block, and the (device-visible) address of every block.
 */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block; the FW gets the address
	 * right-shifted into 4KB-page units */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
354
/*
 * Ask the firmware (via FW_GET_ITEM_CMD) where in SMEM the paging pages
 * live, for devices that are not DMA capable: the driver must then copy
 * pages to/from that address by hand, using a bounce buffer allocated
 * here (trans->paging_download_buf).
 *
 * Returns 0 on success; -EIO if the FW answered with the wrong item,
 * -ENOMEM if the bounce buffer allocation failed, or the send error.
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	/* bounce buffer for non-DMA page upload/download */
	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	/* response packet must be released whether we used it or not */
	iwl_free_resp(&cmd);

	return ret;
}
408
/*
 * Notification-wait callback for the ALIVE notification. The response
 * struct has three known versions, distinguished purely by payload size;
 * for each, record the error/log event table pointers, the SCD base
 * address, and (v2/v3) the UMAC error table and store-and-forward space.
 *
 * Returns true to stop waiting (the ALIVE was received).
 *
 * NOTE(review): if the payload matches none of the three sizes,
 * *alive_data is left untouched - the caller must pre-initialize it
 * before waiting.
 */
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* v1 has no UMAC log support */
		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		/* store-and-forward space, used by SDIO transports */
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}
492
493static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
494 struct iwl_rx_packet *pkt, void *data)
495{
496 struct iwl_phy_db *phy_db = data;
497
498 if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
499 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
500 return true;
501 }
502
503 WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
504
505 return false;
506}
507
508static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
509 enum iwl_ucode_type ucode_type)
510{
511 struct iwl_notification_wait alive_wait;
512 struct iwl_mvm_alive_data alive_data;
513 const struct fw_img *fw;
514 int ret, i;
515 enum iwl_ucode_type old_type = mvm->cur_ucode;
516 static const u16 alive_cmd[] = { MVM_ALIVE };
517 struct iwl_sf_region st_fwrd_space;
518
519 if (ucode_type == IWL_UCODE_REGULAR &&
520 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
521 fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
522 else
523 fw = iwl_get_ucode_image(mvm, ucode_type);
524 if (WARN_ON(!fw))
525 return -EINVAL;
526 mvm->cur_ucode = ucode_type;
527 mvm->ucode_loaded = false;
528
529 iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
530 alive_cmd, ARRAY_SIZE(alive_cmd),
531 iwl_alive_fn, &alive_data);
532
533 ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
534 if (ret) {
535 mvm->cur_ucode = old_type;
536 iwl_remove_notification(&mvm->notif_wait, &alive_wait);
537 return ret;
538 }
539
540 /*
541 * Some things may run in the background now, but we
542 * just wait for the ALIVE notification here.
543 */
544 ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
545 MVM_UCODE_ALIVE_TIMEOUT);
546 if (ret) {
547 if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
548 IWL_ERR(mvm,
549 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
550 iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
551 iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
552 mvm->cur_ucode = old_type;
553 return ret;
554 }
555
556 if (!alive_data.valid) {
557 IWL_ERR(mvm, "Loaded ucode is not valid!\n");
558 mvm->cur_ucode = old_type;
559 return -EIO;
560 }
561
562 /*
563 * update the sdio allocation according to the pointer we get in the
564 * alive notification.
565 */
566 st_fwrd_space.addr = mvm->sf_space.addr;
567 st_fwrd_space.size = mvm->sf_space.size;
568 ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
569 if (ret) {
570 IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
571 return ret;
572 }
573
574 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
575
576 /*
577 * configure and operate fw paging mechanism.
578 * driver configures the paging flow only once, CPU2 paging image
579 * included in the IWL_UCODE_INIT image.
580 */
581 if (fw->paging_mem_size) {
582 /*
583 * When dma is not enabled, the driver needs to copy / write
584 * the downloaded / uploaded page to / from the smem.
585 * This gets the location of the place were the pages are
586 * stored.
587 */
588 if (!is_device_dma_capable(mvm->trans->dev)) {
589 ret = iwl_trans_get_paging_item(mvm);
590 if (ret) {
591 IWL_ERR(mvm, "failed to get FW paging item\n");
592 return ret;
593 }
594 }
595
596 ret = iwl_save_fw_paging(mvm, fw);
597 if (ret) {
598 IWL_ERR(mvm, "failed to save the FW paging image\n");
599 return ret;
600 }
601
602 ret = iwl_send_paging_cmd(mvm, fw);
603 if (ret) {
604 IWL_ERR(mvm, "failed to send the paging cmd\n");
605 iwl_free_fw_paging(mvm);
606 return ret;
607 }
608 }
609
610 /*
611 * Note: all the queues are enabled as part of the interface
612 * initialization, but in firmware restart scenarios they
613 * could be stopped, so wake them up. In firmware restart,
614 * mac80211 will have the queues stopped as well until the
615 * reconfiguration completes. During normal startup, they
616 * will be empty.
617 */
618
619 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
620 mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
621
622 for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
623 atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
624
625 mvm->ucode_loaded = true;
626
627 return 0;
628}
629
630static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
631{
632 struct iwl_phy_cfg_cmd phy_cfg_cmd;
633 enum iwl_ucode_type ucode_type = mvm->cur_ucode;
634
635 /* Set parameters */
636 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
637 phy_cfg_cmd.calib_control.event_trigger =
638 mvm->fw->default_calib[ucode_type].event_trigger;
639 phy_cfg_cmd.calib_control.flow_trigger =
640 mvm->fw->default_calib[ucode_type].flow_trigger;
641
642 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
643 phy_cfg_cmd.phy_cfg);
644
645 return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
646 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
647}
648
/*
 * Run the INIT ucode: load it, read the NVM (on first load), send the
 * initial configuration commands and wait for the calibration-complete
 * notification.
 *
 * Return values: 0 on success, a negative errno on failure, and 1 when
 * the flow was aborted because RF-kill is asserted (the init sequence is
 * completed later, when RF-kill is released).
 */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* a calibration run must not already be in flight */
	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	/* a timeout under RF-kill is expected, not an error */
	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		/* single fake channel/rate stored in the trailing space */
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
763
/*
 * Query the firmware's shared memory layout (SHARED_MEM_CFG) and cache
 * the byte-swapped result in mvm->shared_mem_cfg; used later by the
 * firmware-dump code. Errors are only WARNed about - the cached config
 * is simply left as-is in that case.
 */
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	/* convert every field from the FW's little-endian layout */
	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}
807
808int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
809 struct iwl_mvm_dump_desc *desc,
810 struct iwl_fw_dbg_trigger_tlv *trigger)
811{
812 unsigned int delay = 0;
813
814 if (trigger)
815 delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
816
817 if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
818 return -EBUSY;
819
820 if (WARN_ON(mvm->fw_dump_desc))
821 iwl_mvm_free_fw_dump_desc(mvm);
822
823 IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
824 le32_to_cpu(desc->trig_desc.type));
825
826 mvm->fw_dump_desc = desc;
827 mvm->fw_dump_trig = trigger;
828
829 queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
830
831 return 0;
832}
833
834int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
835 const char *str, size_t len,
836 struct iwl_fw_dbg_trigger_tlv *trigger)
837{
838 struct iwl_mvm_dump_desc *desc;
839
840 desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
841 if (!desc)
842 return -ENOMEM;
843
844 desc->len = len;
845 desc->trig_desc.type = cpu_to_le32(trig);
846 memcpy(desc->trig_desc.data, str, len);
847
848 return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
849}
850
/*
 * Fire a firmware debug trigger with an optional printf-style description.
 * Does nothing once the trigger's occurrence budget is exhausted; each
 * successful collection decrements the remaining occurrences.
 */
int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
				struct iwl_fw_dbg_trigger_tlv *trigger,
				const char *fmt, ...)
{
	u16 occurrences = le16_to_cpu(trigger->occurrences);
	int ret, len = 0;
	char buf[64];

	/* budget used up - silently skip */
	if (!occurrences)
		return 0;

	if (fmt) {
		va_list ap;

		/* sentinel: still NUL after vsnprintf => output was truncated */
		buf[sizeof(buf) - 1] = '\0';

		va_start(ap, fmt);
		vsnprintf(buf, sizeof(buf), fmt, ap);
		va_end(ap);

		/* check for truncation */
		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
			buf[sizeof(buf) - 1] = '\0';

		len = strlen(buf) + 1;
	}

	/* when fmt is NULL, len is 0 so buf's contents are never read */
	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
				     trigger);

	if (ret)
		return ret;

	trigger->occurrences = cpu_to_le16(occurrences - 1);
	return 0;
}
887
888static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
889{
890 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
891 iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
892 else
893 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
894}
895
896int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
897{
898 u8 *ptr;
899 int ret;
900 int i;
901
902 if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
903 "Invalid configuration %d\n", conf_id))
904 return -EINVAL;
905
906 /* EARLY START - firmware's configuration is hard coded */
907 if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
908 !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
909 conf_id == FW_DBG_START_FROM_ALIVE) {
910 iwl_mvm_restart_early_start(mvm);
911 return 0;
912 }
913
914 if (!mvm->fw->dbg_conf_tlv[conf_id])
915 return -EINVAL;
916
917 if (mvm->fw_dbg_conf != FW_DBG_INVALID)
918 IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
919 mvm->fw_dbg_conf);
920
921 /* Send all HCMDs for configuring the FW debug */
922 ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
923 for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
924 struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
925
926 ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
927 le16_to_cpu(cmd->len), cmd->data);
928 if (ret)
929 return ret;
930
931 ptr += sizeof(*cmd);
932 ptr += le16_to_cpu(cmd->len);
933 }
934
935 mvm->fw_dbg_conf = conf_id;
936 return ret;
937}
938
939static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
940{
941 struct iwl_ltr_config_cmd cmd = {
942 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
943 };
944
945 if (!mvm->trans->ltr_enabled)
946 return 0;
947
948 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
949 sizeof(cmd), &cmd);
950}
951
/*
 * Full runtime bring-up: run the INIT ucode if it hasn't completed yet,
 * restart the transport, load the regular (RT) ucode, and send the whole
 * initial configuration sequence (antennas, BT, PHY, stations, PHY
 * contexts, power, MCC, scan config...).
 *
 * Returns 0 on success; on error the device is stopped and a negative
 * errno is returned.
 */
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firwmare (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			goto error;
	}

	/* in init-debug mode we deliberately stop after the INIT image */
	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	/* smart-fifo failure is logged but not fatal */
	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* STATION_COUNT is the "invalid" marker: no TDLS peer yet */
	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_trans_stop_device(mvm->trans);
	return ret;
}
1095
/*
 * Load the WoWLAN (D3) firmware image and run the minimal configuration
 * needed for suspend: antennas, PHY db/config, STA mapping and the aux
 * station. Stops the device on any failure.
 */
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_trans_stop_device(mvm->trans);
	return ret;
}
1139
1140void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1141 struct iwl_rx_cmd_buffer *rxb)
1142{
1143 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1144 struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
1145 u32 flags = le32_to_cpu(card_state_notif->flags);
1146
1147 IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
1148 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
1149 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
1150 (flags & CT_KILL_CARD_DISABLED) ?
1151 "Reached" : "Not reached");
1152}
1153
1154void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1155 struct iwl_rx_cmd_buffer *rxb)
1156{
1157 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1158 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1159
1160 IWL_DEBUG_INFO(mvm,
1161 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1162 le32_to_cpu(mfuart_notif->installed_ver),
1163 le32_to_cpu(mfuart_notif->external_ver),
1164 le32_to_cpu(mfuart_notif->status),
1165 le32_to_cpu(mfuart_notif->duration));
1166}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
new file mode 100644
index 000000000000..e3b3cf4dbd77
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -0,0 +1,136 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/leds.h>
65#include "iwl-io.h"
66#include "iwl-csr.h"
67#include "mvm.h"
68
/* Turn the LED on by writing the turn-on value to the CSR LED register */
static void iwl_mvm_led_enable(struct iwl_mvm *mvm)
{
	iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
}
74
/* Turn the LED off by writing the turn-off value to the CSR LED register */
static void iwl_mvm_led_disable(struct iwl_mvm *mvm)
{
	iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);
}
80
81static void iwl_led_brightness_set(struct led_classdev *led_cdev,
82 enum led_brightness brightness)
83{
84 struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
85 if (brightness > 0)
86 iwl_mvm_led_enable(mvm);
87 else
88 iwl_mvm_led_disable(mvm);
89}
90
91int iwl_mvm_leds_init(struct iwl_mvm *mvm)
92{
93 int mode = iwlwifi_mod_params.led_mode;
94 int ret;
95
96 switch (mode) {
97 case IWL_LED_BLINK:
98 IWL_ERR(mvm, "Blink led mode not supported, used default\n");
99 case IWL_LED_DEFAULT:
100 case IWL_LED_RF_STATE:
101 mode = IWL_LED_RF_STATE;
102 break;
103 case IWL_LED_DISABLE:
104 IWL_INFO(mvm, "Led disabled\n");
105 return 0;
106 default:
107 return -EINVAL;
108 }
109
110 mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
111 wiphy_name(mvm->hw->wiphy));
112 mvm->led.brightness_set = iwl_led_brightness_set;
113 mvm->led.max_brightness = 1;
114
115 if (mode == IWL_LED_RF_STATE)
116 mvm->led.default_trigger =
117 ieee80211_get_radio_led_name(mvm->hw);
118
119 ret = led_classdev_register(mvm->trans->dev, &mvm->led);
120 if (ret) {
121 kfree(mvm->led.name);
122 IWL_INFO(mvm, "Failed to enable led\n");
123 return ret;
124 }
125
126 return 0;
127}
128
129void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
130{
131 if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE)
132 return;
133
134 led_classdev_unregister(&mvm->led);
135 kfree(mvm->led.name);
136}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
new file mode 100644
index 000000000000..ad7ad720d2e7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -0,0 +1,1452 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#include <linux/etherdevice.h>
69#include <net/mac80211.h>
70#include "iwl-io.h"
71#include "iwl-prph.h"
72#include "fw-api.h"
73#include "mvm.h"
74#include "time-event.h"
75
/* Map each mac80211 access category index (VO, VI, BE, BK) to its TX FIFO */
const u8 iwl_mvm_ac_to_tx_fifo[] = {
	IWL_MVM_TX_FIFO_VO,
	IWL_MVM_TX_FIFO_VI,
	IWL_MVM_TX_FIFO_BE,
	IWL_MVM_TX_FIFO_BK,
};
82
/*
 * State carried through the interface iterators while assigning a MAC ID
 * and a TSF ID to a new virtual interface. Lives on the caller's stack,
 * so no locking is needed for the bitmaps.
 */
struct iwl_mvm_mac_iface_iterator_data {
	struct iwl_mvm *mvm;
	/* the vif being added/recalculated; skipped by the iterators */
	struct ieee80211_vif *vif;
	/* bitmaps of IDs still free; bits are cleared as IDs are found in use */
	unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
	unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
	/* TSF to share with an existing interface, or NUM_TSF_IDS if none */
	enum iwl_tsf_id preferred_tsf;
	/* set when the iterator encounters data->vif itself (resume/restart) */
	bool found_vif;
};
91
/*
 * State for the HW-queue usage iterators: accumulates the bitmask of
 * hardware queues in use, excluding those of exclude_vif.
 */
struct iwl_mvm_hw_queues_iface_iterator_data {
	struct ieee80211_vif *exclude_vif;
	unsigned long used_hw_queues;
};
96
/*
 * Interface iterator that decides, for each existing interface, whether
 * the new interface (data->vif) can share its TSF ID. On a compatible
 * AP/client beacon-interval relationship it records the TSF as preferred;
 * otherwise it marks that TSF as unavailable for the new interface.
 */
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct iwl_mvm_mac_iface_iterator_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 min_bi;

	/* Skip the interface for which we are trying to assign a tsf_id */
	if (vif == data->vif)
		return;

	/*
	 * The TSF is a hardware/firmware resource, there are 4 and
	 * the driver should assign and free them as needed. However,
	 * there are cases where 2 MACs should share the same TSF ID
	 * for the purpose of clock sync, an optimization to avoid
	 * clock drift causing overlapping TBTTs/DTIMs for a GO and
	 * client in the system.
	 *
	 * The firmware will decide according to the MAC type which
	 * will be the master and slave. Clients that need to sync
	 * with a remote station will be the master, and an AP or GO
	 * will be the slave.
	 *
	 * Depending on the new interface type it can be slaved to
	 * or become the master of an existing interface.
	 */
	switch (data->vif->type) {
	case NL80211_IFTYPE_STATION:
		/*
		 * The new interface is a client, so if the one we're iterating
		 * is an AP, and the beacon interval of the AP is a multiple or
		 * divisor of the beacon interval of the client, the same TSF
		 * should be used to avoid drift between the new client and
		 * existing AP. The existing AP will get drift updates from the
		 * new client context in this case.
		 */
		if (vif->type != NL80211_IFTYPE_AP ||
		    data->preferred_tsf != NUM_TSF_IDS ||
		    !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
			break;

		min_bi = min(data->vif->bss_conf.beacon_int,
			     vif->bss_conf.beacon_int);

		/* a zero beacon interval can't establish compatibility */
		if (!min_bi)
			break;

		if ((data->vif->bss_conf.beacon_int -
		     vif->bss_conf.beacon_int) % min_bi == 0) {
			data->preferred_tsf = mvmvif->tsf_id;
			return;
		}
		break;

	case NL80211_IFTYPE_AP:
		/*
		 * The new interface is AP/GO, so if its beacon interval is a
		 * multiple or a divisor of the beacon interval of an existing
		 * interface, it should get drift updates from an existing
		 * client or use the same TSF as an existing GO. There's no
		 * drift between TSFs internally but if they used different
		 * TSFs then a new client MAC could update one of them and
		 * cause drift that way.
		 */
		if ((vif->type != NL80211_IFTYPE_AP &&
		     vif->type != NL80211_IFTYPE_STATION) ||
		    data->preferred_tsf != NUM_TSF_IDS ||
		    !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
			break;

		min_bi = min(data->vif->bss_conf.beacon_int,
			     vif->bss_conf.beacon_int);

		/* a zero beacon interval can't establish compatibility */
		if (!min_bi)
			break;

		if ((data->vif->bss_conf.beacon_int -
		     vif->bss_conf.beacon_int) % min_bi == 0) {
			data->preferred_tsf = mvmvif->tsf_id;
			return;
		}
		break;
	default:
		/*
		 * For all other interface types there's no need to
		 * take drift into account. Either they're exclusive
		 * like IBSS and monitor, or we don't care much about
		 * their TSF (like P2P Device), but we won't be able
		 * to share the TSF resource.
		 */
		break;
	}

	/*
	 * Unless we exited above, we can't share the TSF resource
	 * that the virtual interface we're iterating over is using
	 * with the new one, so clear the available bit and if this
	 * was the preferred one, reset that as well.
	 */
	__clear_bit(mvmvif->tsf_id, data->available_tsf_ids);

	if (data->preferred_tsf == mvmvif->tsf_id)
		data->preferred_tsf = NUM_TSF_IDS;
}
202
203/*
204 * Get the mask of the queues used by the vif
205 */
206u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
207{
208 u32 qmask = 0, ac;
209
210 if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
211 return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
212
213 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
214 if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
215 qmask |= BIT(vif->hw_queue[ac]);
216 }
217
218 if (vif->type == NL80211_IFTYPE_AP)
219 qmask |= BIT(vif->cab_queue);
220
221 return qmask;
222}
223
224static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
225 struct ieee80211_vif *vif)
226{
227 struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
228
229 /* exclude the given vif */
230 if (vif == data->exclude_vif)
231 return;
232
233 data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
234}
235
/*
 * Station iterator: accumulate each station's TFD queue mask into
 * data->used_hw_queues (used to avoid clashing with TDLS stations).
 */
static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
					   struct ieee80211_sta *sta)
{
	struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

	/* Mark the queues used by the sta */
	data->used_hw_queues |= mvmsta->tfd_queue_msk;
}
245
/*
 * Compute the bitmask of all hardware queues currently in use, except
 * those belonging to exclude_vif. Always includes the off-channel, aux
 * and command queues, plus the queues of all other interfaces, all
 * stations, and stations still being drained. Must hold mvm->mutex.
 */
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
					 struct ieee80211_vif *exclude_vif)
{
	u8 sta_id;
	struct iwl_mvm_hw_queues_iface_iterator_data data = {
		.exclude_vif = exclude_vif,
		/* these queues are always reserved */
		.used_hw_queues =
			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
			BIT(mvm->aux_queue) |
			BIT(IWL_MVM_CMD_QUEUE),
	};

	lockdep_assert_held(&mvm->mutex);

	/* mark all VIF used hw queues */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_iface_hw_queues_iter, &data);

	/* don't assign the same hw queues as TDLS stations */
	ieee80211_iterate_stations_atomic(mvm->hw,
					  iwl_mvm_mac_sta_hw_queues_iter,
					  &data);

	/*
	 * Some TDLS stations may be removed but are in the process of being
	 * drained. Don't touch their queues.
	 */
	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT)
		data.used_hw_queues |= mvm->tfd_drained[sta_id];

	return data.used_hw_queues;
}
279
/*
 * Interface iterator for MAC context allocation: marks the iterated
 * interface's MAC ID as used and delegates TSF-ID compatibility checking
 * to iwl_mvm_mac_tsf_id_iter(). Records if the vif being added is found.
 */
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_mac_iface_iterator_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* Iterator may already find the interface being added -- skip it */
	if (vif == data->vif) {
		data->found_vif = true;
		return;
	}

	/* Mark MAC IDs as used by clearing the available bit, and
	 * (below) mark TSFs as used if their existing use is not
	 * compatible with the new interface type.
	 * No locking or atomic bit operations are needed since the
	 * data is on the stack of the caller function.
	 */
	__clear_bit(mvmvif->id, data->available_mac_ids);

	/* find a suitable tsf_id */
	iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
}
303
/*
 * Re-run the TSF-ID selection for an existing interface (e.g. after its
 * beacon interval changed): prefer a shared TSF if one is compatible,
 * otherwise keep the current TSF if still available, else pick the first
 * free one.
 */
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_mac_iface_iterator_data data = {
		.mvm = mvm,
		.vif = vif,
		/* start with all TSF IDs available */
		.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
		/* no preference yet */
		.preferred_tsf = NUM_TSF_IDS,
	};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_mac_tsf_id_iter, &data);

	if (data.preferred_tsf != NUM_TSF_IDS)
		mvmvif->tsf_id = data.preferred_tsf;
	else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
		mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
						NUM_TSF_IDS);
}
326
/*
 * Allocate the firmware resources for a new virtual interface: a MAC ID,
 * a TSF ID, per-AC hardware queues and (for AP) a CAB queue. On resume or
 * HW restart, IDs were preserved and allocation is skipped. On failure,
 * the mvmvif and queue assignments are reset. Returns 0 or negative errno.
 */
static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_mac_iface_iterator_data data = {
		.mvm = mvm,
		.vif = vif,
		/* start with all MAC and TSF IDs available */
		.available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
		.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
		/* no preference yet */
		.preferred_tsf = NUM_TSF_IDS,
		.found_vif = false,
	};
	u32 ac;
	int ret, i;
	unsigned long used_hw_queues;

	/*
	 * Allocate a MAC ID and a TSF for this MAC, along with the queues
	 * and other resources.
	 */

	/*
	 * Before the iterator, we start with all MAC IDs and TSFs available.
	 *
	 * During iteration, all MAC IDs are cleared that are in use by other
	 * virtual interfaces, and all TSF IDs are cleared that can't be used
	 * by this new virtual interface because they're used by an interface
	 * that can't share it with the new one.
	 * At the same time, we check if there's a preferred TSF in the case
	 * that we should share it with another interface.
	 */

	/* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
	switch (vif->type) {
	case NL80211_IFTYPE_ADHOC:
		break;
	case NL80211_IFTYPE_STATION:
		if (!vif->p2p)
			break;
		/* fall through */
	default:
		__clear_bit(0, data.available_mac_ids);
	}

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_mac_iface_iterator, &data);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);

	/*
	 * In the case we're getting here during resume, it's similar to
	 * firmware restart, and with RESUME_ALL the iterator will find
	 * the vif being added already.
	 * We don't want to reassign any IDs in either case since doing
	 * so would probably assign different IDs (as interfaces aren't
	 * necessarily added in the same order), but the old IDs were
	 * preserved anyway, so skip ID assignment for both resume and
	 * recovery.
	 */
	if (data.found_vif)
		return 0;

	/* Therefore, in recovery, we can't get here */
	if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
		return -EBUSY;

	mvmvif->id = find_first_bit(data.available_mac_ids,
				    NUM_MAC_INDEX_DRIVER);
	if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
		IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
		ret = -EIO;
		goto exit_fail;
	}

	if (data.preferred_tsf != NUM_TSF_IDS)
		mvmvif->tsf_id = data.preferred_tsf;
	else
		mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
						NUM_TSF_IDS);
	if (mvmvif->tsf_id == NUM_TSF_IDS) {
		IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
		ret = -EIO;
		goto exit_fail;
	}

	mvmvif->color = 0;

	INIT_LIST_HEAD(&mvmvif->time_event_data.list);
	mvmvif->time_event_data.id = TE_MAX;

	/* No need to allocate data queues to P2P Device MAC.*/
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;

		return 0;
	}

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate queue\n");
			ret = -EIO;
			goto exit_fail;
		}

		__set_bit(queue, &used_hw_queues);
		vif->hw_queue[ac] = queue;
	}

	/* Allocate the CAB queue for softAP and GO interfaces */
	if (vif->type == NL80211_IFTYPE_AP) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate cab queue\n");
			ret = -EIO;
			goto exit_fail;
		}

		vif->cab_queue = queue;
	} else {
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
	}

	/* no broadcast or AP station assigned yet */
	mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
		mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;

	return 0;

exit_fail:
	/* reset everything assigned so far so the vif is clean for retry */
	memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
	memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
	vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
	return ret;
}
472
/*
 * Initialize the MAC context for a vif: allocate MAC/TSF IDs and HW
 * queues, then enable the TX queues appropriate for the interface type
 * (off-channel queue for P2P Device, CAB + per-AC queues for AP, per-AC
 * queues otherwise). Must hold mvm->mutex. Returns 0 or negative errno.
 */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	u32 ac;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif);
	if (ret)
		return ret;

	switch (vif->type) {
	case NL80211_IFTYPE_P2P_DEVICE:
		iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
				      IWL_MVM_OFFCHANNEL_QUEUE,
				      IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
		break;
	case NL80211_IFTYPE_AP:
		iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
		/* fall through */
	default:
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
					      vif->hw_queue[ac],
					      iwl_mvm_ac_to_tx_fifo[ac], 0,
					      wdg_timeout);
		break;
	}

	return 0;
}
507
/*
 * Release the MAC context resources of a vif: disable the TX queues that
 * iwl_mvm_mac_ctxt_init() enabled, mirroring its per-type logic.
 * Must hold mvm->mutex.
 */
void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ac;

	lockdep_assert_held(&mvm->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_P2P_DEVICE:
		iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
				    IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
				    0);
		break;
	case NL80211_IFTYPE_AP:
		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
				    IWL_MAX_TID_COUNT, 0);
		/* fall through */
	default:
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
					    vif->hw_queue[ac],
					    IWL_MAX_TID_COUNT, 0);
	}
}
531
/*
 * Build the CCK and OFDM ACK-rate bitmaps for the given band from the
 * vif's basic rates, then extend them with the mandatory lower rates
 * required by the 802.11 control-response rules (see the comment below).
 * Results are returned through *cck_rates and *ofdm_rates.
 */
static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      enum ieee80211_band band,
			      u8 *cck_rates, u8 *ofdm_rates)
{
	struct ieee80211_supported_band *sband;
	unsigned long basic = vif->bss_conf.basic_rates;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	u8 cck = 0;
	u8 ofdm = 0;
	int i;

	sband = mvm->hw->wiphy->bands[band];

	/* split the basic rates into CCK and OFDM bitmaps, tracking the
	 * lowest present rate of each modulation class
	 */
	for_each_set_bit(i, &basic, BITS_PER_LONG) {
		int hw = sband->bitrates[i].hw_value;
		if (hw >= IWL_FIRST_OFDM_RATE) {
			ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
			if (lowest_present_ofdm > hw)
				lowest_present_ofdm = hw;
		} else {
			BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

			cck |= BIT(hw);
			if (lowest_present_cck > hw)
				lowest_present_cck = hw;
		}
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
	if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWL_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
	if (IWL_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
	if (IWL_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
617
/*
 * Translate the HT operation-mode protection bits from the bss_conf into
 * the firmware's MAC-context protection flags.
 */
static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_mac_ctx_cmd *cmd)
{
	/* for both sta and ap, ht_operation_mode hold the protection_mode */
	u8 protection_mode = vif->bss_conf.ht_operation_mode &
				 IEEE80211_HT_OP_MODE_PROTECTION;
	/* The fw does not distinguish between ht and fat */
	u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;

	IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
	/*
	 * See section 9.23.3.1 of IEEE 80211-2012.
	 * Nongreenfield HT STAs Present is not supported.
	 */
	switch (protection_mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		cmd->protection_flags |= cpu_to_le32(ht_flag);
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		/* Protect when channel wider than 20MHz */
		if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
			cmd->protection_flags |= cpu_to_le32(ht_flag);
		break;
	default:
		IWL_ERR(mvm, "Illegal protection mode %d\n",
			protection_mode);
		break;
	}
}
651
/*
 * Fill the fields of a MAC context command that are common to all MAC
 * types: id/color, action, MAC type, addresses, ACK rates, preamble/slot
 * flags, per-AC EDCA parameters, protection and filter flags.
 * @bssid_override: BSSID to use instead of vif->bss_conf.bssid, or NULL.
 */
static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_mac_ctx_cmd *cmd,
					const u8 *bssid_override,
					u32 action)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_chanctx_conf *chanctx;
	bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
			     IEEE80211_HT_OP_MODE_PROTECTION);
	u8 cck_ack_rates, ofdm_ack_rates;
	const u8 *bssid = bssid_override ?: vif->bss_conf.bssid;
	int i;

	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							    mvmvif->color));
	cmd->action = cpu_to_le32(action);

	/* map the interface type to the firmware MAC type */
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (vif->p2p)
			cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
		else
			cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
		break;
	case NL80211_IFTYPE_AP:
		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
		break;
	case NL80211_IFTYPE_MONITOR:
		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
		break;
	case NL80211_IFTYPE_ADHOC:
		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);

	memcpy(cmd->node_addr, vif->addr, ETH_ALEN);

	if (bssid)
		memcpy(cmd->bssid_addr, bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->bssid_addr);

	/* compute ACK rates from the channel context's band; default to
	 * 2.4 GHz when no channel context is assigned yet
	 */
	rcu_read_lock();
	chanctx = rcu_dereference(vif->chanctx_conf);
	iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
					    : IEEE80211_BAND_2GHZ,
			  &cck_ack_rates, &ofdm_ack_rates);
	rcu_read_unlock();

	cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates);
	cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);

	cmd->cck_short_preamble =
		cpu_to_le32(vif->bss_conf.use_short_preamble ?
			    MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot =
		cpu_to_le32(vif->bss_conf.use_short_slot ?
			    MAC_FLG_SHORT_SLOT : 0);

	/* copy the per-AC EDCA parameters, indexed by TX FIFO
	 * (txop is converted from TU units of 32us to usecs)
	 */
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u8 txf = iwl_mvm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min =
			cpu_to_le16(mvmvif->queue_params[i].cw_min);
		cmd->ac[txf].cw_max =
			cpu_to_le16(mvmvif->queue_params[i].cw_max);
		cmd->ac[txf].edca_txop =
			cpu_to_le16(mvmvif->queue_params[i].txop * 32);
		cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
		cmd->ac[txf].fifos_mask = BIT(txf);
	}

	/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
	if (vif->type == NL80211_IFTYPE_AP)
		cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
			BIT(IWL_MVM_TX_FIFO_MCAST);

	if (vif->bss_conf.qos)
		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);

	if (vif->bss_conf.use_cts_prot)
		cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);

	IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
		       vif->bss_conf.use_cts_prot,
		       vif->bss_conf.ht_operation_mode);
	if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
	if (ht_enabled)
		iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);

	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
}
753
754static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
755 struct iwl_mac_ctx_cmd *cmd)
756{
757 int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
758 sizeof(*cmd), cmd);
759 if (ret)
760 IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
761 le32_to_cpu(cmd->action), ret);
762 return ret;
763}
764
/*
 * Build and send a MAC context command for a station (or P2P client)
 * interface. Fills the association state (including the DTIM TBTT
 * computed from the sync timestamps), beacon/DTIM intervals and their
 * reciprocals, listen interval and AID.
 * @force_assoc_off: report the MAC as not associated even if it is.
 * @bssid_override: BSSID to use instead of bss_conf.bssid, or NULL.
 */
static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    u32 action, bool force_assoc_off,
				    const u8 *bssid_override)
{
	struct iwl_mac_ctx_cmd cmd = {};
	struct iwl_mac_data_sta *ctxt_sta;

	WARN_ON(vif->type != NL80211_IFTYPE_STATION);

	/* Fill the common data for all mac context types */
	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);

	if (vif->p2p) {
		struct ieee80211_p2p_noa_attr *noa =
			&vif->bss_conf.p2p_noa_attr;

		cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
					IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
		ctxt_sta = &cmd.p2p_sta.sta;
	} else {
		ctxt_sta = &cmd.sta;
	}

	/* We need the dtim_period to set the MAC as associated */
	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
	    !force_assoc_off) {
		u32 dtim_offs;

		/*
		 * The DTIM count counts down, so when it is N that means N
		 * more beacon intervals happen until the DTIM TBTT. Therefore
		 * add this to the current time. If that ends up being in the
		 * future, the firmware will handle it.
		 *
		 * Also note that the system_timestamp (which we get here as
		 * "sync_device_ts") and TSF timestamp aren't at exactly the
		 * same offset in the frame -- the TSF is at the first symbol
		 * of the TSF, the system timestamp is at signal acquisition
		 * time. This means there's an offset between them of at most
		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
		 * 384us in the longest case), this is currently not relevant
		 * as the firmware wakes up around 2ms before the TBTT.
		 */
		dtim_offs = vif->bss_conf.sync_dtim_count *
				vif->bss_conf.beacon_int;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		ctxt_sta->dtim_tsf =
			cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
		ctxt_sta->dtim_time =
			cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);

		IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
			       le64_to_cpu(ctxt_sta->dtim_tsf),
			       le32_to_cpu(ctxt_sta->dtim_time),
			       dtim_offs);

		ctxt_sta->is_assoc = cpu_to_le32(1);
	} else {
		ctxt_sta->is_assoc = cpu_to_le32(0);

		/* Allow beacons to pass through as long as we are not
		 * associated, or we do not have dtim period information.
		 */
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
	}

	ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
	ctxt_sta->bi_reciprocal =
		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
	ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
					      vif->bss_conf.dtim_period);
	ctxt_sta->dtim_reciprocal =
		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
					       vif->bss_conf.dtim_period));

	ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
	ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);

	/* pass probe requests up for associated P2P clients that asked */
	if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
851
/*
 * Build and send the MAC context for a monitor (sniffer) interface:
 * open the RX filters wide and have frames delivered with their CRC.
 */
static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 u32 action)
{
	struct iwl_mac_ctx_cmd cmd = {};

	WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
				       MAC_FILTER_IN_CONTROL_AND_MGMT |
				       MAC_FILTER_IN_BEACON |
				       MAC_FILTER_IN_PROBE_REQUEST |
				       MAC_FILTER_IN_CRC32);
	/* frames now include the FCS; cleared again on MAC removal */
	ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
871
/* Build and send the MAC context for an IBSS (ad-hoc) interface. */
static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     u32 action)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mac_ctx_cmd cmd = {};

	WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	/* IBSS needs to see peers' beacons and probe requests */
	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
				       MAC_FILTER_IN_PROBE_REQUEST);

	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
	cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
	cmd.ibss.bi_reciprocal =
		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));

	/* TODO: Assumes that the beacon id == mac context id */
	cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
896
/* Result of iwl_mvm_go_iterator: set when any P2P GO vif is active */
struct iwl_mvm_go_iterator_data {
	bool go_active;
};
900
901static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
902{
903 struct iwl_mvm_go_iterator_data *data = _data;
904 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
905
906 if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
907 mvmvif->ap_ibss_active)
908 data->go_active = true;
909}
910
/*
 * Build and send the MAC context for the P2P Device interface. Only
 * probe requests are passed up; extended discoverability is enabled
 * when another P2P GO is active (see comment below).
 */
static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif,
					   u32 action)
{
	struct iwl_mac_ctx_cmd cmd = {};
	struct iwl_mvm_go_iterator_data data = {};

	WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);

	/* Override the filter flags to accept only probe requests */
	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);

	/*
	 * This flag should be set to true when the P2P Device is
	 * discoverable and there is at least another active P2P GO. Settings
	 * this flag will allow the P2P Device to be discoverable on other
	 * channels in addition to its listen channel.
	 * Note that this flag should not be set in other cases as it opens the
	 * Rx filters on all MAC and increases the number of interrupts.
	 */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_go_iterator, &data);

	cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
942
/*
 * Locate the TIM element in @beacon (of @frame_size bytes) and record
 * its offset and length in @beacon_cmd so the firmware can update the
 * TIM on each transmitted beacon. Warns if no TIM element is found.
 */
static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
				     struct iwl_mac_beacon_cmd *beacon_cmd,
				     u8 *beacon, u32 frame_size)
{
	u32 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/* The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon. */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;	/* skip EID + len + body */

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
		beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
	} else {
		IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
	}
}
967
/*
 * Send a beacon template to the firmware (asynchronously). The beacon
 * frame itself travels as a second, duplicated fragment of the host
 * command. TX antenna is round-robined; the rate is the lowest OFDM
 * rate on 5 GHz or for P2P, otherwise the lowest CCK rate.
 */
static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct sk_buff *beacon)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_host_cmd cmd = {
		.id = BEACON_TEMPLATE_CMD,
		.flags = CMD_ASYNC,
	};
	struct iwl_mac_beacon_cmd beacon_cmd = {};
	struct ieee80211_tx_info *info;
	u32 beacon_skb_len;
	u32 rate, tx_flags;

	if (WARN_ON(!beacon))
		return -EINVAL;

	beacon_skb_len = beacon->len;

	/* TODO: for now the beacon template id is set to be the mac context id.
	 * Might be better to handle it as another resource ... */
	beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
	info = IEEE80211_SKB_CB(beacon);

	/* Set up TX command fields */
	beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len);
	beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id;
	beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
	/* BT coexistence may bump the TX priority of this frame */
	tx_flags |=
		iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
						TX_CMD_FLG_BT_PRIO_POS;
	beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags);

	/* rotate through the valid TX antennas */
	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->mgmt_last_antenna_idx);

	beacon_cmd.tx.rate_n_flags =
		cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
			    RATE_MCS_ANT_POS);

	if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
		rate = IWL_FIRST_OFDM_RATE;
	} else {
		rate = IWL_FIRST_CCK_RATE;
		beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
	}
	beacon_cmd.tx.rate_n_flags |=
		cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));

	/* Set up TX beacon command fields */
	if (vif->type == NL80211_IFTYPE_AP)
		iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
					 beacon->data,
					 beacon_skb_len);

	/* Submit command: fragment 0 is the command, 1 the beacon frame */
	cmd.len[0] = sizeof(beacon_cmd);
	cmd.data[0] = &beacon_cmd;
	cmd.dataflags[0] = 0;
	cmd.len[1] = beacon_skb_len;
	cmd.data[1] = beacon->data;
	cmd.dataflags[1] = IWL_HCMD_DFL_DUP;

	return iwl_mvm_send_cmd(mvm, &cmd);
}
1035
1036/* The beacon template for the AP/GO/IBSS has changed and needs update */
1037int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
1038 struct ieee80211_vif *vif)
1039{
1040 struct sk_buff *beacon;
1041 int ret;
1042
1043 WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1044 vif->type != NL80211_IFTYPE_ADHOC);
1045
1046 beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
1047 if (!beacon)
1048 return -ENOMEM;
1049
1050 ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
1051 dev_kfree_skb(beacon);
1052 return ret;
1053}
1054
/*
 * Data for iwl_mvm_mac_ap_iterator: receives the sync timestamp and
 * beacon interval of an associated station interface, if one exists.
 */
struct iwl_mvm_mac_ap_iterator_data {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *vif;
	u32 beacon_device_ts;
	u16 beacon_int;
};
1061
1062/* Find the beacon_device_ts and beacon_int for a managed interface */
1063static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
1064 struct ieee80211_vif *vif)
1065{
1066 struct iwl_mvm_mac_ap_iterator_data *data = _data;
1067
1068 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
1069 return;
1070
1071 /* Station client has higher priority over P2P client*/
1072 if (vif->p2p && data->beacon_device_ts)
1073 return;
1074
1075 data->beacon_device_ts = vif->bss_conf.sync_device_ts;
1076 data->beacon_int = vif->bss_conf.beacon_int;
1077}
1078
/*
 * Fill the specific data for a mac context of type AP or P2P GO.
 * On ADD, an initial beacon time is chosen: offset from an associated
 * client's TBTT if one exists (36-63% of its beacon interval, chosen
 * randomly), otherwise the current device system time.
 */
static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 struct iwl_mac_data_ap *ctxt_ap,
					 bool add)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_mac_ap_iterator_data data = {
		.mvm = mvm,
		.vif = vif,
		.beacon_device_ts = 0
	};

	/* Beacon/DTIM timing plus firmware-friendly reciprocals */
	ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
	ctxt_ap->bi_reciprocal =
		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
	ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
					     vif->bss_conf.dtim_period);
	ctxt_ap->dtim_reciprocal =
		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
					       vif->bss_conf.dtim_period));

	ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);

	/*
	 * Only set the beacon time when the MAC is being added, when we
	 * just modify the MAC then we should keep the time -- the firmware
	 * can otherwise have a "jumping" TBTT.
	 */
	if (add) {
		/*
		 * If there is a station/P2P client interface which is
		 * associated, set the AP's TBTT far enough from the station's
		 * TBTT. Otherwise, set it to the current system time
		 */
		ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
			iwl_mvm_mac_ap_iterator, &data);

		if (data.beacon_device_ts) {
			/* random offset of 36-63% of the beacon interval */
			u32 rand = (prandom_u32() % (64 - 36)) + 36;
			mvmvif->ap_beacon_time = data.beacon_device_ts +
				ieee80211_tu_to_usec(data.beacon_int * rand /
						     100);
		} else {
			mvmvif->ap_beacon_time =
				iwl_read_prph(mvm->trans,
					      DEVICE_SYSTEM_TIME_REG);
		}
	}

	ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
	ctxt_ap->beacon_tsf = 0; /* unused */

	/* TODO: Assume that the beacon id == mac context id */
	ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
}
1138
/* Build and send the MAC context for a non-P2P AP interface. */
static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   u32 action)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mac_ctx_cmd cmd = {};

	WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);

	/* Fill the common data for all mac context types */
	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	/*
	 * Pass probe requests and beacons from other APs (needed
	 * for HT protection); when there are no associated stations,
	 * don't ask FW to pass beacons to prevent unnecessary wake-ups.
	 */
	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
	if (mvmvif->ap_assoc_sta_count) {
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
		IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
	} else {
		IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
	}

	/* Fill the data specific for ap mode */
	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
				     action == FW_CTXT_ACTION_ADD);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
1170
/* Build and send the MAC context for a P2P GO interface. */
static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   u32 action)
{
	struct iwl_mac_ctx_cmd cmd = {};
	struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;

	WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);

	/* Fill the common data for all mac context types */
	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	/*
	 * pass probe requests and beacons from other APs (needed
	 * for ht protection)
	 */
	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
					MAC_FILTER_IN_BEACON);

	/* Fill the data specific for GO mode */
	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
				     action == FW_CTXT_ACTION_ADD);

	/* opportunistic PS / CT window settings come from the NoA attribute */
	cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
					IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
	cmd.go.opp_ps_enabled =
			cpu_to_le32(!!(noa->oppps_ctwindow &
					IEEE80211_P2P_OPPPS_ENABLE_BIT));

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
1202
1203static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1204 u32 action, bool force_assoc_off,
1205 const u8 *bssid_override)
1206{
1207 switch (vif->type) {
1208 case NL80211_IFTYPE_STATION:
1209 return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
1210 force_assoc_off,
1211 bssid_override);
1212 break;
1213 case NL80211_IFTYPE_AP:
1214 if (!vif->p2p)
1215 return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
1216 else
1217 return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
1218 break;
1219 case NL80211_IFTYPE_MONITOR:
1220 return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
1221 case NL80211_IFTYPE_P2P_DEVICE:
1222 return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
1223 case NL80211_IFTYPE_ADHOC:
1224 return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
1225 default:
1226 break;
1227 }
1228
1229 return -EOPNOTSUPP;
1230}
1231
/*
 * Upload a new MAC context to the firmware for @vif.
 * Returns -EIO if the MAC was already uploaded, otherwise the result
 * of sending the context command.
 */
int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	/* force_assoc_off is true on ADD: a fresh MAC starts unassociated */
	ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
				   true, NULL);
	if (ret)
		return ret;

	/* will only do anything at resume from D3 time */
	iwl_mvm_set_last_nonqos_seq(mvm, vif);

	mvmvif->uploaded = true;
	return 0;
}
1252
1253int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1254 bool force_assoc_off, const u8 *bssid_override)
1255{
1256 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1257
1258 if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
1259 vif->addr, ieee80211_vif_type_p2p(vif)))
1260 return -EIO;
1261
1262 return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
1263 force_assoc_off, bssid_override);
1264}
1265
1266int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1267{
1268 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1269 struct iwl_mac_ctx_cmd cmd;
1270 int ret;
1271
1272 if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
1273 vif->addr, ieee80211_vif_type_p2p(vif)))
1274 return -EIO;
1275
1276 memset(&cmd, 0, sizeof(cmd));
1277
1278 cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
1279 mvmvif->color));
1280 cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
1281
1282 ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
1283 sizeof(cmd), &cmd);
1284 if (ret) {
1285 IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
1286 return ret;
1287 }
1288
1289 mvmvif->uploaded = false;
1290
1291 if (vif->type == NL80211_IFTYPE_MONITOR)
1292 __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
1293
1294 return 0;
1295}
1296
/*
 * Advance the AP/GO channel-switch countdown on a transmitted beacon.
 * While the countdown runs, the updated beacon is resent; for a P2P GO
 * a NoA period is scheduled around the switch time. Once mac80211
 * reports the countdown complete, the switch is finalized.
 */
static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
				   struct ieee80211_vif *csa_vif, u32 gp2,
				   bool tx_success)
{
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(csa_vif);

	/* Don't start to countdown from a failed beacon */
	if (!tx_success && !mvmvif->csa_countdown)
		return;

	mvmvif->csa_countdown = true;

	if (!ieee80211_csa_is_complete(csa_vif)) {
		int c = ieee80211_csa_update_counter(csa_vif);

		iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
		if (csa_vif->p2p &&
		    !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
		    tx_success) {
			/* switch time in usecs relative to gp2 (TU * 1024) */
			u32 rel_time = (c + 1) *
				       csa_vif->bss_conf.beacon_int -
				       IWL_MVM_CHANNEL_SWITCH_TIME_GO;
			u32 apply_time = gp2 + rel_time * 1024;

			iwl_mvm_schedule_csa_period(mvm, csa_vif,
					 IWL_MVM_CHANNEL_SWITCH_TIME_GO -
					 IWL_MVM_CHANNEL_SWITCH_MARGIN,
					 apply_time);
		}
	} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
		/* we don't have CSA NoA scheduled yet, switch now */
		ieee80211_csa_finish(csa_vif);
		RCU_INIT_POINTER(mvm->csa_vif, NULL);
	}
}
1333
/*
 * Handle the firmware's beacon TX notification: record the GP2
 * timestamp of the last AP beacon, drive a pending CSA countdown,
 * and count down the TX-block timeout of a station vif whose TX was
 * blocked during its AP's channel switch.
 */
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
	struct iwl_mvm_tx_resp *beacon_notify_hdr;
	struct ieee80211_vif *csa_vif;
	struct ieee80211_vif *tx_blocked_vif;
	u16 status;

	lockdep_assert_held(&mvm->mutex);

	beacon_notify_hdr = &beacon->beacon_notify_hdr;
	mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);

	status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
	IWL_DEBUG_RX(mvm,
		     "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
		     status, beacon_notify_hdr->failure_frame,
		     le64_to_cpu(beacon->tsf),
		     mvm->ap_last_beacon_gp2,
		     le32_to_cpu(beacon_notify_hdr->initial_rate));

	/* mvm->mutex protects these pointers, hence the _protected deref */
	csa_vif = rcu_dereference_protected(mvm->csa_vif,
					    lockdep_is_held(&mvm->mutex));
	if (unlikely(csa_vif && csa_vif->csa_active))
		iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
				       (status == TX_STATUS_SUCCESS));

	tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
						   lockdep_is_held(&mvm->mutex));
	if (unlikely(tx_blocked_vif)) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(tx_blocked_vif);

		/*
		 * The channel switch is started and we have blocked the
		 * stations. If this is the first beacon (the timeout wasn't
		 * set), set the unblock timeout, otherwise countdown
		 */
		if (!mvm->csa_tx_block_bcn_timeout)
			mvm->csa_tx_block_bcn_timeout =
				IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
		else
			mvm->csa_tx_block_bcn_timeout--;

		/* Check if the timeout is expired, and unblock tx */
		if (mvm->csa_tx_block_bcn_timeout == 0) {
			iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
			RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
		}
	}
}
1387
1388static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
1389 struct ieee80211_vif *vif)
1390{
1391 struct iwl_missed_beacons_notif *missed_beacons = _data;
1392 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1393 struct iwl_mvm *mvm = mvmvif->mvm;
1394 struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
1395 struct iwl_fw_dbg_trigger_tlv *trigger;
1396 u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
1397 u32 rx_missed_bcon, rx_missed_bcon_since_rx;
1398
1399 if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
1400 return;
1401
1402 rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
1403 rx_missed_bcon_since_rx =
1404 le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
1405 /*
1406 * TODO: the threshold should be adjusted based on latency conditions,
1407 * and/or in case of a CS flow on one of the other AP vifs.
1408 */
1409 if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
1410 IWL_MVM_MISSED_BEACONS_THRESHOLD)
1411 ieee80211_beacon_loss(vif);
1412
1413 if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
1414 FW_DBG_TRIGGER_MISSED_BEACONS))
1415 return;
1416
1417 trigger = iwl_fw_dbg_get_trigger(mvm->fw,
1418 FW_DBG_TRIGGER_MISSED_BEACONS);
1419 bcon_trig = (void *)trigger->data;
1420 stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
1421 stop_trig_missed_bcon_since_rx =
1422 le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
1423
1424 /* TODO: implement start trigger */
1425
1426 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
1427 return;
1428
1429 if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
1430 rx_missed_bcon >= stop_trig_missed_bcon)
1431 iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
1432}
1433
/*
 * Handle the firmware's missed-beacons notification: log the counters
 * and hand the notification to the per-interface beacon-loss iterator.
 */
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
				     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
		       le32_to_cpu(mb->mac_id),
		       le32_to_cpu(mb->consec_missed_beacons),
		       le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
		       le32_to_cpu(mb->num_recvd_beacons),
		       le32_to_cpu(mb->num_expected_beacons));

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_beacon_loss_iterator,
						   mb);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
new file mode 100644
index 000000000000..1fb684693040
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -0,0 +1,4260 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/kernel.h>
66#include <linux/slab.h>
67#include <linux/skbuff.h>
68#include <linux/netdevice.h>
69#include <linux/etherdevice.h>
70#include <linux/ip.h>
71#include <linux/if_arp.h>
72#include <linux/devcoredump.h>
73#include <net/mac80211.h>
74#include <net/ieee80211_radiotap.h>
75#include <net/tcp.h>
76
77#include "iwl-op-mode.h"
78#include "iwl-io.h"
79#include "mvm.h"
80#include "sta.h"
81#include "time-event.h"
82#include "iwl-eeprom-parse.h"
83#include "iwl-phy-db.h"
84#include "testmode.h"
85#include "iwl-fw-error-dump.h"
86#include "iwl-prph.h"
87#include "iwl-csr.h"
88#include "iwl-nvm-parse.h"
89
/*
 * Interface concurrency limits advertised to cfg80211: at most one
 * station, one AP/P2P-client/P2P-GO, and one P2P device.
 */
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};
106
/* Single allowed combination: up to 3 interfaces on at most 2 channels */
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
	{
		.num_different_channels = 2,
		.max_interfaces = 3,
		.limits = iwl_mvm_limits,
		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
	},
};
115
#ifdef CONFIG_PM_SLEEP
/* WoWLAN TCP wakeup: token-stream limits advertised to userspace */
static const struct nl80211_wowlan_tcp_data_token_feature
iwl_mvm_wowlan_tcp_token_feature = {
	.min_len = 0,
	.max_len = 255,
	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
};

/* payload size limits leave room for the ethernet/IP/TCP headers */
static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
	.tok = &iwl_mvm_wowlan_tcp_token_feature,
	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
			    sizeof(struct ethhdr) -
			    sizeof(struct iphdr) -
			    sizeof(struct tcphdr),
	.data_interval_max = 65535, /* __le16 in API */
	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
			    sizeof(struct ethhdr) -
			    sizeof(struct iphdr) -
			    sizeof(struct tcphdr),
	.seq = true,
};
#endif
138
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/*
 * Use the reserved field to indicate magic values.
 * these values will only be used internally by the driver,
 * and won't make it to the fw (reserved will be 0).
 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
 *	be the vif's ip address. in case there is not a single
 *	ip address (0, or more than 1), this attribute will
 *	be skipped.
 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
 *	the LSB bytes of the vif's mac address
 */
enum {
	BC_FILTER_MAGIC_NONE = 0,
	BC_FILTER_MAGIC_IP,
	BC_FILTER_MAGIC_MAC,
};

/* Default broadcast RX filters: keep ARP and DHCP, see per-entry notes */
static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
	{
		/* arp */
		.discard = 0,
		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
		.attrs = {
			{
				/* frame type - arp, hw type - ethernet */
				.offset_type =
					BCAST_FILTER_OFFSET_PAYLOAD_START,
				.offset = sizeof(rfc1042_header),
				.val = cpu_to_be32(0x08060001),
				.mask = cpu_to_be32(0xffffffff),
			},
			{
				/* arp dest ip */
				.offset_type =
					BCAST_FILTER_OFFSET_PAYLOAD_START,
				.offset = sizeof(rfc1042_header) + 2 +
					  sizeof(struct arphdr) +
					  ETH_ALEN + sizeof(__be32) +
					  ETH_ALEN,
				.mask = cpu_to_be32(0xffffffff),
				/* mark it as special field */
				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
			},
		},
	},
	{
		/* dhcp offer bcast */
		.discard = 0,
		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
		.attrs = {
			{
				/* udp dest port - 68 (bootp client)*/
				.offset_type = BCAST_FILTER_OFFSET_IP_END,
				.offset = offsetof(struct udphdr, dest),
				.val = cpu_to_be32(0x00440000),
				.mask = cpu_to_be32(0xffff0000),
			},
			{
				/* dhcp - lsb bytes of client hw address */
				.offset_type = BCAST_FILTER_OFFSET_IP_END,
				.offset = 38,
				.mask = cpu_to_be32(0xffffffff),
				/* mark it as special field */
				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
			},
		},
	},
	/* last filter must be empty */
	{},
};
#endif
211
/*
 * Take a runtime-PM reference of the given type; a no-op when D0i3 is
 * not supported. The per-type counter is tracked under refs_lock and
 * mirrored by a transport reference.
 */
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
	spin_lock_bh(&mvm->refs_lock);
	mvm->refs[ref_type]++;
	spin_unlock_bh(&mvm->refs_lock);
	iwl_trans_ref(mvm->trans);
}
223
/*
 * Release a runtime-PM reference of the given type; a no-op when D0i3
 * is not supported. Warns if the counter was already zero (unbalanced
 * unref) -- the post-decrement still happens in that case.
 */
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
	spin_lock_bh(&mvm->refs_lock);
	WARN_ON(!mvm->refs[ref_type]--);
	spin_unlock_bh(&mvm->refs_lock);
	iwl_trans_unref(mvm->trans);
}
235
236static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
237 enum iwl_mvm_ref_type except_ref)
238{
239 int i, j;
240
241 if (!iwl_mvm_is_d0i3_supported(mvm))
242 return;
243
244 spin_lock_bh(&mvm->refs_lock);
245 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
246 if (except_ref == i || !mvm->refs[i])
247 continue;
248
249 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
250 i, mvm->refs[i]);
251 for (j = 0; j < mvm->refs[i]; j++)
252 iwl_trans_unref(mvm->trans);
253 mvm->refs[i] = 0;
254 }
255 spin_unlock_bh(&mvm->refs_lock);
256}
257
258bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
259{
260 int i;
261 bool taken = false;
262
263 if (!iwl_mvm_is_d0i3_supported(mvm))
264 return true;
265
266 spin_lock_bh(&mvm->refs_lock);
267 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
268 if (mvm->refs[i]) {
269 taken = true;
270 break;
271 }
272 }
273 spin_unlock_bh(&mvm->refs_lock);
274
275 return taken;
276}
277
278int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
279{
280 iwl_mvm_ref(mvm, ref_type);
281
282 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
283 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
284 HZ)) {
285 WARN_ON_ONCE(1);
286 iwl_mvm_unref(mvm, ref_type);
287 return -EIO;
288 }
289
290 return 0;
291}
292
293static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
294{
295 int i;
296
297 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
298 for (i = 0; i < NUM_PHY_CTX; i++) {
299 mvm->phy_ctxts[i].id = i;
300 mvm->phy_ctxts[i].ref = 0;
301 }
302}
303
/*
 * Query the firmware for MCC/regulatory data for @alpha2 and build an
 * ieee80211_regdomain from it (caller frees). @changed, if non-NULL,
 * is set when the firmware reports a new channel profile. On failure
 * returns NULL or the error pointer produced by the NVM parser.
 * Must be called with mvm->mutex held.
 */
struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
						  const char *alpha2,
						  enum iwl_mcc_source src_id,
						  bool *changed)
{
	struct ieee80211_regdomain *regd = NULL;
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcc_update_resp *resp;

	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);

	lockdep_assert_held(&mvm->mutex);

	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
	if (IS_ERR_OR_NULL(resp)) {
		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
			      PTR_ERR_OR_ZERO(resp));
		goto out;
	}

	if (changed)
		*changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);

	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
				      __le32_to_cpu(resp->n_channels),
				      resp->channels,
				      __le16_to_cpu(resp->mcc));
	/* Store the return source id */
	src_id = resp->source_id;
	kfree(resp);
	if (IS_ERR_OR_NULL(regd)) {
		IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
			      PTR_ERR_OR_ZERO(regd));
		goto out;
	}

	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
	mvm->lar_regdom_set = true;
	mvm->mcc_src = src_id;

out:
	return regd;
}
349
350void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
351{
352 bool changed;
353 struct ieee80211_regdomain *regd;
354
355 if (!iwl_mvm_is_lar_supported(mvm))
356 return;
357
358 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
359 if (!IS_ERR_OR_NULL(regd)) {
360 /* only update the regulatory core if changed */
361 if (changed)
362 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
363
364 kfree(regd);
365 }
366}
367
368struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
369 bool *changed)
370{
371 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
372 iwl_mvm_is_wifi_mcc_supported(mvm) ?
373 MCC_SOURCE_GET_CURRENT :
374 MCC_SOURCE_OLD_FW, changed);
375}
376
/*
 * Re-program the firmware with the regdomain (MCC) the driver last
 * used - typically after FW (re)start.  Requires that a regdomain was
 * previously set on the wiphy (-ENOENT otherwise); returns -EIO if the
 * FW refused the update, 0 on success.  Caller must hold RTNL (the
 * wiphy->regd dereference) and mvm->mutex (for iwl_mvm_get_regdomain).
 */
int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
{
	enum iwl_mcc_source used_src;
	struct ieee80211_regdomain *regd;
	int ret;
	bool changed;
	const struct ieee80211_regdomain *r =
		rtnl_dereference(mvm->hw->wiphy->regd);

	if (!r)
		return -ENOENT;

	/* save the last source in case we overwrite it below */
	used_src = mvm->mcc_src;
	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
		/* Notify the firmware we support wifi location updates */
		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
		if (!IS_ERR_OR_NULL(regd))
			kfree(regd);
	}

	/* Now set our last stored MCC and source */
	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
				     &changed);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	/* update cfg80211 if the regdomain was changed */
	if (changed)
		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
	else
		ret = 0;

	kfree(regd);
	return ret;
}
413
/*
 * Advertise the driver's/firmware's capabilities to mac80211 and
 * cfg80211, then register the hardware.  Called once during op-mode
 * start.  Returns 0 on success; on ieee80211_register_hw() failure the
 * LEDs set up earlier are torn down again before the error is returned.
 */
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
	struct ieee80211_hw *hw = mvm->hw;
	int num_mac, ret, i;
	static const u32 mvm_ciphers[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
	};

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, QUEUE_CONTROL);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
	ieee80211_hw_set(hw, CONNECTION_MONITOR);
	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);

	hw->queues = mvm->first_agg_queue;
	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
				    IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
	hw->rate_control_algorithm = "iwl-mvm-rs";
	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;

	/*
	 * mvm->ciphers must have room for the base ciphers plus the two
	 * optional entries appended below (CMAC and the FW cipher scheme).
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
	hw->wiphy->cipher_suites = mvm->ciphers;

	/*
	 * Enable 11w if advertised by firmware and software crypto
	 * is not enabled (as the firmware will interpret some mgmt
	 * packets, so enabling it with software crypto isn't safe)
	 */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
	    !iwlwifi_mod_params.sw_crypto) {
		ieee80211_hw_set(hw, MFP_CAPABLE);
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_AES_CMAC;
		hw->wiphy->n_cipher_suites++;
	}

	/* currently FW API supports only one optional cipher scheme */
	if (mvm->fw->cs[0].cipher) {
		mvm->hw->n_cipher_schemes = 1;
		mvm->hw->cipher_schemes = &mvm->fw->cs[0];
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			mvm->fw->cs[0].cipher;
		hw->wiphy->n_cipher_suites++;
	}

	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
	hw->wiphy->features |=
		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;

	/* per-station/vif/chanctx private data sizes for mac80211 */
	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
	hw->chanctx_data_size = sizeof(u16);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_DEVICE) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
	/* with LAR the driver manages the regdomain itself */
	if (iwl_mvm_is_lar_supported(mvm))
		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
	else
		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
					       REGULATORY_DISABLE_BEACON_HINTS;

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
		hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;

	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;

	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
	hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(iwl_mvm_iface_combinations);

	hw->wiphy->max_remain_on_channel_duration = 10000;
	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
	/* we can compensate an offset of up to 3 channels = 15 MHz */
	hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;

	/* Extract MAC address */
	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
	hw->wiphy->addresses = mvm->addresses;
	hw->wiphy->n_addresses = 1;

	/* Extract additional MAC addresses if available */
	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;

	/* derive the extra addresses by incrementing the last octet */
	for (i = 1; i < num_mac; i++) {
		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
		       ETH_ALEN);
		mvm->addresses[i].addr[5]++;
		hw->wiphy->n_addresses++;
	}

	iwl_mvm_reset_phy_ctxts(mvm);

	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	/* the two scan bitmask spaces must not overlap or overflow */
	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
	else
		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;

	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
	if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];

		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
		    fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
	}

	hw->wiphy->hw_version = mvm->trans->hw_id;

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	else
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
	/* we create the 802.11 header and zero length SSID IE. */
	hw->wiphy->max_sched_scan_ie_len =
		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;

	/*
	 * the firmware uses u8 for num of iterations, but 0xff is saved for
	 * infinite loop, so the maximum number of iterations is actually 254.
	 */
	hw->wiphy->max_sched_scan_plan_iterations = 254;

	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
			       NL80211_FEATURE_P2P_GO_OPPPS |
			       NL80211_FEATURE_DYNAMIC_SMPS |
			       NL80211_FEATURE_STATIC_SMPS |
			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_QUIET;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		hw->wiphy->features |=
			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

#ifdef CONFIG_PM_SLEEP
	/* D0i3 wakeup support (ANY trigger) when the platform allows it */
	if (iwl_mvm_is_d0i3_supported(mvm) &&
	    device_can_wakeup(mvm->trans->dev)) {
		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
		hw->wiphy->wowlan = &mvm->wowlan;
	}

	/* full WoWLAN needs a WOWLAN FW image and trans d3 suspend/resume */
	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
	    mvm->trans->ops->d3_suspend &&
	    mvm->trans->ops->d3_resume &&
	    device_can_wakeup(mvm->trans->dev)) {
		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
				     WIPHY_WOWLAN_DISCONNECT |
				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
				     WIPHY_WOWLAN_RFKILL_RELEASE |
				     WIPHY_WOWLAN_NET_DETECT;
		if (!iwlwifi_mod_params.sw_crypto)
			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
					     WIPHY_WOWLAN_4WAY_HANDSHAKE;

		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
		mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
		hw->wiphy->wowlan = &mvm->wowlan;
	}
#endif

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
	/* assign default bcast filtering configuration */
	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
#endif

	ret = iwl_mvm_leds_init(mvm);
	if (ret)
		return ret;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		ieee80211_hw_set(hw, TDLS_WIDER_BW);
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
	}

	/* advertise HW checksum offload only when supported */
	hw->netdev_features |= mvm->cfg->features;
	if (!iwl_mvm_is_csum_supported(mvm))
		hw->netdev_features &= ~NETIF_F_RXCSUM;

	ret = ieee80211_register_hw(mvm->hw);
	if (ret)
		iwl_mvm_leds_exit(mvm);

	return ret;
}
671
/*
 * Defer @skb for later transmission if the device is in D0i3 and the
 * frame is destined to the station used for D0i3 offloads.  Deferred
 * frames are queued on mvm->d0i3_tx, the mac80211 queues are stopped,
 * and a momentary ref/unref kicks off the D0i3 exit that will flush
 * them later.
 *
 * Returns true when the frame was deferred (ownership of the skb is
 * taken); false means the caller should transmit it normally.
 */
static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb)
{
	struct iwl_mvm_sta *mvmsta;
	bool defer = false;

	/*
	 * double check the IN_D0I3 flag both before and after
	 * taking the spinlock, in order to prevent taking
	 * the spinlock when not needed.
	 */
	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
		return false;

	spin_lock(&mvm->d0i3_tx_lock);
	/*
	 * testing the flag again ensures the skb dequeue
	 * loop (on d0i3 exit) hasn't run yet.
	 */
	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
		goto out;

	/* only frames to the D0i3 AP station are deferred */
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
		goto out;

	__skb_queue_tail(&mvm->d0i3_tx, skb);
	ieee80211_stop_queues(mvm->hw);

	/* trigger wakeup */
	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);

	defer = true;
out:
	spin_unlock(&mvm->d0i3_tx_lock);
	return defer;
}
712
/*
 * mac80211 .tx callback: hand a frame to the firmware, or drop it when
 * the radio is killed or an off-channel frame arrives with no ROC
 * session active.  Frames deferred for D0i3 exit are swallowed here
 * (iwl_mvm_defer_tx takes ownership).
 */
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
			   struct ieee80211_tx_control *control,
			   struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;

	if (iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
		goto drop;
	}

	/* off-channel frames are only valid while a ROC session runs */
	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
		goto drop;

	/* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
	if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
		     ieee80211_is_mgmt(hdr->frame_control) &&
		     !ieee80211_is_deauth(hdr->frame_control) &&
		     !ieee80211_is_disassoc(hdr->frame_control) &&
		     !ieee80211_is_action(hdr->frame_control)))
		sta = NULL;

	if (sta) {
		if (iwl_mvm_defer_tx(mvm, sta, skb))
			return;
		if (iwl_mvm_tx_skb(mvm, skb, sta))
			goto drop;
		return;
	}

	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
		goto drop;
	return;
 drop:
	ieee80211_free_txskb(hw, skb);
}
754
755static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
756{
757 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
758 return false;
759 return true;
760}
761
762static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
763{
764 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
765 return false;
766 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
767 return true;
768
769 /* enabled by default */
770 return true;
771}
772
/*
 * Collect the BA debug trigger data only when _tid is set in the
 * trigger's TID bitmap (_tid_bm, little-endian as delivered by the
 * firmware TLV); otherwise do nothing.
 */
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)	\
	do {							\
		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))	\
			break;					\
		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);	\
	} while (0)
779
/*
 * Fire the FW debug BA trigger (if configured and armed for this TID)
 * when an A-MPDU session starts or stops.  @rx_ba_ssn is only
 * meaningful for RX_START.  No-op when the BA trigger is not enabled.
 */
static void
iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
			    enum ieee80211_ampdu_mlme_action action)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

	/* the trigger may be limited to certain vifs / stop conditions */
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	switch (action) {
	case IEEE80211_AMPDU_TX_OPERATIONAL: {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
				 "TX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, tid_data->ssn);
		break;
		}
	case IEEE80211_AMPDU_TX_STOP_CONT:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
				 "TX AGG STOP: MAC %pM tid %d\n",
				 sta->addr, tid);
		break;
	case IEEE80211_AMPDU_RX_START:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
				 "RX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, rx_ba_ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
				 "RX AGG STOP: MAC %pM tid %d\n",
				 sta->addr, tid);
		break;
	default:
		break;
	}
}
826
/*
 * mac80211 .ampdu_action callback: start/stop RX and TX aggregation
 * sessions.  Returns -EACCES when 11n is disabled in the NVM, -EINVAL
 * when the requested aggregation direction is disabled by module
 * parameters, or the result of the underlying station-aggregation op.
 *
 * TX-related actions synchronously exit D0i3 first (via a TX_AGG ref)
 * because they need correct sequence numbers and direct target access.
 */
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta, u16 tid,
				    u16 *ssn, u8 buf_size, bool amsdu)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;
	bool tx_agg_ref = false;

	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
		     sta->addr, tid, action);

	if (!(mvm->nvm_data->sku_cap_11n_enable))
		return -EACCES;

	/* return from D0i3 before starting a new Tx aggregation */
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/*
		 * for tx start, wait synchronously until D0i3 exit to
		 * get the correct sequence number for the tid.
		 * additionally, some other ampdu actions use direct
		 * target access, which is not handled automatically
		 * by the trans layer (unlike commands), so wait for
		 * d0i3 exit in these cases as well.
		 */
		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
		if (ret)
			return ret;

		tx_agg_ref = true;
		break;
	default:
		break;
	}

	mutex_lock(&mvm->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
		break;
	case IEEE80211_AMPDU_TX_START:
		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		break;
	}

	/* on success, let the FW debug BA trigger fire if configured */
	if (!ret) {
		u16 rx_ba_ssn = 0;

		if (action == IEEE80211_AMPDU_RX_START)
			rx_ba_ssn = *ssn;

		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
					    rx_ba_ssn, action);
	}
	mutex_unlock(&mvm->mutex);

	/*
	 * If the tid is marked as started, we won't use it for offloaded
	 * traffic on the next D0i3 entry. It's safe to unref.
	 */
	if (tx_agg_ref)
		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);

	return ret;
}
924
925static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
926 struct ieee80211_vif *vif)
927{
928 struct iwl_mvm *mvm = data;
929 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
930
931 mvmvif->uploaded = false;
932 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
933
934 spin_lock_bh(&mvm->time_event_lock);
935 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
936 spin_unlock_bh(&mvm->time_event_lock);
937
938 mvmvif->phy_ctxt = NULL;
939 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
940}
941
942static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
943 const void *data, size_t datalen)
944{
945 const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
946 ssize_t bytes_read;
947 ssize_t bytes_read_trans;
948
949 if (offset < dump_ptrs->op_mode_len) {
950 bytes_read = min_t(ssize_t, count,
951 dump_ptrs->op_mode_len - offset);
952 memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
953 bytes_read);
954 offset += bytes_read;
955 count -= bytes_read;
956
957 if (count == 0)
958 return bytes_read;
959 } else {
960 bytes_read = 0;
961 }
962
963 if (!dump_ptrs->trans_ptr)
964 return bytes_read;
965
966 offset -= dump_ptrs->op_mode_len;
967 bytes_read_trans = min_t(ssize_t, count,
968 dump_ptrs->trans_ptr->len - offset);
969 memcpy(buffer + bytes_read,
970 (u8 *)dump_ptrs->trans_ptr->data + offset,
971 bytes_read_trans);
972
973 return bytes_read + bytes_read_trans;
974}
975
/*
 * devcoredump free callback (passed to dev_coredumpm): release both
 * dump halves and the holder structure itself.
 */
static void iwl_mvm_free_coredump(const void *data)
{
	const struct iwl_mvm_dump_ptrs *fw_error_dump = data;

	vfree(fw_error_dump->op_mode_ptr);
	vfree(fw_error_dump->trans_ptr);
	kfree(fw_error_dump);
}
984
/*
 * Dump the contents of all RX and TX FIFOs into the error-dump TLV
 * stream at *dump_data, writing one TLV per non-empty FIFO and
 * advancing *dump_data past each TLV.  The caller must have sized the
 * buffer accordingly (see iwl_mvm_fw_error_dump).  Silently returns if
 * NIC access cannot be grabbed.
 */
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
			       struct iwl_fw_error_dump_data **dump_data)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	unsigned long flags;
	int i, j;

	if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
		return;

	/* Pull RXF data from all RXFs */
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
		/*
		 * Keep aside the additional offset that might be needed for
		 * next RXF
		 */
		u32 offset_diff = RXF_DIFF_FROM_PREV * i;

		fifo_hdr = (void *)(*dump_data)->data;
		fifo_data = (void *)fifo_hdr->data;
		fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];

		/* No need to try to read the data if the length is 0 */
		if (fifo_len == 0)
			continue;

		/* Add a TLV for the RXF */
		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

		/* snapshot the FIFO pointers before draining it */
		fifo_hdr->fifo_num = cpu_to_le32(i);
		fifo_hdr->available_bytes =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_D_SPACE +
							offset_diff));
		fifo_hdr->wr_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_WR_PTR +
							offset_diff));
		fifo_hdr->rd_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_RD_PTR +
							offset_diff));
		fifo_hdr->fence_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_RD_FENCE_PTR +
							offset_diff));
		fifo_hdr->fence_mode =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							RXF_SET_FENCE_MODE +
							offset_diff));

		/* Lock fence */
		iwl_trans_write_prph(mvm->trans,
				     RXF_SET_FENCE_MODE + offset_diff, 0x1);
		/* Set fence pointer to the same place like WR pointer */
		iwl_trans_write_prph(mvm->trans,
				     RXF_LD_WR2FENCE + offset_diff, 0x1);
		/* Set fence offset */
		iwl_trans_write_prph(mvm->trans,
				     RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
				     0x0);

		/* Read FIFO */
		fifo_len /= sizeof(u32); /* Size in DWORDS */
		for (j = 0; j < fifo_len; j++)
			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
							 RXF_FIFO_RD_FENCE_INC +
							 offset_diff);
		*dump_data = iwl_fw_error_next_data(*dump_data);
	}

	/* Pull TXF data from all TXFs */
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
		/* Mark the number of TXF we're pulling now */
		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);

		fifo_hdr = (void *)(*dump_data)->data;
		fifo_data = (void *)fifo_hdr->data;
		fifo_len = mvm->shared_mem_cfg.txfifo_size[i];

		/* No need to try to read the data if the length is 0 */
		if (fifo_len == 0)
			continue;

		/* Add a TLV for the FIFO */
		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

		fifo_hdr->fifo_num = cpu_to_le32(i);
		fifo_hdr->available_bytes =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_FIFO_ITEM_CNT));
		fifo_hdr->wr_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_WR_PTR));
		fifo_hdr->rd_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_RD_PTR));
		fifo_hdr->fence_ptr =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_FENCE_PTR));
		fifo_hdr->fence_mode =
			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
							TXF_LOCK_FENCE));

		/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
		iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
				     TXF_WR_PTR);

		/* Dummy-read to advance the read pointer to the head */
		iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);

		/* Read FIFO */
		fifo_len /= sizeof(u32); /* Size in DWORDS */
		for (j = 0; j < fifo_len; j++)
			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
							  TXF_READ_MODIFY_DATA);
		*dump_data = iwl_fw_error_next_data(*dump_data);
	}

	iwl_trans_release_nic_access(mvm->trans, &flags);
}
1110
1111void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1112{
1113 if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1114 !mvm->fw_dump_desc)
1115 return;
1116
1117 kfree(mvm->fw_dump_desc);
1118 mvm->fw_dump_desc = NULL;
1119}
1120
1121#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
1122#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
1123
/*
 * Build a complete firmware error dump (info TLV, FIFOs, SRAM/SMEM,
 * ICCM on 8000-B, paging blocks and the trigger descriptor) and hand
 * it to the devcoredump framework.  A monitor-only trigger reduces the
 * op-mode part to the info TLV.  Must be called with mvm->mutex held.
 * Consumes mvm->fw_dump_desc and clears mvm->fw_dump_trig.
 */
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
	struct iwl_fw_error_dump_file *dump_file;
	struct iwl_fw_error_dump_data *dump_data;
	struct iwl_fw_error_dump_info *dump_info;
	struct iwl_fw_error_dump_mem *dump_mem;
	struct iwl_fw_error_dump_trigger_desc *dump_trig;
	struct iwl_mvm_dump_ptrs *fw_error_dump;
	u32 sram_len, sram_ofs;
	u32 file_len, fifo_data_len = 0;
	u32 smem_len = mvm->cfg->smem_len;
	u32 sram2_len = mvm->cfg->dccm2_len;
	bool monitor_dump_only = false;

	lockdep_assert_held(&mvm->mutex);

	/* there's no point in fw dump if the bus is dead */
	if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
		IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
		return;
	}

	if (mvm->fw_dump_trig &&
	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
		monitor_dump_only = true;

	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
	if (!fw_error_dump)
		return;

	/* SRAM - include stack CCM if driver knows the values for it */
	if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
		const struct fw_img *img;

		img = &mvm->fw->img[mvm->cur_ucode];
		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
	} else {
		sram_ofs = mvm->cfg->dccm_offset;
		sram_len = mvm->cfg->dccm_len;
	}

	/* reading RXF/TXF sizes */
	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
		int i;

		fifo_data_len = 0;

		/* Count RXF size */
		for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
			if (!mem_cfg->rxfifo_size[i])
				continue;

			/* Add header info */
			fifo_data_len += mem_cfg->rxfifo_size[i] +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}

		for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
			if (!mem_cfg->txfifo_size[i])
				continue;

			/* Add header info */
			fifo_data_len += mem_cfg->txfifo_size[i] +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}
	}

	/* base size: file header + info TLV + SRAM TLV + FIFO TLVs */
	file_len = sizeof(*dump_file) +
		   sizeof(*dump_data) * 2 +
		   sram_len + sizeof(*dump_mem) +
		   fifo_data_len +
		   sizeof(*dump_info);

	/* Make room for the SMEM, if it exists */
	if (smem_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;

	/* Make room for the secondary SRAM, if it exists */
	if (sram2_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;

	/* Make room for fw's virtual image pages, if it exists */
	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
		file_len += mvm->num_of_paging_blk *
			(sizeof(*dump_data) +
			 sizeof(struct iwl_fw_error_dump_paging) +
			 PAGING_BLOCK_SIZE);

	/* If we only want a monitor dump, reset the file length */
	if (monitor_dump_only) {
		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
			   sizeof(*dump_info);
	}

	/*
	 * In 8000 HW family B-step include the ICCM (which resides separately)
	 */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
			    IWL8260_ICCM_LEN;

	if (mvm->fw_dump_desc)
		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
			    mvm->fw_dump_desc->len;

	dump_file = vzalloc(file_len);
	if (!dump_file) {
		kfree(fw_error_dump);
		iwl_mvm_free_fw_dump_desc(mvm);
		return;
	}

	fw_error_dump->op_mode_ptr = dump_file;

	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
	dump_data = (void *)dump_file->data;

	/* device/FW info TLV is always first */
	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
	dump_data->len = cpu_to_le32(sizeof(*dump_info));
	dump_info = (void *) dump_data->data;
	dump_info->device_family =
		mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
	memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
	       sizeof(dump_info->fw_human_readable));
	/*
	 * NOTE(review): strncpy() does not NUL-terminate when the source
	 * fills the destination, so the *_human_readable fields may end
	 * up unterminated - consider a terminating copy helper instead.
	 */
	strncpy(dump_info->dev_human_readable, mvm->cfg->name,
		sizeof(dump_info->dev_human_readable));
	strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
		sizeof(dump_info->bus_human_readable));

	dump_data = iwl_fw_error_next_data(dump_data);
	/* We only dump the FIFOs if the FW is in error state */
	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
		iwl_mvm_dump_fifos(mvm, &dump_data);

	if (mvm->fw_dump_desc) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
					     mvm->fw_dump_desc->len);
		dump_trig = (void *)dump_data->data;
		memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
		       sizeof(*dump_trig) + mvm->fw_dump_desc->len);

		/* now we can free this copy */
		iwl_mvm_free_fw_dump_desc(mvm);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* In case we only want monitor dump, skip to dump trasport data */
	if (monitor_dump_only)
		goto dump_trans_data;

	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
	dump_mem = (void *)dump_data->data;
	dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
	dump_mem->offset = cpu_to_le32(sram_ofs);
	iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
				 sram_len);

	if (smem_len) {
		dump_data = iwl_fw_error_next_data(dump_data);
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
		dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
					 dump_mem->data, smem_len);
	}

	if (sram2_len) {
		dump_data = iwl_fw_error_next_data(dump_data);
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
					 dump_mem->data, sram2_len);
	}

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
		dump_data = iwl_fw_error_next_data(dump_data);
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
					     sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
					 dump_mem->data, IWL8260_ICCM_LEN);
	}

	/* Dump fw's virtual image */
	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
		u32 i;

		/* paging blocks are 1-based in fw_paging_db */
		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
			struct iwl_fw_error_dump_paging *paging;
			struct page *pages =
				mvm->fw_paging_db[i].fw_paging_block;

			dump_data = iwl_fw_error_next_data(dump_data);
			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			dump_data->len = cpu_to_le32(sizeof(*paging) +
						     PAGING_BLOCK_SIZE);
			paging = (void *)dump_data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data, page_address(pages),
			       PAGING_BLOCK_SIZE);
		}
	}

dump_trans_data:
	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
						       mvm->fw_dump_trig);
	fw_error_dump->op_mode_len = file_len;
	if (fw_error_dump->trans_ptr)
		file_len += fw_error_dump->trans_ptr->len;
	dump_file->file_len = cpu_to_le32(file_len);

	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);

	mvm->fw_dump_trig = NULL;
	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
}
1360
/*
 * Default dump descriptor used when the firmware asserts: it carries only
 * the FW_DBG_TRIGGER_FW_ASSERT trigger type and no extra description data.
 * Installed as mvm->fw_dump_desc before collecting a coredump on restart.
 */
struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
	.trig_desc = {
		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
	},
};
1366
/*
 * Clean up all driver and mac80211 state before bringing the firmware back
 * up after a HW restart: collect a coredump (unless this is a D3->D0
 * reconfiguration), drop stale references, stop the device and reset the
 * driver's per-restart bookkeeping.
 */
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
	/* clear the D3 reconfig, we only need it to avoid dumping a
	 * firmware coredump on reconfiguration, we shouldn't do that
	 * on D3->D0 transition
	 */
	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
		mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
		iwl_mvm_fw_error_dump(mvm);
	}

	/* cleanup all stale references (scan, roc), but keep the
	 * ucode_down ref until reconfig is complete
	 */
	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);

	iwl_trans_stop_device(mvm->trans);

	/* reset per-run soft state that is meaningless once the FW is down */
	mvm->scan_status = 0;
	mvm->ps_disabled = false;
	mvm->calibrating = false;

	/* just in case one was running */
	ieee80211_remain_on_channel_expired(mvm->hw);

	/*
	 * cleanup all interfaces, even inactive ones, as some might have
	 * gone down during the HW restart
	 */
	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);

	mvm->p2p_device_vif = NULL;
	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;

	/* clear PHY contexts and all cached BT-coex notification state */
	iwl_mvm_reset_phy_ctxts(mvm);
	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
	memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
	memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
	memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));

	ieee80211_wake_queues(mvm->hw);

	/* clear any stale d0i3 state */
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	mvm->vif_count = 0;
	mvm->rx_ba_sessions = 0;
	mvm->fw_dbg_conf = FW_DBG_INVALID;

	/* keep statistics ticking */
	iwl_mvm_accu_radio_stats(mvm);
}
1423
1424int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1425{
1426 int ret;
1427
1428 lockdep_assert_held(&mvm->mutex);
1429
1430 /* Clean up some internal and mac80211 state on restart */
1431 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1432 iwl_mvm_restart_cleanup(mvm);
1433
1434 ret = iwl_mvm_up(mvm);
1435
1436 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1437 /* Something went wrong - we need to finish some cleanup
1438 * that normally iwl_mvm_mac_restart_complete() below
1439 * would do.
1440 */
1441 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1442 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1443 }
1444
1445 return ret;
1446}
1447
1448static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1449{
1450 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1451 int ret;
1452
1453 /* Some hw restart cleanups must not hold the mutex */
1454 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1455 /*
1456 * Make sure we are out of d0i3. This is needed
1457 * to make sure the reference accounting is correct
1458 * (and there is no stale d0i3_exit_work).
1459 */
1460 wait_event_timeout(mvm->d0i3_exit_waitq,
1461 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1462 &mvm->status),
1463 HZ);
1464 }
1465
1466 mutex_lock(&mvm->mutex);
1467 ret = __iwl_mvm_mac_start(mvm);
1468 mutex_unlock(&mvm->mutex);
1469
1470 return ret;
1471}
1472
1473static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1474{
1475 int ret;
1476
1477 mutex_lock(&mvm->mutex);
1478
1479 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1480 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1481 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1482 if (ret)
1483 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1484 ret);
1485
1486 /* allow transport/FW low power modes */
1487 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1488
1489 /*
1490 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1491 * of packets the FW sent out, so we must reconnect.
1492 */
1493 iwl_mvm_teardown_tdls_peers(mvm);
1494
1495 mutex_unlock(&mvm->mutex);
1496}
1497
1498static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1499{
1500 if (!iwl_mvm_is_d0i3_supported(mvm))
1501 return;
1502
1503 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1504 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1505 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1506 &mvm->status),
1507 HZ))
1508 WARN_ONCE(1, "D0i3 exit on resume timed out\n");
1509}
1510
1511static void
1512iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1513 enum ieee80211_reconfig_type reconfig_type)
1514{
1515 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1516
1517 switch (reconfig_type) {
1518 case IEEE80211_RECONFIG_TYPE_RESTART:
1519 iwl_mvm_restart_complete(mvm);
1520 break;
1521 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1522 iwl_mvm_resume_complete(mvm);
1523 break;
1524 }
1525}
1526
/*
 * Stop the firmware and clean up driver state (locked variant).
 * Caller must hold mvm->mutex. The shutdown order matters: take the
 * UCODE_DOWN reference, flush pending work, stop the device, then purge
 * everything that depended on the firmware being alive.
 */
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	/* firmware counters are obviously reset now, but we shouldn't
	 * partially track so also clear the fw_reset_accu counters.
	 */
	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));

	/*
	 * Disallow low power states when the FW is down by taking
	 * the UCODE_DOWN ref. in case of ongoing hw restart the
	 * ref is already taken, so don't take it again.
	 */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

	/* async_handlers_wk is now blocked */

	/*
	 * The work item could be running or queued if the
	 * ROC time event stops just as we get here.
	 */
	flush_work(&mvm->roc_done_wk);

	iwl_trans_stop_device(mvm->trans);

	iwl_mvm_async_handlers_purge(mvm);
	/* async_handlers_list is empty and will stay empty: HW is stopped */

	/* the fw is stopped, the aux sta is dead: clean up driver state */
	iwl_mvm_del_aux_sta(mvm);

	/*
	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
	 * won't be called in this case).
	 * But make sure to cleanup interfaces that have gone down before/during
	 * HW restart was requested.
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		ieee80211_iterate_interfaces(mvm->hw, 0,
					     iwl_mvm_cleanup_iterator, mvm);

	/* We shouldn't have any UIDs still set. Loop over all the UIDs to
	 * make sure there's nothing left there and warn if any is found.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		int i;

		for (i = 0; i < mvm->max_scans; i++) {
			if (WARN_ONCE(mvm->scan_uid_status[i],
				      "UMAC scan UID %d status was not cleaned\n",
				      i))
				mvm->scan_uid_status[i] = 0;
		}
	}

	mvm->ucode_loaded = false;
}
1586
/*
 * mac80211 stop() callback: flush pending work items that may touch the
 * device, then stop the firmware under the mutex. The flush/cancel order
 * around the mutex is deliberate - see the comments below.
 */
static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* drain work that may still access the device before stopping it */
	flush_work(&mvm->d0i3_exit_work);
	flush_work(&mvm->async_handlers_wk);
	cancel_delayed_work_sync(&mvm->fw_dump_wk);
	iwl_mvm_free_fw_dump_desc(mvm);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_mac_stop(mvm);
	mutex_unlock(&mvm->mutex);

	/*
	 * The worker might have been waiting for the mutex, let it run and
	 * discover that its list is now empty.
	 */
	cancel_work_sync(&mvm->async_handlers_wk);
}
1606
1607static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1608{
1609 u16 i;
1610
1611 lockdep_assert_held(&mvm->mutex);
1612
1613 for (i = 0; i < NUM_PHY_CTX; i++)
1614 if (!mvm->phy_ctxts[i].ref)
1615 return &mvm->phy_ctxts[i];
1616
1617 IWL_ERR(mvm, "No available PHY context\n");
1618 return NULL;
1619}
1620
1621static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1622 s16 tx_power)
1623{
1624 struct iwl_dev_tx_power_cmd cmd = {
1625 .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1626 .v2.mac_context_id =
1627 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1628 .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
1629 };
1630 int len = sizeof(cmd);
1631
1632 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1633 cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1634
1635 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1636 len = sizeof(cmd.v2);
1637
1638 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1639}
1640
/*
 * mac80211 add_interface() callback: allocate and (for most interface
 * types) add a MAC context for the new vif. AP/IBSS vifs only get their
 * resources allocated here - the MAC is added later in start_ap(); a
 * P2P_DEVICE vif additionally gets a dedicated PHY context, binding and
 * broadcast station since it never receives a channel context.
 * Errors unwind through the goto chain in reverse order of setup.
 */
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	mvmvif->mvm = mvm;

	/*
	 * make sure D0i3 exit is completed, otherwise a target access
	 * during tx queue configuration could be done when still in
	 * D0i3 state.
	 */
	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
	if (ret)
		return ret;

	/*
	 * Not much to do here. The stack will not allow interface
	 * types or combinations that we didn't advertise, so we
	 * don't really have to check the types.
	 */

	mutex_lock(&mvm->mutex);

	/* make sure that beacon statistics don't go backwards with FW reset */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		mvmvif->beacon_stats.accu_num_beacons +=
			mvmvif->beacon_stats.num_beacons;

	/* Allocate resources for the MAC context, and add it to the fw */
	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
	if (ret)
		goto out_unlock;

	/* Counting number of interfaces is needed for legacy PM */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count++;

	/*
	 * The AP binding flow can be done only after the beacon
	 * template is configured (which happens only in the mac80211
	 * start_ap() flow), and adding the broadcast station can happen
	 * only after the binding.
	 * In addition, since modifying the MAC before adding a bcast
	 * station is not allowed by the FW, delay the adding of MAC context to
	 * the point where we can also add the bcast station.
	 * In short: there's not much we can do at this point, other than
	 * allocating resources :)
	 */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
		if (ret) {
			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
			goto out_release;
		}

		iwl_mvm_vif_dbgfs_register(mvm, vif);
		goto out_unlock;
	}

	mvmvif->features |= hw->netdev_features;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		goto out_release;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out_remove_mac;

	/* beacon filtering */
	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	if (ret)
		goto out_remove_mac;

	/* only one (non-P2P station) vif may own beacon filtering at a time */
	if (!mvm->bf_allowed_vif &&
	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
		mvm->bf_allowed_vif = mvmvif;
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
	}

	/*
	 * P2P_DEVICE interface does not have a channel context assigned to it,
	 * so a dedicated PHY context is allocated to it and the corresponding
	 * MAC context is bound to it at this stage.
	 */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {

		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
		if (!mvmvif->phy_ctxt) {
			ret = -ENOSPC;
			goto out_free_bf;
		}

		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (ret)
			goto out_unref_phy;

		ret = iwl_mvm_add_bcast_sta(mvm, vif);
		if (ret)
			goto out_unbind;

		/* Save a pointer to p2p device vif, so it can later be used to
		 * update the p2p device MAC when a GO is started/stopped */
		mvm->p2p_device_vif = vif;
	}

	iwl_mvm_vif_dbgfs_register(mvm, vif);
	goto out_unlock;

 out_unbind:
	iwl_mvm_binding_remove_vif(mvm, vif);
 out_unref_phy:
	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
 out_free_bf:
	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}
 out_remove_mac:
	mvmvif->phy_ctxt = NULL;
	iwl_mvm_mac_ctxt_remove(mvm, vif);
 out_release:
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count--;

	iwl_mvm_mac_ctxt_release(mvm, vif);
 out_unlock:
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);

	return ret;
}
1781
/*
 * Quiesce all TX for a vif that is about to be removed: flush its shared
 * queues, wait for the transport to drain them, and flush any worker that
 * may still be handing frames to those queues.
 */
static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);

	if (tfd_msk) {
		/*
		 * mac80211 first removes all the stations of the vif and
		 * then removes the vif. When it removes a station it also
		 * flushes the AMPDU session. So by now, all the AMPDU sessions
		 * of all the stations of this vif are closed, and the queues
		 * of these AMPDU sessions are properly closed.
		 * We still need to take care of the shared queues of the vif.
		 * Flush them here.
		 */
		mutex_lock(&mvm->mutex);
		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
		mutex_unlock(&mvm->mutex);

		/*
		 * There are transports that buffer a few frames in the host.
		 * For these, the flush above isn't enough since while we were
		 * flushing, the transport might have sent more frames to the
		 * device. To solve this, wait here until the transport is
		 * empty. Technically, this could have replaced the flush
		 * above, but flush is much faster than draining. So flush
		 * first, and drain to make sure we have no frames in the
		 * transport anymore.
		 * If a station still had frames on the shared queues, it is
		 * already marked as draining, so to complete the draining, we
		 * just need to wait until the transport is empty.
		 */
		iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
	}

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		/*
		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
		 * We assume here that all the packets sent to the OFFCHANNEL
		 * queue are sent in ROC session.
		 */
		flush_work(&mvm->roc_done_wk);
	} else {
		/*
		 * By now, all the AC queues are empty. The AGG queues are
		 * empty too. We already got all the Tx responses for all the
		 * packets in the queues. The drain work can have been
		 * triggered. Flush it.
		 */
		flush_work(&mvm->sta_drained_wk);
	}
}
1834
/*
 * mac80211 remove_interface() callback: drain the vif's TX, then tear
 * down the driver/FW resources that add_interface() (and, for P2P_DEVICE,
 * the binding/bcast-sta setup) created for it.
 */
static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_prepare_mac_removal(mvm, vif);

	mutex_lock(&mvm->mutex);

	/* release beacon filtering ownership if this vif held it */
	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}

	iwl_mvm_vif_dbgfs_clean(mvm, vif);

	/*
	 * For AP/GO interface, the tear down of the resources allocated to the
	 * interface is handled as part of the stop_ap flow.
	 */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
#ifdef CONFIG_NL80211_TESTMODE
		if (vif == mvm->noa_vif) {
			mvm->noa_vif = NULL;
			mvm->noa_duration = 0;
		}
#endif
		iwl_mvm_dealloc_bcast_sta(mvm, vif);
		goto out_release;
	}

	/* undo the P2P_DEVICE-specific setup from add_interface() */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		mvm->p2p_device_vif = NULL;
		iwl_mvm_rm_bcast_sta(mvm, vif);
		iwl_mvm_binding_remove_vif(mvm, vif);
		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
		mvmvif->phy_ctxt = NULL;
	}

	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count--;

	iwl_mvm_power_update_mac(mvm);
	iwl_mvm_mac_ctxt_remove(mvm, vif);

out_release:
	iwl_mvm_mac_ctxt_release(mvm, vif);
	mutex_unlock(&mvm->mutex);
}
1887
/*
 * mac80211 config() callback: intentionally a no-op - this driver handles
 * its configuration through the other callbacks (bss_info_changed, channel
 * contexts, etc.).
 */
static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	return 0;
}
1892
/* Iteration context for iwl_mvm_mc_iface_iterator(). */
struct iwl_mvm_mc_iter_data {
	struct iwl_mvm *mvm;
	int port_id;	/* next port id to assign; bumped per matching vif */
};
1897
1898static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1899 struct ieee80211_vif *vif)
1900{
1901 struct iwl_mvm_mc_iter_data *data = _data;
1902 struct iwl_mvm *mvm = data->mvm;
1903 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1904 int ret, len;
1905
1906 /* if we don't have free ports, mcast frames will be dropped */
1907 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1908 return;
1909
1910 if (vif->type != NL80211_IFTYPE_STATION ||
1911 !vif->bss_conf.assoc)
1912 return;
1913
1914 cmd->port_id = data->port_id++;
1915 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1916 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1917
1918 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1919 if (ret)
1920 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1921}
1922
1923static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1924{
1925 struct iwl_mvm_mc_iter_data iter_data = {
1926 .mvm = mvm,
1927 };
1928
1929 lockdep_assert_held(&mvm->mutex);
1930
1931 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1932 return;
1933
1934 ieee80211_iterate_active_interfaces_atomic(
1935 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1936 iwl_mvm_mc_iface_iterator, &iter_data);
1937}
1938
1939static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1940 struct netdev_hw_addr_list *mc_list)
1941{
1942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1943 struct iwl_mcast_filter_cmd *cmd;
1944 struct netdev_hw_addr *addr;
1945 int addr_count;
1946 bool pass_all;
1947 int len;
1948
1949 addr_count = netdev_hw_addr_list_count(mc_list);
1950 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1951 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1952 if (pass_all)
1953 addr_count = 0;
1954
1955 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1956 cmd = kzalloc(len, GFP_ATOMIC);
1957 if (!cmd)
1958 return 0;
1959
1960 if (pass_all) {
1961 cmd->pass_all = 1;
1962 return (u64)(unsigned long)cmd;
1963 }
1964
1965 netdev_hw_addr_list_for_each(addr, mc_list) {
1966 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1967 cmd->count, addr->addr);
1968 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1969 addr->addr, ETH_ALEN);
1970 cmd->count++;
1971 }
1972
1973 return (u64)(unsigned long)cmd;
1974}
1975
1976static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1977 unsigned int changed_flags,
1978 unsigned int *total_flags,
1979 u64 multicast)
1980{
1981 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1982 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1983
1984 mutex_lock(&mvm->mutex);
1985
1986 /* replace previous configuration */
1987 kfree(mvm->mcast_filter_cmd);
1988 mvm->mcast_filter_cmd = cmd;
1989
1990 if (!cmd)
1991 goto out;
1992
1993 iwl_mvm_recalc_multicast(mvm);
1994out:
1995 mutex_unlock(&mvm->mutex);
1996 *total_flags = 0;
1997}
1998
1999static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
2000 struct ieee80211_vif *vif,
2001 unsigned int filter_flags,
2002 unsigned int changed_flags)
2003{
2004 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2005
2006 /* We support only filter for probe requests */
2007 if (!(changed_flags & FIF_PROBE_REQ))
2008 return;
2009
2010 /* Supported only for p2p client interfaces */
2011 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
2012 !vif->p2p)
2013 return;
2014
2015 mutex_lock(&mvm->mutex);
2016 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2017 mutex_unlock(&mvm->mutex);
2018}
2019
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/* Iteration context for iwl_mvm_bcast_filter_iterator(). */
struct iwl_bcast_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_bcast_filter_cmd *cmd;	/* command being assembled */
	u8 current_filter;	/* next free slot in cmd->filters */
};
2026
/*
 * Instantiate one broadcast filter template for a vif: copy the template
 * and patch each attribute's value according to the magic stored in its
 * reserved1 field (current IP address or the vif MAC tail), clearing the
 * magic afterwards. Attributes whose value can't be resolved (e.g. not
 * exactly one ARP address) get their mask zeroed and are not counted.
 */
static void
iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
			 const struct iwl_fw_bcast_filter *in_filter,
			 struct iwl_fw_bcast_filter *out_filter)
{
	struct iwl_fw_bcast_filter_attr *attr;
	int i;

	memcpy(out_filter, in_filter, sizeof(*out_filter));

	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
		attr = &out_filter->attrs[i];

		/* a zero mask terminates the attribute list */
		if (!attr->mask)
			break;

		switch (attr->reserved1) {
		case cpu_to_le16(BC_FILTER_MAGIC_IP):
			if (vif->bss_conf.arp_addr_cnt != 1) {
				attr->mask = 0;
				continue;
			}

			attr->val = vif->bss_conf.arp_addr_list[0];
			break;
		case cpu_to_le16(BC_FILTER_MAGIC_MAC):
			/* last 4 bytes of the vif's MAC address */
			attr->val = *(__be32 *)&vif->addr[2];
			break;
		default:
			break;
		}
		attr->reserved1 = 0;
		out_filter->num_attrs++;
	}
}
2062
/*
 * Per-vif iterator: for each associated (non-P2P) station vif, instantiate
 * all configured broadcast filters into the command being built and attach
 * them to the vif's MAC entry. If the filter table would overflow the
 * command, the MAC falls back to allowing all broadcast frames.
 */
static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_bcast_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_bcast_filter_cmd *cmd = data->cmd;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_fw_bcast_mac *bcast_mac;
	int i;

	if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
		return;

	bcast_mac = &cmd->macs[mvmvif->id];

	/*
	 * enable filtering only for associated stations, but not for P2P
	 * Clients
	 */
	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
	    !vif->bss_conf.assoc)
		return;

	bcast_mac->default_discard = 1;

	/* copy all configured filters */
	for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
		/*
		 * Make sure we don't exceed our filters limit.
		 * if there is still a valid filter to be configured,
		 * be on the safe side and just allow bcast for this mac.
		 */
		if (WARN_ON_ONCE(data->current_filter >=
				 ARRAY_SIZE(cmd->filters))) {
			bcast_mac->default_discard = 0;
			bcast_mac->attached_filters = 0;
			break;
		}

		iwl_mvm_set_bcast_filter(vif,
					 &mvm->bcast_filters[i],
					 &cmd->filters[data->current_filter]);

		/* skip current filter if it contains no attributes */
		if (!cmd->filters[data->current_filter].num_attrs)
			continue;

		/* attach the filter to current mac */
		bcast_mac->attached_filters |=
				cpu_to_le16(BIT(data->current_filter));

		data->current_filter++;
	}
}
2117
/*
 * Build the BCAST_FILTER_CMD payload from the configured filters (or the
 * debugfs override, when set). Returns false when no command should be
 * sent: pass-all is forced or no filters are configured.
 */
bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
				    struct iwl_bcast_filter_cmd *cmd)
{
	struct iwl_bcast_iter_data iter_data = {
		.mvm = mvm,
		.cmd = cmd,
	};

	if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
		return false;

	memset(cmd, 0, sizeof(*cmd));
	cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
	cmd->max_macs = ARRAY_SIZE(cmd->macs);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* use debugfs filters/macs if override is configured */
	if (mvm->dbgfs_bcast_filtering.override) {
		memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
		       sizeof(cmd->filters));
		memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
		       sizeof(cmd->macs));
		return true;
	}
#endif

	/* if no filters are configured, do nothing */
	if (!mvm->bcast_filters)
		return false;

	/* configure and attach these filters for each associated sta vif */
	ieee80211_iterate_active_interfaces(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bcast_filter_iterator, &iter_data);

	return true;
}
2155static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2156 struct ieee80211_vif *vif)
2157{
2158 struct iwl_bcast_filter_cmd cmd;
2159
2160 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2161 return 0;
2162
2163 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2164 return 0;
2165
2166 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
2167 sizeof(cmd), &cmd);
2168}
2169#else
/* Stub when broadcast filtering is compiled out: nothing to configure. */
static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
						 struct ieee80211_vif *vif)
{
	return 0;
}
2175#endif
2176
/*
 * Handle BSS_CHANGED_* notifications for a station vif: refresh the MAC
 * context, and react to association/disassociation, beacon info, power,
 * TX power, CQM and ARP filter changes. Most failures are logged rather
 * than propagated - this is a void mac80211 notification path.
 */
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
					     struct ieee80211_vif *vif,
					     struct ieee80211_bss_conf *bss_conf,
					     u32 changes)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	/*
	 * Re-calculate the tsf id, as the master-slave relations depend on the
	 * beacon interval, which was not known when the station interface was
	 * added.
	 */
	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);

	/*
	 * If we're not associated yet, take the (new) BSSID before associating
	 * so the firmware knows. If we're already associated, then use the old
	 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
	 * branch for disassociation below.
	 */
	if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
		memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
	if (ret)
		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);

	/* after sending it once, adopt mac80211 data */
	memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
	mvmvif->associated = bss_conf->assoc;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			/* clear statistics to get clean beacon counter */
			iwl_mvm_request_statistics(mvm, true);
			memset(&mvmvif->beacon_stats, 0,
			       sizeof(mvmvif->beacon_stats));

			/* add quota for this interface */
			ret = iwl_mvm_update_quotas(mvm, true, NULL);
			if (ret) {
				IWL_ERR(mvm, "failed to update quotas\n");
				return;
			}

			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
				     &mvm->status)) {
				/*
				 * If we're restarting then the firmware will
				 * obviously have lost synchronisation with
				 * the AP. It will attempt to synchronise by
				 * itself, but we can make it more reliable by
				 * scheduling a session protection time event.
				 *
				 * The firmware needs to receive a beacon to
				 * catch up with synchronisation, use 110% of
				 * the beacon interval.
				 *
				 * Set a large maximum delay to allow for more
				 * than a single interface.
				 */
				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
				iwl_mvm_protect_session(mvm, vif, dur, dur,
							5 * dur, false);
			}

			iwl_mvm_sf_update(mvm, vif, false);
			iwl_mvm_power_vif_assoc(mvm, vif);
			if (vif->p2p) {
				iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
				iwl_mvm_update_smps(mvm, vif,
						    IWL_MVM_SMPS_REQ_PROT,
						    IEEE80211_SMPS_DYNAMIC);
			}
		} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
			/*
			 * If update fails - SF might be running in associated
			 * mode while disassociated - which is forbidden.
			 */
			WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
				  "Failed to update SF upon disassociation\n");

			/* remove AP station now that the MAC is unassoc */
			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
			if (ret)
				IWL_ERR(mvm, "failed to remove AP station\n");

			if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
			/* remove quota for this interface */
			ret = iwl_mvm_update_quotas(mvm, false, NULL);
			if (ret)
				IWL_ERR(mvm, "failed to update quotas\n");

			if (vif->p2p)
				iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);

			/* this will take the cleared BSSID from bss_conf */
			ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
			if (ret)
				IWL_ERR(mvm,
					"failed to update MAC %pM (clear after unassoc)\n",
					vif->addr);
		}

		iwl_mvm_recalc_multicast(mvm);
		iwl_mvm_configure_bcast_filter(mvm, vif);

		/* reset rssi values */
		mvmvif->bf_data.ave_beacon_signal = 0;

		iwl_mvm_bt_coex_vif_change(mvm);
		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
				    IEEE80211_SMPS_AUTOMATIC);
	} else if (changes & BSS_CHANGED_BEACON_INFO) {
		/*
		 * We received a beacon _after_ association so
		 * remove the session protection.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif,
					  &mvmvif->time_event_data);
	}

	if (changes & BSS_CHANGED_BEACON_INFO) {
		iwl_mvm_sf_update(mvm, vif, false);
		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
	}

	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
		ret = iwl_mvm_power_update_mac(mvm);
		if (ret)
			IWL_ERR(mvm, "failed to update power mode\n");
	}

	if (changes & BSS_CHANGED_TXPOWER) {
		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
				bss_conf->txpower);
		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
	}

	if (changes & BSS_CHANGED_CQM) {
		IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
		/* reset cqm events tracking */
		mvmvif->bf_data.last_cqm_event = 0;
		if (mvmvif->bf_data.bf_enabled) {
			ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
			if (ret)
				IWL_ERR(mvm,
					"failed to update CQM thresholds\n");
		}
	}

	if (changes & BSS_CHANGED_ARP_FILTER) {
		IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
		iwl_mvm_configure_bcast_filter(mvm, vif);
	}
}
2337
/*
 * mac80211 start_ap()/join_ibss() callback: push the beacon template,
 * add the MAC context, bind it, add the broadcast station, and update
 * power/quota state. Errors unwind through the goto chain in reverse
 * order of setup.
 */
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	/*
	 * iwl_mvm_mac_ctxt_add() might read directly from the device
	 * (the system time), so make sure it is available.
	 */
	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
	if (ret)
		return ret;

	mutex_lock(&mvm->mutex);

	/* Send the beacon template */
	ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
	if (ret)
		goto out_unlock;

	/*
	 * Re-calculate the tsf id, as the master-slave relations depend on the
	 * beacon interval, which was not known when the AP interface was added.
	 */
	if (vif->type == NL80211_IFTYPE_AP)
		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);

	mvmvif->ap_assoc_sta_count = 0;

	/* Add the mac context */
	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		goto out_unlock;

	/* Perform the binding */
	ret = iwl_mvm_binding_add_vif(mvm, vif);
	if (ret)
		goto out_remove;

	/* Send the bcast station. At this stage the TBTT and DTIM time events
	 * are added and applied to the scheduler */
	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
	if (ret)
		goto out_unbind;

	/* must be set before quota calculations */
	mvmvif->ap_ibss_active = true;

	/* power updated needs to be done before quotas */
	iwl_mvm_power_update_mac(mvm);

	ret = iwl_mvm_update_quotas(mvm, false, NULL);
	if (ret)
		goto out_quota_failed;

	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
	if (vif->p2p && mvm->p2p_device_vif)
		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);

	iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);

	iwl_mvm_bt_coex_vif_change(mvm);

	/* we don't support TDLS during DCM */
	if (iwl_mvm_phy_ctx_count(mvm) > 1)
		iwl_mvm_teardown_tdls_peers(mvm);

	goto out_unlock;

out_quota_failed:
	iwl_mvm_power_update_mac(mvm);
	mvmvif->ap_ibss_active = false;
	iwl_mvm_send_rm_bcast_sta(mvm, vif);
out_unbind:
	iwl_mvm_binding_remove_vif(mvm, vif);
out_remove:
	iwl_mvm_mac_ctxt_remove(mvm, vif);
out_unlock:
	mutex_unlock(&mvm->mutex);
	iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
	return ret;
}
2422
/*
 * mac80211 stop_ap / leave_ibss handler: tear down everything that
 * iwl_mvm_start_ap_ibss() set up, in reverse order, and clear any
 * channel-switch (CSA) state that referenced this vif.
 */
static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_prepare_mac_removal(mvm, vif);

	mutex_lock(&mvm->mutex);

	/* Handle AP stop while in CSA */
	if (rcu_access_pointer(mvm->csa_vif) == vif) {
		iwl_mvm_remove_time_event(mvm, mvmvif,
					  &mvmvif->time_event_data);
		RCU_INIT_POINTER(mvm->csa_vif, NULL);
		mvmvif->csa_countdown = false;
	}

	/* if TX was blocked pending this vif's channel switch, unblock it */
	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
		RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
		mvm->csa_tx_block_bcn_timeout = 0;
	}

	mvmvif->ap_ibss_active = false;
	mvm->ap_last_beacon_gp2 = 0;

	iwl_mvm_bt_coex_vif_change(mvm);

	iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);

	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
	if (vif->p2p && mvm->p2p_device_vif)
		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);

	iwl_mvm_update_quotas(mvm, false, NULL);
	iwl_mvm_send_rm_bcast_sta(mvm, vif);
	iwl_mvm_binding_remove_vif(mvm, vif);

	/* power must be updated after the binding is removed */
	iwl_mvm_power_update_mac(mvm);

	iwl_mvm_mac_ctxt_remove(mvm, vif);

	mutex_unlock(&mvm->mutex);
}
2467
2468static void
2469iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2470 struct ieee80211_vif *vif,
2471 struct ieee80211_bss_conf *bss_conf,
2472 u32 changes)
2473{
2474 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2475
2476 /* Changes will be applied when the AP/IBSS is started */
2477 if (!mvmvif->ap_ibss_active)
2478 return;
2479
2480 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2481 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2482 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2483 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2484
2485 /* Need to send a new beacon template to the FW */
2486 if (changes & BSS_CHANGED_BEACON &&
2487 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2488 IWL_WARN(mvm, "Failed updating beacon data\n");
2489
2490 if (changes & BSS_CHANGED_TXPOWER) {
2491 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2492 bss_conf->txpower);
2493 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2494 }
2495
2496}
2497
/*
 * mac80211 bss_info_changed handler: dispatch the change notification
 * to the station or AP/IBSS specific handler under the mvm mutex.
 */
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/*
	 * iwl_mvm_bss_info_changed_station() might call
	 * iwl_mvm_protect_session(), which reads directly from
	 * the device (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
		return;

	mutex_lock(&mvm->mutex);

	/* becoming non-idle: stop any scheduled scan first */
	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
		break;
	default:
		/* shouldn't happen */
		WARN_ON_ONCE(1);
	}

	mutex_unlock(&mvm->mutex);
	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
}
2534
2535static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2536 struct ieee80211_vif *vif,
2537 struct ieee80211_scan_request *hw_req)
2538{
2539 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2540 int ret;
2541
2542 if (hw_req->req.n_channels == 0 ||
2543 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2544 return -EINVAL;
2545
2546 mutex_lock(&mvm->mutex);
2547 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2548 mutex_unlock(&mvm->mutex);
2549
2550 return ret;
2551}
2552
/* mac80211 cancel_hw_scan handler: stop a regular scan if one is running */
static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);

	/* Due to a race condition, it's possible that mac80211 asks
	 * us to stop a hw_scan when it's already stopped. This can
	 * happen, for instance, if we stopped the scan ourselves,
	 * called ieee80211_scan_completed() and the userspace called
	 * cancel scan before ieee80211_scan_work() could run.
	 * To handle that, simply return if the scan is not running.
	 */
	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

	mutex_unlock(&mvm->mutex);
}
2572
2573static void
2574iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2575 struct ieee80211_sta *sta, u16 tids,
2576 int num_frames,
2577 enum ieee80211_frame_release_type reason,
2578 bool more_data)
2579{
2580 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2581
2582 /* Called when we need to transmit (a) frame(s) from mac80211 */
2583
2584 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2585 tids, more_data, false);
2586}
2587
2588static void
2589iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2590 struct ieee80211_sta *sta, u16 tids,
2591 int num_frames,
2592 enum ieee80211_frame_release_type reason,
2593 bool more_data)
2594{
2595 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2596
2597 /* Called when we need to transmit (a) frame(s) from agg queue */
2598
2599 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2600 tids, more_data, true);
2601}
2602
/*
 * mac80211 sta_notify handler: a station entered or left power-save.
 *
 * First collects (under the station spinlock) the TX queues and TIDs
 * that belong to active/draining aggregation sessions and still have
 * queued frames; then on sleep it freezes those queue timers and marks
 * the TIDs buffered, and on wake it unfreezes them and wakes the
 * station in the fw.
 */
static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long txqs = 0, tids = 0;
	int tid;

	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/* only queues of active or draining agg sessions matter */
		if (tid_data->state != IWL_AGG_ON &&
		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
			continue;

		__set_bit(tid_data->txq_id, &txqs);

		/* only mark the TID buffered if frames are actually queued */
		if (iwl_mvm_tid_queued(tid_data) == 0)
			continue;

		__set_bit(tid, &tids);
	}

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		/* keep mac80211 from releasing frames we still have in flight */
		if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
			ieee80211_sta_block_awake(hw, sta, true);

		for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
			ieee80211_sta_set_buffered(sta, tid, true);

		if (txqs)
			iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
		/*
		 * The fw updates the STA to be asleep. Tx packets on the Tx
		 * queues to this station will not be transmitted. The fw will
		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
		 */
		break;
	case STA_NOTIFY_AWAKE:
		if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
			break;

		if (txqs)
			iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
		iwl_mvm_sta_modify_ps_wake(mvm, sta);
		break;
	default:
		break;
	}
	spin_unlock_bh(&mvmsta->lock);
}
2658
/*
 * mac80211 sta_pre_rcu_remove handler: invalidate our fw-id -> station
 * mapping before mac80211 does its RCU synchronisation, and update the
 * AP's associated-station count / MAC context if needed.
 */
static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	/*
	 * This is called before mac80211 does RCU synchronisation,
	 * so here we already invalidate our internal RCU-protected
	 * station pointer. The rest of the code will thus no longer
	 * be able to find the station this way, and we don't rely
	 * on further RCU synchronisation after the sta_state()
	 * callback deleted the station.
	 */
	mutex_lock(&mvm->mutex);
	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-ENOENT));

	/* an AP loses one associated station; let the fw know */
	if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
		mvmvif->ap_assoc_sta_count--;
		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	}

	mutex_unlock(&mvm->mutex);
}
2687
2688static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2689 const u8 *bssid)
2690{
2691 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2692 return;
2693
2694 if (iwlwifi_mod_params.uapsd_disable) {
2695 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2696 return;
2697 }
2698
2699 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2700}
2701
/*
 * mac80211 sta_state handler: walk a station through its state
 * transitions (NOTEXIST <-> NONE <-> AUTH <-> ASSOC <-> AUTHORIZED),
 * adding/updating/removing it in the firmware as appropriate and
 * handling TDLS bookkeeping along the way.
 *
 * Returns 0 on success or a negative error code (which makes mac80211
 * abandon the transition).
 */
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 enum ieee80211_sta_state old_state,
				 enum ieee80211_sta_state new_state)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
			   sta->addr, old_state, new_state);

	/* this would be a mac80211 bug ... but don't crash */
	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
		return -EINVAL;

	/* if a STA is being removed, reuse its ID */
	flush_work(&mvm->sta_drained_wk);

	mutex_lock(&mvm->mutex);
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		/*
		 * Firmware bug - it'll crash if the beacon interval is less
		 * than 16. We can't avoid connecting at all, so refuse the
		 * station state change, this will cause mac80211 to abandon
		 * attempts to connect to this AP, and eventually wpa_s will
		 * blacklist the AP...
		 */
		if (vif->type == NL80211_IFTYPE_STATION &&
		    vif->bss_conf.beacon_int < 16) {
			IWL_ERR(mvm,
				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
				sta->addr, vif->bss_conf.beacon_int);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Refuse new TDLS peers on P2P vifs, when the per-vif
		 * TDLS station limit is reached, or during DCM (more
		 * than one PHY context in use).
		 */
		if (sta->tdls &&
		    (vif->p2p ||
		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
						IWL_MVM_TDLS_STA_COUNT ||
		     iwl_mvm_phy_ctx_count(mvm) > 1)) {
			IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
			ret = -EBUSY;
			goto out_unlock;
		}

		ret = iwl_mvm_add_sta(mvm, vif, sta);
		if (sta->tdls && ret == 0)
			iwl_mvm_recalc_tdls_state(mvm, vif, true);
	} else if (old_state == IEEE80211_STA_NONE &&
		   new_state == IEEE80211_STA_AUTH) {
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		mvm->last_ebs_successful = true;
		iwl_mvm_check_uapsd(mvm, vif, sta->addr);
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTH &&
		   new_state == IEEE80211_STA_ASSOC) {
		/* on association, push the full station params and init rates */
		ret = iwl_mvm_update_sta(mvm, vif, sta);
		if (ret == 0)
			iwl_mvm_rs_rate_init(mvm, sta,
					     mvmvif->phy_ctxt->channel->band,
					     true);
	} else if (old_state == IEEE80211_STA_ASSOC &&
		   new_state == IEEE80211_STA_AUTHORIZED) {

		/* we don't support TDLS during DCM */
		if (iwl_mvm_phy_ctx_count(mvm) > 1)
			iwl_mvm_teardown_tdls_peers(mvm);

		/* enable beacon filtering */
		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
		   new_state == IEEE80211_STA_ASSOC) {
		/* disable beacon filtering */
		WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
		ret = 0;
	} else if (old_state == IEEE80211_STA_ASSOC &&
		   new_state == IEEE80211_STA_AUTH) {
		/* nothing to do on this downward transition */
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTH &&
		   new_state == IEEE80211_STA_NONE) {
		/* nothing to do on this downward transition */
		ret = 0;
	} else if (old_state == IEEE80211_STA_NONE &&
		   new_state == IEEE80211_STA_NOTEXIST) {
		ret = iwl_mvm_rm_sta(mvm, vif, sta);
		if (sta->tdls)
			iwl_mvm_recalc_tdls_state(mvm, vif, false);
	} else {
		/* any other transition is unexpected */
		ret = -EIO;
	}
 out_unlock:
	mutex_unlock(&mvm->mutex);

	/* reserve/release the fw TDLS TID outside the mutex */
	if (sta->tdls && ret == 0) {
		if (old_state == IEEE80211_STA_NOTEXIST &&
		    new_state == IEEE80211_STA_NONE)
			ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
		else if (old_state == IEEE80211_STA_NONE &&
			 new_state == IEEE80211_STA_NOTEXIST)
			ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
	}

	return ret;
}
2813
2814static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2815{
2816 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2817
2818 mvm->rts_threshold = value;
2819
2820 return 0;
2821}
2822
2823static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2824 struct ieee80211_vif *vif,
2825 struct ieee80211_sta *sta, u32 changed)
2826{
2827 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2828
2829 if (vif->type == NL80211_IFTYPE_STATION &&
2830 changed & IEEE80211_RC_NSS_CHANGED)
2831 iwl_mvm_sf_update(mvm, vif, false);
2832}
2833
2834static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2835 struct ieee80211_vif *vif, u16 ac,
2836 const struct ieee80211_tx_queue_params *params)
2837{
2838 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2839 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2840
2841 mvmvif->queue_params[ac] = *params;
2842
2843 /*
2844 * No need to update right away, we'll get BSS_CHANGED_QOS
2845 * The exception is P2P_DEVICE interface which needs immediate update.
2846 */
2847 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2848 int ret;
2849
2850 mutex_lock(&mvm->mutex);
2851 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2852 mutex_unlock(&mvm->mutex);
2853 return ret;
2854 }
2855 return 0;
2856}
2857
/*
 * mac80211 mgd_prepare_tx hook: schedule a session-protection time
 * event so the device stays on channel long enough to complete the
 * connection handshake and hear a beacon. Durations scale with the
 * beacon interval but are capped by the session-protection limits.
 * A no-op if we are already associated (mac80211 shouldn't call us
 * then - hence the WARN).
 */
static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
			   200 + vif->bss_conf.beacon_int);
	u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
			       100 + vif->bss_conf.beacon_int);

	if (WARN_ON_ONCE(vif->bss_conf.assoc))
		return;

	/*
	 * iwl_mvm_protect_session() reads directly from the device
	 * (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
		return;

	mutex_lock(&mvm->mutex);
	/* Try really hard to protect the session and hear a beacon */
	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
}
2884
2885static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2886 struct ieee80211_vif *vif,
2887 struct cfg80211_sched_scan_request *req,
2888 struct ieee80211_scan_ies *ies)
2889{
2890 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2891
2892 int ret;
2893
2894 mutex_lock(&mvm->mutex);
2895
2896 if (!vif->bss_conf.idle) {
2897 ret = -EBUSY;
2898 goto out;
2899 }
2900
2901 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2902
2903out:
2904 mutex_unlock(&mvm->mutex);
2905 return ret;
2906}
2907
2908static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2909 struct ieee80211_vif *vif)
2910{
2911 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2912 int ret;
2913
2914 mutex_lock(&mvm->mutex);
2915
2916 /* Due to a race condition, it's possible that mac80211 asks
2917 * us to stop a sched_scan when it's already stopped. This
2918 * can happen, for instance, if we stopped the scan ourselves,
2919 * called ieee80211_sched_scan_stopped() and the userspace called
2920 * stop sched scan scan before ieee80211_sched_scan_stopped_work()
2921 * could run. To handle this, simply return if the scan is
2922 * not running.
2923 */
2924 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2925 mutex_unlock(&mvm->mutex);
2926 return 0;
2927 }
2928
2929 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2930 mutex_unlock(&mvm->mutex);
2931 iwl_mvm_wait_for_async_handlers(mvm);
2932
2933 return ret;
2934}
2935
/*
 * mac80211 set_key handler: program (SET_KEY) or remove (DISABLE_KEY)
 * a hw crypto key in the firmware, after adjusting the key flags per
 * cipher. Returns 0 on success/intentional fallback, -EOPNOTSUPP for
 * unsupported ciphers or when hw crypto is disabled.
 */
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
			       enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	if (iwlwifi_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* tell mac80211 which crypto operations we need it to do for us */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* For non-client mode, only use WEP keys for TX as we probably
		 * don't have a station yet anyway and would then have to keep
		 * track of the keys, linking them to each of the clients/peers
		 * as they appear. For now, don't do that, for performance WEP
		 * offload doesn't really matter much, but we need it for some
		 * other offload features in client mode.
		 */
		if (vif->type != NL80211_IFTYPE_STATION)
			return 0;
		break;
	default:
		/* currently FW supports only one optional cipher scheme */
		if (hw->n_cipher_schemes &&
		    hw->cipher_schemes->cipher == key->cipher)
			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		else
			return -EOPNOTSUPP;
	}

	mutex_lock(&mvm->mutex);

	switch (cmd) {
	case SET_KEY:
		if ((vif->type == NL80211_IFTYPE_ADHOC ||
		     vif->type == NL80211_IFTYPE_AP) && !sta) {
			/*
			 * GTK on AP interface is a TX-only key, return 0;
			 * on IBSS they're per-station and because we're lazy
			 * we don't support them for RX, so do the same.
			 */
			ret = 0;
			key->hw_key_idx = STA_KEY_IDX_INVALID;
			break;
		}

		/* During FW restart, in order to restore the state as it was,
		 * don't try to reprogram keys we previously failed for.
		 */
		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		    key->hw_key_idx == STA_KEY_IDX_INVALID) {
			IWL_DEBUG_MAC80211(mvm,
					   "skip invalid idx key programming during restart\n");
			ret = 0;
			break;
		}

		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
					  test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
						   &mvm->status));
		if (ret) {
			IWL_WARN(mvm, "set key failed\n");
			/*
			 * can't add key for RX, but we don't need it
			 * in the device for TX so still return 0
			 */
			key->hw_key_idx = STA_KEY_IDX_INVALID;
			ret = 0;
		}

		break;
	case DISABLE_KEY:
		/* nothing to remove if the key was never programmed */
		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
			ret = 0;
			break;
		}

		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&mvm->mutex);
	return ret;
}
3040
3041static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3042 struct ieee80211_vif *vif,
3043 struct ieee80211_key_conf *keyconf,
3044 struct ieee80211_sta *sta,
3045 u32 iv32, u16 *phase1key)
3046{
3047 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3048
3049 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3050 return;
3051
3052 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3053}
3054
3055
3056static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3057 struct iwl_rx_packet *pkt, void *data)
3058{
3059 struct iwl_mvm *mvm =
3060 container_of(notif_wait, struct iwl_mvm, notif_wait);
3061 struct iwl_hs20_roc_res *resp;
3062 int resp_len = iwl_rx_packet_payload_len(pkt);
3063 struct iwl_mvm_time_event_data *te_data = data;
3064
3065 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3066 return true;
3067
3068 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3069 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3070 return true;
3071 }
3072
3073 resp = (void *)pkt->data;
3074
3075 IWL_DEBUG_TE(mvm,
3076 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
3077 resp->status, resp->event_unique_id);
3078
3079 te_data->uid = le32_to_cpu(resp->event_unique_id);
3080 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3081 te_data->uid);
3082
3083 spin_lock_bh(&mvm->time_event_lock);
3084 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3085 spin_unlock_bh(&mvm->time_event_lock);
3086
3087 return true;
3088}
3089
/* max delay (ms) the fw may apply before starting the aux-ROC event */
#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
/*
 * Send a HOT_SPOT_CMD to start an auxiliary remain-on-channel session
 * (used for HS2.0 / station-vif ROC) on @channel for @duration ms.
 *
 * A notification wait is used (rather than CMD_WANT_SKB) so the
 * response - and the time event UID in it - is processed directly in
 * the RX path; see the comment below. Returns 0 on success or a
 * negative error code, clearing the time event data on failure.
 */
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
				    struct ieee80211_channel *channel,
				    struct ieee80211_vif *vif,
				    int duration)
{
	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
	static const u16 time_event_response[] = { HOT_SPOT_CMD };
	struct iwl_notification_wait wait_time_event;
	struct iwl_hs20_roc_req aux_roc_req = {
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
		/* Set the channel info data */
		.channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
			PHY_BAND_24 : PHY_BAND_5,
		.channel_info.channel = channel->hw_value,
		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
		/* Set the time and duration */
		.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
		.apply_time_max_delay =
			cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
		.duration = cpu_to_le32(MSEC_TO_TU(duration)),
	 };

	/* Set the node address */
	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/* only one aux-ROC time event may be pending at a time */
	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}

	te_data->vif = vif;
	te_data->duration = duration;
	te_data->id = HOT_SPOT_CMD;

	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_rx_aux_roc, te_data);

	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
				   &aux_roc_req);

	if (res) {
		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(res);

	/* on any failure, undo the time event data set above */
	if (res) {
	out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}

	return res;
}
3173
/*
 * mac80211 remain_on_channel handler.
 *
 * For station vifs the aux-ROC (hotspot) firmware flow is used (if
 * supported). For P2P_DEVICE vifs, the vif is (re)bound to a PHY
 * context configured for the requested channel - reusing an existing
 * context when possible, reconfiguring the current one if the vif is
 * its only user, or allocating a fresh one otherwise - and then a
 * P2P ROC time event is scheduled.
 */
static int iwl_mvm_roc(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_channel *channel,
		       int duration,
		       enum ieee80211_roc_type type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct cfg80211_chan_def chandef;
	struct iwl_mvm_phy_ctxt *phy_ctxt;
	int ret, i;

	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
			   duration, type);

	/* make sure any previous ROC teardown has fully finished */
	flush_work(&mvm->roc_done_wk);

	mutex_lock(&mvm->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
			/* Use aux roc framework (HS20) */
			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
						       vif, duration);
			goto out_unlock;
		}
		IWL_ERR(mvm, "hotspot not supported\n");
		ret = -EINVAL;
		goto out_unlock;
	case NL80211_IFTYPE_P2P_DEVICE:
		/* handle below */
		break;
	default:
		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* look for an existing PHY context already on the target channel */
	for (i = 0; i < NUM_PHY_CTX; i++) {
		phy_ctxt = &mvm->phy_ctxts[i];
		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
			continue;

		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
			/*
			 * Unbind the P2P_DEVICE from the current PHY context,
			 * and if the PHY context is not used remove it.
			 */
			ret = iwl_mvm_binding_remove_vif(mvm, vif);
			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
				goto out_unlock;

			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);

			/* Bind the P2P_DEVICE to the current PHY Context */
			mvmvif->phy_ctxt = phy_ctxt;

			ret = iwl_mvm_binding_add_vif(mvm, vif);
			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
				goto out_unlock;

			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
			goto schedule_time_event;
		}
	}

	/* Need to update the PHY context only if the ROC channel changed */
	if (channel == mvmvif->phy_ctxt->channel)
		goto schedule_time_event;

	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);

	/*
	 * Change the PHY context configuration as it is currently referenced
	 * only by the P2P Device MAC
	 */
	if (mvmvif->phy_ctxt->ref == 1) {
		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
					       &chandef, 1, 1);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * The PHY context is shared with other MACs. Need to remove the
		 * P2P Device from the binding, allocate an new PHY context and
		 * create a new binding
		 */
		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
		if (!phy_ctxt) {
			ret = -ENOSPC;
			goto out_unlock;
		}

		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
					       1, 1);
		if (ret) {
			IWL_ERR(mvm, "Failed to change PHY context\n");
			goto out_unlock;
		}

		/* Unbind the P2P_DEVICE from the current PHY context */
		ret = iwl_mvm_binding_remove_vif(mvm, vif);
		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
			goto out_unlock;

		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);

		/* Bind the P2P_DEVICE to the new allocated PHY context */
		mvmvif->phy_ctxt = phy_ctxt;

		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
			goto out_unlock;

		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
	}

schedule_time_event:
	/* Schedule the time events */
	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);

out_unlock:
	mutex_unlock(&mvm->mutex);
	IWL_DEBUG_MAC80211(mvm, "leave\n");
	return ret;
}
3302
3303static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3304{
3305 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3306
3307 IWL_DEBUG_MAC80211(mvm, "enter\n");
3308
3309 mutex_lock(&mvm->mutex);
3310 iwl_mvm_stop_roc(mvm);
3311 mutex_unlock(&mvm->mutex);
3312
3313 IWL_DEBUG_MAC80211(mvm, "leave\n");
3314 return 0;
3315}
3316
3317static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3318 struct ieee80211_chanctx_conf *ctx)
3319{
3320 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3321 struct iwl_mvm_phy_ctxt *phy_ctxt;
3322 int ret;
3323
3324 lockdep_assert_held(&mvm->mutex);
3325
3326 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3327
3328 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3329 if (!phy_ctxt) {
3330 ret = -ENOSPC;
3331 goto out;
3332 }
3333
3334 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3335 ctx->rx_chains_static,
3336 ctx->rx_chains_dynamic);
3337 if (ret) {
3338 IWL_ERR(mvm, "Failed to add PHY context\n");
3339 goto out;
3340 }
3341
3342 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3343 *phy_ctxt_id = phy_ctxt->id;
3344out:
3345 return ret;
3346}
3347
/* mac80211 add_chanctx handler: locked wrapper for __iwl_mvm_add_chanctx() */
static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_add_chanctx(mvm, ctx);
	mutex_unlock(&mvm->mutex);

	return ret;
}
3360
3361static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3362 struct ieee80211_chanctx_conf *ctx)
3363{
3364 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3365 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3366
3367 lockdep_assert_held(&mvm->mutex);
3368
3369 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3370}
3371
/* mac80211 remove_chanctx handler: locked wrapper for __iwl_mvm_remove_chanctx() */
static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_remove_chanctx(mvm, ctx);
	mutex_unlock(&mvm->mutex);
}
3381
/*
 * mac80211 change_chanctx handler.
 *
 * A PHY context referenced by more than one vif may only absorb
 * width / RX-chain / radar / min-width changes - anything else would
 * affect the other users and is rejected with a warning. Accepted
 * changes are pushed to the firmware via iwl_mvm_phy_ctxt_changed().
 */
static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx,
				   u32 changed)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];

	if (WARN_ONCE((phy_ctxt->ref > 1) &&
		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
				   IEEE80211_CHANCTX_CHANGE_RADAR |
				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
		      phy_ctxt->ref, changed))
		return;

	mutex_lock(&mvm->mutex);
	/* a channel change may also affect BT coexistence decisions */
	iwl_mvm_bt_coex_vif_change(mvm);
	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
				 ctx->rx_chains_static,
				 ctx->rx_chains_dynamic);
	mutex_unlock(&mvm->mutex);
}
3406
/*
 * Bind a vif to a PHY context (a mac80211 channel context).
 *
 * Called with the mvm mutex held, either on a fresh assignment or while
 * switching channel contexts (switching_chanctx = true, e.g. during CSA).
 * Returns 0 on success or a negative error; on failure the vif is left
 * without a PHY context.
 */
static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct ieee80211_chanctx_conf *ctx,
					bool switching_chanctx)
{
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	mvmvif->phy_ctxt = phy_ctxt;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/* only needed if we're switching chanctx (i.e. during CSA) */
		if (switching_chanctx) {
			mvmvif->ap_ibss_active = true;
			break;
		}
		/* fall through - non-CSA AP is handled like IBSS below */
	case NL80211_IFTYPE_ADHOC:
		/*
		 * The AP binding flow is handled as part of the start_ap flow
		 * (in bss_info_changed), similarly for IBSS.
		 */
		ret = 0;
		goto out;
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_MONITOR:
		/* always disable PS when a monitor interface is active */
		mvmvif->ps_disabled = true;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = iwl_mvm_binding_add_vif(mvm, vif);
	if (ret)
		goto out;

	/*
	 * Power state must be updated before quotas,
	 * otherwise fw will complain.
	 */
	iwl_mvm_power_update_mac(mvm);

	/* Setting the quota at this stage is only required for monitor
	 * interfaces. For the other types, the bss_info changed flow
	 * will handle quota settings.
	 */
	if (vif->type == NL80211_IFTYPE_MONITOR) {
		mvmvif->monitor_active = true;
		ret = iwl_mvm_update_quotas(mvm, false, NULL);
		if (ret)
			goto out_remove_binding;
	}

	/* Handle binding during CSA */
	if (vif->type == NL80211_IFTYPE_AP) {
		iwl_mvm_update_quotas(mvm, false, NULL);
		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	}

	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
		/* twice the beacon interval, in TU */
		u32 duration = 2 * vif->bss_conf.beacon_int;

		/* iwl_mvm_protect_session() reads directly from the
		 * device (the system time), so make sure it is
		 * available.
		 */
		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
		if (ret)
			goto out_remove_binding;

		/* Protect the session to make sure we hear the first
		 * beacon on the new channel.
		 */
		iwl_mvm_protect_session(mvm, vif, duration, duration,
					vif->bss_conf.beacon_int / 2,
					true);

		iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);

		iwl_mvm_update_quotas(mvm, false, NULL);
	}

	goto out;

out_remove_binding:
	iwl_mvm_binding_remove_vif(mvm, vif);
	iwl_mvm_power_update_mac(mvm);
out:
	/* a failed assignment leaves the vif without a PHY context */
	if (ret)
		mvmvif->phy_ctxt = NULL;
	return ret;
}
3506static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3507 struct ieee80211_vif *vif,
3508 struct ieee80211_chanctx_conf *ctx)
3509{
3510 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3511 int ret;
3512
3513 mutex_lock(&mvm->mutex);
3514 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3515 mutex_unlock(&mvm->mutex);
3516
3517 return ret;
3518}
3519
/*
 * Unbind a vif from its PHY context.
 *
 * Called with the mvm mutex held, either on a plain unassignment or
 * while switching channel contexts (switching_chanctx = true, during
 * CSA).  On exit the vif no longer has a PHY context and the power
 * state is recomputed.
 */
static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif,
					   struct ieee80211_chanctx_conf *ctx,
					   bool switching_chanctx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_vif *disabled_vif = NULL;

	lockdep_assert_held(&mvm->mutex);

	/* cancel any pending session-protection time event for this vif */
	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);

	switch (vif->type) {
	case NL80211_IFTYPE_ADHOC:
		/* IBSS binding is torn down in the stop_ap/leave_ibss flow */
		goto out;
	case NL80211_IFTYPE_MONITOR:
		mvmvif->monitor_active = false;
		mvmvif->ps_disabled = false;
		break;
	case NL80211_IFTYPE_AP:
		/* This part is triggered only during CSA */
		if (!switching_chanctx || !mvmvif->ap_ibss_active)
			goto out;

		mvmvif->csa_countdown = false;

		/* Set CS bit on all the stations */
		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);

		/* Save blocked iface, the timeout is set on the next beacon */
		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);

		mvmvif->ap_ibss_active = false;
		break;
	case NL80211_IFTYPE_STATION:
		if (!switching_chanctx)
			break;

		/* exclude this vif from quota allocation while it moves */
		disabled_vif = vif;

		iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
		break;
	default:
		break;
	}

	iwl_mvm_update_quotas(mvm, false, disabled_vif);
	iwl_mvm_binding_remove_vif(mvm, vif);

out:
	mvmvif->phy_ctxt = NULL;
	iwl_mvm_power_update_mac(mvm);
}
3573
3574static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3575 struct ieee80211_vif *vif,
3576 struct ieee80211_chanctx_conf *ctx)
3577{
3578 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3579
3580 mutex_lock(&mvm->mutex);
3581 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3582 mutex_unlock(&mvm->mutex);
3583}
3584
/*
 * Swap-mode channel context switch: tear down the old context, create
 * the new one and move the (single) vif over to it.
 *
 * On failure each step unwinds to the previous state; if even restoring
 * the old context fails, the hardware is restarted as a last resort.
 */
static int
iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
				struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	/* unbinding/removal of the old context cannot fail */
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);

	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
	if (ret) {
		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
		goto out_reassign;
	}

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_remove;
	}

	/* we don't support TDLS during DCM - can be caused by channel switch */
	if (iwl_mvm_phy_ctx_count(mvm) > 1)
		iwl_mvm_teardown_tdls_peers(mvm);

	goto out;

out_remove:
	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);

out_reassign:
	/* try to put the old context/binding back in place */
	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
		goto out_restart;
	}

	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}
3641
/*
 * Reassign-mode channel context switch: move the vif from the old
 * (still existing) context to the new one without removing either.
 *
 * If assigning the new context fails, fall back to the old one; if
 * that fails too, restart the hardware.
 */
static int
iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
				    struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_reassign;
	}

	goto out;

out_reassign:
	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}
3679
3680static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3681 struct ieee80211_vif_chanctx_switch *vifs,
3682 int n_vifs,
3683 enum ieee80211_chanctx_switch_mode mode)
3684{
3685 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3686 int ret;
3687
3688 /* we only support a single-vif right now */
3689 if (n_vifs > 1)
3690 return -EOPNOTSUPP;
3691
3692 switch (mode) {
3693 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3694 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3695 break;
3696 case CHANCTX_SWMODE_REASSIGN_VIF:
3697 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3698 break;
3699 default:
3700 ret = -EOPNOTSUPP;
3701 break;
3702 }
3703
3704 return ret;
3705}
3706
3707static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3708 struct ieee80211_sta *sta,
3709 bool set)
3710{
3711 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3712 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3713
3714 if (!mvm_sta || !mvm_sta->vif) {
3715 IWL_ERR(mvm, "Station is not associated to a vif\n");
3716 return -EINVAL;
3717 }
3718
3719 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3720}
3721
#ifdef CONFIG_NL80211_TESTMODE
/* netlink attribute policy for the testmode commands handled below */
static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
};

/*
 * Parse and execute a testmode command.  Called with the mvm mutex held.
 *
 * Supported commands:
 *  - IWL_MVM_TM_CMD_SET_NOA: set a NoA duration on a beaconing P2P GO
 *    vif; the duration must be shorter than the beacon interval.
 *  - IWL_MVM_TM_CMD_SET_BEACON_FILTER: enable/disable beacon filtering
 *    on an associated station vif.
 *
 * Returns 0 on success or a negative error code.
 */
static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif,
				      void *data, int len)
{
	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
	int err;
	u32 noa_duration;

	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
	if (err)
		return err;

	if (!tb[IWL_MVM_TM_ATTR_CMD])
		return -EINVAL;

	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
	case IWL_MVM_TM_CMD_SET_NOA:
		/* must be a beaconing P2P GO vif with a duration attr */
		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
		    !vif->bss_conf.enable_beacon ||
		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
			return -EINVAL;

		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
		if (noa_duration >= vif->bss_conf.beacon_int)
			return -EINVAL;

		mvm->noa_duration = noa_duration;
		mvm->noa_vif = vif;

		/* the NoA is applied through the quota command */
		return iwl_mvm_update_quotas(mvm, false, NULL);
	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
		/* must be associated client vif - ignore authorized */
		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
			return -EINVAL;

		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	}

	return -EOPNOTSUPP;
}

/* mac80211 testmode_cmd op: serialize on the mvm mutex and dispatch */
static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    void *data, int len)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int err;

	mutex_lock(&mvm->mutex);
	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
	mutex_unlock(&mvm->mutex);

	return err;
}
#endif
3788
static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/*
	 * Merely providing this op keeps mac80211 from starting its own
	 * channel switch timer, so that we can call
	 * ieee80211_chswitch_done() ourselves at the right time (which
	 * is when the absence time event starts).
	 */
	IWL_DEBUG_MAC80211(mvm, "dummy channel switch op\n");
}
3802
/*
 * mac80211 pre_channel_switch op: prepare the device for an upcoming
 * channel switch announced by the AP/GO (client) or started by us (AP).
 *
 * For AP vifs this only records the CSA state (one CSA at a time); for
 * client vifs it schedules the channel-switch time event and disables
 * beacon filtering for the duration of the switch.  PS is disabled in
 * all cases until post_channel_switch.  Returns 0 or a negative error.
 */
static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *csa_vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 apply_time;
	int ret;

	mutex_lock(&mvm->mutex);

	mvmvif->csa_failed = false;

	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
			   chsw->chandef.center_freq1);

	iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/* only one CSA may be in progress at a time */
		csa_vif =
			rcu_dereference_protected(mvm->csa_vif,
						  lockdep_is_held(&mvm->mutex));
		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
			      "Another CSA is already in progress")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		rcu_assign_pointer(mvm->csa_vif, vif);

		if (WARN_ONCE(mvmvif->csa_countdown,
			      "Previous CSA countdown didn't complete")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		break;
	case NL80211_IFTYPE_STATION:
		/* Schedule the time event to a bit before beacon 1,
		 * to make sure we're in the new channel when the
		 * GO/AP arrives.
		 */
		/* beacon_int is in TU; * 1024 converts TU to usec to
		 * match the device_timestamp time base
		 */
		apply_time = chsw->device_timestamp +
			((vif->bss_conf.beacon_int * (chsw->count - 1) -
			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);

		if (chsw->block_tx)
			iwl_mvm_csa_client_absent(mvm, vif);

		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
					    apply_time);
		/* beacon filtering would miss beacons on the new channel */
		if (mvmvif->bf_data.bf_enabled) {
			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
			if (ret)
				goto out_unlock;
		}

		break;
	default:
		break;
	}

	/* keep the device awake throughout the switch */
	mvmvif->ps_disabled = true;

	ret = iwl_mvm_power_update_ps(mvm);
	if (ret)
		goto out_unlock;

	/* we won't be on this channel any longer */
	iwl_mvm_teardown_tdls_peers(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}
3881
/*
 * mac80211 post_channel_switch op: finalize a channel switch.
 *
 * For client vifs: re-enable TX to the AP station, refresh the MAC
 * context, restore beacon filtering and cancel the session protection
 * scheduled in pre_channel_switch.  Finally re-enable PS.  Returns 0,
 * -EIO if the CSA failed or the AP station is gone, or another error.
 */
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);

	if (mvmvif->csa_failed) {
		mvmvif->csa_failed = false;
		ret = -EIO;
		goto out_unlock;
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		struct iwl_mvm_sta *mvmsta;

		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
							  mvmvif->ap_sta_id);

		if (WARN_ON(!mvmsta)) {
			ret = -EIO;
			goto out_unlock;
		}

		/* TX to the AP was blocked during the switch - unblock it */
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);

		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

		/* undo the disable done in pre_channel_switch */
		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		if (ret)
			goto out_unlock;

		iwl_mvm_stop_session_protection(mvm, vif);
	}

	/* PS was disabled for the duration of the switch */
	mvmvif->ps_disabled = false;

	ret = iwl_mvm_power_update_ps(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}
3928
/*
 * mac80211 flush op: flush (or wait for) pending TX frames of a station
 * vif - the AP station and all TDLS peers bound to it.
 *
 * With drop=true the frames are discarded via the firmware flush
 * command; otherwise we wait for the TX queues to drain.  The wait is
 * done without holding the mvm mutex since it can take a while and
 * other operations may need the lock meanwhile.
 */
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, u32 queues, bool drop)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_sta *sta;
	int i;
	u32 msk = 0;

	if (!vif || vif->type != NL80211_IFTYPE_STATION)
		return;

	mutex_lock(&mvm->mutex);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* flush the AP-station and all TDLS peers */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		if (mvmsta->vif != vif)
			continue;

		/* make sure only TDLS peers or the AP are flushed */
		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);

		/* collect the TFD queues of every matching station */
		msk |= mvmsta->tfd_queue_msk;
	}

	if (drop) {
		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
			IWL_ERR(mvm, "flush request fail\n");
		mutex_unlock(&mvm->mutex);
	} else {
		mutex_unlock(&mvm->mutex);

		/* this can take a while, and we may need/want other operations
		 * to succeed while doing this, so do it without the mutex held
		 */
		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
	}
}
3975
3976static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3977 struct survey_info *survey)
3978{
3979 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3980 int ret;
3981
3982 memset(survey, 0, sizeof(*survey));
3983
3984 /* only support global statistics right now */
3985 if (idx != 0)
3986 return -ENOENT;
3987
3988 if (fw_has_capa(&mvm->fw->ucode_capa,
3989 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3990 return -ENOENT;
3991
3992 mutex_lock(&mvm->mutex);
3993
3994 if (mvm->ucode_loaded) {
3995 ret = iwl_mvm_request_statistics(mvm, false);
3996 if (ret)
3997 goto out;
3998 }
3999
4000 survey->filled = SURVEY_INFO_TIME |
4001 SURVEY_INFO_TIME_RX |
4002 SURVEY_INFO_TIME_TX |
4003 SURVEY_INFO_TIME_SCAN;
4004 survey->time = mvm->accu_radio_stats.on_time_rf +
4005 mvm->radio_stats.on_time_rf;
4006 do_div(survey->time, USEC_PER_MSEC);
4007
4008 survey->time_rx = mvm->accu_radio_stats.rx_time +
4009 mvm->radio_stats.rx_time;
4010 do_div(survey->time_rx, USEC_PER_MSEC);
4011
4012 survey->time_tx = mvm->accu_radio_stats.tx_time +
4013 mvm->radio_stats.tx_time;
4014 do_div(survey->time_tx, USEC_PER_MSEC);
4015
4016 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4017 mvm->radio_stats.on_time_scan;
4018 do_div(survey->time_scan, USEC_PER_MSEC);
4019
4020 ret = 0;
4021 out:
4022 mutex_unlock(&mvm->mutex);
4023 return ret;
4024}
4025
4026static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4027 struct ieee80211_vif *vif,
4028 struct ieee80211_sta *sta,
4029 struct station_info *sinfo)
4030{
4031 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4032 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4033 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4034
4035 if (fw_has_capa(&mvm->fw->ucode_capa,
4036 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4037 return;
4038
4039 /* if beacon filtering isn't on mac80211 does it anyway */
4040 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4041 return;
4042
4043 if (!vif->bss_conf.assoc)
4044 return;
4045
4046 mutex_lock(&mvm->mutex);
4047
4048 if (mvmvif->ap_sta_id != mvmsta->sta_id)
4049 goto unlock;
4050
4051 if (iwl_mvm_request_statistics(mvm, false))
4052 goto unlock;
4053
4054 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4055 mvmvif->beacon_stats.accu_num_beacons;
4056 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4057 if (mvmvif->beacon_stats.avg_signal) {
4058 /* firmware only reports a value after RXing a few beacons */
4059 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4060 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4061 }
4062 unlock:
4063 mutex_unlock(&mvm->mutex);
4064}
4065
/*
 * Handle an MLME event (assoc/auth/deauth) for the firmware-debug MLME
 * trigger: collect firmware debug data when the matching per-event
 * occurrence counter expires.
 */
static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const struct ieee80211_event *event)
{
/*
 * Fire the trigger when the occurrence counter _cnt reaches zero:
 * a zero counter means "collect on every event"; a non-zero counter
 * is decremented and the collection happens on the event that takes
 * it to zero.
 */
#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)	\
	do {							\
		if ((_cnt) && --(_cnt))				\
			break;					\
		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
	} while (0)

	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	if (event->u.mlme.data == ASSOC_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_assoc_denied,
					   "DENIED ASSOC: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_assoc_timeout,
					   "ASSOC TIMEOUT");
	} else if (event->u.mlme.data == AUTH_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_auth_denied,
					   "DENIED AUTH: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_auth_timeout,
					   "AUTH TIMEOUT");
	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
		CHECK_MLME_TRIGGER(mvm, trig, buf,
				   trig_mlme->stop_rx_deauth,
				   "DEAUTH RX %d", event->u.mlme.reason);
	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
		CHECK_MLME_TRIGGER(mvm, trig, buf,
				   trig_mlme->stop_tx_deauth,
				   "DEAUTH TX %d", event->u.mlme.reason);
	}
#undef CHECK_MLME_TRIGGER
}
4119
4120static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4121 struct ieee80211_vif *vif,
4122 const struct ieee80211_event *event)
4123{
4124 struct iwl_fw_dbg_trigger_tlv *trig;
4125 struct iwl_fw_dbg_trigger_ba *ba_trig;
4126
4127 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4128 return;
4129
4130 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4131 ba_trig = (void *)trig->data;
4132 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4133 return;
4134
4135 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4136 return;
4137
4138 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4139 "BAR received from %pM, tid %d, ssn %d",
4140 event->u.ba.sta->addr, event->u.ba.tid,
4141 event->u.ba.ssn);
4142}
4143
4144static void
4145iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4146 struct ieee80211_vif *vif,
4147 const struct ieee80211_event *event)
4148{
4149 struct iwl_fw_dbg_trigger_tlv *trig;
4150 struct iwl_fw_dbg_trigger_ba *ba_trig;
4151
4152 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4153 return;
4154
4155 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4156 ba_trig = (void *)trig->data;
4157 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4158 return;
4159
4160 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4161 return;
4162
4163 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4164 "Frame from %pM timed out, tid %d",
4165 event->u.ba.sta->addr, event->u.ba.tid);
4166}
4167
4168static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4169 struct ieee80211_vif *vif,
4170 const struct ieee80211_event *event)
4171{
4172 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4173
4174 switch (event->type) {
4175 case MLME_EVENT:
4176 iwl_mvm_event_mlme_callback(mvm, vif, event);
4177 break;
4178 case BAR_RX_EVENT:
4179 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4180 break;
4181 case BA_FRAME_TIMEOUT:
4182 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4183 break;
4184 default:
4185 break;
4186 }
4187}
4188
/*
 * mac80211 callbacks implemented by the MVM driver.  Handlers live in
 * this file unless noted otherwise.
 */
const struct ieee80211_ops iwl_mvm_hw_ops = {
	/* data path and basic start/stop */
	.tx = iwl_mvm_mac_tx,
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	.start = iwl_mvm_mac_start,
	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
	.stop = iwl_mvm_mac_stop,
	/* interface and configuration management */
	.add_interface = iwl_mvm_mac_add_interface,
	.remove_interface = iwl_mvm_mac_remove_interface,
	.config = iwl_mvm_mac_config,
	.prepare_multicast = iwl_mvm_prepare_multicast,
	.configure_filter = iwl_mvm_configure_filter,
	.config_iface_filter = iwl_mvm_config_iface_filter,
	.bss_info_changed = iwl_mvm_bss_info_changed,
	/* scanning */
	.hw_scan = iwl_mvm_mac_hw_scan,
	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
	/* station management */
	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
	.sta_state = iwl_mvm_mac_sta_state,
	.sta_notify = iwl_mvm_mac_sta_notify,
	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
	.sta_rc_update = iwl_mvm_sta_rc_update,
	.conf_tx = iwl_mvm_mac_conf_tx,
	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
	.flush = iwl_mvm_mac_flush,
	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
	/* security keys */
	.set_key = iwl_mvm_mac_set_key,
	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
	/* remain-on-channel and channel contexts */
	.remain_on_channel = iwl_mvm_roc,
	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
	.add_chanctx = iwl_mvm_add_chanctx,
	.remove_chanctx = iwl_mvm_remove_chanctx,
	.change_chanctx = iwl_mvm_change_chanctx,
	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,

	/* AP and IBSS share the same start/stop flows */
	.start_ap = iwl_mvm_start_ap_ibss,
	.stop_ap = iwl_mvm_stop_ap_ibss,
	.join_ibss = iwl_mvm_start_ap_ibss,
	.leave_ibss = iwl_mvm_stop_ap_ibss,

	.set_tim = iwl_mvm_set_tim,

	/* channel switch announcement handling */
	.channel_switch = iwl_mvm_channel_switch,
	.pre_channel_switch = iwl_mvm_pre_channel_switch,
	.post_channel_switch = iwl_mvm_post_channel_switch,

	/* TDLS channel switching - see tdls.c */
	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,

	.event_callback = iwl_mvm_mac_event_callback,

	/* expands only when CONFIG_NL80211_TESTMODE is set */
	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)

#ifdef CONFIG_PM_SLEEP
	/* look at d3.c */
	.suspend = iwl_mvm_suspend,
	.resume = iwl_mvm_resume,
	.set_wakeup = iwl_mvm_set_wakeup,
	.set_rekey_data = iwl_mvm_set_rekey_data,
#if IS_ENABLED(CONFIG_IPV6)
	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
#endif
	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
	.get_survey = iwl_mvm_mac_get_survey,
	.sta_statistics = iwl_mvm_mac_sta_statistics,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
new file mode 100644
index 000000000000..4bde2d027dcd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -0,0 +1,1535 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __IWL_MVM_H__
67#define __IWL_MVM_H__
68
69#include <linux/list.h>
70#include <linux/spinlock.h>
71#include <linux/leds.h>
72#include <linux/in6.h>
73
74#include "iwl-op-mode.h"
75#include "iwl-trans.h"
76#include "iwl-notif-wait.h"
77#include "iwl-eeprom-parse.h"
78#include "iwl-fw-file.h"
79#include "iwl-config.h"
80#include "sta.h"
81#include "fw-api.h"
82#include "constants.h"
83#include "tof.h"
84
/* number of MAC addresses the driver exposes per NIC */
#define IWL_MVM_MAX_ADDRESSES		5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
/*
 * A TimeUnit is 1024 microsecond. The argument is fully parenthesized so
 * that compound expressions (e.g. MSEC_TO_TU(a + b)) evaluate correctly.
 */
#define MSEC_TO_TU(_msec)	((_msec) * 1000 / 1024)
91
/* For GO, this value represents the number of TUs before CSA "beacon
 * 0" TBTT when the CSA time-event needs to be scheduled to start. It
 * must be big enough to ensure that we switch in time.
 */
#define IWL_MVM_CHANNEL_SWITCH_TIME_GO		40

/* For client, this value represents the number of TUs before CSA
 * "beacon 1" TBTT, instead. This is because we don't know when the
 * GO/AP will be in the new channel, so we switch early enough.
 */
#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT	10

/*
 * This value (in TUs) is used to fine tune the CSA NoA end time which should
 * be just before "beacon 0" TBTT.
 */
#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4

/*
 * Number of beacons to transmit on a new channel until we unblock tx to
 * the stations, even if we didn't identify them on a new channel
 * (i.e. a forced TX-unblock timeout, counted in beacon intervals).
 */
#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
115
extern const struct ieee80211_ops iwl_mvm_hw_ops;

/**
 * struct iwl_mvm_mod_params - module parameters for iwlmvm
 * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
 *	We will register to mac80211 to have testmode working. The NIC must not
 *	be up'ed after the INIT fw asserted. This is useful to be able to use
 *	proprietary tools over testmode to debug the INIT fw.
 * @tfd_q_hang_detect: enables detection of hung transmit queues
 * @power_scheme: one of enum iwl_power_scheme
 */
struct iwl_mvm_mod_params {
	bool init_dbg;
	bool tfd_q_hang_detect;
	int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
133
134/**
135 * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump
136 *
137 * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode
138 * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
139 * transport's data.
140 * @trans_len: length of the valid data in trans_ptr
141 * @op_mode_len: length of the valid data in op_mode_ptr
142 */
143struct iwl_mvm_dump_ptrs {
144 struct iwl_trans_dump_data *trans_ptr;
145 void *op_mode_ptr;
146 u32 op_mode_len;
147};
148
149/**
150 * struct iwl_mvm_dump_desc - describes the dump
151 * @len: length of trig_desc->data
152 * @trig_desc: the description of the dump
153 */
154struct iwl_mvm_dump_desc {
155 size_t len;
156 /* must be last */
157 struct iwl_fw_error_dump_trigger_desc trig_desc;
158};
159
160extern struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert;
161
/* PHY context: firmware-side channel configuration shared by MAC contexts */
struct iwl_mvm_phy_ctxt {
	u16 id;		/* firmware PHY context id */
	u16 color;	/* disambiguates reuse of the same id */
	u32 ref;	/* number of MAC contexts bound to this PHY context */

	/*
	 * TODO: This should probably be removed. Currently here only for rate
	 * scaling algorithm
	 */
	struct ieee80211_channel *channel;
};
173
/* State of a single firmware time event (session protection, ROC, CSA...) */
struct iwl_mvm_time_event_data {
	struct ieee80211_vif *vif;	/* vif the event was scheduled for */
	struct list_head list;		/* entry in mvm->time_event_list */
	unsigned long end_jiffies;	/* when the event is expected to end */
	u32 duration;			/* requested duration, in TUs */
	bool running;			/* event currently active in fw */
	u32 uid;			/* unique id assigned by the fw */

	/*
	 * The access to the 'id' field must be done when the
	 * mvm->time_event_lock is held, as it value is used to indicate
	 * if the te is in the time event list or not (when id == TE_MAX)
	 */
	u32 id;
};
189
 /* Power management */

/**
 * enum iwl_power_scheme
 * @IWL_POWER_SCHEME_CAM: Continuously Active Mode
 * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
 * @IWL_POWER_SCHEME_LP: Low Power
 */
enum iwl_power_scheme {
	IWL_POWER_SCHEME_CAM = 1,
	IWL_POWER_SCHEME_BPS,
	IWL_POWER_SCHEME_LP
};

/* upper bound (in beacon intervals) advertised for the listen interval */
#define IWL_CONN_MAX_LISTEN_INTERVAL	10
#define IWL_UAPSD_MAX_SP		IEEE80211_WMM_IE_STA_QOSINFO_SP_2
206
#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * Bitmask of power-management fields overridden via debugfs; a set bit
 * means the matching field of struct iwl_dbgfs_pm is valid.
 * NOTE(review): BIT(5) is intentionally skipped — presumably a removed
 * entry whose value was kept to avoid renumbering; confirm before reuse.
 */
enum iwl_dbgfs_pm_mask {
	MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
	MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
	MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
	MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
	MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
	MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
	MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10),
};

/* debugfs overrides for the power-management command */
struct iwl_dbgfs_pm {
	u16 keep_alive_seconds;
	u32 rx_data_timeout;
	u32 tx_data_timeout;
	bool skip_over_dtim;
	u8 skip_dtim_periods;
	bool lprx_ena;
	u32 lprx_rssi_threshold;
	bool snooze_ena;
	bool uapsd_misbehaving;
	bool use_ps_poll;
	int mask;	/* combination of enum iwl_dbgfs_pm_mask bits */
};

/* beacon filtering */

/* which fields of struct iwl_dbgfs_bf were overridden via debugfs */
enum iwl_dbgfs_bf_mask {
	MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
	MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
	MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
	MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
	MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
	MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
	MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
	MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
	MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
	MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
	MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
};

/* debugfs overrides for the beacon filtering command */
struct iwl_dbgfs_bf {
	u32 bf_energy_delta;
	u32 bf_roaming_energy_delta;
	u32 bf_roaming_state;
	u32 bf_temp_threshold;
	u32 bf_temp_fast_filter;
	u32 bf_temp_slow_filter;
	u32 bf_enable_beacon_filter;
	u32 bf_debug_flag;
	u32 bf_escape_timer;
	u32 ba_escape_timer;
	u32 ba_enable_beacon_abort;
	int mask;	/* combination of enum iwl_dbgfs_bf_mask bits */
};
#endif
266
/* Sources of SMPS requests; combined per-vif to yield one mac80211 request */
enum iwl_mvm_smps_type_request {
	IWL_MVM_SMPS_REQ_BT_COEX,
	IWL_MVM_SMPS_REQ_TT,
	IWL_MVM_SMPS_REQ_PROT,
	NUM_IWL_MVM_SMPS_REQ,
};
273
/*
 * Reasons for holding a runtime reference on the device (prevents d0i3
 * entry while any count in mvm->refs[] is non-zero).
 */
enum iwl_mvm_ref_type {
	IWL_MVM_REF_UCODE_DOWN,
	IWL_MVM_REF_SCAN,
	IWL_MVM_REF_ROC,
	IWL_MVM_REF_ROC_AUX,
	IWL_MVM_REF_P2P_CLIENT,
	IWL_MVM_REF_AP_IBSS,
	IWL_MVM_REF_USER,
	IWL_MVM_REF_TX,
	IWL_MVM_REF_TX_AGG,
	IWL_MVM_REF_ADD_IF,
	IWL_MVM_REF_START_AP,
	IWL_MVM_REF_BSS_CHANGED,
	IWL_MVM_REF_PREPARE_TX,
	IWL_MVM_REF_PROTECT_TDLS,
	IWL_MVM_REF_CHECK_CTKILL,
	IWL_MVM_REF_PRPH_READ,
	IWL_MVM_REF_PRPH_WRITE,
	IWL_MVM_REF_NMI,
	IWL_MVM_REF_TM_CMD,
	IWL_MVM_REF_EXIT_WORK,
	IWL_MVM_REF_PROTECT_CSA,
	IWL_MVM_REF_FW_DBG_COLLECT,

	/* update debugfs.c when changing this */

	IWL_MVM_REF_COUNT,
};
302
/* debug knob: force the shared antenna to a particular owner */
enum iwl_bt_force_ant_mode {
	BT_FORCE_ANT_DIS = 0,	/* forcing disabled */
	BT_FORCE_ANT_AUTO,	/* let coex logic decide */
	BT_FORCE_ANT_BT,	/* antenna forced to Bluetooth */
	BT_FORCE_ANT_WIFI,	/* antenna forced to WiFi */

	BT_FORCE_ANT_MAX,
};
311
312/**
313* struct iwl_mvm_vif_bf_data - beacon filtering related data
314* @bf_enabled: indicates if beacon filtering is enabled
315* @ba_enabled: indicated if beacon abort is enabled
316* @ave_beacon_signal: average beacon signal
317* @last_cqm_event: rssi of the last cqm event
318* @bt_coex_min_thold: minimum threshold for BT coex
319* @bt_coex_max_thold: maximum threshold for BT coex
320* @last_bt_coex_event: rssi of the last BT coex event
321*/
322struct iwl_mvm_vif_bf_data {
323 bool bf_enabled;
324 bool ba_enabled;
325 int ave_beacon_signal;
326 int last_cqm_event;
327 int bt_coex_min_thold;
328 int bt_coex_max_thold;
329 int last_bt_coex_event;
330};
331
332/**
333 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
334 * @id: between 0 and 3
335 * @color: to solve races upon MAC addition and removal
336 * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
337 * @bssid: BSSID for this (client) interface
338 * @associated: indicates that we're currently associated, used only for
339 * managing the firmware state in iwl_mvm_bss_info_changed_station()
340 * @ap_assoc_sta_count: count of stations associated to us - valid only
341 * if VIF type is AP
342 * @uploaded: indicates the MAC context has been added to the device
343 * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
344 * should get quota etc.
345 * @pm_enabled - Indicate if MAC power management is allowed
346 * @monitor_active: indicates that monitor context is configured, and that the
347 * interface should get quota etc.
348 * @low_latency: indicates that this interface is in low-latency mode
349 * (VMACLowLatencyMode)
350 * @ps_disabled: indicates that this interface requires PS to be disabled
351 * @queue_params: QoS params for this MAC
352 * @bcast_sta: station used for broadcast packets. Used by the following
353 * vifs: P2P_DEVICE, GO and AP.
354 * @beacon_skb: the skb used to hold the AP/GO beacon template
355 * @smps_requests: the SMPS requests of different parts of the driver,
356 * combined on update to yield the overall request to mac80211.
357 * @beacon_stats: beacon statistics, containing the # of received beacons,
358 * # of received beacons accumulated over FW restart, and the current
359 * average signal of beacons retrieved from the firmware
360 * @csa_failed: CSA failed to schedule time event, report an error later
361 * @features: hw features active for this vif
362 */
363struct iwl_mvm_vif {
364 struct iwl_mvm *mvm;
365 u16 id;
366 u16 color;
367 u8 ap_sta_id;
368
369 u8 bssid[ETH_ALEN];
370 bool associated;
371 u8 ap_assoc_sta_count;
372
373 bool uploaded;
374 bool ap_ibss_active;
375 bool pm_enabled;
376 bool monitor_active;
377 bool low_latency;
378 bool ps_disabled;
379 struct iwl_mvm_vif_bf_data bf_data;
380
381 struct {
382 u32 num_beacons, accu_num_beacons;
383 u8 avg_signal;
384 } beacon_stats;
385
386 u32 ap_beacon_time;
387
388 enum iwl_tsf_id tsf_id;
389
390 /*
391 * QoS data from mac80211, need to store this here
392 * as mac80211 has a separate callback but we need
393 * to have the data for the MAC context
394 */
395 struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
396 struct iwl_mvm_time_event_data time_event_data;
397 struct iwl_mvm_time_event_data hs_time_event_data;
398
399 struct iwl_mvm_int_sta bcast_sta;
400
401 /*
402 * Assigned while mac80211 has the interface in a channel context,
403 * or, for P2P Device, while it exists.
404 */
405 struct iwl_mvm_phy_ctxt *phy_ctxt;
406
407#ifdef CONFIG_PM_SLEEP
408 /* WoWLAN GTK rekey data */
409 struct {
410 u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
411 __le64 replay_ctr;
412 bool valid;
413 } rekey_data;
414
415 int tx_key_idx;
416
417 bool seqno_valid;
418 u16 seqno;
419#endif
420
421#if IS_ENABLED(CONFIG_IPV6)
422 /* IPv6 addresses for WoWLAN */
423 struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
424 int num_target_ipv6_addrs;
425#endif
426
427#ifdef CONFIG_IWLWIFI_DEBUGFS
428 struct dentry *dbgfs_dir;
429 struct dentry *dbgfs_slink;
430 struct iwl_dbgfs_pm dbgfs_pm;
431 struct iwl_dbgfs_bf dbgfs_bf;
432 struct iwl_mac_power_cmd mac_pwr_cmd;
433#endif
434
435 enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
436
437 /* FW identified misbehaving AP */
438 u8 uapsd_misbehaving_bssid[ETH_ALEN];
439
440 /* Indicates that CSA countdown may be started */
441 bool csa_countdown;
442 bool csa_failed;
443
444 /* TCP Checksum Offload */
445 netdev_features_t features;
446};
447
448static inline struct iwl_mvm_vif *
449iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
450{
451 return (void *)vif->drv_priv;
452}
453
extern const u8 tid_to_mac80211_ac[];

/* the STOPPING_* bits mirror the scan-type bits, shifted by this amount */
#define IWL_MVM_SCAN_STOPPING_SHIFT	8

/*
 * Bitmap of scan states (mvm->scan_status). The low byte holds the
 * currently-running scan types; the high byte holds the same types while
 * they are being stopped.
 */
enum iwl_scan_status {
	IWL_MVM_SCAN_REGULAR		= BIT(0),
	IWL_MVM_SCAN_SCHED		= BIT(1),
	IWL_MVM_SCAN_NETDETECT		= BIT(2),

	IWL_MVM_SCAN_STOPPING_REGULAR	= BIT(8),
	IWL_MVM_SCAN_STOPPING_SCHED	= BIT(9),
	IWL_MVM_SCAN_STOPPING_NETDETECT	= BIT(10),

	IWL_MVM_SCAN_REGULAR_MASK	= IWL_MVM_SCAN_REGULAR |
					  IWL_MVM_SCAN_STOPPING_REGULAR,
	IWL_MVM_SCAN_SCHED_MASK		= IWL_MVM_SCAN_SCHED |
					  IWL_MVM_SCAN_STOPPING_SCHED,
	IWL_MVM_SCAN_NETDETECT_MASK	= IWL_MVM_SCAN_NETDETECT |
					  IWL_MVM_SCAN_STOPPING_NETDETECT,

	IWL_MVM_SCAN_STOPPING_MASK	= 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
	IWL_MVM_SCAN_MASK		= 0xff,
};
477
478/**
479 * struct iwl_nvm_section - describes an NVM section in memory.
480 *
481 * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
482 * and saved for later use by the driver. Not all NVM sections are saved
483 * this way, only the needed ones.
484 */
485struct iwl_nvm_section {
486 u16 length;
487 const u8 *data;
488};
489
490/**
491 * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
492 * @ct_kill_exit: worker to exit thermal kill
493 * @dynamic_smps: Is thermal throttling enabled dynamic_smps?
494 * @tx_backoff: The current thremal throttling tx backoff in uSec.
495 * @min_backoff: The minimal tx backoff due to power restrictions
496 * @params: Parameters to configure the thermal throttling algorithm.
497 * @throttle: Is thermal throttling is active?
498 */
499struct iwl_mvm_tt_mgmt {
500 struct delayed_work ct_kill_exit;
501 bool dynamic_smps;
502 u32 tx_backoff;
503 u32 min_backoff;
504 struct iwl_tt_params params;
505 bool throttle;
506};
507
/* depth of the last-used-rates history ring below */
#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8

/* per-rate/per-format TX frame counters kept for debugfs statistics */
struct iwl_mvm_frame_stats {
	u32 legacy_frames;
	u32 ht_frames;
	u32 vht_frames;
	u32 bw_20_frames;
	u32 bw_40_frames;
	u32 bw_80_frames;
	u32 bw_160_frames;
	u32 sgi_frames;
	u32 ngi_frames;
	u32 siso_frames;
	u32 mimo2_frames;
	u32 agg_frames;
	u32 ampdu_count;
	u32 success_frames;
	u32 fail_frames;
	u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
	int last_frame_idx;	/* ring index into last_rates */
};
529
/* bit numbers for mvm->d0i3_suspend_flags */
enum {
	D0I3_DEFER_WAKEUP,
	D0I3_PENDING_WAKEUP,
};
534
/* debugfs temperature override: sentinel to disable, plus valid range.
 * The negative constant is parenthesized per kernel macro convention so
 * it expands safely in any expression context.
 */
#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN (-100)
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
538
/* TDLS channel-switch state machine states */
enum iwl_mvm_tdls_cs_state {
	IWL_MVM_TDLS_SW_IDLE = 0,	/* no switch in progress */
	IWL_MVM_TDLS_SW_REQ_SENT,	/* we sent a switch request */
	IWL_MVM_TDLS_SW_RESP_RCVD,	/* peer responded to our request */
	IWL_MVM_TDLS_SW_REQ_RCVD,	/* peer initiated the switch */
	IWL_MVM_TDLS_SW_ACTIVE,		/* switch currently active */
};
546
/* firmware SMEM layout as reported by the fw, used for error dumps */
struct iwl_mvm_shared_mem_cfg {
	u32 shared_mem_addr;
	u32 shared_mem_size;
	u32 sample_buff_addr;
	u32 sample_buff_size;
	u32 txfifo_addr;
	u32 txfifo_size[TX_FIFO_MAX_NUM];
	u32 rxfifo_size[RX_FIFO_MAX_NUM];
	u32 page_buff_addr;
	u32 page_buff_size;
};
558
/*
 * struct iwl_mvm - per-NIC operational state of the MVM op_mode.
 *
 * One instance per device, stored in the op_mode private area (see
 * IWL_OP_MODE_GET_MVM below). General access is protected by @mutex;
 * fields with a dedicated lock say so in their own comments.
 */
struct iwl_mvm {
	/* for logger access */
	struct device *dev;

	struct iwl_trans *trans;
	const struct iwl_fw *fw;
	const struct iwl_cfg *cfg;
	struct iwl_phy_db *phy_db;
	struct ieee80211_hw *hw;

	/* for protecting access to iwl_mvm */
	struct mutex mutex;
	struct list_head async_handlers_list;
	spinlock_t async_handlers_lock;
	struct work_struct async_handlers_wk;

	struct work_struct roc_done_wk;

	/* bitmap of enum iwl_mvm_status bits (test_bit/set_bit access) */
	unsigned long status;

	/*
	 * for beacon filtering -
	 * currently only one interface can be supported
	 */
	struct iwl_mvm_vif *bf_allowed_vif;

	enum iwl_ucode_type cur_ucode;
	bool ucode_loaded;
	bool calibrating;
	u32 error_event_table;
	u32 log_event_table;
	u32 umac_error_event_table;
	bool support_umac_log;
	struct iwl_sf_region sf_space;

	u32 ampdu_ref;

	struct iwl_notif_wait_data notif_wait;

	struct mvm_statistics_rx rx_stats;

	/* accu_* accumulates radio_stats across firmware restarts */
	struct {
		u64 rx_time;
		u64 tx_time;
		u64 on_time_rf;
		u64 on_time_scan;
	} radio_stats, accu_radio_stats;

	struct {
		/* Map to HW queue */
		u32 hw_queue_to_mac80211;
		u8 hw_queue_refcount;
		bool setup_reserved;
		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
	} queue_info[IWL_MAX_HW_QUEUES];
	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];

	const char *nvm_file_name;
	struct iwl_nvm_data *nvm_data;
	/* NVM sections */
	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];

	/* Paging section */
	struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
	u16 num_of_paging_blk;
	u16 num_of_pages_in_last_blk;

	/* EEPROM MAC addresses */
	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];

	/* data related to data path */
	struct iwl_rx_phy_info last_phy_info;
	/* fw station id -> mac80211 station; RCU-protected, may hold
	 * ERR_PTR values while a station is being drained */
	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
	struct work_struct sta_drained_wk;
	unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
	u32 tfd_drained[IWL_MVM_STATION_COUNT];
	u8 rx_ba_sessions;

	/* configured by mac80211 */
	u32 rts_threshold;

	/* Scan status, cmd (pre-allocated) and auxiliary station */
	unsigned int scan_status;
	void *scan_cmd;
	struct iwl_mcast_filter_cmd *mcast_filter_cmd;

	/* max number of simultaneous scans the FW supports */
	unsigned int max_scans;

	/* UMAC scan tracking */
	u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];

	/* rx chain antennas set through debugfs for the scan command */
	u8 scan_rx_ant;

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
	/* broadcast filters to configure for each associated station */
	const struct iwl_fw_bcast_filter *bcast_filters;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct {
		bool override;
		struct iwl_bcast_filter_cmd cmd;
	} dbgfs_bcast_filtering;
#endif
#endif

	/* Internal station */
	struct iwl_mvm_int_sta aux_sta;

	bool last_ebs_successful;

	u8 scan_last_antenna_idx; /* to toggle TX between antennas */
	u8 mgmt_last_antenna_idx;

	/* last smart fifo state that was successfully sent to firmware */
	enum iwl_sf_state sf_state;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	u32 dbgfs_prph_reg_addr;
	bool disable_power_off;
	bool disable_power_off_d3;

	bool scan_iter_notif_enabled;

	struct debugfs_blob_wrapper nvm_hw_blob;
	struct debugfs_blob_wrapper nvm_sw_blob;
	struct debugfs_blob_wrapper nvm_calib_blob;
	struct debugfs_blob_wrapper nvm_prod_blob;
	struct debugfs_blob_wrapper nvm_phy_sku_blob;

	struct iwl_mvm_frame_stats drv_rx_stats;
	spinlock_t drv_stats_lock;
	u16 dbgfs_rx_phyinfo;
#endif

	struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];

	struct list_head time_event_list;
	spinlock_t time_event_lock;

	/*
	 * A bitmap indicating the index of the key in use. The firmware
	 * can hold 16 keys at most. Reflect this fact.
	 */
	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
	u8 fw_key_deleted[STA_KEY_MAX_NUM];

	/* references taken by the driver and spinlock protecting them */
	spinlock_t refs_lock;
	u8 refs[IWL_MVM_REF_COUNT];

	u8 vif_count;

	/* -1 for always, 0 for never, >0 for that many times */
	s8 restart_fw;
	u8 fw_dbg_conf;
	struct delayed_work fw_dump_wk;
	struct iwl_mvm_dump_desc *fw_dump_desc;
	struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;

#ifdef CONFIG_IWLWIFI_LEDS
	struct led_classdev led;
#endif

	struct ieee80211_vif *p2p_device_vif;

#ifdef CONFIG_PM_SLEEP
	struct wiphy_wowlan_support wowlan;
	int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;

	/* sched scan settings for net detect */
	struct cfg80211_sched_scan_request *nd_config;
	struct ieee80211_scan_ies nd_ies;
	struct cfg80211_match_set *nd_match_sets;
	int n_nd_match_sets;
	struct ieee80211_channel **nd_channels;
	int n_nd_channels;
	bool net_detect;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	bool d3_wake_sysassert;
	bool d3_test_active;
	bool store_d3_resume_sram;
	void *d3_resume_sram;
	u32 d3_test_pme_ptr;
	struct ieee80211_vif *keep_vif;
	u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
#endif
#endif

	/* d0i3 */
	u8 d0i3_ap_sta_id;
	bool d0i3_offloading;
	struct work_struct d0i3_exit_work;
	struct sk_buff_head d0i3_tx;
	/* protect d0i3_suspend_flags */
	struct mutex d0i3_suspend_mutex;
	unsigned long d0i3_suspend_flags;
	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
	spinlock_t d0i3_tx_lock;
	wait_queue_head_t d0i3_exit_waitq;

	/* BT-Coex */
	u8 bt_ack_kill_msk[NUM_PHY_CTX];
	u8 bt_cts_kill_msk[NUM_PHY_CTX];

	struct iwl_bt_coex_profile_notif_old last_bt_notif_old;
	struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old;
	struct iwl_bt_coex_profile_notif last_bt_notif;
	struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;

	u32 last_ant_isol;
	u8 last_corun_lut;
	u8 bt_tx_prio;
	enum iwl_bt_force_ant_mode bt_force_ant_mode;

	/* Aux ROC */
	struct list_head aux_roc_te_list;

	/* Thermal Throttling and CTkill */
	struct iwl_mvm_tt_mgmt thermal_throttle;
	s32 temperature;	/* Celsius */
	/*
	 * Debug option to set the NIC temperature. This option makes the
	 * driver think this is the actual NIC temperature, and ignore the
	 * real temperature that is received from the fw
	 */
	bool temperature_test;  /* Debug test temperature is enabled */

	struct iwl_time_quota_cmd last_quota_cmd;

#ifdef CONFIG_NL80211_TESTMODE
	u32 noa_duration;
	struct ieee80211_vif *noa_vif;
#endif

	/* Tx queues */
	u8 aux_queue;
	u8 first_agg_queue;
	u8 last_agg_queue;

	/* Indicate if device power save is allowed */
	u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */

	struct ieee80211_vif __rcu *csa_vif;
	struct ieee80211_vif __rcu *csa_tx_blocked_vif;
	u8 csa_tx_block_bcn_timeout;

	/* system time of last beacon (for AP/GO interface) */
	u32 ap_last_beacon_gp2;

	bool lar_regdom_set;
	enum iwl_mcc_source mcc_src;

	u8 low_latency_agg_frame_limit;

	/* TDLS channel switch data */
	struct {
		struct delayed_work dwork;
		enum iwl_mvm_tdls_cs_state state;

		/*
		 * Current cs sta - might be different from periodic cs peer
		 * station. Value is meaningless when the cs-state is idle.
		 */
		u8 cur_sta_id;

		/* TDLS periodic channel-switch peer */
		struct {
			u8 sta_id;
			u8 op_class;
			bool initiator; /* are we the link initiator */
			struct cfg80211_chan_def chandef;
			struct sk_buff *skb; /* ch sw template */
			u32 ch_sw_tm_ie;

			/* timestamp of last ch-sw request sent (GP2 time) */
			u32 sent_timestamp;
		} peer;
	} tdls_cs;

	struct iwl_mvm_shared_mem_cfg shared_mem_cfg;

	u32 ciphers[6];
	struct iwl_mvm_tof_data tof_data;
};
848
/* Extract MVM priv from op_mode and _hw */
#define IWL_OP_MODE_GET_MVM(_iwl_op_mode)		\
	((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)

/* _hw->priv holds the op_mode pointer; chain through it to the mvm */
#define IWL_MAC80211_GET_MVM(_hw)			\
	IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
855
/* bit numbers for mvm->status (accessed with test_bit/set_bit) */
enum iwl_mvm_status {
	IWL_MVM_STATUS_HW_RFKILL,
	IWL_MVM_STATUS_HW_CTKILL,
	IWL_MVM_STATUS_ROC_RUNNING,
	IWL_MVM_STATUS_IN_HW_RESTART,
	IWL_MVM_STATUS_IN_D0I3,
	IWL_MVM_STATUS_ROC_AUX_RUNNING,
	IWL_MVM_STATUS_D3_RECONFIG,
	IWL_MVM_STATUS_DUMPING_FW_LOG,
};
866
867static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
868{
869 return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
870 test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
871}
872
873static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
874{
875 return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
876}
877
878/* Must be called with rcu_read_lock() held and it can only be
879 * released when mvmsta is not needed anymore.
880 */
881static inline struct iwl_mvm_sta *
882iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
883{
884 struct ieee80211_sta *sta;
885
886 if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
887 return NULL;
888
889 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
890
891 /* This can happen if the station has been removed right now */
892 if (IS_ERR_OR_NULL(sta))
893 return NULL;
894
895 return iwl_mvm_sta_from_mac80211(sta);
896}
897
898static inline struct iwl_mvm_sta *
899iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
900{
901 struct ieee80211_sta *sta;
902
903 if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
904 return NULL;
905
906 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
907 lockdep_is_held(&mvm->mutex));
908
909 /* This can happen if the station has been removed right now */
910 if (IS_ERR_OR_NULL(sta))
911 return NULL;
912
913 return iwl_mvm_sta_from_mac80211(sta);
914}
915
916static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
917{
918 return mvm->trans->cfg->d0i3 &&
919 mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
920 !iwlwifi_mod_params.d0i3_disable &&
921 fw_has_capa(&mvm->fw->ucode_capa,
922 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
923}
924
925static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
926{
927 return fw_has_capa(&mvm->fw->ucode_capa,
928 IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
929}
930
931static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
932{
933 bool nvm_lar = mvm->nvm_data->lar_enabled;
934 bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
935 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
936
937 if (iwlwifi_mod_params.lar_disable)
938 return false;
939
940 /*
941 * Enable LAR only if it is supported by the FW (TLV) &&
942 * enabled in the NVM
943 */
944 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
945 return nvm_lar && tlv_lar;
946 else
947 return tlv_lar;
948}
949
950static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
951{
952 return fw_has_api(&mvm->fw->ucode_capa,
953 IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
954 fw_has_capa(&mvm->fw->ucode_capa,
955 IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
956}
957
958static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
959{
960 return fw_has_capa(&mvm->fw->ucode_capa,
961 IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
962 IWL_MVM_BT_COEX_CORUNNING;
963}
964
965static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
966{
967 return fw_has_capa(&mvm->fw->ucode_capa,
968 IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
969 IWL_MVM_BT_COEX_RRC;
970}
971
972static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
973{
974 return fw_has_capa(&mvm->fw->ucode_capa,
975 IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
976}
977
978static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
979{
980 /* firmware flag isn't defined yet */
981 return false;
982}
983
extern const u8 iwl_mvm_ac_to_tx_fifo[];

/* translation entry between uCode PLCP rate values and IEEE rate values */
struct iwl_rate_info {
	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
	u8 plcp_mimo3;  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
};
993
/* locked variants of the mac80211 start/stop callbacks (mutex held) */
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
int __iwl_mvm_mac_start(struct iwl_mvm *mvm);

/******************
 * MVM Methods
 ******************/
/* uCode */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);

/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum ieee80211_band band);
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum ieee80211_band band,
			       struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);

/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
				  struct iwl_host_cmd *cmd);
int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
				      u32 flags, u16 len, const void *data);
int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
					 struct iwl_host_cmd *cmd,
					 u32 *status);
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
					     u16 len, const void *data,
					     u32 *status);
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id);
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			    struct ieee80211_tx_info *info,
			    struct ieee80211_sta *sta, __le16 fc);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
1041
1042static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
1043 struct iwl_tx_cmd *tx_cmd)
1044{
1045 struct ieee80211_key_conf *keyconf = info->control.hw_key;
1046
1047 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
1048 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
1049 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1050 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
1051}
1052
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
{
	/* Block until the queued async-notification work item has run. */
	flush_work(&mvm->async_handlers_wk);
}
1057
1058/* Statistics */
1059void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
1060 struct iwl_rx_packet *pkt);
1061void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
1062 struct iwl_rx_cmd_buffer *rxb);
1063int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
1064void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
1065
1066/* NVM */
1067int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
1068int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
1069
1070static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
1071{
1072 return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
1073 mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
1074 mvm->fw->valid_tx_ant;
1075}
1076
1077static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
1078{
1079 return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
1080 mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
1081 mvm->fw->valid_rx_ant;
1082}
1083
1084static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
1085{
1086 u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
1087 FW_PHY_CFG_RX_CHAIN);
1088 u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
1089 u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
1090
1091 phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
1092 valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
1093
1094 return mvm->fw->phy_config & phy_config;
1095}
1096
1097int iwl_mvm_up(struct iwl_mvm *mvm);
1098int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
1099
1100int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
1101bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1102 struct iwl_bcast_filter_cmd *cmd);
1103
1104/*
1105 * FW notifications / CMD responses handlers
1106 * Convention: iwl_mvm_rx_<NAME OF THE CMD>
1107 */
1108void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1109void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
1110 struct iwl_rx_cmd_buffer *rxb);
1111void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1112void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1113void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1114 struct iwl_rx_cmd_buffer *rxb);
1115void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1116void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1117 struct iwl_rx_cmd_buffer *rxb);
1118void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1119 struct iwl_rx_cmd_buffer *rxb);
1120void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
1121 struct iwl_rx_cmd_buffer *rxb);
1122
1123/* MVM PHY */
1124int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1125 struct cfg80211_chan_def *chandef,
1126 u8 chains_static, u8 chains_dynamic);
1127int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1128 struct cfg80211_chan_def *chandef,
1129 u8 chains_static, u8 chains_dynamic);
1130void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
1131 struct iwl_mvm_phy_ctxt *ctxt);
1132void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
1133 struct iwl_mvm_phy_ctxt *ctxt);
1134int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
1135u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
1136u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
1137
1138/* MAC (virtual interface) programming */
1139int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1140void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1141int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1142int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1143 bool force_assoc_off, const u8 *bssid_override);
1144int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1145u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
1146int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
1147 struct ieee80211_vif *vif);
1148void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1149 struct iwl_rx_cmd_buffer *rxb);
1150void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
1151 struct iwl_rx_cmd_buffer *rxb);
1152void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
1153 struct ieee80211_vif *vif);
1154unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
1155 struct ieee80211_vif *exclude_vif);
1156/* Bindings */
1157int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1158int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1159
1160/* Quota management */
1161int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
1162 struct ieee80211_vif *disabled_vif);
1163
1164/* Scanning */
1165int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1166 struct cfg80211_scan_request *req,
1167 struct ieee80211_scan_ies *ies);
1168int iwl_mvm_scan_size(struct iwl_mvm *mvm);
1169int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
1170int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
1171void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
1172
1173/* Scheduled scan */
1174void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
1175 struct iwl_rx_cmd_buffer *rxb);
1176void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1177 struct iwl_rx_cmd_buffer *rxb);
1178int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1179 struct ieee80211_vif *vif,
1180 struct cfg80211_sched_scan_request *req,
1181 struct ieee80211_scan_ies *ies,
1182 int type);
1183void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
1184 struct iwl_rx_cmd_buffer *rxb);
1185
1186/* UMAC scan */
1187int iwl_mvm_config_scan(struct iwl_mvm *mvm);
1188void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1189 struct iwl_rx_cmd_buffer *rxb);
1190void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1191 struct iwl_rx_cmd_buffer *rxb);
1192
1193/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#else
/*
 * Debugfs support compiled out: provide no-op stubs (register reports
 * success) so call sites need no conditional compilation.
 */
static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
					 struct dentry *dbgfs_dir)
{
	return 0;
}
static inline void
iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
static inline void
iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
1213
1214/* rate scaling */
1215int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
1216void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
1217int rs_pretty_print_rate(char *buf, const u32 rate);
1218void rs_update_last_rssi(struct iwl_mvm *mvm,
1219 struct iwl_lq_sta *lq_sta,
1220 struct ieee80211_rx_status *rx_status);
1221
1222/* power management */
1223int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
1224int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
1225int iwl_mvm_power_update_ps(struct iwl_mvm *mvm);
1226int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1227 char *buf, int bufsz);
1228
1229void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1230void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
1231 struct iwl_rx_cmd_buffer *rxb);
1232
#ifdef CONFIG_IWLWIFI_LEDS
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
#else
/*
 * LED support compiled out: init succeeds silently and exit does
 * nothing, keeping the driver init/teardown paths #ifdef-free.
 */
static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
{
	return 0;
}
static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
{
}
#endif
1245
1246/* D3 (WoWLAN, NetDetect) */
1247int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
1248int iwl_mvm_resume(struct ieee80211_hw *hw);
1249void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
1250void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
1251 struct ieee80211_vif *vif,
1252 struct cfg80211_gtk_rekey_data *data);
1253void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
1254 struct ieee80211_vif *vif,
1255 struct inet6_dev *idev);
1256void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
1257 struct ieee80211_vif *vif, int idx);
1258extern const struct file_operations iwl_dbgfs_d3_test_ops;
#ifdef CONFIG_PM_SLEEP
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif);
#else
/*
 * Without suspend/resume support there is no WoWLAN resume path that
 * would consume the saved sequence number, so this is a no-op.
 */
static inline void
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif
1268void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
1269 struct iwl_wowlan_config_cmd *cmd);
1270int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
1271 struct ieee80211_vif *vif,
1272 bool disable_offloading,
1273 u32 cmd_flags);
1274
1275/* D0i3 */
1276void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1277void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1278int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1279bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
1280void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
1281int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
1282int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
1283int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
1284
1285/* BT Coex */
1286int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
1287void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
1288 struct iwl_rx_cmd_buffer *rxb);
1289void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1290 enum ieee80211_rssi_event_data);
1291void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
1292u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
1293 struct ieee80211_sta *sta);
1294bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
1295 struct ieee80211_sta *sta);
1296bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
1297bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
1298bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
1299 enum ieee80211_band band);
1300u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
1301 struct ieee80211_tx_info *info, u8 ac);
1302
1303bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
1304void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
1305int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
1306void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
1307 struct iwl_rx_cmd_buffer *rxb);
1308void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1309 enum ieee80211_rssi_event_data);
1310u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
1311 struct ieee80211_sta *sta);
1312bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
1313 struct ieee80211_sta *sta);
1314bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
1315 enum ieee80211_band band);
1316void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
1317 struct iwl_rx_cmd_buffer *rxb);
1318
1319/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
					 struct iwl_beacon_filter_cmd *cmd);
#else
/*
 * Without debugfs there are no user-tuned beacon-filter overrides to
 * fold into the command, so the stub leaves it untouched.
 */
static inline void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
					 struct iwl_beacon_filter_cmd *cmd)
{}
#endif
1330int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
1331 struct ieee80211_vif *vif,
1332 bool enable, u32 flags);
1333int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
1334 struct ieee80211_vif *vif,
1335 u32 flags);
1336int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
1337 struct ieee80211_vif *vif,
1338 u32 flags);
1339/* SMPS */
1340void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1341 enum iwl_mvm_smps_type_request req_type,
1342 enum ieee80211_smps_mode smps_request);
1343bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
1344
1345/* Low latency */
1346int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1347 bool value);
1348/* get SystemLowLatencyMode - only needed for beacon threshold? */
1349bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
1350/* get VMACLowLatencyMode */
1351static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
1352{
1353 /*
1354 * should this consider associated/active/... state?
1355 *
1356 * Normally low-latency should only be active on interfaces
1357 * that are active, but at least with debugfs it can also be
1358 * enabled on interfaces that aren't active. However, when
1359 * interface aren't active then they aren't added into the
1360 * binding, so this has no real impact. For now, just return
1361 * the current desired low-latency state.
1362 */
1363
1364 return mvmvif->low_latency;
1365}
1366
1367/* hw scheduler queue config */
1368void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1369 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
1370 unsigned int wdg_timeout);
1371/*
1372 * Disable a TXQ.
1373 * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
1374 */
1375void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1376 u8 tid, u8 flags);
1377int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
1378
1379static inline
1380void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1381 u8 fifo, u16 ssn, unsigned int wdg_timeout)
1382{
1383 struct iwl_trans_txq_scd_cfg cfg = {
1384 .fifo = fifo,
1385 .tid = IWL_MAX_TID_COUNT,
1386 .aggregate = false,
1387 .frame_limit = IWL_FRAME_LIMIT,
1388 };
1389
1390 iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
1391}
1392
1393static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
1394 int mac80211_queue, int fifo,
1395 int sta_id, int tid, int frame_limit,
1396 u16 ssn, unsigned int wdg_timeout)
1397{
1398 struct iwl_trans_txq_scd_cfg cfg = {
1399 .fifo = fifo,
1400 .sta_id = sta_id,
1401 .tid = tid,
1402 .frame_limit = frame_limit,
1403 .aggregate = true,
1404 };
1405
1406 iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
1407}
1408
1409/* Thermal management and CT-kill */
1410void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
1411void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
1412void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
1413 struct iwl_rx_cmd_buffer *rxb);
1414void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
1415void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
1416void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
1417void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
1418int iwl_mvm_get_temp(struct iwl_mvm *mvm);
1419
1420/* Location Aware Regulatory */
1421struct iwl_mcc_update_resp *
1422iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
1423 enum iwl_mcc_source src_id);
1424int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
1425void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
1426 struct iwl_rx_cmd_buffer *rxb);
1427struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
1428 const char *alpha2,
1429 enum iwl_mcc_source src_id,
1430 bool *changed);
1431struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
1432 bool *changed);
1433int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
1434void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
1435
1436/* smart fifo */
1437int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1438 bool added_vif);
1439
1440/* TDLS */
1441
1442/*
1443 * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
1444 * This TID is marked as used vs the AP and all connected TDLS peers.
1445 */
1446#define IWL_MVM_TDLS_FW_TID 4
1447
1448int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1449void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
1450void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1451 bool sta_added);
1452void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
1453 struct ieee80211_vif *vif);
1454int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
1455 struct ieee80211_vif *vif,
1456 struct ieee80211_sta *sta, u8 oper_class,
1457 struct cfg80211_chan_def *chandef,
1458 struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
1459void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
1460 struct ieee80211_vif *vif,
1461 struct ieee80211_tdls_ch_sw_params *params);
1462void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
1463 struct ieee80211_vif *vif,
1464 struct ieee80211_sta *sta);
1465void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1466void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
1467
1468struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
1469
1470void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
1471void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
1472
1473int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
1474int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
1475 const char *str, size_t len,
1476 struct iwl_fw_dbg_trigger_tlv *trigger);
1477int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
1478 struct iwl_mvm_dump_desc *desc,
1479 struct iwl_fw_dbg_trigger_tlv *trigger);
1480void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
1481int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
1482 struct iwl_fw_dbg_trigger_tlv *trigger,
1483 const char *fmt, ...) __printf(3, 4);
1484unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
1485 struct ieee80211_vif *vif,
1486 bool tdls, bool cmd_q);
1487void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1488 const char *errmsg);
1489static inline bool
1490iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
1491 struct ieee80211_vif *vif)
1492{
1493 u32 trig_vif = le32_to_cpu(trig->vif_type);
1494
1495 return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
1496}
1497
1498static inline bool
1499iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm,
1500 struct iwl_fw_dbg_trigger_tlv *trig)
1501{
1502 return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
1503 (mvm->fw_dbg_conf == FW_DBG_INVALID ||
1504 (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids))));
1505}
1506
1507static inline bool
1508iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm,
1509 struct ieee80211_vif *vif,
1510 struct iwl_fw_dbg_trigger_tlv *trig)
1511{
1512 if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif))
1513 return false;
1514
1515 return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig);
1516}
1517
1518static inline void
1519iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm,
1520 struct ieee80211_vif *vif,
1521 enum iwl_fw_dbg_trigger trig)
1522{
1523 struct iwl_fw_dbg_trigger_tlv *trigger;
1524
1525 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, trig))
1526 return;
1527
1528 trigger = iwl_fw_dbg_get_trigger(mvm->fw, trig);
1529 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger))
1530 return;
1531
1532 iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
1533}
1534
1535#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
new file mode 100644
index 000000000000..2ee0f6fe56a1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -0,0 +1,864 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/firmware.h>
66#include <linux/rtnetlink.h>
67#include <linux/pci.h>
68#include <linux/acpi.h>
69#include "iwl-trans.h"
70#include "iwl-csr.h"
71#include "mvm.h"
72#include "iwl-eeprom-parse.h"
73#include "iwl-eeprom-read.h"
74#include "iwl-nvm-parse.h"
75#include "iwl-prph.h"
76
77/* Default NVM size to read */
78#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
79#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
80#define IWL_MAX_NVM_8000_SECTION_SIZE 0x1ffc
81
82#define NVM_WRITE_OPCODE 1
83#define NVM_READ_OPCODE 0
84
85/* load nvm chunk response */
86enum {
87 READ_NVM_CHUNK_SUCCEED = 0,
88 READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
89};
90
91/*
92 * prepare the NVM host command w/ the pointers to the nvm buffer
93 * and send it to fw
94 */
95static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
96 u16 offset, u16 length, const u8 *data)
97{
98 struct iwl_nvm_access_cmd nvm_access_cmd = {
99 .offset = cpu_to_le16(offset),
100 .length = cpu_to_le16(length),
101 .type = cpu_to_le16(section),
102 .op_code = NVM_WRITE_OPCODE,
103 };
104 struct iwl_host_cmd cmd = {
105 .id = NVM_ACCESS_CMD,
106 .len = { sizeof(struct iwl_nvm_access_cmd), length },
107 .flags = CMD_SEND_IN_RFKILL,
108 .data = { &nvm_access_cmd, data },
109 /* data may come from vmalloc, so use _DUP */
110 .dataflags = { 0, IWL_HCMD_DFL_DUP },
111 };
112
113 return iwl_mvm_send_cmd(mvm, &cmd);
114}
115
/*
 * Read one chunk of an NVM section via the NVM_ACCESS_CMD host command.
 *
 * On success, copies the returned bytes into data + offset and returns
 * the number of bytes read (0 means the section is exhausted). Returns
 * a negative errno on failure.
 */
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
			      u16 offset, u16 length, u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_READ_OPCODE,
	};
	struct iwl_nvm_access_resp *nvm_resp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		/* WANT_SKB: we need the response packet to extract the data */
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	u8 *resp_data;

	cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16_to_cpu(nvm_resp->status);
	bytes_read = le16_to_cpu(nvm_resp->length);
	offset_read = le16_to_cpu(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
					 offset);
			/* ret = 0 ends the caller's read loop gracefully */
			ret = 0;
		} else {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed with status %d (device: %s)\n",
					 ret, mvm->cfg->name);
			ret = -EIO;
		}
		goto exit;
	}

	/* the firmware must echo back the offset we asked for */
	if (offset_read != offset) {
		IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
			offset_read);
		ret = -EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	ret = bytes_read;

exit:
	/* release the response packet requested via CMD_WANT_SKB */
	iwl_free_resp(&cmd);
	return ret;
}
188
189static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
190 const u8 *data, u16 length)
191{
192 int offset = 0;
193
194 /* copy data in chunks of 2k (and remainder if any) */
195
196 while (offset < length) {
197 int chunk_size, ret;
198
199 chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
200 length - offset);
201
202 ret = iwl_nvm_write_chunk(mvm, section, offset,
203 chunk_size, data + offset);
204 if (ret < 0)
205 return ret;
206
207 offset += chunk_size;
208 }
209
210 return 0;
211}
212
/*
 * Reads an NVM section completely.
 * NICs prior to 7000 family doesn't have a real NVM, but just read
 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
 * by uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as we can,
 * without overflowing, so no check is needed.
 *
 * Returns the number of bytes read into data, or a negative errno.
 */
static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
				u8 *data, u32 size_read)
{
	u16 length, offset = 0;
	int ret;

	/* Set nvm section read length */
	length = IWL_NVM_DEFAULT_CHUNK_SIZE;

	/* prime the loop condition: pretend the previous chunk was full */
	ret = length;

	/* Read the NVM until exhausted (reading less than requested) */
	while (ret == length) {
		/* Check no memory assumptions fail and cause an overflow */
		if ((size_read + offset + length) >
		    mvm->cfg->base_params->eeprom_size) {
			IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
			return -ENOBUFS;
		}

		/* ret is the byte count actually read; 0 ends the loop too */
		ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
		if (ret < 0) {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "Cannot read NVM from section %d offset %d, length %d\n",
					 section, offset, length);
			return ret;
		}
		offset += ret;
	}

	IWL_DEBUG_EEPROM(mvm->trans->dev,
			 "NVM section %d read completed\n", section);
	return offset;
}
257
/*
 * Validate the NVM sections collected in mvm->nvm_sections and parse
 * them into an iwl_nvm_data structure.
 *
 * Which sections are mandatory depends on the device family: pre-8000
 * devices need SW and the HW section; family-8000 devices need SW,
 * REGULATORY, PHY_SKU and at least one of HW / MAC_OVERRIDE.
 *
 * Returns the parsed data, or NULL if a required section is missing.
 */
static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
	struct iwl_nvm_section *sections = mvm->nvm_sections;
	const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
	bool lar_enabled;
	u32 mac_addr0, mac_addr1;

	/* Checking for required sections */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else {
		/* SW and REGULATORY sections are mandatory */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
			IWL_ERR(mvm,
				"Can't parse empty family 8000 OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			IWL_ERR(mvm,
				"Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
			IWL_ERR(mvm,
				"Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	}

	if (WARN_ON(!mvm->cfg))
		return NULL;

	/* read the mac address from WFMP registers */
	mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
	mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);

	/* optional sections simply yield NULL pointers here; the parser
	 * is expected to cope with those — confirm against
	 * iwl_parse_nvm_data() */
	hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
	sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
	calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
	mac_override =
		(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;

	/* LAR is used only if not disabled by module param and the FW
	 * advertises support */
	lar_enabled = !iwlwifi_mod_params.lar_disable &&
		      fw_has_capa(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);

	return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
				  regulatory, mac_override, phy_sku,
				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
				  lar_enabled, mac_addr0, mac_addr1,
				  mvm->trans->hw_id);
}
322
323#define MAX_NVM_FILE_LEN 16384
324
325/*
326 * Reads external NVM from a file into mvm->nvm_sections
327 *
328 * HOW TO CREATE THE NVM FILE FORMAT:
329 * ------------------------------
330 * 1. create hex file, format:
331 * 3800 -> header
332 * 0000 -> header
333 * 5a40 -> data
334 *
335 * rev - 6 bit (word1)
336 * len - 10 bit (word1)
337 * id - 4 bit (word2)
338 * rsv - 12 bit (word2)
339 *
340 * 2. flip 8bits with 8 bits per line to get the right NVM file format
341 *
342 * 3. create binary file from the hex file
343 *
344 * 4. save as "iNVM_xxx.bin" under /lib/firmware
345 */
/*
 * Parse the external NVM image named by mvm->nvm_file_name into
 * mvm->nvm_sections.
 *
 * The file is a sequence of sections, each preceded by a 4-byte header of
 * two little-endian 16-bit words encoding the section length and ID (the
 * word layout differs between pre-8000 and 8000 HW families), optionally
 * prefixed by a 4-dword file header carrying version/date information.
 *
 * Returns 0 on success or a negative errno.  -EFAULT specifically means
 * the file version does not match the silicon step, so the caller may
 * retry with the default NVM file for the detected step.
 */
static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
	int ret, section_size;
	u16 section_id;
	const struct firmware *fw_entry;
	/* on-disk section header: two LE16 words followed by the payload */
	const struct {
		__le16 word1;
		__le16 word2;
		u8 data[];
	} *file_sec;
	const u8 *eof, *temp;
	int max_section_size;
	const __le32 *dword_buff;

/* pre-8000 layout: length (in 8-word units) in word1, ID in word2 */
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
/* 8000 layout: byte-swapped length (in words) in word2, ID in word1 */
#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
#define NVM_HEADER_0 (0x2A504C54)
#define NVM_HEADER_1 (0x4E564D2A)
#define NVM_HEADER_SIZE (4 * sizeof(u32))

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");

	/* Maximal size depends on HW family and step */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
	else
		max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE;

	/*
	 * Obtain NVM image via request_firmware. Since we already used
	 * request_firmware_nowait() for the firmware binary load and only
	 * get here after that we assume the NVM request can be satisfied
	 * synchronously.
	 */
	ret = request_firmware(&fw_entry, mvm->nvm_file_name,
			       mvm->trans->dev);
	if (ret) {
		IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
			mvm->nvm_file_name, ret);
		return ret;
	}

	IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
		 mvm->nvm_file_name, fw_entry->size);

	if (fw_entry->size > MAX_NVM_FILE_LEN) {
		IWL_ERR(mvm, "NVM file too large\n");
		ret = -EINVAL;
		goto out;
	}

	eof = fw_entry->data + fw_entry->size;
	dword_buff = (__le32 *)fw_entry->data;

	/* some NVM file will contain a header.
	 * The header is identified by 2 dwords header as follow:
	 * dword[0] = 0x2A504C54
	 * dword[1] = 0x4E564D2A
	 *
	 * This header must be skipped when providing the NVM data to the FW.
	 */
	if (fw_entry->size > NVM_HEADER_SIZE &&
	    dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
	    dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
		file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
		IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
		IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
			 le32_to_cpu(dword_buff[3]));

		/* nvm file validation, dword_buff[2] holds the file version */
		if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
		     le32_to_cpu(dword_buff[2]) < 0xE4A) ||
		    (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP &&
		     le32_to_cpu(dword_buff[2]) >= 0xE4A)) {
			/* wrong file for this HW step; caller may retry */
			ret = -EFAULT;
			goto out;
		}
	} else {
		/* headerless file - sections start at offset 0 */
		file_sec = (void *)fw_entry->data;
	}

	/* walk the section list until the all-zero EOF marker or an error */
	while (true) {
		/* the 4-byte section header itself must fit in the file */
		if (file_sec->data > eof) {
			IWL_ERR(mvm,
				"ERROR - NVM file too short for section header\n");
			ret = -EINVAL;
			break;
		}

		/* check for EOF marker */
		if (!file_sec->word1 && !file_sec->word2) {
			ret = 0;
			break;
		}

		if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
			section_size =
				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
		} else {
			section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
						le16_to_cpu(file_sec->word2));
			section_id = NVM_WORD1_ID_FAMILY_8000(
						le16_to_cpu(file_sec->word1));
		}

		if (section_size > max_section_size) {
			IWL_ERR(mvm, "ERROR - section too large (%d)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (!section_size) {
			IWL_ERR(mvm, "ERROR - section empty\n");
			ret = -EINVAL;
			break;
		}

		/* the section payload must not run past the end of the file */
		if (file_sec->data + section_size > eof) {
			IWL_ERR(mvm,
				"ERROR - NVM file too short for section (%d bytes)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
			 "Invalid NVM section ID %d\n", section_id)) {
			ret = -EINVAL;
			break;
		}

		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			break;
		}
		/* replace any previously stored copy of this section */
		kfree(mvm->nvm_sections[section_id].data);
		mvm->nvm_sections[section_id].data = temp;
		mvm->nvm_sections[section_id].length = section_size;

		/* advance to the next section */
		file_sec = (void *)(file_sec->data + section_size);
	}
out:
	release_firmware(fw_entry);
	return ret;
}
497
498/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
499int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
500{
501 int i, ret = 0;
502 struct iwl_nvm_section *sections = mvm->nvm_sections;
503
504 IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n");
505
506 for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
507 if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
508 continue;
509 ret = iwl_nvm_write_section(mvm, i, sections[i].data,
510 sections[i].length);
511 if (ret < 0) {
512 IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
513 break;
514 }
515 }
516 return ret;
517}
518
/*
 * Initialize mvm->nvm_data: optionally read all NVM sections from the NIC,
 * optionally override them from an external NVM file, then parse the
 * sections into the driver's nvm_data structure.
 *
 * @mvm: the mvm context
 * @read_nvm_from_nic: when true, read every section from the device OTP
 *	before (possibly) applying the external file
 *
 * Returns 0 on success or a negative errno (-ENODATA if parsing failed).
 */
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
	int ret, section;
	u32 size_read = 0;
	u8 *nvm_buffer, *temp;
	const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step;
	const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;

	if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
		return -EINVAL;

	/* load NVM values from nic */
	if (read_nvm_from_nic) {
		/* Read From FW NVM */
		IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");

		/* scratch buffer reused for every section read */
		nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
				     GFP_KERNEL);
		if (!nvm_buffer)
			return -ENOMEM;
		for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
			/* we override the constness for initial read */
			ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
						   size_read);
			/* a section may legitimately be absent - skip it */
			if (ret < 0)
				continue;
			size_read += ret;
			temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
			if (!temp) {
				ret = -ENOMEM;
				break;
			}
			mvm->nvm_sections[section].data = temp;
			mvm->nvm_sections[section].length = ret;

#ifdef CONFIG_IWLWIFI_DEBUGFS
			/* expose raw section blobs through debugfs */
			switch (section) {
			case NVM_SECTION_TYPE_SW:
				mvm->nvm_sw_blob.data = temp;
				mvm->nvm_sw_blob.size  = ret;
				break;
			case NVM_SECTION_TYPE_CALIBRATION:
				mvm->nvm_calib_blob.data = temp;
				mvm->nvm_calib_blob.size  = ret;
				break;
			case NVM_SECTION_TYPE_PRODUCTION:
				mvm->nvm_prod_blob.data = temp;
				mvm->nvm_prod_blob.size  = ret;
				break;
			case NVM_SECTION_TYPE_PHY_SKU:
				mvm->nvm_phy_sku_blob.data = temp;
				mvm->nvm_phy_sku_blob.size  = ret;
				break;
			default:
				if (section == mvm->cfg->nvm_hw_section_num) {
					mvm->nvm_hw_blob.data = temp;
					mvm->nvm_hw_blob.size = ret;
					break;
				}
			}
#endif
		}
		/* size_read stays 0 only if no section yielded any data */
		if (!size_read)
			IWL_ERR(mvm, "OTP is blank\n");
		kfree(nvm_buffer);
	}

	/* Only if PNVM selected in the mod param - load external NVM */
	if (mvm->nvm_file_name) {
		/* read External NVM file from the mod param */
		ret = iwl_mvm_read_external_nvm(mvm);
		if (ret) {
			/* choose the nvm_file name according to the
			 * HW step
			 */
			if (CSR_HW_REV_STEP(mvm->trans->hw_rev) ==
			    SILICON_B_STEP)
				mvm->nvm_file_name = nvm_file_B;
			else
				mvm->nvm_file_name = nvm_file_C;

			/* -EFAULT means version/step mismatch: retry once
			 * with the per-step default file if one exists
			 */
			if (ret == -EFAULT && mvm->nvm_file_name) {
				/* in case nvm file was failed try again */
				ret = iwl_mvm_read_external_nvm(mvm);
				if (ret)
					return ret;
			} else {
				return ret;
			}
		}
	}

	/* parse the relevant nvm sections */
	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
	if (!mvm->nvm_data)
		return -ENODATA;
	IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
			 mvm->nvm_data->nvm_version);

	return 0;
}
620
/*
 * Send an MCC_UPDATE_CMD to the firmware and return a kmalloc'd copy of
 * its response.
 *
 * @mvm: the mvm context
 * @alpha2: two-character country code to report (e.g. "US")
 * @src_id: origin of the MCC (BIOS, 3G, wifi, ...)
 *
 * Returns a heap copy of the response (caller must kfree it), or an
 * ERR_PTR on failure.  Never returns NULL.
 */
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
		   enum iwl_mcc_source src_id)
{
	struct iwl_mcc_update_cmd mcc_update_cmd = {
		.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
		.source_id = (u8)src_id,
	};
	struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = MCC_UPDATE_CMD,
		.flags = CMD_WANT_SKB,	/* we need the response packet */
		.data = { &mcc_update_cmd },
	};

	int ret;
	u32 status;
	int resp_len, n_channels;
	u16 mcc;

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return ERR_PTR(-EOPNOTSUPP);

	cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);

	IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
		      alpha2[0], alpha2[1], src_id);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ERR_PTR(ret);

	pkt = cmd.resp_pkt;

	/* Extract MCC response */
	mcc_resp = (void *)pkt->data;
	status = le32_to_cpu(mcc_resp->status);

	mcc = le16_to_cpu(mcc_resp->mcc);

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0) {
		mcc = 0x3030; /* "00" - world */
		mcc_resp->mcc = cpu_to_le16(mcc);
	}

	/*
	 * NOTE(review): n_channels comes straight from the firmware and is
	 * used unvalidated to size the copy below - presumably trusted here;
	 * confirm the FW bounds it.
	 */
	n_channels =  __le32_to_cpu(mcc_resp->n_channels);
	IWL_DEBUG_LAR(mvm,
		      "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
		      status, mcc, mcc >> 8, mcc & 0xff,
		      !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);

	/* copy response + trailing channel list before freeing the packet */
	resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
	resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
	if (!resp_cp) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = 0;
exit:
	iwl_free_resp(&cmd);
	if (ret)
		return ERR_PTR(ret);
	return resp_cp;
}
688
689#ifdef CONFIG_ACPI
690#define WRD_METHOD "WRDD"
691#define WRDD_WIFI (0x07)
692#define WRDD_WIGIG (0x10)
693
694static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
695{
696 union acpi_object *mcc_pkg, *domain_type, *mcc_value;
697 u32 i;
698
699 if (wrdd->type != ACPI_TYPE_PACKAGE ||
700 wrdd->package.count < 2 ||
701 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
702 wrdd->package.elements[0].integer.value != 0) {
703 IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
704 return 0;
705 }
706
707 for (i = 1 ; i < wrdd->package.count ; ++i) {
708 mcc_pkg = &wrdd->package.elements[i];
709
710 if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
711 mcc_pkg->package.count < 2 ||
712 mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
713 mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
714 mcc_pkg = NULL;
715 continue;
716 }
717
718 domain_type = &mcc_pkg->package.elements[0];
719 if (domain_type->integer.value == WRDD_WIFI)
720 break;
721
722 mcc_pkg = NULL;
723 }
724
725 if (mcc_pkg) {
726 mcc_value = &mcc_pkg->package.elements[1];
727 return mcc_value->integer.value;
728 }
729
730 return 0;
731}
732
733static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
734{
735 acpi_handle root_handle;
736 acpi_handle handle;
737 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
738 acpi_status status;
739 u32 mcc_val;
740 struct pci_dev *pdev = to_pci_dev(mvm->dev);
741
742 root_handle = ACPI_HANDLE(&pdev->dev);
743 if (!root_handle) {
744 IWL_DEBUG_LAR(mvm,
745 "Could not retrieve root port ACPI handle\n");
746 return -ENOENT;
747 }
748
749 /* Get the method's handle */
750 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
751 if (ACPI_FAILURE(status)) {
752 IWL_DEBUG_LAR(mvm, "WRD method not found\n");
753 return -ENOENT;
754 }
755
756 /* Call WRDD with no arguments */
757 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
758 if (ACPI_FAILURE(status)) {
759 IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
760 return -ENOENT;
761 }
762
763 mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
764 kfree(wrdd.pointer);
765 if (!mcc_val)
766 return -ENOENT;
767
768 mcc[0] = (mcc_val >> 8) & 0xff;
769 mcc[1] = mcc_val & 0xff;
770 mcc[2] = '\0';
771 return 0;
772}
773#else /* CONFIG_ACPI */
/* Stub for kernels built without ACPI: no BIOS MCC is ever available */
static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
{
	return -ENOENT;
}
778#endif
779
/*
 * Initialize the regulatory (MCC/LAR) state after firmware load: warn on
 * TLV-vs-NVM LAR disagreement, replay the last MCC if known, otherwise
 * query the FW default regdomain (optionally overridden by the BIOS WRDD
 * country code) and push it to cfg80211.
 *
 * Returns 0 on success or when LAR is unsupported, negative errno on
 * failure.
 */
int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
	bool tlv_lar;
	bool nvm_lar;
	int retval;
	struct ieee80211_regdomain *regd;
	char mcc[3];

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
		nvm_lar = mvm->nvm_data->lar_enabled;
		/* informational only - the effective setting wins below */
		if (tlv_lar != nvm_lar)
			IWL_INFO(mvm,
				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
				 tlv_lar ? "enabled" : "disabled",
				 nvm_lar ? "enabled" : "disabled");
	}

	if (!iwl_mvm_is_lar_supported(mvm))
		return 0;

	/*
	 * try to replay the last set MCC to FW. If it doesn't exist,
	 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
	 */
	retval = iwl_mvm_init_fw_regd(mvm);
	/* -ENOENT means "no previous MCC" - fall through to first-time init */
	if (retval != -ENOENT)
		return retval;

	/*
	 * Driver regulatory hint for initial update, this also informs the
	 * firmware we support wifi location updates.
	 * Disallow scans that might crash the FW while the LAR regdomain
	 * is not set.
	 */
	mvm->lar_regdom_set = false;

	/* regd is heap-allocated; every path below must kfree it */
	regd = iwl_mvm_get_current_regdomain(mvm, NULL);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	/* prefer the BIOS-provided country when the platform exposes one */
	if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
	    !iwl_mvm_get_bios_mcc(mvm, mcc)) {
		kfree(regd);
		regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
					     MCC_SOURCE_BIOS, NULL);
		if (IS_ERR_OR_NULL(regd))
			return -EIO;
	}

	retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
	kfree(regd);
	return retval;
}
835
/*
 * Handle an MCC_CHUB_UPDATE_CMD notification from the firmware: extract
 * the new country code and its source, build the matching regdomain and
 * hand it to cfg80211.  Runs async with mvm->mutex held (see the handler
 * table's async flag).
 */
void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
				struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
	enum iwl_mcc_source src;
	char mcc[3];
	struct ieee80211_regdomain *regd;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return;

	/*
	 * NOTE(review): notif->mcc is read without an explicit le16_to_cpu()
	 * conversion - presumably correct on little-endian hosts; confirm
	 * byte order for big-endian builds.
	 */
	mcc[0] = notif->mcc >> 8;
	mcc[1] = notif->mcc & 0xff;
	mcc[2] = '\0';
	src = notif->source_id;

	IWL_DEBUG_LAR(mvm,
		      "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
		      mcc, src);
	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
	if (IS_ERR_OR_NULL(regd))
		return;

	/* cfg80211 takes its own copy; free our regdomain afterwards */
	regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
	kfree(regd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
new file mode 100644
index 000000000000..68b0169c8892
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -0,0 +1,217 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <net/ipv6.h>
66#include <net/addrconf.h>
67#include "mvm.h"
68
69void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
70 struct iwl_wowlan_config_cmd *cmd)
71{
72 int i;
73
74 /*
75 * For QoS counters, we store the one to use next, so subtract 0x10
76 * since the uCode will add 0x10 *before* using the value while we
77 * increment after using the value (i.e. store the next value to use).
78 */
79 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
80 u16 seq = mvm_ap_sta->tid_data[i].seq_number;
81 seq -= 0x10;
82 cmd->qos_seq[i] = cpu_to_le16(seq);
83 }
84}
85
/*
 * Build and send a PROT_OFFLOAD_CONFIG_CMD enabling ARP (and, with IPv6,
 * NS) offloads for the given vif while the host sleeps.
 *
 * The command layout depends on uCode capability flags: v3 small/large
 * (shared NS-config + target-address tables), v2 (6 IPv6 addresses) or
 * v1 (2 IPv6 addresses).  Exactly one union member is populated and its
 * size is sent.
 *
 * @mvm: the mvm context
 * @vif: the interface being configured
 * @disable_offloading: when true, send the command with nothing enabled
 * @cmd_flags: flags forwarded to iwl_mvm_send_cmd()
 *
 * Returns 0 on success or a negative errno from iwl_mvm_send_cmd().
 */
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       bool disable_offloading,
			       u32 cmd_flags)
{
	/* one buffer, interpreted per the FW's supported command version */
	union {
		struct iwl_proto_offload_cmd_v1 v1;
		struct iwl_proto_offload_cmd_v2 v2;
		struct iwl_proto_offload_cmd_v3_small v3s;
		struct iwl_proto_offload_cmd_v3_large v3l;
	} cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = PROT_OFFLOAD_CONFIG_CMD,
		.flags = cmd_flags,
		.data[0] = &cmd,
		.dataflags[0] = IWL_HCMD_DFL_DUP,
	};
	struct iwl_proto_offload_cmd_common *common;
	u32 enabled = 0, size;
	u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int i;

	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		struct iwl_ns_config *nsc;
		struct iwl_targ_addr *addrs;
		int n_nsc, n_addrs;
		int c;

		/* pick the table sizes for the small or large v3 variant */
		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
			nsc = cmd.v3s.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
			addrs = cmd.v3s.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
		} else {
			nsc = cmd.v3l.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
			addrs = cmd.v3l.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
		}

		if (mvmvif->num_target_ipv6_addrs)
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;

		/*
		 * For each address we have (and that will fit) fill a target
		 * address struct and combine for NS offload structs with the
		 * solicited node addresses.
		 */
		for (i = 0, c = 0;
		     i < mvmvif->num_target_ipv6_addrs &&
		     i < n_addrs && c < n_nsc; i++) {
			struct in6_addr solicited_addr;
			int j;

			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
						  &solicited_addr);
			/* dedup: reuse an NS config with the same solicited
			 * address; j lands on the match or on slot c */
			for (j = 0; j < c; j++)
				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
						  &solicited_addr) == 0)
					break;
			if (j == c)
				c++;
			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
			addrs[i].config_num = cpu_to_le32(j);
			nsc[j].dest_ipv6_addr = solicited_addr;
			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
		}

		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
			cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
		else
			cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		/* v2 layout: flat list of up to 6 target addresses */
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
			memcpy(cmd.v2.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v2.target_ipv6_addr[i]));
	} else {
		/* v1 layout: flat list of up to 2 target addresses */
		if (mvmvif->num_target_ipv6_addrs) {
			enabled |= IWL_D3_PROTO_OFFLOAD_NS;
			memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
		}

		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));

		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
			memcpy(cmd.v1.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v1.target_ipv6_addr[i]));
	}
#endif

	/* locate the version-independent common header and payload size */
	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
		common = &cmd.v3s.common;
		size = sizeof(cmd.v3s);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		common = &cmd.v3l.common;
		size = sizeof(cmd.v3l);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		common = &cmd.v2.common;
		size = sizeof(cmd.v2);
	} else {
		common = &cmd.v1.common;
		size = sizeof(cmd.v1);
	}

	/* ARP offload: only the first configured IPv4 address is used */
	if (vif->bss_conf.arp_addr_cnt) {
		enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
		common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
	}

	/* leaving 'enabled' zeroed turns all offloads off */
	if (!disable_offloading)
		common->enabled = cpu_to_le32(enabled);

	hcmd.len[0] = size;
	return iwl_mvm_send_cmd(mvm, &hcmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
new file mode 100644
index 000000000000..13c97f665ba8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -0,0 +1,1434 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/module.h>
66#include <linux/vmalloc.h>
67#include <net/mac80211.h>
68
69#include "iwl-notif-wait.h"
70#include "iwl-trans.h"
71#include "iwl-op-mode.h"
72#include "iwl-fw.h"
73#include "iwl-debug.h"
74#include "iwl-drv.h"
75#include "iwl-modparams.h"
76#include "mvm.h"
77#include "iwl-phy-db.h"
78#include "iwl-eeprom-parse.h"
79#include "iwl-csr.h"
80#include "iwl-io.h"
81#include "iwl-prph.h"
82#include "rs.h"
83#include "fw-api-scan.h"
84#include "time-event.h"
85
86#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
87MODULE_DESCRIPTION(DRV_DESCRIPTION);
88MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
89MODULE_LICENSE("GPL");
90
91static const struct iwl_op_mode_ops iwl_mvm_ops;
92static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
93
94struct iwl_mvm_mod_params iwlmvm_mod_params = {
95 .power_scheme = IWL_POWER_SCHEME_BPS,
96 .tfd_q_hang_detect = true
97 /* rest of fields are 0 by default */
98};
99
100module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
101MODULE_PARM_DESC(init_dbg,
102 "set to true to debug an ASSERT in INIT fw (default: false");
103module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
104MODULE_PARM_DESC(power_scheme,
105 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
106module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
107 bool, S_IRUGO);
108MODULE_PARM_DESC(tfd_q_hang_detect,
109 "TFD queues hang detection (default: true");
110
111/*
112 * module init and exit functions
113 */
114static int __init iwl_mvm_init(void)
115{
116 int ret;
117
118 ret = iwl_mvm_rate_control_register();
119 if (ret) {
120 pr_err("Unable to register rate control algorithm: %d\n", ret);
121 return ret;
122 }
123
124 ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
125
126 if (ret) {
127 pr_err("Unable to register MVM op_mode: %d\n", ret);
128 iwl_mvm_rate_control_unregister();
129 }
130
131 return ret;
132}
133module_init(iwl_mvm_init);
134
/* Module exit: tear down in reverse order of iwl_mvm_init() */
static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);
141
/*
 * Program the NIC's HW interface configuration register from the
 * firmware's PHY configuration: MAC step/dash (from the HW revision) and
 * radio type/step/dash (from the FW phy_config word), plus the radio/MAC
 * SI workaround bits.
 */
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	/* unpack the radio configuration fields from the FW phy_config */
	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	/* catch a radio type that would overflow its register field */
	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
	 * shouldn't be set to any non-zero value. The same is supposed to be
	 * true of the other HW, but unsetting them (such as the 7260) causes
	 * automatic tests to fail on seemingly unrelated errors. Need to
	 * further investigate this, but for now we'll separate cases.
	 */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	/* read-modify-write only the fields we own */
	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
203
/* Dispatch-table entry mapping a FW notification to its handler */
struct iwl_rx_handlers {
	u16 cmd_id;	/* command ID, possibly WIDE_ID(group, cmd) */
	bool async;	/* true: deferred to a worker with mvm->mutex held */
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

/* table-entry builders for legacy and wide (group+cmd) notification IDs */
#define RX_HANDLER(_cmd_id, _fn, _async)	\
	{ .cmd_id = _cmd_id , .fn = _fn , .async = _async }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be SYNC - this means that it will be called in the Rx path
 * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
 * only in this case!), it should be set as ASYNC. In that case, it will be
 * called from a worker with mvm->mutex held.
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
	RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
		   iwl_mvm_rx_ant_coupling_notif, true),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif, true),
	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
		   false),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   true),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, false),

	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   false),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
		       iwl_mvm_temp_notif, true),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   true),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),

};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
/* CMD() maps a command id to its own name for debug/trace printing */
#define CMD(x) [x] = #x

/*
 * Command-id -> name table handed to the transport layer (see
 * trans_cfg.command_names below) so logs show symbolic command names.
 * Ids without an entry here simply print as NULL/unknown.
 */
static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
	CMD(MVM_ALIVE),
	CMD(REPLY_ERROR),
	CMD(ECHO_CMD),
	CMD(INIT_COMPLETE_NOTIF),
	CMD(PHY_CONTEXT_CMD),
	CMD(MGMT_MCAST_KEY),
	CMD(TX_CMD),
	CMD(TXPATH_FLUSH),
	CMD(SHARED_MEM_CFG),
	CMD(MAC_CONTEXT_CMD),
	CMD(TIME_EVENT_CMD),
	CMD(TIME_EVENT_NOTIFICATION),
	CMD(BINDING_CONTEXT_CMD),
	CMD(TIME_QUOTA_CMD),
	CMD(NON_QOS_TX_COUNTER_CMD),
	CMD(DC2DC_CONFIG_CMD),
	CMD(NVM_ACCESS_CMD),
	CMD(PHY_CONFIGURATION_CMD),
	CMD(CALIB_RES_NOTIF_PHY_DB),
	CMD(SET_CALIB_DEFAULT_CMD),
	CMD(FW_PAGING_BLOCK_CMD),
	CMD(ADD_STA_KEY),
	CMD(ADD_STA),
	CMD(FW_GET_ITEM_CMD),
	CMD(REMOVE_STA),
	CMD(LQ_CMD),
	CMD(SCAN_OFFLOAD_CONFIG_CMD),
	CMD(MATCH_FOUND_NOTIFICATION),
	CMD(SCAN_OFFLOAD_REQUEST_CMD),
	CMD(SCAN_OFFLOAD_ABORT_CMD),
	CMD(HOT_SPOT_CMD),
	CMD(SCAN_OFFLOAD_COMPLETE),
	CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	CMD(SCAN_ITERATION_COMPLETE),
	CMD(POWER_TABLE_CMD),
	CMD(WEP_KEY),
	CMD(REPLY_RX_PHY_CMD),
	CMD(REPLY_RX_MPDU_CMD),
	CMD(BEACON_NOTIFICATION),
	CMD(BEACON_TEMPLATE_CMD),
	CMD(STATISTICS_CMD),
	CMD(STATISTICS_NOTIFICATION),
	CMD(EOSP_NOTIFICATION),
	CMD(REDUCE_TX_POWER_CMD),
	CMD(TX_ANT_CONFIGURATION_CMD),
	CMD(D3_CONFIG_CMD),
	CMD(D0I3_END_CMD),
	CMD(PROT_OFFLOAD_CONFIG_CMD),
	CMD(OFFLOADS_QUERY_CMD),
	CMD(REMOTE_WAKE_CONFIG_CMD),
	CMD(WOWLAN_PATTERNS),
	CMD(WOWLAN_CONFIGURATION),
	CMD(WOWLAN_TSC_RSC_PARAM),
	CMD(WOWLAN_TKIP_PARAM),
	CMD(WOWLAN_KEK_KCK_MATERIAL),
	CMD(WOWLAN_GET_STATUSES),
	CMD(WOWLAN_TX_POWER_PER_DB),
	CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
	CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
	CMD(CARD_STATE_NOTIFICATION),
	CMD(MISSED_BEACONS_NOTIFICATION),
	CMD(BT_COEX_PRIO_TABLE),
	CMD(BT_COEX_PROT_ENV),
	CMD(BT_PROFILE_NOTIFICATION),
	CMD(BT_CONFIG),
	CMD(MCAST_FILTER_CMD),
	CMD(BCAST_FILTER_CMD),
	CMD(REPLY_SF_CFG_CMD),
	CMD(REPLY_BEACON_FILTERING_CMD),
	CMD(CMD_DTS_MEASUREMENT_TRIGGER),
	CMD(DTS_MEASUREMENT_NOTIFICATION),
	CMD(REPLY_THERMAL_MNG_BACKOFF),
	CMD(MAC_PM_POWER_TABLE),
	CMD(LTR_CONFIG),
	CMD(BT_COEX_CI),
	CMD(BT_COEX_UPDATE_SW_BOOST),
	CMD(BT_COEX_UPDATE_CORUN_LUT),
	CMD(BT_COEX_UPDATE_REDUCED_TXP),
	CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	CMD(ANTENNA_COUPLING_NOTIFICATION),
	CMD(SCD_QUEUE_CFG),
	CMD(SCAN_CFG_CMD),
	CMD(SCAN_REQ_UMAC),
	CMD(SCAN_ABORT_UMAC),
	CMD(SCAN_COMPLETE_UMAC),
	CMD(TDLS_CHANNEL_SWITCH_CMD),
	CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	CMD(TDLS_CONFIG_CMD),
	CMD(MCC_UPDATE_CMD),
	CMD(SCAN_ITERATION_COMPLETE_UMAC),
};
#undef CMD
367
368/* this forward declaration can avoid to export the function */
369static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
370static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
371
372static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
373{
374 const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
375
376 if (!pwr_tx_backoff)
377 return 0;
378
379 while (pwr_tx_backoff->pwr) {
380 if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
381 return pwr_tx_backoff->backoff;
382
383 pwr_tx_backoff++;
384 }
385
386 return 0;
387}
388
389static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);
390
/*
 * iwl_op_mode_mvm_start - allocate and bring up the MVM op-mode
 *
 * Allocates the mac80211 hw (with the op-mode/mvm private area),
 * initializes all software state (locks, lists, workers), configures
 * the transport layer, reads the NVM (from file and/or the NIC, running
 * the INIT ucode when the NIC must be powered), registers with mac80211
 * and debugfs.  Returns the new op-mode, or NULL on any failure.
 */
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;
	u32 min_backoff;

	/*
	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	/* a zero in the config means "keep mac80211's default" */
	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	/*
	 * Pick the ops matching the Rx architecture; the legacy (single
	 * queue) ops cannot cope with multiple hardware Rx queues.
	 */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
	} else {
		op_mode->ops = &iwl_mvm_ops;

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

	/* -1 means "restart without limit" when fw restart is enabled */
	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;

	/* queue layout depends on how many TX queues the HW provides */
	mvm->aux_queue = 15;
	mvm->first_agg_queue = 16;
	mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
	if (mvm->cfg->base_params->num_of_queues == 16) {
		mvm->aux_queue = 11;
		mvm->first_agg_queue = 12;
	}
	mvm->sf_state = SF_UNINIT;
	mvm->low_latency_agg_frame_limit = 6;
	mvm->cur_ucode = IWL_UCODE_INIT;

	/* init locks, lists, workers and wait queues */
	mutex_init(&mvm->mutex);
	mutex_init(&mvm->d0i3_suspend_mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	spin_lock_init(&mvm->queue_info_lock);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
	INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
	INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);

	spin_lock_init(&mvm->d0i3_tx_lock);
	spin_lock_init(&mvm->refs_lock);
	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
	trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
					       IWL_UCODE_TLV_API_WIDE_CMD_HDR);

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
		trans_cfg.bc_table_dword = true;

	trans_cfg.command_names = iwl_mvm_cmd_strings;

	trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
	       sizeof(trans->dbg_conf_tlv));
	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

	/* thermal throttling needs the per-platform TX power backoff */
	min_backoff = calc_min_backoff(trans, cfg);
	iwl_mvm_tt_initialize(mvm, min_backoff);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	/* a NIC that may not be powered up here must have an NVM file */
	if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
		 "not allowing power-up and not having nvm_file\n"))
		goto out_free;

	/*
	 * Even if nvm exists in the nvm_file driver should read again the nvm
	 * from the nic because there might be entries that exist in the OTP
	 * and not in the file.
	 * for nics with no_power_up_nic_in_init: rely completley on nvm_file
	 */
	if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
		err = iwl_nvm_init(mvm, false);
		if (err)
			goto out_free;
	} else {
		err = iwl_trans_start_hw(mvm->trans);
		if (err)
			goto out_free;

		mutex_lock(&mvm->mutex);
		err = iwl_run_init_mvm_ucode(mvm, true);
		if (!err || !iwlmvm_mod_params.init_dbg)
			iwl_trans_stop_device(trans);
		mutex_unlock(&mvm->mutex);
		/* returns 0 if successful, 1 if success but in rfkill */
		if (err < 0 && !iwlmvm_mod_params.init_dbg) {
			IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
			goto out_free;
		}
	}

	/* scan command size depends on fw capabilities - allocate once */
	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;

	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

	memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	/* rpm starts with a taken ref. only set the appropriate bit here. */
	mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;

	iwl_mvm_tof_init(mvm);

	return op_mode;

 out_unregister:
	ieee80211_unregister_hw(mvm->hw);
	iwl_mvm_leds_exit(mvm);
 out_free:
	flush_delayed_work(&mvm->fw_dump_wk);
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	/* only leave the transport if we actually started it above */
	if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
		iwl_trans_op_mode_leave(trans);
	ieee80211_free_hw(mvm->hw);
	return NULL;
}
613
/*
 * iwl_op_mode_mvm_stop - tear down the MVM op-mode
 *
 * Mirrors iwl_op_mode_mvm_start(): unregisters from mac80211, frees all
 * software state and leaves the transport.  The order matters - mac80211
 * must be unregistered before its backing state is freed, and the hw is
 * freed last since the mvm lives inside it.
 */
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_tt_exit(mvm);

	ieee80211_unregister_hw(mvm->hw);

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

	/* D3/WoWLAN state only exists with suspend + debugfs support */
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
	kfree(mvm->d3_resume_sram);
	if (mvm->nd_config) {
		kfree(mvm->nd_config->match_sets);
		kfree(mvm->nd_config->scan_plans);
		kfree(mvm->nd_config);
		mvm->nd_config = NULL;
	}
#endif

	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	/* NVM data was read section by section - free each one */
	iwl_free_nvm_data(mvm->nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	iwl_mvm_tof_clean(mvm);

	/* frees the op_mode/mvm too, since they live in hw->priv */
	ieee80211_free_hw(mvm->hw);
}
652
/*
 * struct iwl_async_handler_entry - a deferred fw notification
 * @list: linkage into mvm->async_handlers_list
 * @rxb: the Rx buffer (page ownership was stolen from the Rx path)
 * @fn: the ASYNC handler to run from the worker with mvm->mutex held
 */
struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
658
659void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
660{
661 struct iwl_async_handler_entry *entry, *tmp;
662
663 spin_lock_bh(&mvm->async_handlers_lock);
664 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
665 iwl_free_rxb(&entry->rxb);
666 list_del(&entry->list);
667 kfree(entry);
668 }
669 spin_unlock_bh(&mvm->async_handlers_lock);
670}
671
672static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
673{
674 struct iwl_mvm *mvm =
675 container_of(wk, struct iwl_mvm, async_handlers_wk);
676 struct iwl_async_handler_entry *entry, *tmp;
677 struct list_head local_list;
678
679 INIT_LIST_HEAD(&local_list);
680
681 /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
682 mutex_lock(&mvm->mutex);
683
684 /*
685 * Sync with Rx path with a lock. Remove all the entries from this list,
686 * add them to a local one (lock free), and then handle them.
687 */
688 spin_lock_bh(&mvm->async_handlers_lock);
689 list_splice_init(&mvm->async_handlers_list, &local_list);
690 spin_unlock_bh(&mvm->async_handlers_lock);
691
692 list_for_each_entry_safe(entry, tmp, &local_list, list) {
693 entry->fn(mvm, &entry->rxb);
694 iwl_free_rxb(&entry->rxb);
695 list_del(&entry->list);
696 kfree(entry);
697 }
698 mutex_unlock(&mvm->mutex);
699}
700
701static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
702 struct iwl_rx_packet *pkt)
703{
704 struct iwl_fw_dbg_trigger_tlv *trig;
705 struct iwl_fw_dbg_trigger_cmd *cmds_trig;
706 int i;
707
708 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
709 return;
710
711 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
712 cmds_trig = (void *)trig->data;
713
714 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
715 return;
716
717 for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
718 /* don't collect on CMD 0 */
719 if (!cmds_trig->cmds[i].cmd_id)
720 break;
721
722 if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
723 cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
724 continue;
725
726 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
727 "CMD 0x%02x.%02x received",
728 pkt->hdr.group_id, pkt->hdr.cmd);
729 break;
730 }
731}
732
/*
 * iwl_mvm_rx_common - dispatch a non-data fw notification
 *
 * Runs SYNC handlers inline in the Rx path; for ASYNC handlers it
 * steals the Rx page and queues an entry for the async worker, which
 * runs the handler with mvm->mutex held.
 */
static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	int i;

	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		/* SYNC handlers run right here, in the Rx path */
		if (!rx_h->async) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		/* take page ownership so the Rx path won't recycle it */
		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}
}
776
777static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
778 struct napi_struct *napi,
779 struct iwl_rx_cmd_buffer *rxb)
780{
781 struct iwl_rx_packet *pkt = rxb_addr(rxb);
782 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
783
784 if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
785 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
786 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
787 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
788 else
789 iwl_mvm_rx_common(mvm, rxb, pkt);
790}
791
792static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
793 struct napi_struct *napi,
794 struct iwl_rx_cmd_buffer *rxb)
795{
796 struct iwl_rx_packet *pkt = rxb_addr(rxb);
797 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
798
799 if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
800 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
801 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
802 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
803 else
804 iwl_mvm_rx_common(mvm, rxb, pkt);
805}
806
807static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
808{
809 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
810 unsigned long mq;
811 int q;
812
813 spin_lock_bh(&mvm->queue_info_lock);
814 mq = mvm->queue_info[queue].hw_queue_to_mac80211;
815 spin_unlock_bh(&mvm->queue_info_lock);
816
817 if (WARN_ON_ONCE(!mq))
818 return;
819
820 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
821 if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
822 IWL_DEBUG_TX_QUEUES(mvm,
823 "queue %d (mac80211 %d) already stopped\n",
824 queue, q);
825 continue;
826 }
827
828 ieee80211_stop_queue(mvm->hw, q);
829 }
830}
831
832static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
833{
834 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
835 unsigned long mq;
836 int q;
837
838 spin_lock_bh(&mvm->queue_info_lock);
839 mq = mvm->queue_info[queue].hw_queue_to_mac80211;
840 spin_unlock_bh(&mvm->queue_info_lock);
841
842 if (WARN_ON_ONCE(!mq))
843 return;
844
845 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
846 if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
847 IWL_DEBUG_TX_QUEUES(mvm,
848 "queue %d (mac80211 %d) still stopped\n",
849 queue, q);
850 continue;
851 }
852
853 ieee80211_wake_queue(mvm->hw, q);
854 }
855}
856
/*
 * iwl_mvm_set_hw_ctkill_state - track the CT-kill (thermal kill) state
 *
 * Updates the CT-kill status bit and reports the combined radio-kill
 * state (CT-kill and/or RF-kill) to cfg80211.
 */
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
866
/*
 * iwl_mvm_set_hw_rfkill_state - handle an RF-kill state change
 *
 * Updates the RF-kill status bit, reports the combined radio-kill state
 * to cfg80211, and aborts any in-flight calibration wait.  The return
 * value tells the transport whether it should stop the device.
 */
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	/* snapshot the flag once, before (possibly) acting on it below */
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool calibrating = ACCESS_ONCE(mvm->calibrating);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));

	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
	if (calibrating)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
}
889
890static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
891{
892 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
893 struct ieee80211_tx_info *info;
894
895 info = IEEE80211_SKB_CB(skb);
896 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
897 ieee80211_free_txskb(mvm->hw, skb);
898}
899
/*
 * struct iwl_mvm_reprobe - context for the device-reprobe worker
 * @dev: the device to unbind and rebind
 * @work: the work item running iwl_mvm_reprobe_wk()
 */
struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};
904
905static void iwl_mvm_reprobe_wk(struct work_struct *wk)
906{
907 struct iwl_mvm_reprobe *reprobe;
908
909 reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
910 if (device_reprobe(reprobe->dev))
911 dev_err(reprobe->dev, "reprobe failed!\n");
912 kfree(reprobe);
913 module_put(THIS_MODULE);
914}
915
/*
 * iwl_mvm_fw_error_dump_wk - collect a firmware debug dump
 *
 * Stops firmware debug recording (device-family specific), collects
 * the dump, and restarts recording unless the firmware has crashed.
 * Holds a FW_DBG_COLLECT runtime-pm reference for the duration.
 */
static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, fw_dump_wk.work);

	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
		return;

	mutex_lock(&mvm->mutex);

	/* stop recording */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
	} else {
		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
		/* wait before we collect the data till the DBGC stop */
		udelay(100);
	}

	iwl_mvm_fw_error_dump(mvm);

	/* start recording again if the firmware is not crashed */
	WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
		     mvm->fw->dbg_dest_tlv &&
		     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));

	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
}
946
/*
 * iwl_mvm_nic_restart - recover from a firmware error
 *
 * Chooses one of three recovery paths: collect a dump only (fw restart
 * disabled), reprobe the whole device (error while already restarting),
 * or ask mac80211 to restart the hardware (normal case with the
 * operational firmware running).
 */
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * This is a bit racy, but worst case we tell mac80211 about
	 * a stopped/aborted scan when that was already done which
	 * is not a problem. It is necessary to abort any os scan
	 * here because mac80211 requires having the scan cleared
	 * before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next start() call from mac80211. If restart isn't called
	 * (no fw restart) scan status will stay busy.
	 */
	iwl_mvm_report_scan_aborted(mvm);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (!mvm->restart_fw && fw_error) {
		/* restart disabled: only collect the assert dump */
		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
					    NULL);
	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
				    &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = mvm->trans->dev;
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
		/* don't let the transport/FW power down */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

		/* restart_fw < 0 means unlimited restarts */
		if (fw_error && mvm->restart_fw > 0)
			mvm->restart_fw--;
		ieee80211_restart_hw(mvm->hw);
	}
}
1006
/*
 * iwl_mvm_nic_error - op-mode callback for a firmware error
 *
 * Dumps the firmware error log, then runs the common restart flow.
 */
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm, true);
}
1015
/*
 * iwl_mvm_cmd_queue_full - op-mode callback for a full command queue
 *
 * A full command queue should never happen; warn and go through the
 * same restart flow as a firmware error.
 */
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm, true);
}
1023
/*
 * struct iwl_d0i3_iter_data - state accumulated over the d0i3-entry
 * interface iteration
 * @mvm: the mvm being put into d0i3
 * @ap_sta_id: station id of the AP of the (single) associated vif
 * @vif_count: number of matching (associated station) vifs seen
 * @offloading_tid: tid chosen for qos-seq offloading
 * @disable_offloading: true when no tid is usable for offloading
 */
struct iwl_d0i3_iter_data {
	struct iwl_mvm *mvm;
	u8 ap_sta_id;
	u8 vif_count;
	u8 offloading_tid;
	bool disable_offloading;
};
1031
/*
 * iwl_mvm_disallow_offloading - decide whether qos-seq offloading is safe
 *
 * Scans the AP station's tids for one that is idle (no queued frames,
 * no aggregation) and records it in @iter_data->offloading_tid.
 * Returns true when offloading must be disabled (no usable tid),
 * false otherwise.
 */
static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_d0i3_iter_data *iter_data)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvmsta;
	u32 available_tids = 0;
	u8 tid;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
		    mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
		return false;

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		return false;

	mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
	/* tid state is protected by the station's lock */
	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		/*
		 * in case of pending tx packets, don't use this tid
		 * for offloading in order to prevent reuse of the same
		 * qos seq counters.
		 */
		if (iwl_mvm_tid_queued(tid_data))
			continue;

		if (tid_data->state != IWL_AGG_OFF)
			continue;

		available_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	/*
	 * disallow protocol offloading if we have no available tid
	 * (with no pending frames and no active aggregation,
	 * as we don't handle "holes" properly - the scheduler needs the
	 * frame's seq number and TFD index to match)
	 */
	if (!available_tids)
		return true;

	/* for simplicity, just use the first available tid */
	iter_data->offloading_tid = ffs(available_tids) - 1;
	return false;
}
1083
/*
 * iwl_mvm_enter_d0i3_iterator - per-interface d0i3 entry configuration
 *
 * For each associated station interface: decide whether protocol
 * offloading is allowed, switch the vif's power mode and offloads to
 * their d0i3 configuration, and record the AP station id / vif count
 * in the shared iterator data.
 */
static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_d0i3_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;

	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
	/* only associated station interfaces need d0i3 configuration */
	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	/*
	 * in case of pending tx packets or active aggregations,
	 * avoid offloading features in order to prevent reuse of
	 * the same qos seq counters.
	 */
	if (iwl_mvm_disallow_offloading(mvm, vif, data))
		data->disable_offloading = true;

	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);

	/*
	 * on init/association, mvm already configures POWER_TABLE_CMD
	 * and REPLY_MCAST_FILTER_CMD, so currently don't
	 * reconfigure them (we might want to use different
	 * params later on, though).
	 */
	data->ap_sta_id = mvmvif->ap_sta_id;
	data->vif_count++;
}
1117
/*
 * iwl_mvm_set_wowlan_data - fill the wowlan config for d0i3 entry
 *
 * Looks up the AP station (under RCU) and copies its 11n status,
 * the chosen offloading tid, and the per-tid qos sequence counters
 * into @cmd.  Does nothing when no AP station id was recorded.
 */
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
				    struct iwl_wowlan_config_cmd *cmd,
				    struct iwl_d0i3_iter_data *iter_data)
{
	struct ieee80211_sta *ap_sta;
	struct iwl_mvm_sta *mvm_ap_sta;

	if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
		return;

	rcu_read_lock();

	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
	if (IS_ERR_OR_NULL(ap_sta))
		goto out;

	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
	cmd->offloading_tid = iter_data->offloading_tid;

	/*
	 * The d0i3 uCode takes care of the nonqos counters,
	 * so configure only the qos seq ones.
	 */
	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
	rcu_read_unlock();
}
1146
/*
 * iwl_mvm_enter_d0i3 - op-mode callback to enter the d0i3 power state
 *
 * Marks the mvm as in-d0i3, aborts if any runtime-pm reference is
 * held, reconfigures each associated vif for d0i3, and sends the
 * WOWLAN_CONFIGURATION (if needed) and D3_CONFIG commands to the fw.
 * Returns 0 on success, 1 when aborted due to a taken reference, or a
 * negative error from sending the commands.
 */
int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
	int ret;
	struct iwl_d0i3_iter_data d0i3_iter_data = {
		.mvm = mvm,
	};
	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
					     IWL_WOWLAN_WAKEUP_LINK_CHANGE |
					     IWL_WOWLAN_WAKEUP_BCN_FILTERING),
	};
	struct iwl_d3_manager_config d3_cfg_cmd = {
		.min_sleep_time = cpu_to_le32(1000),
		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
	};

	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");

	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	/*
	 * iwl_mvm_ref_sync takes a reference before checking the flag.
	 * so by checking there is no held reference we prevent a state
	 * in which iwl_mvm_ref_sync continues successfully while we
	 * configure the firmware to enter d0i3
	 */
	if (iwl_mvm_ref_taken(mvm)) {
		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
		wake_up(&mvm->d0i3_exit_waitq);
		return 1;
	}

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_enter_d0i3_iterator,
						   &d0i3_iter_data);
	/* offloading is only meaningful with exactly one associated vif */
	if (d0i3_iter_data.vif_count == 1) {
		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
	} else {
		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
		mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
		mvm->d0i3_offloading = false;
	}

	/* make sure we have no running tx while configuring the seqno */
	synchronize_net();

	/* configure wowlan configuration only if needed */
	if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
					&d0i3_iter_data);

		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
					   sizeof(wowlan_config_cmd),
					   &wowlan_config_cmd);
		if (ret)
			return ret;
	}

	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
				    flags | CMD_MAKE_TRANS_IDLE,
				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
}
1215
1216static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
1217 struct ieee80211_vif *vif)
1218{
1219 struct iwl_mvm *mvm = _data;
1220 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
1221
1222 IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
1223 if (vif->type != NL80211_IFTYPE_STATION ||
1224 !vif->bss_conf.assoc)
1225 return;
1226
1227 iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
1228}
1229
/*
 * struct iwl_mvm_wakeup_reason_iter_data - context for the d0i3
 * wakeup-reason interface iteration
 * @mvm: the mvm that exited d0i3
 * @wakeup_reasons: wakeup reason bitmap reported by the firmware
 */
struct iwl_mvm_wakeup_reason_iter_data {
	struct iwl_mvm *mvm;
	u32 wakeup_reasons;
};
1234
1235static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
1236 struct ieee80211_vif *vif)
1237{
1238 struct iwl_mvm_wakeup_reason_iter_data *data = _data;
1239 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1240
1241 if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
1242 data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
1243 if (data->wakeup_reasons &
1244 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
1245 iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
1246 else
1247 ieee80211_beacon_loss(vif);
1248 }
1249}
1250
/*
 * iwl_mvm_d0i3_enable_tx - resume TX after a d0i3 exit
 *
 * Updates the AP station's qos sequence numbers from the fw-reported
 * values (when offloading was enabled), re-enqueues (or drops) all
 * frames that were held in the d0i3 TX queue, clears the in-d0i3
 * state and wakes the mac80211 queues.
 */
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
	struct ieee80211_sta *sta = NULL;
	struct iwl_mvm_sta *mvm_ap_sta;
	int i;
	bool wake_queues = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->d0i3_tx_lock);

	if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");

	/* get the sta in order to update seq numbers and re-enqueue skbs */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
			lockdep_is_held(&mvm->mutex));

	if (IS_ERR_OR_NULL(sta)) {
		sta = NULL;
		goto out;
	}

	if (mvm->d0i3_offloading && qos_seq) {
		/* update qos seq numbers if offloading was enabled */
		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
			u16 seq = le16_to_cpu(qos_seq[i]);
			/* firmware stores last-used one, we store next one */
			seq += 0x10;
			mvm_ap_sta->tid_data[i].seq_number = seq;
		}
	}
out:
	/* re-enqueue (or drop) all packets */
	while (!skb_queue_empty(&mvm->d0i3_tx)) {
		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);

		/* without a station (or on TX failure) the skb is dropped */
		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);

		/* if the skb_queue is not empty, we need to wake queues */
		wake_queues = true;
	}
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
	wake_up(&mvm->d0i3_exit_waitq);
	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	if (wake_queues)
		ieee80211_wake_queues(mvm->hw);

	spin_unlock_bh(&mvm->d0i3_tx_lock);
}
1306
/*
 * Deferred part of the D0i3 exit flow.  Queries the wowlan status from the
 * firmware to learn the wakeup reason and the offloaded qos sequence
 * counters, notifies mac80211 about disconnection-type wakeups, and finally
 * re-enables tx via iwl_mvm_d0i3_enable_tx() (which also runs - with NULL
 * qos_seq - if the status query failed).
 */
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_host_cmd get_status_cmd = {
		.id = WOWLAN_GET_STATUSES,
		.flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
	};
	struct iwl_wowlan_status *status;
	int ret;
	u32 handled_reasons, wakeup_reasons = 0;
	__le16 *qos_seq = NULL;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
	if (ret)
		goto out;

	if (!get_status_cmd.resp_pkt)
		goto out;

	status = (void *)get_status_cmd.resp_pkt->data;
	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
	qos_seq = status->qos_seq_ctr;

	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);

	/* disconnection-type wakeups require notifying mac80211 */
	handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
				IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
	if (wakeup_reasons & handled_reasons) {
		struct iwl_mvm_wakeup_reason_iter_data data = {
			.mvm = mvm,
			.wakeup_reasons = wakeup_reasons,
		};

		ieee80211_iterate_active_interfaces(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_d0i3_wakeup_reason_iter, &data);
	}
out:
	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);

	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);

	/* qos_seq might point inside resp_pkt, so free it only now */
	if (get_status_cmd.resp_pkt)
		iwl_free_resp(&get_status_cmd);

	/* the FW might have updated the regdomain */
	iwl_mvm_update_changed_regdom(mvm);

	/* drop the reference taken in iwl_mvm_exit_d0i3() */
	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
	mutex_unlock(&mvm->mutex);
}
1361
/*
 * Kick off the D0i3 exit sequence: send the (asynchronous) D0I3_END_CMD,
 * notify the per-interface exit iterator, and schedule the deferred exit
 * work.  If a system suspend is in progress (D0I3_DEFER_WAKEUP set), the
 * exit is deferred until resume instead.
 *
 * Note: the exit work is scheduled even when sending D0I3_END_CMD fails,
 * so that the parked tx frames are flushed and the EXIT_WORK reference is
 * released in either case.
 */
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;
	int ret;

	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");

	mutex_lock(&mvm->d0i3_suspend_mutex);
	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
		mutex_unlock(&mvm->d0i3_suspend_mutex);
		return 0;
	}
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
	if (ret)
		goto out;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_exit_d0i3_iterator,
						   mvm);
out:
	schedule_work(&mvm->d0i3_exit_work);
	return ret;
}
1391
1392int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1393{
1394 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1395
1396 iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
1397 return _iwl_mvm_exit_d0i3(mvm);
1398}
1399
/*
 * Op-mode callbacks shared verbatim between the two ops tables below
 * (iwl_mvm_ops and iwl_mvm_ops_mq); only the rx entry points differ.
 */
#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop
1414
/* ops table used with transports that deliver rx on a single path
 * (no rx_rss handler) - see iwl_mvm_ops_mq for the multi-queue variant.
 */
static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};
1419
/*
 * RSS rx handler for multi-queue transports: every queue's MPDUs go
 * through the common rx-MPDU path.  The queue index is currently unused.
 */
static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	iwl_mvm_rx_rx_mpdu(IWL_OP_MODE_GET_MVM(op_mode), napi, rxb);
}
1429
/* ops table used with multi-queue (RSS-capable) transports */
static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
new file mode 100644
index 000000000000..e68a475e3071
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -0,0 +1,295 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <net/mac80211.h>
67#include "fw-api.h"
68#include "mvm.h"
69
70/* Maps the driver specific channel width definition to the fw values */
71u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
72{
73 switch (chandef->width) {
74 case NL80211_CHAN_WIDTH_20_NOHT:
75 case NL80211_CHAN_WIDTH_20:
76 return PHY_VHT_CHANNEL_MODE20;
77 case NL80211_CHAN_WIDTH_40:
78 return PHY_VHT_CHANNEL_MODE40;
79 case NL80211_CHAN_WIDTH_80:
80 return PHY_VHT_CHANNEL_MODE80;
81 case NL80211_CHAN_WIDTH_160:
82 return PHY_VHT_CHANNEL_MODE160;
83 default:
84 WARN(1, "Invalid channel width=%u", chandef->width);
85 return PHY_VHT_CHANNEL_MODE20;
86 }
87}
88
/*
 * Maps the driver specific control channel position (relative to the center
 * freq) definitions to the fw values
 */
u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{
	/* the control channel sits at a multiple of 10 MHz from the center
	 * frequency, so only these offsets occur for valid configurations
	 */
	switch (chandef->chan->center_freq - chandef->center_freq1) {
	case -70:
		return PHY_VHT_CTRL_POS_4_BELOW;
	case -50:
		return PHY_VHT_CTRL_POS_3_BELOW;
	case -30:
		return PHY_VHT_CTRL_POS_2_BELOW;
	case -10:
		return PHY_VHT_CTRL_POS_1_BELOW;
	case 10:
		return PHY_VHT_CTRL_POS_1_ABOVE;
	case 30:
		return PHY_VHT_CTRL_POS_2_ABOVE;
	case 50:
		return PHY_VHT_CTRL_POS_3_ABOVE;
	case 70:
		return PHY_VHT_CTRL_POS_4_ABOVE;
	default:
		WARN(1, "Invalid channel definition");
		/* fall through - warn, then use the same default as case 0 */
	case 0:
		/*
		 * The FW is expected to check the control channel position only
		 * when in HT/VHT and the channel width is not 20MHz. Return
		 * this value as the default one.
		 */
		return PHY_VHT_CTRL_POS_1_BELOW;
	}
}
123
124/*
125 * Construct the generic fields of the PHY context command
126 */
127static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
128 struct iwl_phy_context_cmd *cmd,
129 u32 action, u32 apply_time)
130{
131 memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
132
133 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
134 ctxt->color));
135 cmd->action = cpu_to_le32(action);
136 cmd->apply_time = cpu_to_le32(apply_time);
137}
138
/*
 * Add the phy configuration to the PHY context command: band, channel,
 * width and control channel position, plus the rx/tx chain settings.
 */
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
				      struct iwl_phy_context_cmd *cmd,
				      struct cfg80211_chan_def *chandef,
				      u8 chains_static, u8 chains_dynamic)
{
	u8 active_cnt, idle_cnt;

	/* Set the channel info data */
	cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
	      PHY_BAND_24 : PHY_BAND_5);

	cmd->ci.channel = chandef->chan->hw_value;
	cmd->ci.width = iwl_mvm_get_channel_width(chandef);
	cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);

	/* Set the Rx chains: static count -> idle, dynamic count -> active */
	idle_cnt = chains_static;
	active_cnt = chains_dynamic;

	/* In scenarios where we only ever use a single-stream rates,
	 * i.e. legacy 11b/g/a associations, single-stream APs or even
	 * static SMPS, enable both chains to get diversity, improving
	 * the case where we're far enough from the AP that attenuation
	 * between the two antennas is sufficiently different to impact
	 * performance.
	 */
	if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
		idle_cnt = 2;
		active_cnt = 2;
	}

	cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
					PHY_RX_CHAIN_VALID_POS);
	cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
	cmd->rxchain_info |= cpu_to_le32(active_cnt <<
					 PHY_RX_CHAIN_MIMO_CNT_POS);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* debugfs may override the entire computed rx chain configuration */
	if (unlikely(mvm->dbgfs_rx_phyinfo))
		cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
#endif

	cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
185
186/*
187 * Send a command to apply the current phy configuration. The command is send
188 * only if something in the configuration changed: in case that this is the
189 * first time that the phy configuration is applied or in case that the phy
190 * configuration changed from the previous apply.
191 */
192static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
193 struct iwl_mvm_phy_ctxt *ctxt,
194 struct cfg80211_chan_def *chandef,
195 u8 chains_static, u8 chains_dynamic,
196 u32 action, u32 apply_time)
197{
198 struct iwl_phy_context_cmd cmd;
199 int ret;
200
201 /* Set the command header fields */
202 iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
203
204 /* Set the command data */
205 iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
206 chains_static, chains_dynamic);
207
208 ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
209 sizeof(struct iwl_phy_context_cmd),
210 &cmd);
211 if (ret)
212 IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
213 return ret;
214}
215
/*
 * Send a command to add a PHY context based on the current HW
 * configuration.  Warns if the context is already referenced outside of
 * a HW restart (it should only be added once).
 */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
			 struct cfg80211_chan_def *chandef,
			 u8 chains_static, u8 chains_dynamic)
{
	WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		ctxt->ref);
	lockdep_assert_held(&mvm->mutex);

	ctxt->channel = chandef->chan;

	return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
				      chains_static, chains_dynamic,
				      FW_CTXT_ACTION_ADD, 0);
}
233
/*
 * Update the number of references to the given PHY context. This is valid
 * only in case the PHY context was already created, i.e., its reference
 * count > 0.  Caller must hold mvm->mutex.
 */
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
{
	lockdep_assert_held(&mvm->mutex);
	ctxt->ref++;
}
243
/*
 * Send a command to modify the PHY context based on the current HW
 * configuration. Note that the function does not check that the
 * configuration changed - the command is sent unconditionally.
 */
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
			     struct cfg80211_chan_def *chandef,
			     u8 chains_static, u8 chains_dynamic)
{
	lockdep_assert_held(&mvm->mutex);

	ctxt->channel = chandef->chan;
	return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
				      chains_static, chains_dynamic,
				      FW_CTXT_ACTION_MODIFY, 0);
}
260
/*
 * Drop one reference from the given PHY context.  Only the counter is
 * decremented here; no firmware command is sent.  Caller must hold
 * mvm->mutex.
 */
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
{
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(!ctxt))
		return;

	ctxt->ref--;
}
270
271static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
272 struct ieee80211_vif *vif)
273{
274 unsigned long *data = _data;
275 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
276
277 if (!mvmvif->phy_ctxt)
278 return;
279
280 if (vif->type == NL80211_IFTYPE_STATION ||
281 vif->type == NL80211_IFTYPE_AP)
282 __set_bit(mvmvif->phy_ctxt->id, data);
283}
284
285int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm)
286{
287 unsigned long phy_ctxt_counter = 0;
288
289 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
290 IEEE80211_IFACE_ITER_NORMAL,
291 iwl_mvm_binding_iterator,
292 &phy_ctxt_counter);
293
294 return hweight8(phy_ctxt_counter);
295}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
new file mode 100644
index 000000000000..bed9696ee410
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -0,0 +1,1040 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * Copyright(c) 2015 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68#include <linux/kernel.h>
69#include <linux/module.h>
70#include <linux/slab.h>
71#include <linux/etherdevice.h>
72
73#include <net/mac80211.h>
74
75#include "iwl-debug.h"
76#include "mvm.h"
77#include "iwl-modparams.h"
78#include "fw-api-power.h"
79
80#define POWER_KEEP_ALIVE_PERIOD_SEC 25
81
/*
 * Send a beacon filtering command to the firmware, after dumping each of
 * its parameters to the power debug log.
 */
static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
				   struct iwl_beacon_filter_cmd *cmd,
				   u32 flags)
{
	IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
			le32_to_cpu(cmd->ba_enable_beacon_abort));
	IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
			le32_to_cpu(cmd->ba_escape_timer));
	IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
			le32_to_cpu(cmd->bf_debug_flag));
	IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
			le32_to_cpu(cmd->bf_enable_beacon_filter));
	IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
			le32_to_cpu(cmd->bf_energy_delta));
	IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
			le32_to_cpu(cmd->bf_escape_timer));
	IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
			le32_to_cpu(cmd->bf_roaming_energy_delta));
	IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
			le32_to_cpu(cmd->bf_roaming_state));
	IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
			le32_to_cpu(cmd->bf_temp_threshold));
	IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
			le32_to_cpu(cmd->bf_temp_fast_filter));
	IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
			le32_to_cpu(cmd->bf_temp_slow_filter));

	return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
				    sizeof(struct iwl_beacon_filter_cmd), cmd);
}
113
/*
 * Derive the beacon filter energy/roaming thresholds from mac80211's CQM
 * (connection quality monitor) configuration.  When entering d0i3 the
 * CQM-derived values are skipped; beacon-abort enablement is always
 * propagated from the vif's bf_data state.
 */
static
void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_beacon_filter_cmd *cmd,
					  bool d0i3)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
		cmd->bf_energy_delta =
			cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
		/* fw uses an absolute value for this */
		cmd->bf_roaming_state =
			cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
	}
	cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
}
131
/*
 * Dump the interesting fields of a MAC power command to the power debug
 * log, conditional on which flags are actually set in the command.
 */
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
			      struct iwl_mac_power_cmd *cmd)
{
	/* NOTE(review): id_and_color is __le32 but printed without
	 * le32_to_cpu(), so the value is byte-swapped on big-endian hosts -
	 * confirm whether this debug output is intended to be raw.
	 */
	IWL_DEBUG_POWER(mvm,
			"Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
			cmd->id_and_color, iwlmvm_mod_params.power_scheme,
			le16_to_cpu(cmd->flags));
	IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
			le16_to_cpu(cmd->keep_alive_seconds));

	if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		IWL_DEBUG_POWER(mvm, "Disable power management\n");
		return;
	}

	IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
			le32_to_cpu(cmd->rx_data_timeout));
	IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
			le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
				cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
		IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
				cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
		IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->rx_data_timeout_uapsd));
		IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->tx_data_timeout_uapsd));
		IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
}
168
/*
 * Fill the uAPSD-related fields of the MAC power command, based on which
 * ACs have uAPSD enabled in the vif's queue parameters.  Also selects the
 * QNDP TID, snooze parameters (when all four ACs are uAPSD) and the uAPSD
 * rx/tx timeouts and heavy-traffic thresholds.
 */
static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_mac_power_cmd *cmd)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	enum ieee80211_ac_numbers ac;
	bool tid_found = false;

	for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
		if (!mvmvif->queue_params[ac].uapsd)
			continue;

		if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
			cmd->flags |=
				cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);

		cmd->uapsd_ac_flags |= BIT(ac);

		/* QNDP TID - the highest TID with no admission control */
		if (!tid_found && !mvmvif->queue_params[ac].acm) {
			tid_found = true;
			switch (ac) {
			case IEEE80211_AC_VO:
				cmd->qndp_tid = 6;
				break;
			case IEEE80211_AC_VI:
				cmd->qndp_tid = 5;
				break;
			case IEEE80211_AC_BE:
				cmd->qndp_tid = 0;
				break;
			case IEEE80211_AC_BK:
				cmd->qndp_tid = 1;
				break;
			}
		}
	}

	if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* set advanced pm flag with no uapsd ACs to enable ps-poll */
		if (mvmvif->dbgfs_pm.use_ps_poll)
			cmd->flags |=
				cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
#endif
		return;
	}

	cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);

	/* snooze mode is only configured when all four ACs use uAPSD */
	if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
				    BIT(IEEE80211_AC_VI) |
				    BIT(IEEE80211_AC_BE) |
				    BIT(IEEE80211_AC_BK))) {
		cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
		cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
		cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
			cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
			cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
	}

	cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;

	/* the wowlan timeouts are also used while snoozing */
	if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
	    cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
		cmd->rx_data_timeout_uapsd =
			cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
		cmd->tx_data_timeout_uapsd =
			cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
	} else {
		cmd->rx_data_timeout_uapsd =
			cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
		cmd->tx_data_timeout_uapsd =
			cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
	}

	if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
		cmd->heavy_tx_thld_packets =
			IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
		cmd->heavy_rx_thld_packets =
			IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
	} else {
		cmd->heavy_tx_thld_packets =
			IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
		cmd->heavy_rx_thld_packets =
			IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
	}
	cmd->heavy_tx_thld_percentage =
		IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
	cmd->heavy_rx_thld_percentage =
		IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
261
/*
 * Decide whether uAPSD may be used with the current AP: it is disallowed
 * for an AP previously flagged as uAPSD-misbehaving, for p2p without the
 * matching fw capability, for a p2p GO using opportunistic PS, and when
 * two or more PHY contexts are in use (DCM).
 */
static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
		    ETH_ALEN))
		return false;

	if (vif->p2p &&
	    !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
		return false;
	/*
	 * Avoid using uAPSD if P2P client is associated to GO that uses
	 * opportunistic power save. This is due to current FW limitation.
	 */
	if (vif->p2p &&
	    (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
	     IEEE80211_P2P_OPPPS_ENABLE_BIT))
		return false;

	/*
	 * Avoid using uAPSD if client is in DCM -
	 * low latency issue in Miracast
	 */
	if (iwl_mvm_phy_ctx_count(mvm) >= 2)
		return false;

	return true;
}
292
293static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
294{
295 struct ieee80211_chanctx_conf *chanctx_conf;
296 struct ieee80211_channel *chan;
297 bool radar_detect = false;
298
299 rcu_read_lock();
300 chanctx_conf = rcu_dereference(vif->chanctx_conf);
301 WARN_ON(!chanctx_conf);
302 if (chanctx_conf) {
303 chan = chanctx_conf->def.chan;
304 radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
305 }
306 rcu_read_unlock();
307
308 return radar_detect;
309}
310
/*
 * Configure the skip-over-DTIM feature of the MAC power command.  Skipping
 * is disabled on radar channels and for DTIM periods >= 10; otherwise the
 * skip count depends on whether the host is awake (LP power scheme only)
 * or asleep (bounded by ~306 TU between serviced DTIMs).
 */
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif,
					   struct iwl_mac_power_cmd *cmd,
					   bool host_awake)
{
	int dtimper = vif->bss_conf.dtim_period ?: 1;
	int skip;

	/* disable, in case we're supposed to override */
	cmd->skip_dtim_periods = 0;
	cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);

	if (iwl_mvm_power_is_radar(vif))
		return;

	if (dtimper >= 10)
		return;

	/* TODO: check that multicast wake lock is off */

	if (host_awake) {
		/* while awake, only the LP power scheme skips DTIMs */
		if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
			return;
		skip = 2;
	} else {
		int dtimper_tu = dtimper * vif->bss_conf.beacon_int;

		if (WARN_ON(!dtimper_tu))
			return;
		/* configure skip over dtim up to 306TU - 314 msec */
		skip = max_t(u8, 1, 306 / dtimper_tu);
	}

	/* the firmware really expects "look at every X DTIMs", so add 1 */
	cmd->skip_dtim_periods = 1 + skip;
	cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
348
349static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
350 struct ieee80211_vif *vif,
351 struct iwl_mac_power_cmd *cmd,
352 bool host_awake)
353{
354 int dtimper, bi;
355 int keep_alive;
356 struct iwl_mvm_vif *mvmvif __maybe_unused =
357 iwl_mvm_vif_from_mac80211(vif);
358
359 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
360 mvmvif->color));
361 dtimper = vif->bss_conf.dtim_period;
362 bi = vif->bss_conf.beacon_int;
363
364 /*
365 * Regardless of power management state the driver must set
366 * keep alive period. FW will use it for sending keep alive NDPs
367 * immediately after association. Check that keep alive period
368 * is at least 3 * DTIM
369 */
370 keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
371 USEC_PER_SEC);
372 keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
373 cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
374
375 if (mvm->ps_disabled)
376 return;
377
378 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
379
380 if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
381 return;
382
383 if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
384 (!fw_has_capa(&mvm->fw->ucode_capa,
385 IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
386 !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE))
387 return;
388
389 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
390
391 if (vif->bss_conf.beacon_rate &&
392 (vif->bss_conf.beacon_rate->bitrate == 10 ||
393 vif->bss_conf.beacon_rate->bitrate == 60)) {
394 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
395 cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
396 }
397
398 iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
399
400 if (!host_awake) {
401 cmd->rx_data_timeout =
402 cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
403 cmd->tx_data_timeout =
404 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
405 } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
406 fw_has_capa(&mvm->fw->ucode_capa,
407 IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
408 cmd->tx_data_timeout =
409 cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
410 cmd->rx_data_timeout =
411 cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
412 } else {
413 cmd->rx_data_timeout =
414 cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
415 cmd->tx_data_timeout =
416 cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
417 }
418
419 if (iwl_mvm_power_allow_uapsd(mvm, vif))
420 iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
421
422#ifdef CONFIG_IWLWIFI_DEBUGFS
423 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
424 cmd->keep_alive_seconds =
425 cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
426 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
427 if (mvmvif->dbgfs_pm.skip_over_dtim)
428 cmd->flags |=
429 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
430 else
431 cmd->flags &=
432 cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
433 }
434 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
435 cmd->rx_data_timeout =
436 cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
437 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
438 cmd->tx_data_timeout =
439 cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
440 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
441 cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
442 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
443 if (mvmvif->dbgfs_pm.lprx_ena)
444 cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
445 else
446 cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
447 }
448 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
449 cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
450 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
451 if (mvmvif->dbgfs_pm.snooze_ena)
452 cmd->flags |=
453 cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
454 else
455 cmd->flags &=
456 cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
457 }
458 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
459 u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
460 if (mvmvif->dbgfs_pm.uapsd_misbehaving)
461 cmd->flags |= cpu_to_le16(flag);
462 else
463 cmd->flags &= cpu_to_le16(flag);
464 }
465#endif /* CONFIG_IWLWIFI_DEBUGFS */
466}
467
/*
 * Build and send the MAC_PM_POWER_TABLE command for @vif.  host_awake is
 * derived from whether the wowlan ucode is running.  Under debugfs, a copy
 * of the last command sent is kept in the vif for inspection.
 */
static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_mac_power_cmd cmd = {};

	iwl_mvm_power_build_cmd(mvm, vif, &cmd,
				mvm->cur_ucode != IWL_UCODE_WOWLAN);
	iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif

	return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
				    sizeof(cmd), &cmd);
}
483
/*
 * Send the device-wide POWER_TABLE_CMD.  Device power save is enabled
 * unless it is globally disabled (the CAM power scheme forces it off);
 * debugfs can additionally force it off per-ucode.
 */
int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
{
	struct iwl_device_power_cmd cmd = {
		.flags = 0,
	};

	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
		mvm->ps_disabled = true;

	if (!mvm->ps_disabled)
		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
	    mvm->disable_power_off)
		cmd.flags &=
			cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
	/* NOTE(review): cmd.flags is __le16 but printed without
	 * le16_to_cpu(); raw on big-endian - confirm intended.
	 */
	IWL_DEBUG_POWER(mvm,
			"Sending device power command with flags = 0x%X\n",
			cmd.flags);

	return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
				    &cmd);
}
509
510void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
511{
512 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
513
514 if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
515 ETH_ALEN))
516 eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
517}
518
519static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
520 struct ieee80211_vif *vif)
521{
522 u8 *ap_sta_id = _data;
523 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
524
525 /* The ap_sta_id is not expected to change during current association
526 * so no explicit protection is needed
527 */
528 if (mvmvif->ap_sta_id == *ap_sta_id)
529 memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
530 ETH_ALEN);
531}
532
533void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
534 struct iwl_rx_cmd_buffer *rxb)
535{
536 struct iwl_rx_packet *pkt = rxb_addr(rxb);
537 struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
538 u8 ap_sta_id = le32_to_cpu(notif->sta_id);
539
540 ieee80211_iterate_active_interfaces_atomic(
541 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
542 iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
543}
544
/*
 * Snapshot of the active interfaces, filled by
 * iwl_mvm_power_get_vifs_iterator() and consumed by the PM/PS/BA
 * policy functions. A *_active flag is set only when the matching
 * vif is bound to a valid PHY context (id < MAX_PHYS).
 */
struct iwl_power_vifs {
	struct iwl_mvm *mvm;
	struct ieee80211_vif *bf_vif;		/* station vif with beacon filtering on */
	struct ieee80211_vif *bss_vif;		/* station (BSS client) vif */
	struct ieee80211_vif *p2p_vif;		/* P2P client vif */
	struct ieee80211_vif *ap_vif;		/* AP or P2P GO vif */
	struct ieee80211_vif *monitor_vif;	/* monitor vif */
	bool p2p_active;
	bool bss_active;
	bool ap_active;
	bool monitor_active;
};
557
558static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac,
559 struct ieee80211_vif *vif)
560{
561 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
562
563 mvmvif->pm_enabled = false;
564}
565
566static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
567 struct ieee80211_vif *vif)
568{
569 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
570 bool *disable_ps = _data;
571
572 if (mvmvif->phy_ctxt)
573 if (mvmvif->phy_ctxt->id < MAX_PHYS)
574 *disable_ps |= mvmvif->ps_disabled;
575}
576
577static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
578 struct ieee80211_vif *vif)
579{
580 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
581 struct iwl_power_vifs *power_iterator = _data;
582
583 switch (ieee80211_vif_type_p2p(vif)) {
584 case NL80211_IFTYPE_P2P_DEVICE:
585 break;
586
587 case NL80211_IFTYPE_P2P_GO:
588 case NL80211_IFTYPE_AP:
589 /* only a single MAC of the same type */
590 WARN_ON(power_iterator->ap_vif);
591 power_iterator->ap_vif = vif;
592 if (mvmvif->phy_ctxt)
593 if (mvmvif->phy_ctxt->id < MAX_PHYS)
594 power_iterator->ap_active = true;
595 break;
596
597 case NL80211_IFTYPE_MONITOR:
598 /* only a single MAC of the same type */
599 WARN_ON(power_iterator->monitor_vif);
600 power_iterator->monitor_vif = vif;
601 if (mvmvif->phy_ctxt)
602 if (mvmvif->phy_ctxt->id < MAX_PHYS)
603 power_iterator->monitor_active = true;
604 break;
605
606 case NL80211_IFTYPE_P2P_CLIENT:
607 /* only a single MAC of the same type */
608 WARN_ON(power_iterator->p2p_vif);
609 power_iterator->p2p_vif = vif;
610 if (mvmvif->phy_ctxt)
611 if (mvmvif->phy_ctxt->id < MAX_PHYS)
612 power_iterator->p2p_active = true;
613 break;
614
615 case NL80211_IFTYPE_STATION:
616 /* only a single MAC of the same type */
617 WARN_ON(power_iterator->bss_vif);
618 power_iterator->bss_vif = vif;
619 if (mvmvif->phy_ctxt)
620 if (mvmvif->phy_ctxt->id < MAX_PHYS)
621 power_iterator->bss_active = true;
622
623 if (mvmvif->bf_data.bf_enabled &&
624 !WARN_ON(power_iterator->bf_vif))
625 power_iterator->bf_vif = vif;
626
627 break;
628
629 default:
630 break;
631 }
632}
633
/*
 * Decide which client MACs (BSS station, P2P client) may use power
 * management, based on which interfaces are active, whether they share
 * a PHY context, and the firmware's advertised PM capabilities.
 * Starts by disabling PM everywhere and then selectively re-enables it.
 */
static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
				 struct iwl_power_vifs *vifs)
{
	struct iwl_mvm_vif *bss_mvmvif = NULL;
	struct iwl_mvm_vif *p2p_mvmvif = NULL;
	struct iwl_mvm_vif *ap_mvmvif = NULL;
	bool client_same_channel = false;
	bool ap_same_channel = false;

	lockdep_assert_held(&mvm->mutex);

	/* default: PM off on every interface; re-enabled per policy below */
	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_power_disable_pm_iterator,
					    NULL);

	if (vifs->bss_vif)
		bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);

	if (vifs->p2p_vif)
		p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);

	if (vifs->ap_vif)
		ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);

	/* don't allow PM if any TDLS stations exist */
	if (iwl_mvm_tdls_sta_count(mvm, NULL))
		return;

	/* enable PM on bss if bss stand alone */
	if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
		bss_mvmvif->pm_enabled = true;
		return;
	}

	/* enable PM on p2p if p2p stand alone (and the FW supports P2P PM) */
	if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
		if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
			p2p_mvmvif->pm_enabled = true;
		return;
	}

	/* equal PHY context id is treated as same channel (SCM vs DCM) */
	if (vifs->bss_active && vifs->p2p_active)
		client_same_channel = (bss_mvmvif->phy_ctxt->id ==
				       p2p_mvmvif->phy_ctxt->id);
	if (vifs->bss_active && vifs->ap_active)
		ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
				   ap_mvmvif->phy_ctxt->id);

	/* clients are not stand alone: enable PM if DCM */
	if (!(client_same_channel || ap_same_channel) &&
	    (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
		if (vifs->bss_active)
			bss_mvmvif->pm_enabled = true;
		if (vifs->p2p_active &&
		    (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
			p2p_mvmvif->pm_enabled = true;
		return;
	}

	/*
	 * There is only one channel in the system and there are only
	 * bss and p2p clients that share it
	 */
	if (client_same_channel && !vifs->ap_active &&
	    (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
		/* share same channel*/
		bss_mvmvif->pm_enabled = true;
		if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
			p2p_mvmvif->pm_enabled = true;
	}
}
707
708#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * Format the last MAC power command sent for @vif into @buf (debugfs).
 * Sections are gated on the command's own flags: basic PM, then LPRX,
 * then advanced PM (uAPSD), then snooze - each early-returns when its
 * enabling flag is absent. Returns the number of characters written.
 */
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif, char *buf,
				 int bufsz)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mac_power_cmd cmd = {};
	int pos = 0;

	/* snapshot the stored command under the mvm mutex */
	mutex_lock(&mvm->mutex);
	memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
	mutex_unlock(&mvm->mutex);

	pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
			 iwlmvm_mod_params.power_scheme);
	pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
			 le16_to_cpu(cmd.flags));
	pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
			 le16_to_cpu(cmd.keep_alive_seconds));

	/* the remaining fields only matter when power management is on */
	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
		return pos;

	pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
			 (cmd.flags &
			 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
	pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
			 cmd.skip_dtim_periods);
	/* the plain timeouts apply only when advanced PM is NOT in use */
	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
		pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
				 le32_to_cpu(cmd.rx_data_timeout));
		pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
				 le32_to_cpu(cmd.tx_data_timeout));
	}
	if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
		pos += scnprintf(buf+pos, bufsz-pos,
				 "lprx_rssi_threshold = %d\n",
				 cmd.lprx_rssi_threshold);

	/* everything below requires advanced PM (uAPSD) */
	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
		return pos;

	pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
			 le32_to_cpu(cmd.rx_data_timeout_uapsd));
	pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
			 le32_to_cpu(cmd.tx_data_timeout_uapsd));
	pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
			 cmd.uapsd_ac_flags);
	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
			 cmd.uapsd_max_sp);
	pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
			 cmd.heavy_tx_thld_packets);
	pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
			 cmd.heavy_rx_thld_packets);
	pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
			 cmd.heavy_tx_thld_percentage);
	pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
			 cmd.heavy_rx_thld_percentage);
	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
			 (cmd.flags &
			 cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
			 1 : 0);

	/* snooze parameters only when snooze is enabled */
	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
		return pos;

	pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
			 cmd.snooze_interval);
	pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
			 cmd.snooze_window);

	return pos;
}
782
783void
784iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
785 struct iwl_beacon_filter_cmd *cmd)
786{
787 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
788 struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
789
790 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
791 cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
792 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
793 cmd->bf_roaming_energy_delta =
794 cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
795 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
796 cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
797 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
798 cmd->bf_temp_threshold =
799 cpu_to_le32(dbgfs_bf->bf_temp_threshold);
800 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
801 cmd->bf_temp_fast_filter =
802 cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
803 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
804 cmd->bf_temp_slow_filter =
805 cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
806 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
807 cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
808 if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
809 cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
810 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
811 cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
812 if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
813 cmd->ba_enable_beacon_abort =
814 cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
815}
816#endif
817
818static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
819 struct ieee80211_vif *vif,
820 struct iwl_beacon_filter_cmd *cmd,
821 u32 cmd_flags,
822 bool d0i3)
823{
824 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
825 int ret;
826
827 if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
828 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
829 return 0;
830
831 iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
832 if (!d0i3)
833 iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
834 ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
835
836 /* don't change bf_enabled in case of temporary d0i3 configuration */
837 if (!ret && !d0i3)
838 mvmvif->bf_data.bf_enabled = true;
839
840 return ret;
841}
842
/*
 * Enable beacon filtering on @vif with the default (non-d0i3)
 * configuration, including any debugfs overrides applied by
 * _iwl_mvm_enable_beacon_filter().
 */
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 u32 flags)
{
	struct iwl_beacon_filter_cmd cmd = {
		IWL_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = cpu_to_le32(1),
	};

	return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
}
854
855static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
856 struct ieee80211_vif *vif,
857 bool enable)
858{
859 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
860 struct iwl_beacon_filter_cmd cmd = {
861 IWL_BF_CMD_CONFIG_DEFAULTS,
862 .bf_enable_beacon_filter = cpu_to_le32(1),
863 };
864
865 if (!mvmvif->bf_data.bf_enabled)
866 return 0;
867
868 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
869 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
870
871 mvmvif->bf_data.ba_enabled = enable;
872 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
873}
874
875int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
876 struct ieee80211_vif *vif,
877 u32 flags)
878{
879 struct iwl_beacon_filter_cmd cmd = {};
880 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
881 int ret;
882
883 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
884 return 0;
885
886 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
887
888 if (!ret)
889 mvmvif->bf_data.bf_enabled = false;
890
891 return ret;
892}
893
894static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
895{
896 bool disable_ps;
897 int ret;
898
899 /* disable PS if CAM */
900 disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
901 /* ...or if any of the vifs require PS to be off */
902 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
903 IEEE80211_IFACE_ITER_NORMAL,
904 iwl_mvm_power_ps_disabled_iterator,
905 &disable_ps);
906
907 /* update device power state if it has changed */
908 if (mvm->ps_disabled != disable_ps) {
909 bool old_ps_disabled = mvm->ps_disabled;
910
911 mvm->ps_disabled = disable_ps;
912 ret = iwl_mvm_power_update_device(mvm);
913 if (ret) {
914 mvm->ps_disabled = old_ps_disabled;
915 return ret;
916 }
917 }
918
919 return 0;
920}
921
922static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
923 struct iwl_power_vifs *vifs)
924{
925 struct iwl_mvm_vif *mvmvif;
926 bool ba_enable;
927
928 if (!vifs->bf_vif)
929 return 0;
930
931 mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif);
932
933 ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
934 !vifs->bf_vif->bss_conf.ps ||
935 iwl_mvm_vif_low_latency(mvmvif));
936
937 return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable);
938}
939
940int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
941{
942 struct iwl_power_vifs vifs = {
943 .mvm = mvm,
944 };
945 int ret;
946
947 lockdep_assert_held(&mvm->mutex);
948
949 /* get vifs info */
950 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
951 IEEE80211_IFACE_ITER_NORMAL,
952 iwl_mvm_power_get_vifs_iterator, &vifs);
953
954 ret = iwl_mvm_power_set_ps(mvm);
955 if (ret)
956 return ret;
957
958 return iwl_mvm_power_set_ba(mvm, &vifs);
959}
960
961int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
962{
963 struct iwl_power_vifs vifs = {
964 .mvm = mvm,
965 };
966 int ret;
967
968 lockdep_assert_held(&mvm->mutex);
969
970 /* get vifs info */
971 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
972 IEEE80211_IFACE_ITER_NORMAL,
973 iwl_mvm_power_get_vifs_iterator, &vifs);
974
975 iwl_mvm_power_set_pm(mvm, &vifs);
976
977 ret = iwl_mvm_power_set_ps(mvm);
978 if (ret)
979 return ret;
980
981 if (vifs.bss_vif) {
982 ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
983 if (ret)
984 return ret;
985 }
986
987 if (vifs.p2p_vif) {
988 ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
989 if (ret)
990 return ret;
991 }
992
993 return iwl_mvm_power_set_ba(mvm, &vifs);
994}
995
/*
 * Reconfigure the MAC power command and beacon filtering of an
 * associated station vif for a d0i3 transition. @enable == true
 * programs the d0i3 settings; @enable == false restores the regular
 * ones. @flags is passed through to the command layer (e.g. async).
 * The MAC power command is sent first; beacon filtering is only
 * touched on the single vif allowed to use it.
 */
int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   bool enable, u32 flags)
{
	int ret;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mac_power_cmd cmd = {};

	/* only non-P2P station interfaces have this configuration */
	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return 0;

	if (!vif->bss_conf.assoc)
		return 0;

	/* build for the d0i3 (enable) or regular (!enable) state */
	iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);

	iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* keep a copy around for debugfs to dump */
	memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
	ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
				   sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/* configure beacon filtering */
	if (mvmvif != mvm->bf_allowed_vif)
		return 0;

	if (enable) {
		struct iwl_beacon_filter_cmd cmd_bf = {
			IWL_BF_CMD_CONFIG_D0I3,
			.bf_enable_beacon_filter = cpu_to_le32(1),
		};
		/* temporary d0i3 config; bf_data.bf_enabled is untouched */
		ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
						    flags, true);
	} else {
		/* restore whichever filtering state was active before d0i3 */
		if (mvmvif->bf_data.bf_enabled)
			ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
		else
			ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
	}

	return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
new file mode 100644
index 000000000000..509a66d05245
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -0,0 +1,328 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <net/mac80211.h>
67#include "fw-api.h"
68#include "mvm.h"
69
/* The FW scheduling session is made of QUOTA_100 fragments in total */
#define QUOTA_100	IWL_MVM_MAX_QUOTA
/* Minimal share of the session reserved for a low-latency binding */
#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
72
/* State accumulated by iwl_mvm_quota_iterator() over all active vifs */
struct iwl_mvm_quota_iterator_data {
	int n_interfaces[MAX_BINDINGS];	/* # of quota-needing vifs per binding */
	int colors[MAX_BINDINGS];	/* PHY color per binding; -1 = unused */
	int low_latency[MAX_BINDINGS];	/* binding holds a low-latency vif */
	int n_low_latency_bindings;	/* # of distinct low-latency bindings */
	struct ieee80211_vif *disabled_vif;	/* vif excluded from accounting */
};
80
/*
 * Interface iterator: count, per binding (== PHY context id), the
 * active interfaces that need quota, record each binding's color and
 * flag bindings that contain a low-latency vif.
 */
static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_quota_iterator_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 id;

	/* skip disabled interfaces here immediately */
	if (vif == data->disabled_vif)
		return;

	if (!mvmvif->phy_ctxt)
		return;

	/* currently, PHY ID == binding ID */
	id = mvmvif->phy_ctxt->id;

	/* need at least one binding per PHY */
	BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);

	if (WARN_ON_ONCE(id >= MAX_BINDINGS))
		return;

	/* in this switch, 'break' = vif needs quota, 'return' = it doesn't */
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (vif->bss_conf.assoc)
			break;
		return;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		if (mvmvif->ap_ibss_active)
			break;
		return;
	case NL80211_IFTYPE_MONITOR:
		if (mvmvif->monitor_active)
			break;
		return;
	case NL80211_IFTYPE_P2P_DEVICE:
		return;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	/* all vifs of one binding must share the same PHY color */
	if (data->colors[id] < 0)
		data->colors[id] = mvmvif->phy_ctxt->color;
	else
		WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);

	data->n_interfaces[id]++;

	/* count each low-latency binding only once */
	if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
		data->n_low_latency_bindings++;
		data->low_latency[id] = true;
	}
}
137
/*
 * Testmode only: when a NoA (notice of absence) duration is configured
 * on the AP/GO vif, scale that binding's quota down by the fraction of
 * the beacon interval that the NoA covers. No-op otherwise.
 */
static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
					 struct iwl_time_quota_cmd *cmd)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct iwl_mvm_vif *mvmvif;
	int i, phy_id = -1, beacon_int = 0;

	if (!mvm->noa_duration || !mvm->noa_vif)
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
	if (!mvmvif->ap_ibss_active)
		return;

	phy_id = mvmvif->phy_ctxt->id;
	beacon_int = mvm->noa_vif->bss_conf.beacon_int;

	for (i = 0; i < MAX_BINDINGS; i++) {
		u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
		u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
		u32 quota = le32_to_cpu(cmd->quotas[i].quota);

		if (id != phy_id)
			continue;

		/* NOTE(review): assumes 0 < noa_duration < beacon_int,
		 * otherwise the scaled quota wraps - confirm the testmode
		 * entry point bounds noa_duration
		 */
		quota *= (beacon_int - mvm->noa_duration);
		quota /= beacon_int;

		IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n",
				le32_to_cpu(cmd->quotas[i].quota), quota);

		cmd->quotas[i].quota = cpu_to_le32(quota);
	}
#endif
}
173
/*
 * Recompute and send TIME_QUOTA_CMD: split the firmware scheduling
 * session between the active bindings, reserving a minimal share for a
 * single low-latency binding when other bindings also need quota.
 * @disabled_vif is excluded from the accounting (e.g. while being torn
 * down); @force_update sends the command even if practically unchanged.
 * Caller holds mvm->mutex.
 */
int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
			  bool force_update,
			  struct ieee80211_vif *disabled_vif)
{
	struct iwl_time_quota_cmd cmd = {};
	int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat;
	struct iwl_mvm_quota_iterator_data data = {
		.n_interfaces = {},
		.colors = { -1, -1, -1, -1 },
		.disabled_vif = disabled_vif,
	};
	struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd;
	bool send = false;

	lockdep_assert_held(&mvm->mutex);

	/* update all upon completion */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		return 0;

	/* iterator data above must match */
	BUILD_BUG_ON(MAX_BINDINGS != 4);

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_quota_iterator, &data);

	/*
	 * The FW's scheduling session consists of
	 * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
		num_active_macs += data.n_interfaces[i];
	}

	/* interfaces outside the (single) low-latency binding, if any */
	n_non_lowlat = num_active_macs;

	if (data.n_low_latency_bindings == 1) {
		for (i = 0; i < MAX_BINDINGS; i++) {
			if (data.low_latency[i]) {
				n_non_lowlat -= data.n_interfaces[i];
				break;
			}
		}
	}

	if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
		/*
		 * Reserve quota for the low latency binding in case that
		 * there are several data bindings but only a single
		 * low latency one. Split the rest of the quota equally
		 * between the other data interfaces.
		 */
		quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
		quota_rem = QUOTA_100 - n_non_lowlat * quota -
			    QUOTA_LOWLAT_MIN;
		IWL_DEBUG_QUOTA(mvm,
				"quota: low-latency binding active, remaining quota per other binding: %d\n",
				quota);
	} else if (num_active_macs) {
		/*
		 * There are 0 or more than 1 low latency bindings, or all the
		 * data interfaces belong to the single low latency binding.
		 * Split the quota equally between the data interfaces.
		 */
		quota = QUOTA_100 / num_active_macs;
		quota_rem = QUOTA_100 % num_active_macs;
		IWL_DEBUG_QUOTA(mvm,
				"quota: splitting evenly per binding: %d\n",
				quota);
	} else {
		/* values don't really matter - won't be used */
		quota = 0;
		quota_rem = 0;
	}

	/* binding quota = per-interface quota * #interfaces in binding */
	for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
		if (data.colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));

		if (data.n_interfaces[i] <= 0)
			cmd.quotas[idx].quota = cpu_to_le32(0);
		else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
			 data.low_latency[i])
			/*
			 * There is more than one binding, but only one of the
			 * bindings is in low latency. For this case, allocate
			 * the minimal required quota for the low latency
			 * binding.
			 */
			cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
		else
			cmd.quotas[idx].quota =
				cpu_to_le32(quota * data.n_interfaces[i]);

		WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
			  "Binding=%d, quota=%u > max=%u\n",
			  idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);

		cmd.quotas[idx].max_duration = cpu_to_le32(0);

		idx++;
	}

	/* Give the remainder of the session to the first data binding */
	for (i = 0; i < MAX_BINDINGS; i++) {
		if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
			le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
			IWL_DEBUG_QUOTA(mvm,
					"quota: giving remainder of %d to binding %d\n",
					quota_rem, i);
			break;
		}
	}

	iwl_mvm_adjust_quota_for_noa(mvm, &cmd);

	/* decide whether anything changed enough to warrant resending, and
	 * sanity-check that every valid binding ended up with some quota
	 */
	for (i = 0; i < MAX_BINDINGS; i++) {
		if (cmd.quotas[i].id_and_color != last->quotas[i].id_and_color)
			send = true;
		if (cmd.quotas[i].max_duration != last->quotas[i].max_duration)
			send = true;
		if (abs((int)le32_to_cpu(cmd.quotas[i].quota) -
			(int)le32_to_cpu(last->quotas[i].quota))
						> IWL_MVM_QUOTA_THRESHOLD)
			send = true;
		if (cmd.quotas[i].id_and_color == cpu_to_le32(FW_CTXT_INVALID))
			continue;
		WARN_ONCE(cmd.quotas[i].quota == 0,
			  "zero quota on binding %d\n", i);
	}

	if (!send && !force_update) {
		/* don't send a practically unchanged command, the firmware has
		 * to re-initialize a lot of state and that can have an adverse
		 * impact on it
		 */
		return 0;
	}

	err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);

	if (err)
		IWL_ERR(mvm, "Failed to send quota: %d\n", err);
	else
		mvm->last_quota_cmd = cmd;
	return err;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
new file mode 100644
index 000000000000..d1ad10391b47
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -0,0 +1,3983 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 * Contact Information:
23 * Intel Linux Wireless <ilw@linux.intel.com>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 *****************************************************************************/
27#include <linux/kernel.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37#include "rs.h"
38#include "fw-api.h"
39#include "sta.h"
40#include "iwl-op-mode.h"
41#include "mvm.h"
42#include "debugfs.h"
43
44#define RS_NAME "iwl-mvm-rs"
45
46#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
47
48/* Calculations of success ratio are done in fixed point where 12800 is 100%.
49 * Use this macro when dealing with thresholds consts set as a percentage
50 */
51#define RS_PERCENT(x) (128 * x)
52
53static u8 rs_ht_to_legacy[] = {
54 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
55 [IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
56 [IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
57 [IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
58 [IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
59 [IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
60 [IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
61 [IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
62 [IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
63 [IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
64};
65
/* Next antenna (combination) to try when toggling away from the current
 * one; ANT_NONE and ANT_ABC map to themselves (nothing to toggle to).
 */
static const u8 ant_toggle_lookup[] = {
	[ANT_NONE] = ANT_NONE,
	[ANT_A] = ANT_B,
	[ANT_B] = ANT_C,
	[ANT_AB] = ANT_BC,
	[ANT_C] = ANT_A,
	[ANT_AC] = ANT_AB,
	[ANT_BC] = ANT_AC,
	[ANT_ABC] = ANT_ABC,
};
76
/* Build one iwl_rates[] entry for a rate with a legacy form: legacy PLCP,
 * the four HT/VHT SISO/MIMO2 MCS PLCP codes, and the prev/next links used
 * when walking 2.4 GHz legacy rates (rp = previous, rn = next).
 */
#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)			      \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,	      \
				    IWL_RATE_HT_SISO_MCS_##s##_PLCP,  \
				    IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
				    IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
				    IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
				    IWL_RATE_##rp##M_INDEX,	      \
				    IWL_RATE_##rn##M_INDEX }

/* Build an entry for an MCS-only rate (MCS 7-9): no legacy PLCP and no
 * legacy prev/next links.
 */
#define IWL_DECLARE_MCS_RATE(s)						  \
	[IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP,		  \
				       IWL_RATE_HT_SISO_MCS_##s##_PLCP,	  \
				       IWL_RATE_HT_MIMO2_MCS_##s##_PLCP,  \
				       IWL_RATE_VHT_SISO_MCS_##s##_PLCP,  \
				       IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
				       IWL_RATE_INVM_INDEX,	          \
				       IWL_RATE_INVM_INDEX }
94
/*
 * Master rate table, indexed by IWL_RATE_*_INDEX.
 *
 * Parameter order:
 *   rate, ht rate, prev rate, next rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID.
 */
static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2),   /* 1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5),     /* 2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 11),    /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12),   /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 0, 5, 11),      /* 6mbps ; MCS 0 */
	IWL_DECLARE_RATE_INFO(9, INV, 6, 11),    /* 9mbps */
	IWL_DECLARE_RATE_INFO(12, 1, 11, 18),    /* 12mbps ; MCS 1 */
	IWL_DECLARE_RATE_INFO(18, 2, 12, 24),    /* 18mbps ; MCS 2 */
	IWL_DECLARE_RATE_INFO(24, 3, 18, 36),    /* 24mbps ; MCS 3 */
	IWL_DECLARE_RATE_INFO(36, 4, 24, 48),    /* 36mbps ; MCS 4 */
	IWL_DECLARE_RATE_INFO(48, 5, 36, 54),    /* 48mbps ; MCS 5 */
	IWL_DECLARE_RATE_INFO(54, 6, 48, INV),   /* 54mbps ; MCS 6 */
	IWL_DECLARE_MCS_RATE(7),                 /* MCS 7 */
	IWL_DECLARE_MCS_RATE(8),                 /* MCS 8 */
	IWL_DECLARE_MCS_RATE(9),                 /* MCS 9 */
};
120
/* Direction of a rate-scale decision relative to the current rate */
enum rs_action {
	RS_ACTION_STAY = 0,
	RS_ACTION_DOWNSCALE = -1,
	RS_ACTION_UPSCALE = 1,
};

/* Transmission mode a TX column uses */
enum rs_column_mode {
	RS_INVALID = 0,
	RS_LEGACY,
	RS_SISO,
	RS_MIMO2,
};
133
#define MAX_NEXT_COLUMNS 7
#define MAX_COLUMN_CHECKS 3

struct rs_tx_column;

/* Gate function: returns true if @next_col may be used for @sta at @rate */
typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
				     struct ieee80211_sta *sta,
				     struct rs_rate *rate,
				     const struct rs_tx_column *next_col);

/*
 * A TX "column": one transmission mode + antenna (+ guard interval) combo.
 * @mode: legacy / SISO / MIMO2
 * @ant: antenna mask this column transmits on
 * @sgi: true for short-guard-interval columns
 * @next_columns: candidate columns to probe when leaving this one
 * @checks: gate functions that must all pass before the column is usable
 */
struct rs_tx_column {
	enum rs_column_mode mode;
	u8 ant;
	bool sgi;
	enum rs_column next_columns[MAX_NEXT_COLUMNS];
	allow_column_func_t checks[MAX_COLUMN_CHECKS];
};
151
152static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
153 struct rs_rate *rate,
154 const struct rs_tx_column *next_col)
155{
156 return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
157}
158
159static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
160 struct rs_rate *rate,
161 const struct rs_tx_column *next_col)
162{
163 struct iwl_mvm_sta *mvmsta;
164 struct iwl_mvm_vif *mvmvif;
165
166 if (!sta->ht_cap.ht_supported)
167 return false;
168
169 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
170 return false;
171
172 if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
173 return false;
174
175 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
176 return false;
177
178 mvmsta = iwl_mvm_sta_from_mac80211(sta);
179 mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
180
181 if (mvm->nvm_data->sku_cap_mimo_disabled)
182 return false;
183
184 return true;
185}
186
187static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
188 struct rs_rate *rate,
189 const struct rs_tx_column *next_col)
190{
191 if (!sta->ht_cap.ht_supported)
192 return false;
193
194 return true;
195}
196
197static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
198 struct rs_rate *rate,
199 const struct rs_tx_column *next_col)
200{
201 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
202 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
203
204 if (is_ht20(rate) && (ht_cap->cap &
205 IEEE80211_HT_CAP_SGI_20))
206 return true;
207 if (is_ht40(rate) && (ht_cap->cap &
208 IEEE80211_HT_CAP_SGI_40))
209 return true;
210 if (is_ht80(rate) && (vht_cap->cap &
211 IEEE80211_VHT_CAP_SHORT_GI_80))
212 return true;
213
214 return false;
215}
216
/* Column database: for each TX column (mode + antenna + GI) the ordered
 * list of columns to probe when searching for a better column, plus the
 * gate functions that must all pass before the column may be used.
 */
static const struct rs_tx_column rs_tx_columns[] = {
	[RS_COLUMN_LEGACY_ANT_A] = {
		.mode = RS_LEGACY,
		.ant = ANT_A,
		.next_columns = {
			RS_COLUMN_LEGACY_ANT_B,
			RS_COLUMN_SISO_ANT_A,
			RS_COLUMN_MIMO2,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_ant_allow,
		},
	},
	[RS_COLUMN_LEGACY_ANT_B] = {
		.mode = RS_LEGACY,
		.ant = ANT_B,
		.next_columns = {
			RS_COLUMN_LEGACY_ANT_A,
			RS_COLUMN_SISO_ANT_B,
			RS_COLUMN_MIMO2,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_ant_allow,
		},
	},
	[RS_COLUMN_SISO_ANT_A] = {
		.mode = RS_SISO,
		.ant = ANT_A,
		.next_columns = {
			RS_COLUMN_SISO_ANT_B,
			RS_COLUMN_MIMO2,
			RS_COLUMN_SISO_ANT_A_SGI,
			RS_COLUMN_LEGACY_ANT_A,
			RS_COLUMN_LEGACY_ANT_B,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_siso_allow,
			rs_ant_allow,
		},
	},
	[RS_COLUMN_SISO_ANT_B] = {
		.mode = RS_SISO,
		.ant = ANT_B,
		.next_columns = {
			RS_COLUMN_SISO_ANT_A,
			RS_COLUMN_MIMO2,
			RS_COLUMN_SISO_ANT_B_SGI,
			RS_COLUMN_LEGACY_ANT_A,
			RS_COLUMN_LEGACY_ANT_B,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_siso_allow,
			rs_ant_allow,
		},
	},
	[RS_COLUMN_SISO_ANT_A_SGI] = {
		.mode = RS_SISO,
		.ant = ANT_A,
		.sgi = true,
		.next_columns = {
			RS_COLUMN_SISO_ANT_B_SGI,
			RS_COLUMN_MIMO2_SGI,
			RS_COLUMN_SISO_ANT_A,
			RS_COLUMN_LEGACY_ANT_A,
			RS_COLUMN_LEGACY_ANT_B,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_siso_allow,
			rs_ant_allow,
			rs_sgi_allow,
		},
	},
	[RS_COLUMN_SISO_ANT_B_SGI] = {
		.mode = RS_SISO,
		.ant = ANT_B,
		.sgi = true,
		.next_columns = {
			RS_COLUMN_SISO_ANT_A_SGI,
			RS_COLUMN_MIMO2_SGI,
			RS_COLUMN_SISO_ANT_B,
			RS_COLUMN_LEGACY_ANT_A,
			RS_COLUMN_LEGACY_ANT_B,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_siso_allow,
			rs_ant_allow,
			rs_sgi_allow,
		},
	},
	[RS_COLUMN_MIMO2] = {
		.mode = RS_MIMO2,
		.ant = ANT_AB,
		.next_columns = {
			RS_COLUMN_SISO_ANT_A,
			RS_COLUMN_MIMO2_SGI,
			RS_COLUMN_LEGACY_ANT_A,
			RS_COLUMN_LEGACY_ANT_B,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_mimo_allow,
		},
	},
	[RS_COLUMN_MIMO2_SGI] = {
		.mode = RS_MIMO2,
		.ant = ANT_AB,
		.sgi = true,
		.next_columns = {
			RS_COLUMN_SISO_ANT_A_SGI,
			RS_COLUMN_MIMO2,
			RS_COLUMN_LEGACY_ANT_A,
			RS_COLUMN_LEGACY_ANT_B,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
			RS_COLUMN_INVALID,
		},
		.checks = {
			rs_mimo_allow,
			rs_sgi_allow,
		},
	},
};
357
358static inline u8 rs_extract_rate(u32 rate_n_flags)
359{
360 /* also works for HT because bits 7:6 are zero there */
361 return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
362}
363
364static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
365{
366 int idx = 0;
367
368 if (rate_n_flags & RATE_MCS_HT_MSK) {
369 idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
370 idx += IWL_RATE_MCS_0_INDEX;
371
372 /* skip 9M not supported in HT*/
373 if (idx >= IWL_RATE_9M_INDEX)
374 idx += 1;
375 if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
376 return idx;
377 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
378 idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
379 idx += IWL_RATE_MCS_0_INDEX;
380
381 /* skip 9M not supported in VHT*/
382 if (idx >= IWL_RATE_9M_INDEX)
383 idx++;
384 if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
385 return idx;
386 } else {
387 /* legacy rate format, search for match in table */
388
389 u8 legacy_rate = rs_extract_rate(rate_n_flags);
390 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
391 if (iwl_rates[idx].plcp == legacy_rate)
392 return idx;
393 }
394
395 return IWL_RATE_INVALID;
396}
397
398static void rs_rate_scale_perform(struct iwl_mvm *mvm,
399 struct ieee80211_sta *sta,
400 struct iwl_lq_sta *lq_sta,
401 int tid);
402static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
403 struct ieee80211_sta *sta,
404 struct iwl_lq_sta *lq_sta,
405 const struct rs_rate *initial_rate);
406static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
407
408/**
409 * The following tables contain the expected throughput metrics for all rates
410 *
411 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
412 *
413 * where invalid entries are zeros.
414 *
415 * CCK rates are only valid in legacy table and will only be used in G
416 * (2.4 GHz) band.
417 */
418
/* Legacy (CCK + OFDM) expected throughput; no aggregation */
static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};

/* Expected TpT tables. 4 indexes:
 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
 */
static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202, 216, 0},
	{0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210, 225, 0},
	{0, 0, 0, 0, 49, 0,  97, 145, 192, 285, 375, 420, 464, 551, 0},
	{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};

/* SISO, 40 MHz channel */
static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250,  257,  269,  275},
	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257,  264,  275,  280},
	{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828,  911, 1070, 1173},
	{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
};

/* SISO, 80 MHz channel (VHT only) */
static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 130, 0, 191, 223, 244,  273,  288,  294,  298,  305,  308},
	{0, 0, 0, 0, 138, 0, 200, 231, 251,  279,  293,  298,  302,  308,  312},
	{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
	{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};

/* MIMO2, 20 MHz channel */
static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250,  261, 0},
	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256,  267, 0},
	{0, 0, 0, 0,  98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
	{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};

/* MIMO2, 40 MHz channel */
static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235,  264,  279,  285,  289,  296,  300},
	{0, 0, 0, 0, 131, 0, 191, 222, 242,  270,  284,  289,  293,  300,  303},
	{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
	{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
};

/* MIMO2, 80 MHz channel (VHT only) */
static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 182, 0, 240,  264,  278,  299,  308,  311,  313,  317,  319},
	{0, 0, 0, 0, 190, 0, 247,  269,  282,  302,  310,  313,  315,  319,  320},
	{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
	{0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
};
467
/* Human-readable description per rate index, used for debug output:
 * rate in mbps and the modulation/coding it uses.
 */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
	{  "1", "BPSK DSSS"},
	{  "2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{ "11", "QPSK CCK"},
	{  "6", "BPSK 1/2"},
	{  "9", "BPSK 1/2"},
	{ "12", "QPSK 1/2"},
	{ "18", "QPSK 3/4"},
	{ "24", "16QAM 1/2"},
	{ "36", "16QAM 3/4"},
	{ "48", "64QAM 2/3"},
	{ "54", "64QAM 3/4"},
	{ "60", "64QAM 5/6"},
};
484
485#define MCS_INDEX_PER_STREAM (8)
486
487static const char *rs_pretty_ant(u8 ant)
488{
489 static const char * const ant_name[] = {
490 [ANT_NONE] = "None",
491 [ANT_A] = "A",
492 [ANT_B] = "B",
493 [ANT_AB] = "AB",
494 [ANT_C] = "C",
495 [ANT_AC] = "AC",
496 [ANT_BC] = "BC",
497 [ANT_ABC] = "ABC",
498 };
499
500 if (ant > ANT_ABC)
501 return "UNKNOWN";
502
503 return ant_name[ant];
504}
505
/* Return a printable name for an LQ table type (debug only).
 * NOTE(review): assumes every enum value in [LQ_NONE, LQ_MAX) has a
 * designated entry in lq_types[] — a gap would yield a NULL string;
 * verify against the iwl_table_type definition.
 */
static const char *rs_pretty_lq_type(enum iwl_table_type type)
{
	static const char * const lq_types[] = {
		[LQ_NONE] = "NONE",
		[LQ_LEGACY_A] = "LEGACY_A",
		[LQ_LEGACY_G] = "LEGACY_G",
		[LQ_HT_SISO] = "HT SISO",
		[LQ_HT_MIMO2] = "HT MIMO",
		[LQ_VHT_SISO] = "VHT SISO",
		[LQ_VHT_MIMO2] = "VHT MIMO",
	};

	if (type < LQ_NONE || type >= LQ_MAX)
		return "UNKNOWN";

	return lq_types[type];
}
523
524static char *rs_pretty_rate(const struct rs_rate *rate)
525{
526 static char buf[40];
527 static const char * const legacy_rates[] = {
528 [IWL_RATE_1M_INDEX] = "1M",
529 [IWL_RATE_2M_INDEX] = "2M",
530 [IWL_RATE_5M_INDEX] = "5.5M",
531 [IWL_RATE_11M_INDEX] = "11M",
532 [IWL_RATE_6M_INDEX] = "6M",
533 [IWL_RATE_9M_INDEX] = "9M",
534 [IWL_RATE_12M_INDEX] = "12M",
535 [IWL_RATE_18M_INDEX] = "18M",
536 [IWL_RATE_24M_INDEX] = "24M",
537 [IWL_RATE_36M_INDEX] = "36M",
538 [IWL_RATE_48M_INDEX] = "48M",
539 [IWL_RATE_54M_INDEX] = "54M",
540 };
541 static const char *const ht_vht_rates[] = {
542 [IWL_RATE_MCS_0_INDEX] = "MCS0",
543 [IWL_RATE_MCS_1_INDEX] = "MCS1",
544 [IWL_RATE_MCS_2_INDEX] = "MCS2",
545 [IWL_RATE_MCS_3_INDEX] = "MCS3",
546 [IWL_RATE_MCS_4_INDEX] = "MCS4",
547 [IWL_RATE_MCS_5_INDEX] = "MCS5",
548 [IWL_RATE_MCS_6_INDEX] = "MCS6",
549 [IWL_RATE_MCS_7_INDEX] = "MCS7",
550 [IWL_RATE_MCS_8_INDEX] = "MCS8",
551 [IWL_RATE_MCS_9_INDEX] = "MCS9",
552 };
553 const char *rate_str;
554
555 if (is_type_legacy(rate->type))
556 rate_str = legacy_rates[rate->index];
557 else if (is_type_ht(rate->type) || is_type_vht(rate->type))
558 rate_str = ht_vht_rates[rate->index];
559 else
560 rate_str = "BAD_RATE";
561
562 sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
563 rs_pretty_ant(rate->ant), rate_str);
564 return buf;
565}
566
/* Emit a one-line debug dump of @rate, prefixed with @prefix */
static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
				const char *prefix)
{
	IWL_DEBUG_RATE(mvm,
		       "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
		       prefix, rs_pretty_rate(rate), rate->bw,
		       rate->sgi, rate->ldpc, rate->stbc);
}
575
576static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
577{
578 window->data = 0;
579 window->success_counter = 0;
580 window->success_ratio = IWL_INVALID_VALUE;
581 window->counter = 0;
582 window->average_tpt = IWL_INVALID_VALUE;
583}
584
585static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
586 struct iwl_scale_tbl_info *tbl)
587{
588 int i;
589
590 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
591 for (i = 0; i < IWL_RATE_COUNT; i++)
592 rs_rate_scale_clear_window(&tbl->win[i]);
593
594 for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
595 rs_rate_scale_clear_window(&tbl->tpc_win[i]);
596}
597
/* Nonzero iff every antenna in @ant_type is present in @valid_antenna */
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
{
	u8 missing = ant_type & ~valid_antenna;

	return missing == 0;
}
602
603static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
604 struct iwl_lq_sta *lq_data, u8 tid,
605 struct ieee80211_sta *sta)
606{
607 int ret = -EAGAIN;
608
609 IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
610 sta->addr, tid);
611 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
612 if (ret == -EAGAIN) {
613 /*
614 * driver and mac80211 is out of sync
615 * this might be cause by reloading firmware
616 * stop the tx ba session here
617 */
618 IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
619 tid);
620 ieee80211_stop_tx_ba_session(sta, tid);
621 }
622 return ret;
623}
624
625static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid,
626 struct iwl_lq_sta *lq_data,
627 struct ieee80211_sta *sta)
628{
629 if (tid < IWL_MAX_TID_COUNT)
630 rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta);
631 else
632 IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
633 tid, IWL_MAX_TID_COUNT);
634}
635
636static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
637{
638 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
639 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
640 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
641}
642
643/*
644 * Static function to get the expected throughput from an iwl_scale_tbl_info
645 * that wraps a NULL pointer check
646 */
647static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
648{
649 if (tbl->expected_tpt)
650 return tbl->expected_tpt[rs_index];
651 return 0;
652}
653
/**
 * _rs_collect_tx_data - Update one success/failure sliding window
 *
 * We keep a sliding window of the last 62 packets transmitted
 * at this rate. window->data contains the bitmask of successful
 * packets.
 *
 * Folds @attempts transmissions (of which @successes succeeded) into
 * @window, then recomputes the window's success ratio (fixed point,
 * 12800 = 100%) and, once there is enough history, its average
 * throughput based on the table's expected throughput at @scale_index.
 *
 * Always returns 0.
 */
static int _rs_collect_tx_data(struct iwl_mvm *mvm,
			       struct iwl_scale_tbl_info *tbl,
			       int scale_index, int attempts, int successes,
			       struct iwl_rate_scale_data *window)
{
	/* Bit of the oldest (about-to-expire) slot in the window bitmap */
	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	/* Get expected throughput */
	tpt = get_expected_tpt(tbl, scale_index);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history window; anything older isn't really relevant any more.
	 * If we have filled up the sliding window, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (window->counter >= IWL_RATE_MAX_WINDOW) {
			/* remove earliest */
			window->counter = IWL_RATE_MAX_WINDOW - 1;

			if (window->data & mask) {
				window->data &= ~mask;
				window->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		window->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		window->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			window->success_counter++;
			window->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0! */
	if (window->counter > 0)
		window->success_ratio = 128 * (100 * window->success_counter)
					/ window->counter;
	else
		window->success_ratio = IWL_INVALID_VALUE;

	fail_count = window->counter - window->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) ||
	    (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH))
		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
	else
		window->average_tpt = IWL_INVALID_VALUE;

	return 0;
}
725
726static int rs_collect_tx_data(struct iwl_mvm *mvm,
727 struct iwl_lq_sta *lq_sta,
728 struct iwl_scale_tbl_info *tbl,
729 int scale_index, int attempts, int successes,
730 u8 reduced_txp)
731{
732 struct iwl_rate_scale_data *window = NULL;
733 int ret;
734
735 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
736 return -EINVAL;
737
738 if (tbl->column != RS_COLUMN_INVALID) {
739 struct lq_sta_pers *pers = &lq_sta->pers;
740
741 pers->tx_stats[tbl->column][scale_index].total += attempts;
742 pers->tx_stats[tbl->column][scale_index].success += successes;
743 }
744
745 /* Select window for current tx bit rate */
746 window = &(tbl->win[scale_index]);
747
748 ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
749 window);
750 if (ret)
751 return ret;
752
753 if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
754 return -EINVAL;
755
756 window = &tbl->tpc_win[reduced_txp];
757 return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
758 window);
759}
760
/* Convert rs_rate object into ucode rate bitmask.
 *
 * Builds a rate_n_flags word from @rate: antenna bits, the PLCP/MCS
 * code appropriate to the rate type (legacy/HT/VHT, SISO/MIMO2),
 * bandwidth, and the SGI/LDPC/STBC flag bits. Out-of-range MCS indices
 * are clamped to the last valid rate (with an error log).
 */
static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
				   struct rs_rate *rate)
{
	u32 ucode_rate = 0;
	int index = rate->index;

	ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
			 RATE_MCS_ANT_ABC_MSK);

	if (is_legacy(rate)) {
		ucode_rate |= iwl_rates[index].plcp;
		/* CCK rates (1/2/5.5/11M) need the CCK flag as well */
		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
			ucode_rate |= RATE_MCS_CCK_MSK;
		return ucode_rate;
	}

	if (is_ht(rate)) {
		if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
			IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
			index = IWL_LAST_HT_RATE;
		}
		ucode_rate |= RATE_MCS_HT_MSK;

		if (is_ht_siso(rate))
			ucode_rate |= iwl_rates[index].plcp_ht_siso;
		else if (is_ht_mimo2(rate))
			ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
		else
			WARN_ON_ONCE(1);
	} else if (is_vht(rate)) {
		if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
			IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
			index = IWL_LAST_VHT_RATE;
		}
		ucode_rate |= RATE_MCS_VHT_MSK;
		if (is_vht_siso(rate))
			ucode_rate |= iwl_rates[index].plcp_vht_siso;
		else if (is_vht_mimo2(rate))
			ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
		else
			WARN_ON_ONCE(1);

	} else {
		IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
	}

	if (is_siso(rate) && rate->stbc) {
		/* To enable STBC we need to set both a flag and ANT_AB */
		ucode_rate |= RATE_MCS_ANT_AB_MSK;
		ucode_rate |= RATE_MCS_VHT_STBC_MSK;
	}

	ucode_rate |= rate->bw;
	if (rate->sgi)
		ucode_rate |= RATE_MCS_SGI_MSK;
	if (rate->ldpc)
		ucode_rate |= RATE_MCS_LDPC_MSK;

	return ucode_rate;
}
822
/* Convert a ucode rate into an rs_rate object.
 *
 * Decodes @ucode_rate (a rate_n_flags word) into @rate: index, antenna
 * mask, type (legacy A/G vs HT/VHT SISO/MIMO2 based on the NSS bits),
 * bandwidth and the SGI/LDPC/STBC/BFER flags. @band decides between
 * LQ_LEGACY_A and LQ_LEGACY_G for legacy rates.
 *
 * Returns 0 on success, -EINVAL if the rate code maps to no table index.
 */
static int rs_rate_from_ucode_rate(const u32 ucode_rate,
				   enum ieee80211_band band,
				   struct rs_rate *rate)
{
	u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
	u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
	u8 nss;

	memset(rate, 0, sizeof(*rate));
	rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);

	if (rate->index == IWL_RATE_INVALID)
		return -EINVAL;

	rate->ant = (ant_msk >> RATE_MCS_ANT_POS);

	/* Legacy */
	if (!(ucode_rate & RATE_MCS_HT_MSK) &&
	    !(ucode_rate & RATE_MCS_VHT_MSK)) {
		if (num_of_ant == 1) {
			if (band == IEEE80211_BAND_5GHZ)
				rate->type = LQ_LEGACY_A;
			else
				rate->type = LQ_LEGACY_G;
		}

		return 0;
	}

	/* HT or VHT */
	if (ucode_rate & RATE_MCS_SGI_MSK)
		rate->sgi = true;
	if (ucode_rate & RATE_MCS_LDPC_MSK)
		rate->ldpc = true;
	if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
		rate->stbc = true;
	if (ucode_rate & RATE_MCS_BF_MSK)
		rate->bfer = true;

	rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;

	if (ucode_rate & RATE_MCS_HT_MSK) {
		nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
		       RATE_HT_MCS_NSS_POS) + 1;

		if (nss == 1) {
			rate->type = LQ_HT_SISO;
			/* SISO normally uses one antenna; STBC/BFER use two */
			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
				  "stbc %d bfer %d",
				  rate->stbc, rate->bfer);
		} else if (nss == 2) {
			rate->type = LQ_HT_MIMO2;
			WARN_ON_ONCE(num_of_ant != 2);
		} else {
			WARN_ON_ONCE(1);
		}
	} else if (ucode_rate & RATE_MCS_VHT_MSK) {
		nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
		       RATE_VHT_MCS_NSS_POS) + 1;

		if (nss == 1) {
			rate->type = LQ_VHT_SISO;
			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
				  "stbc %d bfer %d",
				  rate->stbc, rate->bfer);
		} else if (nss == 2) {
			rate->type = LQ_VHT_MIMO2;
			WARN_ON_ONCE(num_of_ant != 2);
		} else {
			WARN_ON_ONCE(1);
		}
	}

	/* 160 MHz is unsupported; 80 MHz is VHT only */
	WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
	WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
		     !is_vht(rate));

	return 0;
}
903
904/* switch to another antenna/antennas and return 1 */
905/* if no other valid antenna found, return 0 */
906static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
907{
908 u8 new_ant_type;
909
910 if (!rate->ant || rate->ant > ANT_ABC)
911 return 0;
912
913 if (!rs_is_valid_ant(valid_ant, rate->ant))
914 return 0;
915
916 new_ant_type = ant_toggle_lookup[rate->ant];
917
918 while ((new_ant_type != rate->ant) &&
919 !rs_is_valid_ant(valid_ant, new_ant_type))
920 new_ant_type = ant_toggle_lookup[new_ant_type];
921
922 if (new_ant_type == rate->ant)
923 return 0;
924
925 rate->ant = new_ant_type;
926
927 return 1;
928}
929
930static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
931 struct rs_rate *rate)
932{
933 if (is_legacy(rate))
934 return lq_sta->active_legacy_rate;
935 else if (is_siso(rate))
936 return lq_sta->active_siso_rate;
937 else if (is_mimo2(rate))
938 return lq_sta->active_mimo2_rate;
939
940 WARN_ON_ONCE(1);
941 return 0;
942}
943
944static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
945 int rate_type)
946{
947 u8 high = IWL_RATE_INVALID;
948 u8 low = IWL_RATE_INVALID;
949
950 /* 802.11A or ht walks to the next literal adjacent rate in
951 * the rate table */
952 if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
953 int i;
954 u32 mask;
955
956 /* Find the previous rate that is in the rate mask */
957 i = index - 1;
958 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
959 if (rate_mask & mask) {
960 low = i;
961 break;
962 }
963 }
964
965 /* Find the next rate that is in the rate mask */
966 i = index + 1;
967 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
968 if (rate_mask & mask) {
969 high = i;
970 break;
971 }
972 }
973
974 return (high << 8) | low;
975 }
976
977 low = index;
978 while (low != IWL_RATE_INVALID) {
979 low = iwl_rates[low].prev_rs;
980 if (low == IWL_RATE_INVALID)
981 break;
982 if (rate_mask & (1 << low))
983 break;
984 }
985
986 high = index;
987 while (high != IWL_RATE_INVALID) {
988 high = iwl_rates[high].next_rs;
989 if (high == IWL_RATE_INVALID)
990 break;
991 if (rate_mask & (1 << high))
992 break;
993 }
994
995 return (high << 8) | low;
996}
997
998static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
999 struct rs_rate *rate)
1000{
1001 return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
1002}
1003
1004/* Get the next supported lower rate in the current column.
1005 * Return true if bottom rate in the current column was reached
1006 */
1007static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
1008 struct rs_rate *rate)
1009{
1010 u8 low;
1011 u16 high_low;
1012 u16 rate_mask;
1013 struct iwl_mvm *mvm = lq_sta->pers.drv;
1014
1015 rate_mask = rs_get_supported_rates(lq_sta, rate);
1016 high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
1017 rate->type);
1018 low = high_low & 0xff;
1019
1020 /* Bottom rate of column reached */
1021 if (low == IWL_RATE_INVALID)
1022 return true;
1023
1024 rate->index = low;
1025 return false;
1026}
1027
/* Get the next rate to use following a column downgrade:
 * MIMO drops to SISO at the same MCS; SISO drops to legacy via the
 * rs_ht_to_legacy mapping; legacy has nowhere lower to go. In all cases
 * SGI is cleared and the antenna set is reduced to a single antenna.
 */
static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
					  struct rs_rate *rate)
{
	struct iwl_mvm *mvm = lq_sta->pers.drv;

	if (is_legacy(rate)) {
		/* No column to downgrade from Legacy */
		return;
	} else if (is_siso(rate)) {
		/* Downgrade to Legacy if we were in SISO */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate->type = LQ_LEGACY_A;
		else
			rate->type = LQ_LEGACY_G;

		rate->bw = RATE_MCS_CHAN_WIDTH_20;

		/* the mapping table below is only valid for MCS indices */
		WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
			     rate->index > IWL_RATE_MCS_9_INDEX);

		rate->index = rs_ht_to_legacy[rate->index];
		rate->ldpc = false;
	} else {
		/* Downgrade to SISO with same MCS if in MIMO */
		rate->type = is_vht_mimo2(rate) ?
			LQ_VHT_SISO : LQ_HT_SISO;
	}

	if (num_of_ant(rate->ant) > 1)
		rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm));

	/* Relevant in both switching to SISO or Legacy */
	rate->sgi = false;

	if (!rs_rate_supported(lq_sta, rate))
		rs_get_lower_rate_in_column(lq_sta, rate);
}
1066
/* Check if both rates are identical.
 *
 * @allow_ant_mismatch additionally accepts a SISO rate on ANT_A or
 * ANT_B as equal to a rate carrying STBC/BFER and ANT_AB (STBC/BFER
 * transmissions always use both antennas, so the antenna fields differ
 * even when the rates logically match).
 */
static inline bool rs_rate_equal(struct rs_rate *a,
				 struct rs_rate *b,
				 bool allow_ant_mismatch)

{
	bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
		(a->bfer == b->bfer);

	if (allow_ant_mismatch) {
		if (a->stbc || a->bfer) {
			/* STBC/BFER implies the rate was set with ANT_AB */
			WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
				  a->stbc, a->bfer, a->ant);
			ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
		} else if (b->stbc || b->bfer) {
			WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
				  b->stbc, b->bfer, b->ant);
			ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
		}
	}

	return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
		(a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
}
1094
1095/* Check if both rates share the same column */
1096static inline bool rs_rate_column_match(struct rs_rate *a,
1097 struct rs_rate *b)
1098{
1099 bool ant_match;
1100
1101 if (a->stbc || a->bfer)
1102 ant_match = (b->ant == ANT_A || b->ant == ANT_B);
1103 else
1104 ant_match = (a->ant == b->ant);
1105
1106 return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi)
1107 && ant_match;
1108}
1109
1110static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
1111{
1112 if (is_legacy(rate)) {
1113 if (rate->ant == ANT_A)
1114 return RS_COLUMN_LEGACY_ANT_A;
1115
1116 if (rate->ant == ANT_B)
1117 return RS_COLUMN_LEGACY_ANT_B;
1118
1119 goto err;
1120 }
1121
1122 if (is_siso(rate)) {
1123 if (rate->ant == ANT_A || rate->stbc || rate->bfer)
1124 return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
1125 RS_COLUMN_SISO_ANT_A;
1126
1127 if (rate->ant == ANT_B)
1128 return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
1129 RS_COLUMN_SISO_ANT_B;
1130
1131 goto err;
1132 }
1133
1134 if (is_mimo(rate))
1135 return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
1136
1137err:
1138 return RS_COLUMN_INVALID;
1139}
1140
1141static u8 rs_get_tid(struct ieee80211_hdr *hdr)
1142{
1143 u8 tid = IWL_MAX_TID_COUNT;
1144
1145 if (ieee80211_is_data_qos(hdr->frame_control)) {
1146 u8 *qc = ieee80211_get_qos_ctl(hdr);
1147 tid = qc[0] & 0xf;
1148 }
1149
1150 if (unlikely(tid > IWL_MAX_TID_COUNT))
1151 tid = IWL_MAX_TID_COUNT;
1152
1153 return tid;
1154}
1155
/*
 * iwl_mvm_rs_tx_status - feed a frame's TX status into rate scaling
 *
 * Updates the per-station rate scaling statistics from the TX status of a
 * frame or A-MPDU, then runs rs_rate_scale_perform() to look for a better
 * rate or modulation mode.
 *
 * The initial HW rate and the reduced-txpower level used for this frame
 * are recovered from info->status.status_driver_data[], where the TX path
 * stashed them.
 */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			  int tid, struct ieee80211_tx_info *info)
{
	int legacy_success;
	int retries;
	int i;
	struct iwl_lq_cmd *table;
	u32 lq_hwrate;
	struct rs_rate lq_rate, tx_resp_rate;
	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
	/* driver-private status words stashed by the TX path */
	u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
	bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
					     IWL_UCODE_TLV_API_LQ_SS_PARAMS);

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (!lq_sta) {
		IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
		return;
	} else if (!lq_sta->pers.drv) {
		IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
		return;
	}

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);

#ifdef CONFIG_MAC80211_DEBUGFS
	/* Disable last tx check if we are debugging with fixed rate but
	 * update tx stats */
	if (lq_sta->pers.dbg_fixed_rate) {
		int index = tx_resp_rate.index;
		enum rs_column column;
		int attempts, success;

		column = rs_get_column_from_rate(&tx_resp_rate);
		if (WARN_ONCE(column == RS_COLUMN_INVALID,
			      "Can't map rate 0x%x to column",
			      tx_resp_hwrate))
			return;

		if (info->flags & IEEE80211_TX_STAT_AMPDU) {
			attempts = info->status.ampdu_len;
			success = info->status.ampdu_ack_len;
		} else {
			attempts = info->status.rates[0].count;
			success = !!(info->flags & IEEE80211_TX_STAT_ACK);
		}

		lq_sta->pers.tx_stats[column][index].total += attempts;
		lq_sta->pers.tx_stats[column][index].success += success;

		IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
			       tx_resp_hwrate, success, attempts);
		return;
	}
#endif

	/* No TX activity for a while: stats are stale, so tear down BA
	 * sessions and restart rate scaling from scratch.
	 */
	if (time_after(jiffies,
		       (unsigned long)(lq_sta->last_tx +
				       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
		int t;

		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
		for (t = 0; t < IWL_MAX_TID_COUNT; t++)
			ieee80211_stop_tx_ba_session(sta, t);

		iwl_mvm_rs_rate_init(mvm, sta, info->band, false);
		return;
	}
	lq_sta->last_tx = jiffies;

	/* Ignore this Tx frame response if its initial rate doesn't match
	 * that of latest Link Quality command. There may be stragglers
	 * from a previous Link Quality command, but we're no longer interested
	 * in those; they're either from the "active" mode while we're trying
	 * to check "search" mode, or a prior "search" mode after we've moved
	 * to a new "search" mode (which might become the new "active" mode).
	 */
	table = &lq_sta->lq;
	lq_hwrate = le32_to_cpu(table->rs_table[0]);
	rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);

	/* Here we actually compare this rate to the latest LQ command */
	if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
		IWL_DEBUG_RATE(mvm,
			       "initial tx resp rate 0x%x does not match 0x%x\n",
			       tx_resp_hwrate, lq_hwrate);

		/*
		 * Since rates mis-match, the last LQ command may have failed.
		 * After IWL_MVM_RS_MISSED_RATE_MAX mis-matches, resync the
		 * uCode with the driver by resending the LQ command.
		 */
		lq_sta->missed_rate_counter++;
		if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
			lq_sta->missed_rate_counter = 0;
			IWL_DEBUG_RATE(mvm,
				       "Too many rates mismatch. Send sync LQ. rs_state %d\n",
				       lq_sta->rs_state);
			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
		}
		/* Regardless, ignore this status info for outdated rate */
		return;
	} else
		/* Rate did match, so reset the missed_rate_counter */
		lq_sta->missed_rate_counter = 0;

	/* Pick the stats tables matching this frame's initial rate:
	 * curr_tbl is the one the frame was sent from, other_tbl the
	 * remaining (active vs. search) table.
	 */
	if (!lq_sta->search_better_tbl) {
		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
	} else {
		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	}

	if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
		IWL_DEBUG_RATE(mvm,
			       "Neither active nor search matches tx rate\n");
		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
		rs_dump_rate(mvm, &lq_rate, "ACTUAL");

		/*
		 * no matching table found, let's by-pass the data collection
		 * and continue to perform rate scale to find the rate table
		 */
		rs_stay_in_table(lq_sta, true);
		goto done;
	}

	/*
	 * Updating the frame history depends on whether packets were
	 * aggregated.
	 *
	 * For aggregation, all packets were transmitted at the same rate, the
	 * first index into rate scale table.
	 */
	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
		/* ampdu_ack_len = 0 marks no BA was received. In this case
		 * treat it as a single frame loss as we don't want the success
		 * ratio to dip too quickly because a BA wasn't received
		 */
		if (info->status.ampdu_ack_len == 0)
			info->status.ampdu_len = 1;

		rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
				   info->status.ampdu_len,
				   info->status.ampdu_ack_len,
				   reduced_txp);

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
			lq_sta->total_success += info->status.ampdu_ack_len;
			lq_sta->total_failed += (info->status.ampdu_len -
					info->status.ampdu_ack_len);
		}
	} else {
		/* For legacy, update frame history with for each Tx retry. */
		retries = info->status.rates[0].count - 1;
		/* HW doesn't send more than 15 retries */
		retries = min(retries, 15);

		/* The last transmission may have been successful */
		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
		/* Collect data for each rate used during failed TX attempts */
		for (i = 0; i <= retries; ++i) {
			/* rs_table[i] is the rate used for the i-th attempt */
			lq_hwrate = le32_to_cpu(table->rs_table[i]);
			rs_rate_from_ucode_rate(lq_hwrate, info->band,
						&lq_rate);
			/*
			 * Only collect stats if retried rate is in the same RS
			 * table as active/search.
			 */
			if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
				tmp_tbl = curr_tbl;
			else if (rs_rate_column_match(&lq_rate,
						      &other_tbl->rate))
				tmp_tbl = other_tbl;
			else
				continue;

			/* only the final attempt may have succeeded */
			rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
					   1, i < retries ? 0 : legacy_success,
					   reduced_txp);
		}

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
			lq_sta->total_success += legacy_success;
			lq_sta->total_failed += retries + (1 - legacy_success);
		}
	}
	/* The last TX rate is cached in lq_sta; it's set in if/else above */
	lq_sta->last_rate_n_flags = lq_hwrate;
	IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
	/* See if there's a better rate or modulation mode to try. */
	if (sta->supp_rates[info->band])
		rs_rate_scale_perform(mvm, sta, lq_sta, tid);
}
1365
1366/*
1367 * mac80211 sends us Tx status
1368 */
1369static void rs_mac80211_tx_status(void *mvm_r,
1370 struct ieee80211_supported_band *sband,
1371 struct ieee80211_sta *sta, void *priv_sta,
1372 struct sk_buff *skb)
1373{
1374 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1375 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
1376 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1377 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1378
1379 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
1380 return;
1381
1382 if (!ieee80211_is_data(hdr->frame_control) ||
1383 info->flags & IEEE80211_TX_CTL_NO_ACK)
1384 return;
1385
1386 iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info);
1387}
1388
1389/*
1390 * Begin a period of staying with a selected modulation mode.
1391 * Set "stay_in_tbl" flag to prevent any mode switches.
1392 * Set frame tx success limits according to legacy vs. high-throughput,
1393 * and reset overall (spanning all rates) tx success history statistics.
1394 * These control how long we stay using same modulation mode before
1395 * searching for a new mode.
1396 */
1397static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1398 struct iwl_lq_sta *lq_sta)
1399{
1400 IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
1401 lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
1402 if (is_legacy) {
1403 lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT;
1404 lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT;
1405 lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT;
1406 } else {
1407 lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT;
1408 lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT;
1409 lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT;
1410 }
1411 lq_sta->table_count = 0;
1412 lq_sta->total_failed = 0;
1413 lq_sta->total_success = 0;
1414 lq_sta->flush_timer = jiffies;
1415 lq_sta->visited_columns = 0;
1416}
1417
1418static inline int rs_get_max_rate_from_mask(unsigned long rate_mask)
1419{
1420 if (rate_mask)
1421 return find_last_bit(&rate_mask, BITS_PER_LONG);
1422 return IWL_RATE_INVALID;
1423}
1424
1425static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
1426 const struct rs_tx_column *column)
1427{
1428 switch (column->mode) {
1429 case RS_LEGACY:
1430 return lq_sta->max_legacy_rate_idx;
1431 case RS_SISO:
1432 return lq_sta->max_siso_rate_idx;
1433 case RS_MIMO2:
1434 return lq_sta->max_mimo2_rate_idx;
1435 default:
1436 WARN_ON_ONCE(1);
1437 }
1438
1439 return lq_sta->max_legacy_rate_idx;
1440}
1441
1442static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1443 const struct rs_tx_column *column,
1444 u32 bw)
1445{
1446 /* Used to choose among HT tables */
1447 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1448
1449 if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
1450 column->mode != RS_SISO &&
1451 column->mode != RS_MIMO2))
1452 return expected_tpt_legacy;
1453
1454 /* Legacy rates have only one table */
1455 if (column->mode == RS_LEGACY)
1456 return expected_tpt_legacy;
1457
1458 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1459 /* Choose among many HT tables depending on number of streams
1460 * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
1461 * status */
1462 if (column->mode == RS_SISO) {
1463 switch (bw) {
1464 case RATE_MCS_CHAN_WIDTH_20:
1465 ht_tbl_pointer = expected_tpt_siso_20MHz;
1466 break;
1467 case RATE_MCS_CHAN_WIDTH_40:
1468 ht_tbl_pointer = expected_tpt_siso_40MHz;
1469 break;
1470 case RATE_MCS_CHAN_WIDTH_80:
1471 ht_tbl_pointer = expected_tpt_siso_80MHz;
1472 break;
1473 default:
1474 WARN_ON_ONCE(1);
1475 }
1476 } else if (column->mode == RS_MIMO2) {
1477 switch (bw) {
1478 case RATE_MCS_CHAN_WIDTH_20:
1479 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1480 break;
1481 case RATE_MCS_CHAN_WIDTH_40:
1482 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1483 break;
1484 case RATE_MCS_CHAN_WIDTH_80:
1485 ht_tbl_pointer = expected_tpt_mimo2_80MHz;
1486 break;
1487 default:
1488 WARN_ON_ONCE(1);
1489 }
1490 } else {
1491 WARN_ON_ONCE(1);
1492 }
1493
1494 if (!column->sgi && !lq_sta->is_agg) /* Normal */
1495 return ht_tbl_pointer[0];
1496 else if (column->sgi && !lq_sta->is_agg) /* SGI */
1497 return ht_tbl_pointer[1];
1498 else if (!column->sgi && lq_sta->is_agg) /* AGG */
1499 return ht_tbl_pointer[2];
1500 else /* AGG+SGI */
1501 return ht_tbl_pointer[3];
1502}
1503
1504static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1505 struct iwl_scale_tbl_info *tbl)
1506{
1507 struct rs_rate *rate = &tbl->rate;
1508 const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
1509
1510 tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
1511}
1512
1513static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1514 struct iwl_lq_sta *lq_sta,
1515 struct iwl_scale_tbl_info *tbl, /* "search" */
1516 unsigned long rate_mask, s8 index)
1517{
1518 struct iwl_scale_tbl_info *active_tbl =
1519 &(lq_sta->lq_info[lq_sta->active_tbl]);
1520 s32 success_ratio = active_tbl->win[index].success_ratio;
1521 u16 expected_current_tpt = active_tbl->expected_tpt[index];
1522 const u16 *tpt_tbl = tbl->expected_tpt;
1523 u16 high_low;
1524 u32 target_tpt;
1525 int rate_idx;
1526
1527 if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
1528 target_tpt = 100 * expected_current_tpt;
1529 IWL_DEBUG_RATE(mvm,
1530 "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
1531 success_ratio, target_tpt);
1532 } else {
1533 target_tpt = lq_sta->last_tpt;
1534 IWL_DEBUG_RATE(mvm,
1535 "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
1536 success_ratio, target_tpt);
1537 }
1538
1539 rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
1540
1541 while (rate_idx != IWL_RATE_INVALID) {
1542 if (target_tpt < (100 * tpt_tbl[rate_idx]))
1543 break;
1544
1545 high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
1546 tbl->rate.type);
1547
1548 rate_idx = (high_low >> 8) & 0xff;
1549 }
1550
1551 IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
1552 rate_idx, target_tpt,
1553 rate_idx != IWL_RATE_INVALID ?
1554 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
1555
1556 return rate_idx;
1557}
1558
1559static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
1560{
1561 if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
1562 return RATE_MCS_CHAN_WIDTH_80;
1563 else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
1564 return RATE_MCS_CHAN_WIDTH_40;
1565
1566 return RATE_MCS_CHAN_WIDTH_20;
1567}
1568
1569/*
1570 * Check whether we should continue using same modulation mode, or
1571 * begin search for a new mode, based on:
1572 * 1) # tx successes or failures while using this mode
1573 * 2) # times calling this function
1574 * 3) elapsed time in this mode (not used, for now)
1575 */
1576static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1577{
1578 struct iwl_scale_tbl_info *tbl;
1579 int active_tbl;
1580 int flush_interval_passed = 0;
1581 struct iwl_mvm *mvm;
1582
1583 mvm = lq_sta->pers.drv;
1584 active_tbl = lq_sta->active_tbl;
1585
1586 tbl = &(lq_sta->lq_info[active_tbl]);
1587
1588 /* If we've been disallowing search, see if we should now allow it */
1589 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
1590 /* Elapsed time using current modulation mode */
1591 if (lq_sta->flush_timer)
1592 flush_interval_passed =
1593 time_after(jiffies,
1594 (unsigned long)(lq_sta->flush_timer +
1595 (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ)));
1596
1597 /*
1598 * Check if we should allow search for new modulation mode.
1599 * If many frames have failed or succeeded, or we've used
1600 * this same modulation for a long time, allow search, and
1601 * reset history stats that keep track of whether we should
1602 * allow a new search. Also (below) reset all bitmaps and
1603 * stats in active history.
1604 */
1605 if (force_search ||
1606 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1607 (lq_sta->total_success > lq_sta->max_success_limit) ||
1608 ((!lq_sta->search_better_tbl) &&
1609 (lq_sta->flush_timer) && (flush_interval_passed))) {
1610 IWL_DEBUG_RATE(mvm,
1611 "LQ: stay is expired %d %d %d\n",
1612 lq_sta->total_failed,
1613 lq_sta->total_success,
1614 flush_interval_passed);
1615
1616 /* Allow search for new mode */
1617 lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
1618 IWL_DEBUG_RATE(mvm,
1619 "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
1620 lq_sta->total_failed = 0;
1621 lq_sta->total_success = 0;
1622 lq_sta->flush_timer = 0;
1623 /* mark the current column as visited */
1624 lq_sta->visited_columns = BIT(tbl->column);
1625 /*
1626 * Else if we've used this modulation mode enough repetitions
1627 * (regardless of elapsed time or success/failure), reset
1628 * history bitmaps and rate-specific stats for all rates in
1629 * active table.
1630 */
1631 } else {
1632 lq_sta->table_count++;
1633 if (lq_sta->table_count >=
1634 lq_sta->table_count_limit) {
1635 lq_sta->table_count = 0;
1636
1637 IWL_DEBUG_RATE(mvm,
1638 "LQ: stay in table clear win\n");
1639 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1640 }
1641 }
1642
1643 /* If transitioning to allow "search", reset all history
1644 * bitmaps and stats in active table (this will become the new
1645 * "search" table). */
1646 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
1647 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1648 }
1649 }
1650}
1651
1652/*
1653 * setup rate table in uCode
1654 */
1655static void rs_update_rate_tbl(struct iwl_mvm *mvm,
1656 struct ieee80211_sta *sta,
1657 struct iwl_lq_sta *lq_sta,
1658 struct iwl_scale_tbl_info *tbl)
1659{
1660 rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
1661 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
1662}
1663
/*
 * Special-case tweak for 80MHz-capable VHT SISO stations:
 * downscaling from 80MHz MCS0 jumps to 20MHz MCS4 instead of to a lower
 * legacy rate, and once 20MHz MCS5 has proven sustainable we jump back
 * up to 80MHz MCS1.
 *
 * Returns true if the rate in @tbl was changed (the caller must then
 * push a new rate table to the firmware).
 */
static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
			      struct ieee80211_sta *sta,
			      struct iwl_lq_sta *lq_sta,
			      struct iwl_scale_tbl_info *tbl,
			      enum rs_action scale_action)
{
	/* only applies to stations that actually support 80MHz */
	if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
		return false;

	if (!is_vht_siso(&tbl->rate))
		return false;

	/* Downscale from 80MHz MCS0: go to 20MHz MCS4 instead */
	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
	    (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
	    (scale_action == RS_ACTION_DOWNSCALE)) {
		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
		tbl->rate.index = IWL_RATE_MCS_4_INDEX;
		IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
		goto tweaked;
	}

	/* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is
	 * sustainable, i.e. we're past the test window. We can't go back
	 * if MCS5 is just tested as this will happen always after switching
	 * to 20Mhz MCS4 because the rate stats are cleared.
	 */
	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
	    (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
	     (scale_action == RS_ACTION_STAY)) ||
	     ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
	      (scale_action == RS_ACTION_UPSCALE)))) {
		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
		tbl->rate.index = IWL_RATE_MCS_1_INDEX;
		IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
		goto tweaked;
	}

	return false;

tweaked:
	/* rate changed: refresh expected tpt and clear stale stats */
	rs_set_expected_tpt_table(lq_sta, tbl);
	rs_rate_scale_clear_tbl_windows(mvm, tbl);
	return true;
}
1708
/*
 * Pick the next TX column (modulation/antenna/SGI combination) to try.
 *
 * Walks the current column's next_columns list, skipping candidates that
 * were already visited in this search cycle, use antennas the chip
 * doesn't have, fail one of the column's allow checks, have no allowed
 * rate, or can't beat the currently measured throughput even at their
 * best allowed rate.
 *
 * Returns the first qualifying column id, or RS_COLUMN_INVALID if none.
 */
static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
					 struct iwl_lq_sta *lq_sta,
					 struct ieee80211_sta *sta,
					 struct iwl_scale_tbl_info *tbl)
{
	int i, j, max_rate;
	enum rs_column next_col_id;
	const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
	const struct rs_tx_column *next_col;
	allow_column_func_t allow_func;
	u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm);
	const u16 *expected_tpt_tbl;
	u16 tpt, max_expected_tpt;

	for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
		next_col_id = curr_col->next_columns[i];

		if (next_col_id == RS_COLUMN_INVALID)
			continue;

		if (lq_sta->visited_columns & BIT(next_col_id)) {
			IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
				       next_col_id);
			continue;
		}

		next_col = &rs_tx_columns[next_col_id];

		if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
			IWL_DEBUG_RATE(mvm,
				       "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
				       next_col_id, valid_ants, next_col->ant);
			continue;
		}

		/* run the column's allow checks; any failure skips it */
		for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
			allow_func = next_col->checks[j];
			if (allow_func && !allow_func(mvm, sta, &tbl->rate,
						      next_col))
				break;
		}

		if (j != MAX_COLUMN_CHECKS) {
			IWL_DEBUG_RATE(mvm,
				       "Skip column %d: not allowed (check %d failed)\n",
				       next_col_id, j);

			continue;
		}

		/* compare measured tpt against the column's best case */
		tpt = lq_sta->last_tpt / 100;
		expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
						     rs_bw_from_sta_bw(sta));
		if (WARN_ON_ONCE(!expected_tpt_tbl))
			continue;

		max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
		if (max_rate == IWL_RATE_INVALID) {
			IWL_DEBUG_RATE(mvm,
				       "Skip column %d: no rate is allowed in this column\n",
				       next_col_id);
			continue;
		}

		max_expected_tpt = expected_tpt_tbl[max_rate];
		if (tpt >= max_expected_tpt) {
			IWL_DEBUG_RATE(mvm,
				       "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
				       next_col_id, max_expected_tpt, tpt);
			continue;
		}

		IWL_DEBUG_RATE(mvm,
			       "Found potential column %d. Max expected %d current %d\n",
			       next_col_id, max_expected_tpt, tpt);
		break;
	}

	if (i == MAX_NEXT_COLUMNS)
		return RS_COLUMN_INVALID;

	return next_col_id;
}
1792
/*
 * Configure the "search" table for a switch to the given column.
 *
 * Copies the active table (minus the per-rate stats windows) into the
 * search table, sets up rate type/antenna/SGI/BW/LDPC for the new
 * column, marks the column visited, and - when the modulation mode
 * changes - picks a starting rate via rs_get_best_rate().
 *
 * Returns 0 on success, -1 if no valid starting rate exists (in which
 * case the search rate type is reset to LQ_NONE).
 */
static int rs_switch_to_column(struct iwl_mvm *mvm,
			       struct iwl_lq_sta *lq_sta,
			       struct ieee80211_sta *sta,
			       enum rs_column col_id)
{
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
		&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct rs_rate *rate = &search_tbl->rate;
	const struct rs_tx_column *column = &rs_tx_columns[col_id];
	const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
	/* copy everything except the trailing per-rate stats windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	unsigned long rate_mask = 0;
	u32 rate_idx = 0;

	memcpy(search_tbl, tbl, sz);

	rate->sgi = column->sgi;
	rate->ant = column->ant;

	if (column->mode == RS_LEGACY) {
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate->type = LQ_LEGACY_A;
		else
			rate->type = LQ_LEGACY_G;

		rate->bw = RATE_MCS_CHAN_WIDTH_20;
		rate->ldpc = false;
		rate_mask = lq_sta->active_legacy_rate;
	} else if (column->mode == RS_SISO) {
		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
		rate_mask = lq_sta->active_siso_rate;
	} else if (column->mode == RS_MIMO2) {
		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
		rate_mask = lq_sta->active_mimo2_rate;
	} else {
		/* NOTE(review): the string literal is used as an always-true
		 * condition here, so this warns unconditionally (once) -
		 * looks intentional, but WARN_ON_ONCE(1) would be clearer.
		 */
		WARN_ON_ONCE("Bad column mode");
	}

	if (column->mode != RS_LEGACY) {
		rate->bw = rs_bw_from_sta_bw(sta);
		rate->ldpc = lq_sta->ldpc;
	}

	search_tbl->column = col_id;
	rs_set_expected_tpt_table(lq_sta, search_tbl);

	lq_sta->visited_columns |= BIT(col_id);

	/* Get the best matching rate if we're changing modes. e.g.
	 * SISO->MIMO, LEGACY->SISO, MIMO->SISO
	 */
	if (curr_column->mode != column->mode) {
		rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
					    rate_mask, rate->index);

		if ((rate_idx == IWL_RATE_INVALID) ||
		    !(BIT(rate_idx) & rate_mask)) {
			IWL_DEBUG_RATE(mvm,
				       "can not switch with index %d"
				       " rate mask %lx\n",
				       rate_idx, rate_mask);

			goto err;
		}

		rate->index = rate_idx;
	}

	IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
		       col_id, rate->index);

	return 0;

err:
	rate->type = LQ_NONE;
	return -1;
}
1872
1873static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
1874 struct iwl_scale_tbl_info *tbl,
1875 s32 sr, int low, int high,
1876 int current_tpt,
1877 int low_tpt, int high_tpt)
1878{
1879 enum rs_action action = RS_ACTION_STAY;
1880
1881 if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) ||
1882 (current_tpt == 0)) {
1883 IWL_DEBUG_RATE(mvm,
1884 "Decrease rate because of low SR\n");
1885 return RS_ACTION_DOWNSCALE;
1886 }
1887
1888 if ((low_tpt == IWL_INVALID_VALUE) &&
1889 (high_tpt == IWL_INVALID_VALUE) &&
1890 (high != IWL_RATE_INVALID)) {
1891 IWL_DEBUG_RATE(mvm,
1892 "No data about high/low rates. Increase rate\n");
1893 return RS_ACTION_UPSCALE;
1894 }
1895
1896 if ((high_tpt == IWL_INVALID_VALUE) &&
1897 (high != IWL_RATE_INVALID) &&
1898 (low_tpt != IWL_INVALID_VALUE) &&
1899 (low_tpt < current_tpt)) {
1900 IWL_DEBUG_RATE(mvm,
1901 "No data about high rate and low rate is worse. Increase rate\n");
1902 return RS_ACTION_UPSCALE;
1903 }
1904
1905 if ((high_tpt != IWL_INVALID_VALUE) &&
1906 (high_tpt > current_tpt)) {
1907 IWL_DEBUG_RATE(mvm,
1908 "Higher rate is better. Increate rate\n");
1909 return RS_ACTION_UPSCALE;
1910 }
1911
1912 if ((low_tpt != IWL_INVALID_VALUE) &&
1913 (high_tpt != IWL_INVALID_VALUE) &&
1914 (low_tpt < current_tpt) &&
1915 (high_tpt < current_tpt)) {
1916 IWL_DEBUG_RATE(mvm,
1917 "Both high and low are worse. Maintain rate\n");
1918 return RS_ACTION_STAY;
1919 }
1920
1921 if ((low_tpt != IWL_INVALID_VALUE) &&
1922 (low_tpt > current_tpt)) {
1923 IWL_DEBUG_RATE(mvm,
1924 "Lower rate is better\n");
1925 action = RS_ACTION_DOWNSCALE;
1926 goto out;
1927 }
1928
1929 if ((low_tpt == IWL_INVALID_VALUE) &&
1930 (low != IWL_RATE_INVALID)) {
1931 IWL_DEBUG_RATE(mvm,
1932 "No data about lower rate\n");
1933 action = RS_ACTION_DOWNSCALE;
1934 goto out;
1935 }
1936
1937 IWL_DEBUG_RATE(mvm, "Maintain rate\n");
1938
1939out:
1940 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
1941 if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
1942 IWL_DEBUG_RATE(mvm,
1943 "SR is above NO DECREASE. Avoid downscale\n");
1944 action = RS_ACTION_STAY;
1945 } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
1946 IWL_DEBUG_RATE(mvm,
1947 "Current TPT is higher than max expected in low rate. Avoid downscale\n");
1948 action = RS_ACTION_STAY;
1949 } else {
1950 IWL_DEBUG_RATE(mvm, "Decrease rate\n");
1951 }
1952 }
1953
1954 return action;
1955}
1956
1957static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1958 struct iwl_lq_sta *lq_sta)
1959{
1960 /* Our chip supports Tx STBC and the peer is an HT/VHT STA which
1961 * supports STBC of at least 1*SS
1962 */
1963 if (!lq_sta->stbc_capable)
1964 return false;
1965
1966 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
1967 return false;
1968
1969 return true;
1970}
1971
1972static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
1973 int *weaker, int *stronger)
1974{
1975 *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP;
1976 if (*weaker > TPC_MAX_REDUCTION)
1977 *weaker = TPC_INVALID;
1978
1979 *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP;
1980 if (*stronger < 0)
1981 *stronger = TPC_INVALID;
1982}
1983
1984static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1985 struct rs_rate *rate, enum ieee80211_band band)
1986{
1987 int index = rate->index;
1988 bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
1989 bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
1990 !vif->bss_conf.ps);
1991
1992 IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
1993 cam, sta_ps_disabled);
1994 /*
1995 * allow tpc only if power management is enabled, or bt coex
1996 * activity grade allows it and we are on 2.4Ghz.
1997 */
1998 if ((cam || sta_ps_disabled) &&
1999 !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
2000 return false;
2001
2002 IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
2003 if (is_legacy(rate))
2004 return index == IWL_RATE_54M_INDEX;
2005 if (is_ht(rate))
2006 return index == IWL_RATE_MCS_7_INDEX;
2007 if (is_vht(rate))
2008 return index == IWL_RATE_MCS_7_INDEX ||
2009 index == IWL_RATE_MCS_8_INDEX ||
2010 index == IWL_RATE_MCS_9_INDEX;
2011
2012 WARN_ON_ONCE(1);
2013 return false;
2014}
2015
/* Possible outcomes of a TX power control (TPC) decision */
enum tpc_action {
	TPC_ACTION_STAY,	/* keep the current power reduction */
	TPC_ACTION_DECREASE,	/* reduce TX power one more step */
	TPC_ACTION_INCREASE,	/* back off one reduction step */
	/* NOTE(review): "RESTIRCTION" is a typo, kept as-is because call
	 * sites reference this spelling. Means: drop all power reduction.
	 */
	TPC_ACTION_NO_RESTIRCTION,
};
2022
/*
 * Decide the next TX power control step based on the success ratio at
 * the current reduction level and the measured throughput at the
 * current/adjacent levels.
 *
 * @weak/@strong: adjacent reduction indices ("weak" = more reduction,
 *	"strong" = less); TPC_INVALID when at an edge
 * @weak_tpt/@strong_tpt: throughputs there, IWL_INVALID_VALUE if unknown
 */
static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
					 s32 sr, int weak, int strong,
					 int current_tpt,
					 int weak_tpt, int strong_tpt)
{
	/* stay until we have valid tpt */
	if (current_tpt == IWL_INVALID_VALUE) {
		IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
		return TPC_ACTION_STAY;
	}

	/* Too many failures, increase txp */
	if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) ||
	    current_tpt == 0) {
		IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
		return TPC_ACTION_NO_RESTIRCTION;
	}

	/* try decreasing first if applicable */
	if (weak != TPC_INVALID) {
		/* no measurement at the weaker level and the stronger one
		 * doesn't beat us - probe downwards */
		if (weak_tpt == IWL_INVALID_VALUE &&
		    (strong_tpt == IWL_INVALID_VALUE ||
		     current_tpt >= strong_tpt)) {
			IWL_DEBUG_RATE(mvm,
				       "no weak txp measurement. decrease txp\n");
			return TPC_ACTION_DECREASE;
		}

		if (weak_tpt > current_tpt) {
			IWL_DEBUG_RATE(mvm,
				       "lower txp has better tpt. decrease txp\n");
			return TPC_ACTION_DECREASE;
		}
	}

	/* next, increase if needed */
	if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
	    strong != TPC_INVALID) {
		if (weak_tpt == IWL_INVALID_VALUE &&
		    strong_tpt != IWL_INVALID_VALUE &&
		    current_tpt < strong_tpt) {
			IWL_DEBUG_RATE(mvm,
				       "higher txp has better tpt. increase txp\n");
			return TPC_ACTION_INCREASE;
		}

		if (weak_tpt < current_tpt &&
		    (strong_tpt == IWL_INVALID_VALUE ||
		     strong_tpt > current_tpt)) {
			IWL_DEBUG_RATE(mvm,
				       "lower txp has worse tpt. increase txp\n");
			return TPC_ACTION_INCREASE;
		}
	}

	IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
	return TPC_ACTION_STAY;
}
2081
/*
 * Run TX power control scaling for the current rate.
 *
 * Reads the success/throughput windows for the current and adjacent
 * power-reduction levels, asks rs_get_tpc_action() for a decision and
 * applies it to lq_sta->lq.reduced_tpc.
 *
 * Returns true if the reduction level changed, so the caller knows the
 * LQ command must be refreshed.
 */
static bool rs_tpc_perform(struct iwl_mvm *mvm,
			   struct ieee80211_sta *sta,
			   struct iwl_lq_sta *lq_sta,
			   struct iwl_scale_tbl_info *tbl)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_vif *vif = mvm_sta->vif;
	struct ieee80211_chanctx_conf *chanctx_conf;
	enum ieee80211_band band;
	struct iwl_rate_scale_data *window;
	struct rs_rate *rate = &tbl->rate;
	enum tpc_action action;
	s32 sr;
	u8 cur = lq_sta->lq.reduced_tpc;
	int current_tpt;
	int weak, strong;
	int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* debugfs override: force a fixed reduction and skip scaling */
	if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
		IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
			       lq_sta->pers.dbg_fixed_txp_reduction);
		lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction;
		return cur != lq_sta->pers.dbg_fixed_txp_reduction;
	}
#endif

	/* band is needed by rs_tpc_allowed(); chanctx is RCU-protected */
	rcu_read_lock();
	chanctx_conf = rcu_dereference(vif->chanctx_conf);
	if (WARN_ON(!chanctx_conf))
		band = IEEE80211_NUM_BANDS;
	else
		band = chanctx_conf->def.chan->band;
	rcu_read_unlock();

	if (!rs_tpc_allowed(mvm, vif, rate, band)) {
		IWL_DEBUG_RATE(mvm,
			       "tpc is not allowed. remove txp restrictions\n");
		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
		return cur != TPC_NO_REDUCTION;
	}

	rs_get_adjacent_txp(mvm, cur, &weak, &strong);

	/* Collect measured throughputs for current and adjacent rates */
	window = tbl->tpc_win;
	sr = window[cur].success_ratio;
	current_tpt = window[cur].average_tpt;
	if (weak != TPC_INVALID)
		weak_tpt = window[weak].average_tpt;
	if (strong != TPC_INVALID)
		strong_tpt = window[strong].average_tpt;

	IWL_DEBUG_RATE(mvm,
		       "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
		       cur, current_tpt, sr, weak, strong,
		       weak_tpt, strong_tpt);

	action = rs_get_tpc_action(mvm, sr, weak, strong,
				   current_tpt, weak_tpt, strong_tpt);

	/* override actions if we are on the edge */
	if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
		IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
		action = TPC_ACTION_STAY;
	} else if (strong == TPC_INVALID &&
		   (action == TPC_ACTION_INCREASE ||
		    action == TPC_ACTION_NO_RESTIRCTION)) {
		IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
		action = TPC_ACTION_STAY;
	}

	switch (action) {
	case TPC_ACTION_DECREASE:
		lq_sta->lq.reduced_tpc = weak;
		return true;
	case TPC_ACTION_INCREASE:
		lq_sta->lq.reduced_tpc = strong;
		return true;
	case TPC_ACTION_NO_RESTIRCTION:
		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
		return true;
	case TPC_ACTION_STAY:
		/* do nothing */
		break;
	}
	return false;
}
2170
2171/*
2172 * Do rate scaling and search for new modulation mode.
2173 */
2174static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2175 struct ieee80211_sta *sta,
2176 struct iwl_lq_sta *lq_sta,
2177 int tid)
2178{
2179 int low = IWL_RATE_INVALID;
2180 int high = IWL_RATE_INVALID;
2181 int index;
2182 struct iwl_rate_scale_data *window = NULL;
2183 int current_tpt = IWL_INVALID_VALUE;
2184 int low_tpt = IWL_INVALID_VALUE;
2185 int high_tpt = IWL_INVALID_VALUE;
2186 u32 fail_count;
2187 enum rs_action scale_action = RS_ACTION_STAY;
2188 u16 rate_mask;
2189 u8 update_lq = 0;
2190 struct iwl_scale_tbl_info *tbl, *tbl1;
2191 u8 active_tbl = 0;
2192 u8 done_search = 0;
2193 u16 high_low;
2194 s32 sr;
2195 u8 prev_agg = lq_sta->is_agg;
2196 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
2197 struct iwl_mvm_tid_data *tid_data;
2198 struct rs_rate *rate;
2199
2200 lq_sta->is_agg = !!sta_priv->agg_tids;
2201
2202 /*
2203 * Select rate-scale / modulation-mode table to work with in
2204 * the rest of this function: "search" if searching for better
2205 * modulation mode, or "active" if doing rate scaling within a mode.
2206 */
2207 if (!lq_sta->search_better_tbl)
2208 active_tbl = lq_sta->active_tbl;
2209 else
2210 active_tbl = 1 - lq_sta->active_tbl;
2211
2212 tbl = &(lq_sta->lq_info[active_tbl]);
2213 rate = &tbl->rate;
2214
2215 if (prev_agg != lq_sta->is_agg) {
2216 IWL_DEBUG_RATE(mvm,
2217 "Aggregation changed: prev %d current %d. Update expected TPT table\n",
2218 prev_agg, lq_sta->is_agg);
2219 rs_set_expected_tpt_table(lq_sta, tbl);
2220 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2221 }
2222
2223 /* current tx rate */
2224 index = rate->index;
2225
2226 /* rates available for this association, and for modulation mode */
2227 rate_mask = rs_get_supported_rates(lq_sta, rate);
2228
2229 if (!(BIT(index) & rate_mask)) {
2230 IWL_ERR(mvm, "Current Rate is not valid\n");
2231 if (lq_sta->search_better_tbl) {
2232 /* revert to active table if search table is not valid*/
2233 rate->type = LQ_NONE;
2234 lq_sta->search_better_tbl = 0;
2235 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2236 rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
2237 }
2238 return;
2239 }
2240
2241 /* Get expected throughput table and history window for current rate */
2242 if (!tbl->expected_tpt) {
2243 IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
2244 return;
2245 }
2246
2247 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2248 window = &(tbl->win[index]);
2249
2250 /*
2251 * If there is not enough history to calculate actual average
2252 * throughput, keep analyzing results of more tx frames, without
2253 * changing rate or mode (bypass most of the rest of this function).
2254 * Set up new rate table in uCode only if old rate is not supported
2255 * in current association (use new rate found above).
2256 */
2257 fail_count = window->counter - window->success_counter;
2258 if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
2259 (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
2260 IWL_DEBUG_RATE(mvm,
2261 "%s: Test Window: succ %d total %d\n",
2262 rs_pretty_rate(rate),
2263 window->success_counter, window->counter);
2264
2265 /* Can't calculate this yet; not enough history */
2266 window->average_tpt = IWL_INVALID_VALUE;
2267
2268 /* Should we stay with this modulation mode,
2269 * or search for a new one? */
2270 rs_stay_in_table(lq_sta, false);
2271
2272 return;
2273 }
2274
2275 /* If we are searching for better modulation mode, check success. */
2276 if (lq_sta->search_better_tbl) {
2277 /* If good success, continue using the "search" mode;
2278 * no need to send new link quality command, since we're
2279 * continuing to use the setup that we've been trying. */
2280 if (window->average_tpt > lq_sta->last_tpt) {
2281 IWL_DEBUG_RATE(mvm,
2282 "SWITCHING TO NEW TABLE SR: %d "
2283 "cur-tpt %d old-tpt %d\n",
2284 window->success_ratio,
2285 window->average_tpt,
2286 lq_sta->last_tpt);
2287
2288 /* Swap tables; "search" becomes "active" */
2289 lq_sta->active_tbl = active_tbl;
2290 current_tpt = window->average_tpt;
2291 /* Else poor success; go back to mode in "active" table */
2292 } else {
2293 IWL_DEBUG_RATE(mvm,
2294 "GOING BACK TO THE OLD TABLE: SR %d "
2295 "cur-tpt %d old-tpt %d\n",
2296 window->success_ratio,
2297 window->average_tpt,
2298 lq_sta->last_tpt);
2299
2300 /* Nullify "search" table */
2301 rate->type = LQ_NONE;
2302
2303 /* Revert to "active" table */
2304 active_tbl = lq_sta->active_tbl;
2305 tbl = &(lq_sta->lq_info[active_tbl]);
2306
2307 /* Revert to "active" rate and throughput info */
2308 index = tbl->rate.index;
2309 current_tpt = lq_sta->last_tpt;
2310
2311 /* Need to set up a new rate table in uCode */
2312 update_lq = 1;
2313 }
2314
2315 /* Either way, we've made a decision; modulation mode
2316 * search is done, allow rate adjustment next time. */
2317 lq_sta->search_better_tbl = 0;
2318 done_search = 1; /* Don't switch modes below! */
2319 goto lq_update;
2320 }
2321
2322 /* (Else) not in search of better modulation mode, try for better
2323 * starting rate, while staying in this mode. */
2324 high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
2325 low = high_low & 0xff;
2326 high = (high_low >> 8) & 0xff;
2327
2328 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2329
2330 sr = window->success_ratio;
2331
2332 /* Collect measured throughputs for current and adjacent rates */
2333 current_tpt = window->average_tpt;
2334 if (low != IWL_RATE_INVALID)
2335 low_tpt = tbl->win[low].average_tpt;
2336 if (high != IWL_RATE_INVALID)
2337 high_tpt = tbl->win[high].average_tpt;
2338
2339 IWL_DEBUG_RATE(mvm,
2340 "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
2341 rs_pretty_rate(rate), current_tpt, sr,
2342 low, high, low_tpt, high_tpt);
2343
2344 scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
2345 current_tpt, low_tpt, high_tpt);
2346
2347 /* Force a search in case BT doesn't like us being in MIMO */
2348 if (is_mimo(rate) &&
2349 !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
2350 IWL_DEBUG_RATE(mvm,
2351 "BT Coex forbids MIMO. Search for new config\n");
2352 rs_stay_in_table(lq_sta, true);
2353 goto lq_update;
2354 }
2355
2356 switch (scale_action) {
2357 case RS_ACTION_DOWNSCALE:
2358 /* Decrease starting rate, update uCode's rate table */
2359 if (low != IWL_RATE_INVALID) {
2360 update_lq = 1;
2361 index = low;
2362 } else {
2363 IWL_DEBUG_RATE(mvm,
2364 "At the bottom rate. Can't decrease\n");
2365 }
2366
2367 break;
2368 case RS_ACTION_UPSCALE:
2369 /* Increase starting rate, update uCode's rate table */
2370 if (high != IWL_RATE_INVALID) {
2371 update_lq = 1;
2372 index = high;
2373 } else {
2374 IWL_DEBUG_RATE(mvm,
2375 "At the top rate. Can't increase\n");
2376 }
2377
2378 break;
2379 case RS_ACTION_STAY:
2380 /* No change */
2381 if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
2382 update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
2383 break;
2384 default:
2385 break;
2386 }
2387
2388lq_update:
2389 /* Replace uCode's rate table for the destination station. */
2390 if (update_lq) {
2391 tbl->rate.index = index;
2392 if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
2393 rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
2394 rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
2395 }
2396
2397 rs_stay_in_table(lq_sta, false);
2398
2399 /*
2400 * Search for new modulation mode if we're:
2401 * 1) Not changing rates right now
2402 * 2) Not just finishing up a search
2403 * 3) Allowing a new search
2404 */
2405 if (!update_lq && !done_search &&
2406 lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
2407 && window->counter) {
2408 enum rs_column next_column;
2409
2410 /* Save current throughput to compare with "search" throughput*/
2411 lq_sta->last_tpt = current_tpt;
2412
2413 IWL_DEBUG_RATE(mvm,
2414 "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
2415 update_lq, done_search, lq_sta->rs_state,
2416 window->counter);
2417
2418 next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
2419 if (next_column != RS_COLUMN_INVALID) {
2420 int ret = rs_switch_to_column(mvm, lq_sta, sta,
2421 next_column);
2422 if (!ret)
2423 lq_sta->search_better_tbl = 1;
2424 } else {
2425 IWL_DEBUG_RATE(mvm,
2426 "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
2427 lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
2428 }
2429
2430 /* If new "search" mode was selected, set up in uCode table */
2431 if (lq_sta->search_better_tbl) {
2432 /* Access the "search" table, clear its history. */
2433 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2434 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2435
2436 /* Use new "search" start rate */
2437 index = tbl->rate.index;
2438
2439 rs_dump_rate(mvm, &tbl->rate,
2440 "Switch to SEARCH TABLE:");
2441 rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
2442 } else {
2443 done_search = 1;
2444 }
2445 }
2446
2447 if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
2448 /* If the "active" (non-search) mode was legacy,
2449 * and we've tried switching antennas,
2450 * but we haven't been able to try HT modes (not available),
2451 * stay with best antenna legacy modulation for a while
2452 * before next round of mode comparisons. */
2453 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2454 if (is_legacy(&tbl1->rate)) {
2455 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
2456
2457 if (tid != IWL_MAX_TID_COUNT) {
2458 tid_data = &sta_priv->tid_data[tid];
2459 if (tid_data->state != IWL_AGG_OFF) {
2460 IWL_DEBUG_RATE(mvm,
2461 "Stop aggregation on tid %d\n",
2462 tid);
2463 ieee80211_stop_tx_ba_session(sta, tid);
2464 }
2465 }
2466 rs_set_stay_in_table(mvm, 1, lq_sta);
2467 } else {
2468 /* If we're in an HT mode, and all 3 mode switch actions
2469 * have been tried and compared, stay in this best modulation
2470 * mode for a while before next round of mode comparisons. */
2471 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2472 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2473 (tid != IWL_MAX_TID_COUNT)) {
2474 tid_data = &sta_priv->tid_data[tid];
2475 if (tid_data->state == IWL_AGG_OFF) {
2476 IWL_DEBUG_RATE(mvm,
2477 "try to aggregate tid %d\n",
2478 tid);
2479 rs_tl_turn_on_agg(mvm, tid,
2480 lq_sta, sta);
2481 }
2482 }
2483 rs_set_stay_in_table(mvm, 0, lq_sta);
2484 }
2485 }
2486}
2487
/* Entry of the RSSI -> rate lookup tables below.  A rate applies when
 * the measured RSSI is >= the entry's threshold; tables are sorted
 * from highest threshold (fastest rate) down, first match wins. */
struct rs_init_rate_info {
	s8 rssi;	/* minimum RSSI threshold for rate_idx */
	u8 rate_idx;	/* IWL_RATE_* index to use at or above it */
};

/* 2.4 GHz legacy (CCK + OFDM) RSSI -> rate table */
static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
	{ -60, IWL_RATE_54M_INDEX },
	{ -64, IWL_RATE_48M_INDEX },
	{ -68, IWL_RATE_36M_INDEX },
	{ -80, IWL_RATE_24M_INDEX },
	{ -84, IWL_RATE_18M_INDEX },
	{ -85, IWL_RATE_12M_INDEX },
	{ -86, IWL_RATE_11M_INDEX },
	{ -88, IWL_RATE_5M_INDEX  },
	{ -90, IWL_RATE_2M_INDEX  },
	{ S8_MIN, IWL_RATE_1M_INDEX },
};

/* 5 GHz legacy (OFDM only) RSSI -> rate table */
static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
	{ -60, IWL_RATE_54M_INDEX },
	{ -64, IWL_RATE_48M_INDEX },
	{ -72, IWL_RATE_36M_INDEX },
	{ -80, IWL_RATE_24M_INDEX },
	{ -84, IWL_RATE_18M_INDEX },
	{ -85, IWL_RATE_12M_INDEX },
	{ -87, IWL_RATE_9M_INDEX  },
	{ S8_MIN, IWL_RATE_6M_INDEX },
};

/* HT (single stream MCS 0-7) RSSI -> rate table */
static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
	{ -60, IWL_RATE_MCS_7_INDEX },
	{ -64, IWL_RATE_MCS_6_INDEX },
	{ -68, IWL_RATE_MCS_5_INDEX },
	{ -72, IWL_RATE_MCS_4_INDEX },
	{ -80, IWL_RATE_MCS_3_INDEX },
	{ -84, IWL_RATE_MCS_2_INDEX },
	{ -85, IWL_RATE_MCS_1_INDEX },
	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
};

/* VHT 20 MHz RSSI -> rate table (MCS 9 not valid at 20 MHz NSS 1-2) */
static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
	{ -60, IWL_RATE_MCS_8_INDEX },
	{ -64, IWL_RATE_MCS_7_INDEX },
	{ -68, IWL_RATE_MCS_6_INDEX },
	{ -72, IWL_RATE_MCS_5_INDEX },
	{ -80, IWL_RATE_MCS_4_INDEX },
	{ -84, IWL_RATE_MCS_3_INDEX },
	{ -85, IWL_RATE_MCS_2_INDEX },
	{ -87, IWL_RATE_MCS_1_INDEX },
	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
};

/* VHT 40/80 MHz RSSI -> rate table */
static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
	{ -60, IWL_RATE_MCS_9_INDEX },
	{ -64, IWL_RATE_MCS_8_INDEX },
	{ -68, IWL_RATE_MCS_7_INDEX },
	{ -72, IWL_RATE_MCS_6_INDEX },
	{ -80, IWL_RATE_MCS_5_INDEX },
	{ -84, IWL_RATE_MCS_4_INDEX },
	{ -85, IWL_RATE_MCS_3_INDEX },
	{ -87, IWL_RATE_MCS_2_INDEX },
	{ -88, IWL_RATE_MCS_1_INDEX },
	{ S8_MIN, IWL_RATE_MCS_0_INDEX },
};
2552
2553/* Init the optimal rate based on STA caps
2554 * This combined with rssi is used to report the last tx rate
2555 * to userspace when we haven't transmitted enough frames.
2556 */
2557static void rs_init_optimal_rate(struct iwl_mvm *mvm,
2558 struct ieee80211_sta *sta,
2559 struct iwl_lq_sta *lq_sta)
2560{
2561 struct rs_rate *rate = &lq_sta->optimal_rate;
2562
2563 if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
2564 rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
2565 else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
2566 rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
2567 else if (lq_sta->band == IEEE80211_BAND_5GHZ)
2568 rate->type = LQ_LEGACY_A;
2569 else
2570 rate->type = LQ_LEGACY_G;
2571
2572 rate->bw = rs_bw_from_sta_bw(sta);
2573 rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
2574
2575 /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
2576
2577 if (is_mimo(rate)) {
2578 lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
2579 } else if (is_siso(rate)) {
2580 lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
2581 } else {
2582 lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
2583
2584 if (lq_sta->band == IEEE80211_BAND_5GHZ) {
2585 lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
2586 lq_sta->optimal_nentries =
2587 ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
2588 } else {
2589 lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
2590 lq_sta->optimal_nentries =
2591 ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
2592 }
2593 }
2594
2595 if (is_vht(rate)) {
2596 if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
2597 lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
2598 lq_sta->optimal_nentries =
2599 ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
2600 } else {
2601 lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
2602 lq_sta->optimal_nentries =
2603 ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
2604 }
2605 } else if (is_ht(rate)) {
2606 lq_sta->optimal_rates = rs_optimal_rates_ht;
2607 lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
2608 }
2609}
2610
2611/* Compute the optimal rate index based on RSSI */
2612static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
2613 struct iwl_lq_sta *lq_sta)
2614{
2615 struct rs_rate *rate = &lq_sta->optimal_rate;
2616 int i;
2617
2618 rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
2619 BITS_PER_LONG);
2620
2621 for (i = 0; i < lq_sta->optimal_nentries; i++) {
2622 int rate_idx = lq_sta->optimal_rates[i].rate_idx;
2623
2624 if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
2625 (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
2626 rate->index = rate_idx;
2627 break;
2628 }
2629 }
2630
2631 return rate;
2632}
2633
2634/* Choose an initial legacy rate and antenna to use based on the RSSI
2635 * of last Rx
2636 */
2637static void rs_get_initial_rate(struct iwl_mvm *mvm,
2638 struct iwl_lq_sta *lq_sta,
2639 enum ieee80211_band band,
2640 struct rs_rate *rate)
2641{
2642 int i, nentries;
2643 s8 best_rssi = S8_MIN;
2644 u8 best_ant = ANT_NONE;
2645 u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
2646 const struct rs_init_rate_info *initial_rates;
2647
2648 for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
2649 if (!(lq_sta->pers.chains & BIT(i)))
2650 continue;
2651
2652 if (lq_sta->pers.chain_signal[i] > best_rssi) {
2653 best_rssi = lq_sta->pers.chain_signal[i];
2654 best_ant = BIT(i);
2655 }
2656 }
2657
2658 IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n",
2659 rs_pretty_ant(best_ant), best_rssi);
2660
2661 if (best_ant != ANT_A && best_ant != ANT_B)
2662 rate->ant = first_antenna(valid_tx_ant);
2663 else
2664 rate->ant = best_ant;
2665
2666 rate->sgi = false;
2667 rate->ldpc = false;
2668 rate->bw = RATE_MCS_CHAN_WIDTH_20;
2669
2670 rate->index = find_first_bit(&lq_sta->active_legacy_rate,
2671 BITS_PER_LONG);
2672
2673 if (band == IEEE80211_BAND_5GHZ) {
2674 rate->type = LQ_LEGACY_A;
2675 initial_rates = rs_optimal_rates_5ghz_legacy;
2676 nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
2677 } else {
2678 rate->type = LQ_LEGACY_G;
2679 initial_rates = rs_optimal_rates_24ghz_legacy;
2680 nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
2681 }
2682
2683 if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
2684 for (i = 0; i < nentries; i++) {
2685 int rate_idx = initial_rates[i].rate_idx;
2686 if ((best_rssi >= initial_rates[i].rssi) &&
2687 (BIT(rate_idx) & lq_sta->active_legacy_rate)) {
2688 rate->index = rate_idx;
2689 break;
2690 }
2691 }
2692 }
2693
2694 IWL_DEBUG_RATE(mvm, "rate_idx %d ANT %s\n", rate->index,
2695 rs_pretty_ant(rate->ant));
2696}
2697
2698/* Save info about RSSI of last Rx */
2699void rs_update_last_rssi(struct iwl_mvm *mvm,
2700 struct iwl_lq_sta *lq_sta,
2701 struct ieee80211_rx_status *rx_status)
2702{
2703 int i;
2704
2705 lq_sta->pers.chains = rx_status->chains;
2706 lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
2707 lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
2708 lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
2709 lq_sta->pers.last_rssi = S8_MIN;
2710
2711 for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
2712 if (!(lq_sta->pers.chains & BIT(i)))
2713 continue;
2714
2715 if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
2716 lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
2717 }
2718}
2719
2720/**
2721 * rs_initialize_lq - Initialize a station's hardware rate table
2722 *
2723 * The uCode's station table contains a table of fallback rates
2724 * for automatic fallback during transmission.
2725 *
2726 * NOTE: This sets up a default set of values. These will be replaced later
2727 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2728 * rc80211_simple.
2729 *
2730 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2731 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2732 * which requires station table entry to exist).
2733 */
2734static void rs_initialize_lq(struct iwl_mvm *mvm,
2735 struct ieee80211_sta *sta,
2736 struct iwl_lq_sta *lq_sta,
2737 enum ieee80211_band band,
2738 bool init)
2739{
2740 struct iwl_scale_tbl_info *tbl;
2741 struct rs_rate *rate;
2742 u8 active_tbl = 0;
2743
2744 if (!sta || !lq_sta)
2745 return;
2746
2747 if (!lq_sta->search_better_tbl)
2748 active_tbl = lq_sta->active_tbl;
2749 else
2750 active_tbl = 1 - lq_sta->active_tbl;
2751
2752 tbl = &(lq_sta->lq_info[active_tbl]);
2753 rate = &tbl->rate;
2754
2755 rs_get_initial_rate(mvm, lq_sta, band, rate);
2756 rs_init_optimal_rate(mvm, sta, lq_sta);
2757
2758 WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
2759 if (rate->ant == ANT_A)
2760 tbl->column = RS_COLUMN_LEGACY_ANT_A;
2761 else
2762 tbl->column = RS_COLUMN_LEGACY_ANT_B;
2763
2764 rs_set_expected_tpt_table(lq_sta, tbl);
2765 rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
2766 /* TODO restore station should remember the lq cmd */
2767 iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
2768}
2769
2770static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2771 struct ieee80211_tx_rate_control *txrc)
2772{
2773 struct sk_buff *skb = txrc->skb;
2774 struct iwl_op_mode *op_mode __maybe_unused =
2775 (struct iwl_op_mode *)mvm_r;
2776 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
2777 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2778 struct iwl_lq_sta *lq_sta = mvm_sta;
2779 struct rs_rate *optimal_rate;
2780 u32 last_ucode_rate;
2781
2782 if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
2783 /* if vif isn't initialized mvm doesn't know about
2784 * this station, so don't do anything with the it
2785 */
2786 sta = NULL;
2787 mvm_sta = NULL;
2788 }
2789
2790 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2791
2792 /* Treat uninitialized rate scaling data same as non-existing. */
2793 if (lq_sta && !lq_sta->pers.drv) {
2794 IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
2795 mvm_sta = NULL;
2796 }
2797
2798 /* Send management frames and NO_ACK data using lowest rate. */
2799 if (rate_control_send_low(sta, mvm_sta, txrc))
2800 return;
2801
2802 iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
2803 info->band, &info->control.rates[0]);
2804 info->control.rates[0].count = 1;
2805
2806 /* Report the optimal rate based on rssi and STA caps if we haven't
2807 * converged yet (too little traffic) or exploring other modulations
2808 */
2809 if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
2810 optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
2811 last_ucode_rate = ucode_rate_from_rs_rate(mvm,
2812 optimal_rate);
2813 iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
2814 &txrc->reported_rate);
2815 }
2816}
2817
2818static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
2819 gfp_t gfp)
2820{
2821 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
2822 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
2823 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2824 struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
2825
2826 IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
2827
2828 lq_sta->pers.drv = mvm;
2829#ifdef CONFIG_MAC80211_DEBUGFS
2830 lq_sta->pers.dbg_fixed_rate = 0;
2831 lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
2832 lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
2833#endif
2834 lq_sta->pers.chains = 0;
2835 memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
2836 lq_sta->pers.last_rssi = S8_MIN;
2837
2838 return &sta_priv->lq_sta;
2839}
2840
2841static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
2842 int nss)
2843{
2844 u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
2845 (0x3 << (2 * (nss - 1)));
2846 rx_mcs >>= (2 * (nss - 1));
2847
2848 if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
2849 return IWL_RATE_MCS_7_INDEX;
2850 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
2851 return IWL_RATE_MCS_8_INDEX;
2852 else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
2853 return IWL_RATE_MCS_9_INDEX;
2854
2855 WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
2856 return -1;
2857}
2858
2859static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2860 struct ieee80211_sta_vht_cap *vht_cap,
2861 struct iwl_lq_sta *lq_sta)
2862{
2863 int i;
2864 int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
2865
2866 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2867 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2868 if (i == IWL_RATE_9M_INDEX)
2869 continue;
2870
2871 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2872 if (i == IWL_RATE_MCS_9_INDEX &&
2873 sta->bandwidth == IEEE80211_STA_RX_BW_20)
2874 continue;
2875
2876 lq_sta->active_siso_rate |= BIT(i);
2877 }
2878 }
2879
2880 if (sta->rx_nss < 2)
2881 return;
2882
2883 highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
2884 if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
2885 for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
2886 if (i == IWL_RATE_9M_INDEX)
2887 continue;
2888
2889 /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
2890 if (i == IWL_RATE_MCS_9_INDEX &&
2891 sta->bandwidth == IEEE80211_STA_RX_BW_20)
2892 continue;
2893
2894 lq_sta->active_mimo2_rate |= BIT(i);
2895 }
2896 }
2897}
2898
2899static void rs_ht_init(struct iwl_mvm *mvm,
2900 struct ieee80211_sta *sta,
2901 struct iwl_lq_sta *lq_sta,
2902 struct ieee80211_sta_ht_cap *ht_cap)
2903{
2904 /* active_siso_rate mask includes 9 MBits (bit 5),
2905 * and CCK (bits 0-3), supp_rates[] does not;
2906 * shift to convert format, force 9 MBits off.
2907 */
2908 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2909 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2910 lq_sta->active_siso_rate &= ~((u16)0x2);
2911 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2912
2913 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2914 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2915 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2916 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2917
2918 if (mvm->cfg->ht_params->ldpc &&
2919 (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
2920 lq_sta->ldpc = true;
2921
2922 if (mvm->cfg->ht_params->stbc &&
2923 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2924 (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
2925 lq_sta->stbc_capable = true;
2926
2927 lq_sta->is_vht = false;
2928}
2929
2930static void rs_vht_init(struct iwl_mvm *mvm,
2931 struct ieee80211_sta *sta,
2932 struct iwl_lq_sta *lq_sta,
2933 struct ieee80211_sta_vht_cap *vht_cap)
2934{
2935 rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
2936
2937 if (mvm->cfg->ht_params->ldpc &&
2938 (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
2939 lq_sta->ldpc = true;
2940
2941 if (mvm->cfg->ht_params->stbc &&
2942 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2943 (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
2944 lq_sta->stbc_capable = true;
2945
2946 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
2947 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2948 (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
2949 lq_sta->bfer_capable = true;
2950
2951 lq_sta->is_vht = true;
2952}
2953
2954#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Clear the debugfs RX frame statistics (under drv_stats_lock) */
static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
{
	spin_lock_bh(&mvm->drv_stats_lock);
	memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
	spin_unlock_bh(&mvm->drv_stats_lock);
}
2961
2962void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
2963{
2964 u8 nss = 0, mcs = 0;
2965
2966 spin_lock(&mvm->drv_stats_lock);
2967
2968 if (agg)
2969 mvm->drv_rx_stats.agg_frames++;
2970
2971 mvm->drv_rx_stats.success_frames++;
2972
2973 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
2974 case RATE_MCS_CHAN_WIDTH_20:
2975 mvm->drv_rx_stats.bw_20_frames++;
2976 break;
2977 case RATE_MCS_CHAN_WIDTH_40:
2978 mvm->drv_rx_stats.bw_40_frames++;
2979 break;
2980 case RATE_MCS_CHAN_WIDTH_80:
2981 mvm->drv_rx_stats.bw_80_frames++;
2982 break;
2983 default:
2984 WARN_ONCE(1, "bad BW. rate 0x%x", rate);
2985 }
2986
2987 if (rate & RATE_MCS_HT_MSK) {
2988 mvm->drv_rx_stats.ht_frames++;
2989 mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
2990 nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
2991 } else if (rate & RATE_MCS_VHT_MSK) {
2992 mvm->drv_rx_stats.vht_frames++;
2993 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
2994 nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
2995 RATE_VHT_MCS_NSS_POS) + 1;
2996 } else {
2997 mvm->drv_rx_stats.legacy_frames++;
2998 }
2999
3000 if (nss == 1)
3001 mvm->drv_rx_stats.siso_frames++;
3002 else if (nss == 2)
3003 mvm->drv_rx_stats.mimo2_frames++;
3004
3005 if (rate & RATE_MCS_SGI_MSK)
3006 mvm->drv_rx_stats.sgi_frames++;
3007 else
3008 mvm->drv_rx_stats.ngi_frames++;
3009
3010 mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
3011 mvm->drv_rx_stats.last_frame_idx =
3012 (mvm->drv_rx_stats.last_frame_idx + 1) %
3013 ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
3014
3015 spin_unlock(&mvm->drv_stats_lock);
3016}
3017#endif
3018
3019/*
3020 * Called after adding a new station to initialize rate scaling
3021 */
3022void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3023 enum ieee80211_band band, bool init)
3024{
3025 int i, j;
3026 struct ieee80211_hw *hw = mvm->hw;
3027 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
3028 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
3029 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
3030 struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
3031 struct ieee80211_supported_band *sband;
3032 unsigned long supp; /* must be unsigned long for for_each_set_bit */
3033
3034 /* clear all non-persistent lq data */
3035 memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
3036
3037 sband = hw->wiphy->bands[band];
3038
3039 lq_sta->lq.sta_id = sta_priv->sta_id;
3040
3041 for (j = 0; j < LQ_SIZE; j++)
3042 rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
3043
3044 lq_sta->flush_timer = 0;
3045 lq_sta->last_tx = jiffies;
3046
3047 IWL_DEBUG_RATE(mvm,
3048 "LQ: *** rate scale station global init for station %d ***\n",
3049 sta_priv->sta_id);
3050 /* TODO: what is a good starting rate for STA? About middle? Maybe not
3051 * the lowest or the highest rate.. Could consider using RSSI from
3052 * previous packets? Need to have IEEE 802.1X auth succeed immediately
3053 * after assoc.. */
3054
3055 lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX;
3056 lq_sta->band = sband->band;
3057 /*
3058 * active legacy rates as per supported rates bitmap
3059 */
3060 supp = sta->supp_rates[sband->band];
3061 lq_sta->active_legacy_rate = 0;
3062 for_each_set_bit(i, &supp, BITS_PER_LONG)
3063 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
3064
3065 /* TODO: should probably account for rx_highest for both HT/VHT */
3066 if (!vht_cap || !vht_cap->vht_supported)
3067 rs_ht_init(mvm, sta, lq_sta, ht_cap);
3068 else
3069 rs_vht_init(mvm, sta, lq_sta, vht_cap);
3070
3071 lq_sta->max_legacy_rate_idx =
3072 rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
3073 lq_sta->max_siso_rate_idx =
3074 rs_get_max_rate_from_mask(lq_sta->active_siso_rate);
3075 lq_sta->max_mimo2_rate_idx =
3076 rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
3077
3078 IWL_DEBUG_RATE(mvm,
3079 "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
3080 lq_sta->active_legacy_rate,
3081 lq_sta->active_siso_rate,
3082 lq_sta->active_mimo2_rate,
3083 lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
3084 lq_sta->bfer_capable);
3085 IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
3086 lq_sta->max_legacy_rate_idx,
3087 lq_sta->max_siso_rate_idx,
3088 lq_sta->max_mimo2_rate_idx);
3089
3090 /* These values will be overridden later */
3091 lq_sta->lq.single_stream_ant_msk =
3092 first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
3093 lq_sta->lq.dual_stream_ant_msk = ANT_AB;
3094
3095 /* as default allow aggregation for all tids */
3096 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
3097 lq_sta->is_agg = 0;
3098#ifdef CONFIG_IWLWIFI_DEBUGFS
3099 iwl_mvm_reset_frame_stats(mvm);
3100#endif
3101 rs_initialize_lq(mvm, sta, lq_sta, band, init);
3102}
3103
3104static void rs_rate_update(void *mvm_r,
3105 struct ieee80211_supported_band *sband,
3106 struct cfg80211_chan_def *chandef,
3107 struct ieee80211_sta *sta, void *priv_sta,
3108 u32 changed)
3109{
3110 u8 tid;
3111 struct iwl_op_mode *op_mode =
3112 (struct iwl_op_mode *)mvm_r;
3113 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
3114
3115 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
3116 return;
3117
3118 /* Stop any ongoing aggregations as rs starts off assuming no agg */
3119 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
3120 ieee80211_stop_tx_ba_session(sta, tid);
3121
3122 iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
3123}
3124
3125#ifdef CONFIG_MAC80211_DEBUGFS
3126static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
3127 struct iwl_lq_cmd *lq_cmd,
3128 enum ieee80211_band band,
3129 u32 ucode_rate)
3130{
3131 struct rs_rate rate;
3132 int i;
3133 int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
3134 __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
3135 u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
3136
3137 for (i = 0; i < num_rates; i++)
3138 lq_cmd->rs_table[i] = ucode_rate_le32;
3139
3140 rs_rate_from_ucode_rate(ucode_rate, band, &rate);
3141
3142 if (is_mimo(&rate))
3143 lq_cmd->mimo_delim = num_rates - 1;
3144 else
3145 lq_cmd->mimo_delim = 0;
3146
3147 lq_cmd->reduced_tpc = 0;
3148
3149 if (num_of_ant(ant) == 1)
3150 lq_cmd->single_stream_ant_msk = ant;
3151
3152 lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3153}
3154#endif /* CONFIG_MAC80211_DEBUGFS */
3155
3156static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
3157 struct iwl_lq_sta *lq_sta,
3158 struct rs_rate *rate,
3159 __le32 *rs_table, int *rs_table_index,
3160 int num_rates, int num_retries,
3161 u8 valid_tx_ant, bool toggle_ant)
3162{
3163 int i, j;
3164 __le32 ucode_rate;
3165 bool bottom_reached = false;
3166 int prev_rate_idx = rate->index;
3167 int end = LINK_QUAL_MAX_RETRY_NUM;
3168 int index = *rs_table_index;
3169
3170 for (i = 0; i < num_rates && index < end; i++) {
3171 for (j = 0; j < num_retries && index < end; j++, index++) {
3172 ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
3173 rate));
3174 rs_table[index] = ucode_rate;
3175 if (toggle_ant)
3176 rs_toggle_antenna(valid_tx_ant, rate);
3177 }
3178
3179 prev_rate_idx = rate->index;
3180 bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
3181 if (bottom_reached && !is_legacy(rate))
3182 break;
3183 }
3184
3185 if (!bottom_reached && !is_legacy(rate))
3186 rate->index = prev_rate_idx;
3187
3188 *rs_table_index = index;
3189}
3190
/* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI
 * column the rate table should look like this:
 *
 * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
 * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
 * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
 * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
 * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
 * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
 * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
 * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
 * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
 * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
 * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
 * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
 * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
 * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
 * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
 * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
 */
static void rs_build_rates_table(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta,
				 struct iwl_lq_sta *lq_sta,
				 const struct rs_rate *initial_rate)
{
	struct rs_rate rate;
	int num_rates, num_retries, index = 0;
	u8 valid_tx_ant = 0;
	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
	bool toggle_ant = false;

	/* Work on a local copy; rate is mutated as we descend columns */
	memcpy(&rate, initial_rate, sizeof(rate));

	valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);

	/* TODO: remove old API when min FW API hits 14 */
	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
	    rs_stbc_allow(mvm, sta, lq_sta))
		rate.stbc = true;

	/* First section: stay in the initial column (SISO/MIMO/legacy) */
	if (is_siso(&rate)) {
		num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
		num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
	} else if (is_mimo(&rate)) {
		num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES;
		num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
	} else {
		num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
		num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
		toggle_ant = true;
	}

	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
				 num_rates, num_retries, valid_tx_ant,
				 toggle_ant);

	/* Second section: drop one column (MIMO -> SISO, SISO -> legacy) */
	rs_get_lower_rate_down_column(lq_sta, &rate);

	if (is_siso(&rate)) {
		num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES;
		num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES;
		/* entries from here on are single-stream */
		lq_cmd->mimo_delim = index;
	} else if (is_legacy(&rate)) {
		num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
		num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
	} else {
		WARN_ON_ONCE(1);
	}

	toggle_ant = true;

	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
				 num_rates, num_retries, valid_tx_ant,
				 toggle_ant);

	/* Final section: legacy fallback rates fill the rest of the table */
	rs_get_lower_rate_down_column(lq_sta, &rate);

	num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
	num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;

	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
				 num_rates, num_retries, valid_tx_ant,
				 toggle_ant);

}
3276
3277struct rs_bfer_active_iter_data {
3278 struct ieee80211_sta *exclude_sta;
3279 struct iwl_mvm_sta *bfer_mvmsta;
3280};
3281
3282static void rs_bfer_active_iter(void *_data,
3283 struct ieee80211_sta *sta)
3284{
3285 struct rs_bfer_active_iter_data *data = _data;
3286 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3287 struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
3288 u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
3289
3290 if (sta == data->exclude_sta)
3291 return;
3292
3293 /* The current sta has BFER allowed */
3294 if (ss_params & LQ_SS_BFER_ALLOWED) {
3295 WARN_ON_ONCE(data->bfer_mvmsta != NULL);
3296
3297 data->bfer_mvmsta = mvmsta;
3298 }
3299}
3300
3301static int rs_bfer_priority(struct iwl_mvm_sta *sta)
3302{
3303 int prio = -1;
3304 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
3305
3306 switch (viftype) {
3307 case NL80211_IFTYPE_AP:
3308 case NL80211_IFTYPE_P2P_GO:
3309 prio = 3;
3310 break;
3311 case NL80211_IFTYPE_P2P_CLIENT:
3312 prio = 2;
3313 break;
3314 case NL80211_IFTYPE_STATION:
3315 prio = 1;
3316 break;
3317 default:
3318 WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
3319 prio = -1;
3320 }
3321
3322 return prio;
3323}
3324
/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
				struct iwl_mvm_sta *sta2)
{
	int prio1 = rs_bfer_priority(sta1);
	int prio2 = rs_bfer_priority(sta2);

	if (prio1 == prio2)
		return 0;

	return prio1 > prio2 ? 1 : -1;
}
3338
/*
 * Decide the single-stream parameters (STBC / BFER) for the LQ command.
 * Only one station system-wide may have BFER active; if a lower-priority
 * station currently holds it, it is revoked and handed to this one.
 */
static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta,
				struct iwl_lq_sta *lq_sta,
				const struct rs_rate *initial_rate)
{
	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct rs_bfer_active_iter_data data = {
		.exclude_sta = sta,
		.bfer_mvmsta = NULL,
	};
	struct iwl_mvm_sta *bfer_mvmsta = NULL;
	u32 ss_params = LQ_SS_PARAMS_VALID;

	/* BT coex may veto MIMO; then no single-stream tweaks are allowed */
	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
		goto out;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* Check if forcing the decision is configured.
	 * Note that SISO is forced by not allowing STBC or BFER
	 */
	if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
		ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
	else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
		ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);

	if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
		IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
			       lq_sta->pers.ss_force);
		goto out;
	}
#endif

	if (lq_sta->stbc_capable)
		ss_params |= LQ_SS_STBC_1SS_ALLOWED;

	if (!lq_sta->bfer_capable)
		goto out;

	/* Look for another station that currently has BFER active */
	ieee80211_iterate_stations_atomic(mvm->hw,
					  rs_bfer_active_iter,
					  &data);
	bfer_mvmsta = data.bfer_mvmsta;

	/* This code is safe as it doesn't run concurrently for different
	 * stations. This is guaranteed by the fact that calls to
	 * ieee80211_tx_status wouldn't run concurrently for a single HW.
	 */
	if (!bfer_mvmsta) {
		IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");

		ss_params |= LQ_SS_BFER_ALLOWED;
		goto out;
	}

	IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
		       bfer_mvmsta->sta_id);

	/* Disallow BFER on another STA if active and we're a higher priority */
	if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
		struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
		u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);

		/* Revoke BFER from the current holder and push its LQ cmd */
		bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
		bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
		iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);

		ss_params |= LQ_SS_BFER_ALLOWED;
		IWL_DEBUG_RATE(mvm,
			       "Lower priority BFER sta found (%d). Switch BFER\n",
			       bfer_mvmsta->sta_id);
	}
out:
	lq_cmd->ss_params = cpu_to_le32(ss_params);
}
3414
/*
 * Populate the LQ (link quality) command for this station: retry rate
 * table, single-stream params, antenna masks and aggregation limits.
 */
static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
			   struct ieee80211_sta *sta,
			   struct iwl_lq_sta *lq_sta,
			   const struct rs_rate *initial_rate)
{
	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_mvm_vif *mvmvif;

	lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START;
	lq_cmd->agg_time_limit =
		cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT);

#ifdef CONFIG_MAC80211_DEBUGFS
	/* A debugfs-forced fixed rate overrides everything below;
	 * sta/initial_rate may be NULL in that path (see rs_program_fix_rate)
	 */
	if (lq_sta->pers.dbg_fixed_rate) {
		rs_build_rates_table_from_fixed(mvm, lq_cmd,
						lq_sta->band,
						lq_sta->pers.dbg_fixed_rate);
		return;
	}
#endif
	if (WARN_ON_ONCE(!sta || !initial_rate))
		return;

	rs_build_rates_table(mvm, sta, lq_sta, initial_rate);

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
		rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

	if (num_of_ant(initial_rate->ant) == 1)
		lq_cmd->single_stream_ant_msk = initial_rate->ant;

	lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	/*
	 * In case of low latency, tell the firmware to leave a frame in the
	 * Tx Fifo so that it can start a transaction in the same TxOP. This
	 * basically allows the firmware to send bursts.
	 */
	if (iwl_mvm_vif_low_latency(mvmvif)) {
		lq_cmd->agg_frame_cnt_limit--;

		if (mvm->low_latency_agg_frame_limit)
			lq_cmd->agg_frame_cnt_limit =
				min(lq_cmd->agg_frame_cnt_limit,
				    mvm->low_latency_agg_frame_limit);
	}

	/* P2P uses RTS protection for all frames */
	if (mvmsta->vif->p2p)
		lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK;

	lq_cmd->agg_time_limit =
		cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
}
3472
3473static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
3474{
3475 return hw->priv;
3476}
/* rate scale requires free function to be implemented */
static void rs_free(void *mvm_rate)
{
}
3482
3483static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
3484 void *mvm_sta)
3485{
3486 struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
3487 struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
3488
3489 IWL_DEBUG_RATE(mvm, "enter\n");
3490 IWL_DEBUG_RATE(mvm, "leave\n");
3491}
3492
3493#ifdef CONFIG_MAC80211_DEBUGFS
3494int rs_pretty_print_rate(char *buf, const u32 rate)
3495{
3496
3497 char *type, *bw;
3498 u8 mcs = 0, nss = 0;
3499 u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
3500
3501 if (!(rate & RATE_MCS_HT_MSK) &&
3502 !(rate & RATE_MCS_VHT_MSK)) {
3503 int index = iwl_hwrate_to_plcp_idx(rate);
3504
3505 return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
3506 rs_pretty_ant(ant),
3507 index == IWL_RATE_INVALID ? "BAD" :
3508 iwl_rate_mcs[index].mbps);
3509 }
3510
3511 if (rate & RATE_MCS_VHT_MSK) {
3512 type = "VHT";
3513 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
3514 nss = ((rate & RATE_VHT_MCS_NSS_MSK)
3515 >> RATE_VHT_MCS_NSS_POS) + 1;
3516 } else if (rate & RATE_MCS_HT_MSK) {
3517 type = "HT";
3518 mcs = rate & RATE_HT_MCS_INDEX_MSK;
3519 } else {
3520 type = "Unknown"; /* shouldn't happen */
3521 }
3522
3523 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
3524 case RATE_MCS_CHAN_WIDTH_20:
3525 bw = "20Mhz";
3526 break;
3527 case RATE_MCS_CHAN_WIDTH_40:
3528 bw = "40Mhz";
3529 break;
3530 case RATE_MCS_CHAN_WIDTH_80:
3531 bw = "80Mhz";
3532 break;
3533 case RATE_MCS_CHAN_WIDTH_160:
3534 bw = "160Mhz";
3535 break;
3536 default:
3537 bw = "BAD BW";
3538 }
3539
3540 return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
3541 type, rs_pretty_ant(ant), bw, mcs, nss,
3542 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
3543 (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
3544 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
3545 (rate & RATE_MCS_BF_MSK) ? "BF " : "",
3546 (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
3547}
3548
3549/**
3550 * Program the device to use fixed rate for frame transmit
3551 * This is for debugging/testing only
3552 * once the device start use fixed rate, we need to reload the module
3553 * to being back the normal operation.
3554 */
3555static void rs_program_fix_rate(struct iwl_mvm *mvm,
3556 struct iwl_lq_sta *lq_sta)
3557{
3558 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
3559 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
3560 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
3561
3562 IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
3563 lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate);
3564
3565 if (lq_sta->pers.dbg_fixed_rate) {
3566 rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
3567 iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
3568 }
3569}
3570
3571static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3572 const char __user *user_buf, size_t count, loff_t *ppos)
3573{
3574 struct iwl_lq_sta *lq_sta = file->private_data;
3575 struct iwl_mvm *mvm;
3576 char buf[64];
3577 size_t buf_size;
3578 u32 parsed_rate;
3579
3580 mvm = lq_sta->pers.drv;
3581 memset(buf, 0, sizeof(buf));
3582 buf_size = min(count, sizeof(buf) - 1);
3583 if (copy_from_user(buf, user_buf, buf_size))
3584 return -EFAULT;
3585
3586 if (sscanf(buf, "%x", &parsed_rate) == 1)
3587 lq_sta->pers.dbg_fixed_rate = parsed_rate;
3588 else
3589 lq_sta->pers.dbg_fixed_rate = 0;
3590
3591 rs_program_fix_rate(mvm, lq_sta);
3592
3593 return count;
3594}
3595
/* debugfs read: dump the full rate-scale state for this station -
 * counters, antenna config, current column, LQ command fields and the
 * pretty-printed retry rate table.
 */
static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i = 0;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;
	struct iwl_mvm *mvm;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct rs_rate *rate = &tbl->rate;
	u32 ss_params;
	mvm = lq_sta->pers.drv;
	buff = kmalloc(2048, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
			lq_sta->total_failed, lq_sta->total_success,
			lq_sta->active_legacy_rate);
	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
			lq_sta->pers.dbg_fixed_rate);
	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
	desc += sprintf(buff+desc, "lq type %s\n",
			(is_legacy(rate)) ? "legacy" :
			is_vht(rate) ? "VHT" : "HT");
	/* Extra detail only meaningful for HT/VHT columns */
	if (!is_legacy(rate)) {
		desc += sprintf(buff + desc, " %s",
		   (is_siso(rate)) ? "SISO" : "MIMO2");
		desc += sprintf(buff + desc, " %s",
		   (is_ht20(rate)) ? "20MHz" :
		   (is_ht40(rate)) ? "40MHz" :
		   (is_ht80(rate)) ? "80Mhz" : "BAD BW");
		desc += sprintf(buff + desc, " %s %s %s\n",
		   (rate->sgi) ? "SGI" : "NGI",
		   (rate->ldpc) ? "LDPC" : "BCC",
		   (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
			lq_sta->last_rate_n_flags);
	desc += sprintf(buff+desc,
			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
			lq_sta->lq.flags,
			lq_sta->lq.mimo_delim,
			lq_sta->lq.single_stream_ant_msk,
			lq_sta->lq.dual_stream_ant_msk);

	desc += sprintf(buff+desc,
			"agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
			le16_to_cpu(lq_sta->lq.agg_time_limit),
			lq_sta->lq.agg_disable_start_th,
			lq_sta->lq.agg_frame_cnt_limit);

	desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
	ss_params = le32_to_cpu(lq_sta->lq.ss_params);
	desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
			(ss_params & LQ_SS_PARAMS_VALID) ?
			"VALID" : "INVALID",
			(ss_params & LQ_SS_BFER_ALLOWED) ?
			", BFER" : "",
			(ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
			", STBC" : "",
			(ss_params & LQ_SS_FORCE) ?
			", FORCE" : "");
	desc += sprintf(buff+desc,
			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
			lq_sta->lq.initial_rate_index[0],
			lq_sta->lq.initial_rate_index[1],
			lq_sta->lq.initial_rate_index[2],
			lq_sta->lq.initial_rate_index[3]);

	/* Dump the whole LQ retry rate table, one line per entry */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);

		desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
		desc += rs_pretty_print_rate(buff+desc, r);
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
3683
3684static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
3685 .write = rs_sta_dbgfs_scale_table_write,
3686 .read = rs_sta_dbgfs_scale_table_read,
3687 .open = simple_open,
3688 .llseek = default_llseek,
3689};
/* debugfs read: dump per-table (active and search) rate windows with
 * attempt/success counters and success ratios.
 */
static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i, j;
	ssize_t ret;
	struct iwl_scale_tbl_info *tbl;
	struct rs_rate *rate;
	struct iwl_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		tbl = &(lq_sta->lq_info[i]);
		rate = &tbl->rate;
		/* "*" marks the active table, "x" the search table */
		desc += sprintf(buff+desc,
				"%s type=%d SGI=%d BW=%s DUP=0\n"
				"index=%d\n",
				lq_sta->active_tbl == i ? "*" : "x",
				rate->type,
				rate->sgi,
				is_ht20(rate) ? "20Mhz" :
				is_ht40(rate) ? "40Mhz" :
				is_ht80(rate) ? "80Mhz" : "ERR",
				rate->index);
		for (j = 0; j < IWL_RATE_COUNT; j++) {
			desc += sprintf(buff+desc,
					"counter=%d success=%d %%=%d\n",
					tbl->win[j].counter,
					tbl->win[j].success_counter,
					tbl->win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
3730
3731static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
3732 .read = rs_sta_dbgfs_stats_table_read,
3733 .open = simple_open,
3734 .llseek = default_llseek,
3735};
3736
/* debugfs read: emit driver tx statistics as CSV - one row per rs column,
 * one "success/total" cell per rate.
 */
static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
			char __user *user_buf,
			size_t count, loff_t *ppos)
{
	static const char * const column_name[] = {
		[RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
		[RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
		[RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
		[RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
		[RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
		[RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
		[RS_COLUMN_MIMO2] = "MIMO2",
		[RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
	};

	static const char * const rate_name[] = {
		[IWL_RATE_1M_INDEX] = "1M",
		[IWL_RATE_2M_INDEX] = "2M",
		[IWL_RATE_5M_INDEX] = "5.5M",
		[IWL_RATE_11M_INDEX] = "11M",
		[IWL_RATE_6M_INDEX] = "6M|MCS0",
		[IWL_RATE_9M_INDEX] = "9M",
		[IWL_RATE_12M_INDEX] = "12M|MCS1",
		[IWL_RATE_18M_INDEX] = "18M|MCS2",
		[IWL_RATE_24M_INDEX] = "24M|MCS3",
		[IWL_RATE_36M_INDEX] = "36M|MCS4",
		[IWL_RATE_48M_INDEX] = "48M|MCS5",
		[IWL_RATE_54M_INDEX] = "54M|MCS6",
		[IWL_RATE_MCS_7_INDEX] = "MCS7",
		[IWL_RATE_MCS_8_INDEX] = "MCS8",
		[IWL_RATE_MCS_9_INDEX] = "MCS9",
	};

	char *buff, *pos, *endpos;
	int col, rate;
	ssize_t ret;
	struct iwl_lq_sta *lq_sta = file->private_data;
	struct rs_rate_stats *stats;
	static const size_t bufsz = 1024;

	buff = kmalloc(bufsz, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	pos = buff;
	endpos = pos + bufsz;

	/* Header row: column label followed by all rate names */
	pos += scnprintf(pos, endpos - pos, "COLUMN,");
	for (rate = 0; rate < IWL_RATE_COUNT; rate++)
		pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
	pos += scnprintf(pos, endpos - pos, "\n");

	for (col = 0; col < RS_COLUMN_COUNT; col++) {
		pos += scnprintf(pos, endpos - pos,
				 "%s,", column_name[col]);

		for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
			stats = &(lq_sta->pers.tx_stats[col][rate]);
			pos += scnprintf(pos, endpos - pos,
					 "%llu/%llu,",
					 stats->success,
					 stats->total);
		}
		pos += scnprintf(pos, endpos - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
	kfree(buff);
	return ret;
}
3807
3808static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
3809 const char __user *user_buf,
3810 size_t count, loff_t *ppos)
3811{
3812 struct iwl_lq_sta *lq_sta = file->private_data;
3813 memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats));
3814
3815 return count;
3816}
3817
3818static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
3819 .read = rs_sta_dbgfs_drv_tx_stats_read,
3820 .write = rs_sta_dbgfs_drv_tx_stats_write,
3821 .open = simple_open,
3822 .llseek = default_llseek,
3823};
3824
3825static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
3826 char __user *user_buf,
3827 size_t count, loff_t *ppos)
3828{
3829 struct iwl_lq_sta *lq_sta = file->private_data;
3830 char buf[12];
3831 int bufsz = sizeof(buf);
3832 int pos = 0;
3833 static const char * const ss_force_name[] = {
3834 [RS_SS_FORCE_NONE] = "none",
3835 [RS_SS_FORCE_STBC] = "stbc",
3836 [RS_SS_FORCE_BFER] = "bfer",
3837 [RS_SS_FORCE_SISO] = "siso",
3838 };
3839
3840 pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
3841 ss_force_name[lq_sta->pers.ss_force]);
3842 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
3843}
3844
3845static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
3846 size_t count, loff_t *ppos)
3847{
3848 struct iwl_mvm *mvm = lq_sta->pers.drv;
3849 int ret = 0;
3850
3851 if (!strncmp("none", buf, 4)) {
3852 lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
3853 } else if (!strncmp("siso", buf, 4)) {
3854 lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
3855 } else if (!strncmp("stbc", buf, 4)) {
3856 if (lq_sta->stbc_capable) {
3857 lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
3858 } else {
3859 IWL_ERR(mvm,
3860 "can't force STBC. peer doesn't support\n");
3861 ret = -EINVAL;
3862 }
3863 } else if (!strncmp("bfer", buf, 4)) {
3864 if (lq_sta->bfer_capable) {
3865 lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
3866 } else {
3867 IWL_ERR(mvm,
3868 "can't force BFER. peer doesn't support\n");
3869 ret = -EINVAL;
3870 }
3871 } else {
3872 IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
3873 ret = -EINVAL;
3874 }
3875 return ret ?: count;
3876}
3877
/* Instantiate the generic mvm debugfs read/write fops generator for
 * per-station files whose private data is a struct iwl_lq_sta.
 */
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
/* Create one debugfs file for this station; jump to err on failure */
#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do {	\
		if (!debugfs_create_file(#name, mode, parent, lq_sta,	\
					 &iwl_dbgfs_##name##_ops))	\
			goto err;				\
	} while (0)

/* Generates iwl_dbgfs_ss_force_ops from the read/write handlers above */
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
3887
/* Create the per-station rate-scale debugfs files under @dir */
static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	struct iwl_mvm_sta *mvmsta;

	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);

	if (!mvmsta->vif)
		return;

	debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
			    lq_sta, &rs_sta_dbgfs_scale_table_ops);
	debugfs_create_file("rate_stats_table", S_IRUSR, dir,
			    lq_sta, &rs_sta_dbgfs_stats_table_ops);
	debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
			    lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
	debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
			  &lq_sta->tx_agg_tid_en);
	debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
			  &lq_sta->pers.dbg_fixed_txp_reduction);

	/* expands to debugfs_create_file(); jumps to err on failure */
	MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR);
	return;
err:
	IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
}
3914
/* Nothing to do: mac80211 removes the per-station debugfs dir for us */
static void rs_remove_debugfs(void *mvm, void *mvm_sta)
{
}
3918#endif
3919
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it.
 */
static void rs_rate_init_stub(void *mvm_r,
			      struct ieee80211_supported_band *sband,
			      struct cfg80211_chan_def *chandef,
			      struct ieee80211_sta *sta, void *mvm_sta)
{
}
3931
/* mac80211 rate-control ops implemented by the mvm rate scaling algorithm */
static const struct rate_control_ops rs_mvm_ops = {
	.name = RS_NAME,
	.tx_status = rs_mac80211_tx_status,
	.get_rate = rs_get_rate,
	.rate_init = rs_rate_init_stub,
	.alloc = rs_alloc,
	.free = rs_free,
	.alloc_sta = rs_alloc_sta,
	.free_sta = rs_free_sta,
	.rate_update = rs_rate_update,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = rs_add_debugfs,
	.remove_sta_debugfs = rs_remove_debugfs,
#endif
};
3947
/* Register the mvm rate-scaling algorithm with mac80211 */
int iwl_mvm_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_mvm_ops);
}
3952
/* Unregister the mvm rate-scaling algorithm from mac80211 */
void iwl_mvm_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_mvm_ops);
}
3957
/**
 * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
 * Tx protection, according to this request and previous requests,
 * and send the LQ command.
 * @mvm: The mvm component
 * @mvmsta: The station
 * @enable: Enable Tx protection?
 */
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
			  bool enable)
{
	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;

	lockdep_assert_held(&mvm->mutex);

	/* tx_protection refcounts the requests: RTS stays on while > 0 */
	if (enable) {
		if (mvmsta->tx_protection == 0)
			lq->flags |= LQ_FLAG_USE_RTS_MSK;
		mvmsta->tx_protection++;
	} else {
		mvmsta->tx_protection--;
		if (mvmsta->tx_protection == 0)
			lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
	}

	return iwl_mvm_send_lq_cmd(mvm, lq, false);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
new file mode 100644
index 000000000000..81314ad9ebe0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -0,0 +1,392 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2015 Intel Mobile Communications GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 *
19 * The full GNU General Public License is included in this distribution in the
20 * file called LICENSE.
21 *
22 * Contact Information:
23 * Intel Linux Wireless <ilw@linux.intel.com>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 *****************************************************************************/
27
28#ifndef __rs_h__
29#define __rs_h__
30
31#include <net/mac80211.h>
32
33#include "iwl-config.h"
34
35#include "fw-api.h"
36#include "iwl-trans.h"
37
38struct iwl_rs_rate_info {
39 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
40 u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
41 u8 plcp_ht_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
42 u8 plcp_vht_siso;
43 u8 plcp_vht_mimo2;
44 u8 prev_rs; /* previous rate used in rs algo */
45 u8 next_rs; /* next rate used in rs algo */
46};
47
48#define IWL_RATE_60M_PLCP 3
49
50enum {
51 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
52 IWL_RATE_INVALID = IWL_RATE_COUNT,
53};
54
55#define LINK_QUAL_MAX_RETRY_NUM 16
56
57enum {
58 IWL_RATE_6M_INDEX_TABLE = 0,
59 IWL_RATE_9M_INDEX_TABLE,
60 IWL_RATE_12M_INDEX_TABLE,
61 IWL_RATE_18M_INDEX_TABLE,
62 IWL_RATE_24M_INDEX_TABLE,
63 IWL_RATE_36M_INDEX_TABLE,
64 IWL_RATE_48M_INDEX_TABLE,
65 IWL_RATE_54M_INDEX_TABLE,
66 IWL_RATE_1M_INDEX_TABLE,
67 IWL_RATE_2M_INDEX_TABLE,
68 IWL_RATE_5M_INDEX_TABLE,
69 IWL_RATE_11M_INDEX_TABLE,
70 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
71};
72
73/* #define vs. enum to keep from defaulting to 'large integer' */
74#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
75#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
76#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
77#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
78#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
79#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
80#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
81#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
82#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
83#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
84#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
85#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
86#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
87
88
89/* uCode API values for HT/VHT bit rates */
90enum {
91 IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
92 IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
93 IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
94 IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
95 IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
96 IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
97 IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
98 IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
99 IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
100 IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
101 IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
102 IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
103 IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
104 IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
105 IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
106 IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
107 IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
108 IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
109 IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
110 IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
111 IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
112 IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
113 IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
114 IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
115 IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
116 IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
117 IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
118 IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
119 IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
120 IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
121 IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
122 IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
123 IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
124 IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
125 IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
126 IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
127 IWL_RATE_HT_SISO_MCS_INV_PLCP,
128 IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
129 IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
130 IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
131 IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
132 IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
133 IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
134 IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
135};
136
137#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
138
139#define IWL_INVALID_VALUE -1
140
141#define TPC_MAX_REDUCTION 15
142#define TPC_NO_REDUCTION 0
143#define TPC_INVALID 0xff
144
145#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
146#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
147#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
148
149#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
150
151/* load per tid defines for A-MPDU activation */
152#define IWL_AGG_TPT_THREHOLD 0
153#define IWL_AGG_ALL_TID 0xff
154
/* Tx mode of an LQ table: legacy band, or HT/VHT with 1 or 2 streams */
enum iwl_table_type {
	LQ_NONE,
	LQ_LEGACY_G, /* legacy types */
	LQ_LEGACY_A,
	LQ_HT_SISO, /* HT types */
	LQ_HT_MIMO2,
	LQ_VHT_SISO, /* VHT types */
	LQ_VHT_MIMO2,
	LQ_MAX,
};
165
/* A single Tx rate: index plus all modulation/antenna parameters */
struct rs_rate {
	int index;		/* rate index */
	enum iwl_table_type type;	/* LQ_* mode (legacy/HT/VHT x streams) */
	u8 ant;			/* antenna(s) used - TODO confirm ANT_* mask */
	u32 bw;			/* channel width - RATE_MCS_CHAN_WIDTH_* */
	bool sgi;		/* short guard interval */
	bool ldpc;		/* LDPC coding */
	bool stbc;		/* space-time block coding */
	bool bfer;		/* beamforming in use */
};
176
177
178#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \
179 ((type) == LQ_LEGACY_A))
180#define is_type_ht_siso(type) ((type) == LQ_HT_SISO)
181#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
182#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
183#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
184#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
185#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
186#define is_type_mimo(type) (is_type_mimo2(type))
187#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
188#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
189#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
190#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
191
192#define is_legacy(rate) is_type_legacy((rate)->type)
193#define is_ht_siso(rate) is_type_ht_siso((rate)->type)
194#define is_ht_mimo2(rate) is_type_ht_mimo2((rate)->type)
195#define is_vht_siso(rate) is_type_vht_siso((rate)->type)
196#define is_vht_mimo2(rate) is_type_vht_mimo2((rate)->type)
197#define is_siso(rate) is_type_siso((rate)->type)
198#define is_mimo2(rate) is_type_mimo2((rate)->type)
199#define is_mimo(rate) is_type_mimo((rate)->type)
200#define is_ht(rate) is_type_ht((rate)->type)
201#define is_vht(rate) is_type_vht((rate)->type)
202#define is_a_band(rate) is_type_a_band((rate)->type)
203#define is_g_band(rate) is_type_g_band((rate)->type)
204
205#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
206#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
207#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
208
209#define IWL_MAX_MCS_DISPLAY_SIZE 12
210
/* Human-readable rate description (for debugfs display) */
struct iwl_rate_mcs_info {
	char mbps[IWL_MAX_MCS_DISPLAY_SIZE];	/* rate in Mbps as a string */
	char mcs[IWL_MAX_MCS_DISPLAY_SIZE];	/* MCS index as a string */
};
215
216/**
217 * struct iwl_rate_scale_data -- tx success history for one rate
218 */
219struct iwl_rate_scale_data {
220 u64 data; /* bitmap of successful frames */
221 s32 success_counter; /* number of frames successful */
222 s32 success_ratio; /* per-cent * 128 */
223 s32 counter; /* number of frames attempted */
224 s32 average_tpt; /* success ratio * expected throughput */
225};
226
227/* Possible Tx columns
228 * Tx Column = a combo of legacy/siso/mimo x antenna x SGI
229 */
230enum rs_column {
231 RS_COLUMN_LEGACY_ANT_A = 0,
232 RS_COLUMN_LEGACY_ANT_B,
233 RS_COLUMN_SISO_ANT_A,
234 RS_COLUMN_SISO_ANT_B,
235 RS_COLUMN_SISO_ANT_A_SGI,
236 RS_COLUMN_SISO_ANT_B_SGI,
237 RS_COLUMN_MIMO2,
238 RS_COLUMN_MIMO2_SGI,
239
240 RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
241 RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
242 RS_COLUMN_INVALID,
243};
244
/* Debug override for single-stream mode selection (see ss_force below) */
enum rs_ss_force_opt {
	RS_SS_FORCE_NONE = 0,	/* no override - normal selection */
	RS_SS_FORCE_STBC,	/* force STBC */
	RS_SS_FORCE_BFER,	/* force beamformer */
	RS_SS_FORCE_SISO,	/* force plain SISO */
};
251
252/* Packet stats per rate */
253struct rs_rate_stats {
254 u64 success;
255 u64 total;
256};
257
258/**
259 * struct iwl_scale_tbl_info -- tx params and success history for all rates
260 *
261 * There are two of these in struct iwl_lq_sta,
262 * one for "active", and one for "search".
263 */
264struct iwl_scale_tbl_info {
265 struct rs_rate rate;
266 enum rs_column column;
267 const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
268 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
269 /* per txpower-reduction history */
270 struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
271};
272
/* Search-cycle state of the rate-scaling machine (iwl_lq_sta.rs_state) */
enum {
	RS_STATE_SEARCH_CYCLE_STARTED,
	RS_STATE_SEARCH_CYCLE_ENDED,
	RS_STATE_STAY_IN_COLUMN,
};
278
279/**
280 * struct iwl_lq_sta -- driver's rate scaling private structure
281 *
282 * Pointer to this gets passed back and forth between driver and mac80211.
283 */
struct iwl_lq_sta {
	u8 active_tbl;		/* index of active table, range 0-1 */
	u8 rs_state;            /* RS_STATE_* */
	u8 search_better_tbl;	/* 1: currently trying alternate mode */
	s32 last_tpt;		/* throughput of the last used rate */

	/* The following determine when to search for a new mode */
	u32 table_count_limit;
	u32 max_failure_limit;	/* # failed frames before new search */
	u32 max_success_limit;	/* # successful frames before new search */
	u32 table_count;
	u32 total_failed;	/* total failed frames, any/all rates */
	u32 total_success;	/* total successful frames, any/all rates */
	u64 flush_timer;	/* time staying in mode before new search */

	u32 visited_columns;    /* Bitmask marking which Tx columns were
				 * explored during a search cycle
				 */
	u64 last_tx;		/* timestamp of the last transmission */
	bool is_vht;		/* remote station is VHT capable */
	bool ldpc;              /* LDPC Rx is supported by the STA */
	bool stbc_capable;      /* Tx STBC is supported by chip and Rx by STA */
	bool bfer_capable;      /* Remote supports beamformee and we BFer */

	enum ieee80211_band band;

	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
	unsigned long active_legacy_rate;
	unsigned long active_siso_rate;
	unsigned long active_mimo2_rate;

	/* Highest rate per Tx mode */
	u8 max_legacy_rate_idx;
	u8 max_siso_rate_idx;
	u8 max_mimo2_rate_idx;

	/* Optimal rate based on RSSI and STA caps.
	 * Used only to reflect link speed to userspace.
	 */
	struct rs_rate optimal_rate;
	unsigned long optimal_rate_mask;
	const struct rs_init_rate_info *optimal_rates;
	int optimal_nentries;

	/* # of consecutive Tx responses on a rate we didn't request */
	u8 missed_rate_counter;

	struct iwl_lq_cmd lq;	/* LQ command sent to the firmware */
	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
	u8 tx_agg_tid_en;	/* per-TID aggregation enable bitmask */

	/* last tx rate_n_flags */
	u32 last_rate_n_flags;
	/* packets destined for this STA are aggregated */
	u8 is_agg;

	/* tx power reduce for this sta */
	int tpc_reduce;

	/* persistent fields - initialized only once - keep last! */
	struct lq_sta_pers {
#ifdef CONFIG_MAC80211_DEBUGFS
		u32 dbg_fixed_rate;		/* debugfs rate override */
		u8 dbg_fixed_txp_reduction;	/* debugfs txp override */

		/* force STBC/BFER/SISO for testing */
		enum rs_ss_force_opt ss_force;
#endif
		u8 chains;	/* Rx chains seen on the last frames */
		s8 chain_signal[IEEE80211_MAX_CHAINS];
		s8 last_rssi;	/* RSSI of the last received frame */
		struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
		struct iwl_mvm *drv;	/* back-pointer to the mvm object */
	} pers;
};
358
359/* Initialize station's rate scaling information after adding station */
360void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
361 enum ieee80211_band band, bool init);
362
363/* Notify RS about Tx status */
364void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
365 int tid, struct ieee80211_tx_info *info);
366
/**
 * iwl_mvm_rate_control_register - Register the rate control callbacks
 *
 * Since the rate control algorithm is hardware specific, there is no need
 * or reason to place it as a stand alone module. The driver can call
 * iwl_mvm_rate_control_register in order to register the rate control
 * callbacks with the mac80211 subsystem. This should be performed prior to
 * calling ieee80211_register_hw
 *
 */
377int iwl_mvm_rate_control_register(void);
378
/**
 * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
 *
 * This should be called after calling ieee80211_unregister_hw, but before
 * the driver is unloaded.
 */
385void iwl_mvm_rate_control_unregister(void);
386
387struct iwl_mvm_sta;
388
389int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
390 bool enable);
391
#endif /* __rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
new file mode 100644
index 000000000000..5b58f5320e8d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -0,0 +1,612 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64#include <linux/skbuff.h>
65#include "iwl-trans.h"
66#include "mvm.h"
67#include "fw-api.h"
68
/*
 * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
 *
 * Copies the phy information into mvm->last_phy_info; it will be used
 * when the actual frame data comes from the fw in the next packet.
 * Also bumps the A-MPDU reference, since a single PHY notification
 * covers all subframes of an A-MPDU (see iwl_mvm_rx_rx_mpdu()).
 */
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
	mvm->ampdu_ref++;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* count A-MPDUs for the debugfs rx statistics */
	if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
		spin_lock(&mvm->drv_stats_lock);
		mvm->drv_rx_stats.ampdu_count++;
		spin_unlock(&mvm->drv_stats_lock);
	}
#endif
}
90
/*
 * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
 *
 * Adds the rxb to a new skb and gives it to mac80211. Small frames are
 * copied entirely into the skb head; for larger frames only the 802.11
 * header (plus crypto IV and room for SNAP/ethertype) is copied and the
 * payload is attached as a page fragment stolen from the rx buffer.
 * Note: ampdu_status is currently unused by this function.
 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct napi_struct *napi,
					    struct sk_buff *skb,
					    struct ieee80211_hdr *hdr, u16 len,
					    u32 ampdu_status, u8 crypt_len,
					    struct iwl_rx_cmd_buffer *rxb)
{
	unsigned int hdrlen, fraglen;

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pull in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len :
					      sizeof(*hdr) + crypt_len + 8;

	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		/* offset of the remaining payload within the stolen page */
		int offset = (void *)hdr + hdrlen -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	ieee80211_rx_napi(mvm->hw, skb, napi);
}
133
134/*
135 * iwl_mvm_get_signal_strength - use new rx PHY INFO API
136 * values are reported by the fw as positive values - need to negate
137 * to obtain their dBM. Account for missing antennas by replacing 0
138 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
139 */
140static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
141 struct iwl_rx_phy_info *phy_info,
142 struct ieee80211_rx_status *rx_status)
143{
144 int energy_a, energy_b, energy_c, max_energy;
145 u32 val;
146
147 val =
148 le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
149 energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
150 IWL_RX_INFO_ENERGY_ANT_A_POS;
151 energy_a = energy_a ? -energy_a : S8_MIN;
152 energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
153 IWL_RX_INFO_ENERGY_ANT_B_POS;
154 energy_b = energy_b ? -energy_b : S8_MIN;
155 energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
156 IWL_RX_INFO_ENERGY_ANT_C_POS;
157 energy_c = energy_c ? -energy_c : S8_MIN;
158 max_energy = max(energy_a, energy_b);
159 max_energy = max(max_energy, energy_c);
160
161 IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
162 energy_a, energy_b, energy_c, max_energy);
163
164 rx_status->signal = max_energy;
165 rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
166 RX_RES_PHY_FLAGS_ANTENNA)
167 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
168 rx_status->chain_signal[0] = energy_a;
169 rx_status->chain_signal[1] = energy_b;
170 rx_status->chain_signal[2] = energy_c;
171}
172
/*
 * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
 * @mvm: the mvm object
 * @hdr: 80211 header
 * @stats: status in mac80211's format
 * @rx_pkt_status: status coming from fw
 * @crypt_len: out - length of the crypto header at the start of the frame
 *
 * returns non 0 value if the packet should be dropped
 */
static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
					struct ieee80211_hdr *hdr,
					struct ieee80211_rx_status *stats,
					u32 rx_pkt_status,
					u8 *crypt_len)
{
	/* unprotected frames, or frames the fw says are unencrypted,
	 * need no crypto handling
	 */
	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
	    RX_MPDU_RES_STATUS_SEC_NO_ENC)
		return 0;

	/* packet was encrypted with unknown alg */
	if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
	    RX_MPDU_RES_STATUS_SEC_ENC_ERR)
		return 0;

	switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
	case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
		/* alg is CCM: check MIC only */
		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;

	case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
		/* Don't drop the frame and decrypt it in SW */
		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;
		*crypt_len = IEEE80211_TKIP_IV_LEN;
		/* fall through if TTAK OK */

	case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
		/* TKIP with TTAK OK and WEP share the ICV check */
		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		/* only set the WEP IV length here; TKIP's was set above */
		if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
				RX_MPDU_RES_STATUS_SEC_WEP_ENC)
			*crypt_len = IEEE80211_WEP_IV_LEN;
		return 0;

	case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;

	default:
		/* unknown cipher: log it but let the frame through */
		IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
	}

	return 0;
}
237
238static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
239 struct sk_buff *skb,
240 u32 status)
241{
242 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
243 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
244
245 if (mvmvif->features & NETIF_F_RXCSUM &&
246 status & RX_MPDU_RES_STATUS_CSUM_DONE &&
247 status & RX_MPDU_RES_STATUS_CSUM_OK)
248 skb->ip_summed = CHECKSUM_UNNECESSARY;
249}
250
251/*
252 * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
253 *
254 * Handles the actual data of the Rx packet from the fw
255 */
256void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
257 struct iwl_rx_cmd_buffer *rxb)
258{
259 struct ieee80211_hdr *hdr;
260 struct ieee80211_rx_status *rx_status;
261 struct iwl_rx_packet *pkt = rxb_addr(rxb);
262 struct iwl_rx_phy_info *phy_info;
263 struct iwl_rx_mpdu_res_start *rx_res;
264 struct ieee80211_sta *sta;
265 struct sk_buff *skb;
266 u32 len;
267 u32 ampdu_status;
268 u32 rate_n_flags;
269 u32 rx_pkt_status;
270 u8 crypt_len = 0;
271
272 phy_info = &mvm->last_phy_info;
273 rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
274 hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
275 len = le16_to_cpu(rx_res->byte_count);
276 rx_pkt_status = le32_to_cpup((__le32 *)
277 (pkt->data + sizeof(*rx_res) + len));
278
279 /* Dont use dev_alloc_skb(), we'll have enough headroom once
280 * ieee80211_hdr pulled.
281 */
282 skb = alloc_skb(128, GFP_ATOMIC);
283 if (!skb) {
284 IWL_ERR(mvm, "alloc_skb failed\n");
285 return;
286 }
287
288 rx_status = IEEE80211_SKB_RXCB(skb);
289
290 /*
291 * drop the packet if it has failed being decrypted by HW
292 */
293 if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
294 &crypt_len)) {
295 IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
296 rx_pkt_status);
297 kfree_skb(skb);
298 return;
299 }
300
301 /*
302 * Keep packets with CRC errors (and with overrun) for monitor mode
303 * (otherwise the firmware discards them) but mark them as bad.
304 */
305 if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
306 !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
307 IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
308 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
309 }
310
311 /* This will be used in several places later */
312 rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
313
314 /* rx_status carries information about the packet to mac80211 */
315 rx_status->mactime = le64_to_cpu(phy_info->timestamp);
316 rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
317 rx_status->band =
318 (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
319 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
320 rx_status->freq =
321 ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
322 rx_status->band);
323 /*
324 * TSF as indicated by the fw is at INA time, but mac80211 expects the
325 * TSF at the beginning of the MPDU.
326 */
327 /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
328
329 iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
330
331 IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
332 (unsigned long long)rx_status->mactime);
333
334 rcu_read_lock();
335 /*
336 * We have tx blocked stations (with CS bit). If we heard frames from
337 * a blocked station on a new channel we can TX to it again.
338 */
339 if (unlikely(mvm->csa_tx_block_bcn_timeout)) {
340 sta = ieee80211_find_sta(
341 rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2);
342 if (sta)
343 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
344 }
345
346 /* This is fine since we don't support multiple AP interfaces */
347 sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
348 if (sta) {
349 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
350
351 rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
352
353 if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
354 ieee80211_is_beacon(hdr->frame_control)) {
355 struct iwl_fw_dbg_trigger_tlv *trig;
356 struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
357 bool trig_check;
358 s32 rssi;
359
360 trig = iwl_fw_dbg_get_trigger(mvm->fw,
361 FW_DBG_TRIGGER_RSSI);
362 rssi_trig = (void *)trig->data;
363 rssi = le32_to_cpu(rssi_trig->rssi);
364
365 trig_check =
366 iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
367 trig);
368 if (trig_check && rx_status->signal < rssi)
369 iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
370 }
371 }
372
373 if (sta && ieee80211_is_data(hdr->frame_control))
374 iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
375
376 rcu_read_unlock();
377
378 /* set the preamble flag if appropriate */
379 if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
380 rx_status->flag |= RX_FLAG_SHORTPRE;
381
382 if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
383 /*
384 * We know which subframes of an A-MPDU belong
385 * together since we get a single PHY response
386 * from the firmware for all of them
387 */
388 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
389 rx_status->ampdu_reference = mvm->ampdu_ref;
390 }
391
392 /* Set up the HT phy flags */
393 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
394 case RATE_MCS_CHAN_WIDTH_20:
395 break;
396 case RATE_MCS_CHAN_WIDTH_40:
397 rx_status->flag |= RX_FLAG_40MHZ;
398 break;
399 case RATE_MCS_CHAN_WIDTH_80:
400 rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
401 break;
402 case RATE_MCS_CHAN_WIDTH_160:
403 rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
404 break;
405 }
406 if (rate_n_flags & RATE_MCS_SGI_MSK)
407 rx_status->flag |= RX_FLAG_SHORT_GI;
408 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
409 rx_status->flag |= RX_FLAG_HT_GF;
410 if (rate_n_flags & RATE_MCS_LDPC_MSK)
411 rx_status->flag |= RX_FLAG_LDPC;
412 if (rate_n_flags & RATE_MCS_HT_MSK) {
413 u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
414 RATE_MCS_STBC_POS;
415 rx_status->flag |= RX_FLAG_HT;
416 rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
417 rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
418 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
419 u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
420 RATE_MCS_STBC_POS;
421 rx_status->vht_nss =
422 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
423 RATE_VHT_MCS_NSS_POS) + 1;
424 rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
425 rx_status->flag |= RX_FLAG_VHT;
426 rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
427 if (rate_n_flags & RATE_MCS_BF_MSK)
428 rx_status->vht_flag |= RX_VHT_FLAG_BF;
429 } else {
430 rx_status->rate_idx =
431 iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
432 rx_status->band);
433 }
434
435#ifdef CONFIG_IWLWIFI_DEBUGFS
436 iwl_mvm_update_frame_stats(mvm, rate_n_flags,
437 rx_status->flag & RX_FLAG_AMPDU_DETAILS);
438#endif
439 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
440 crypt_len, rxb);
441}
442
/* Cache the latest fw rx statistics in the mvm object (mutex held) */
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
					 struct mvm_statistics_rx *rx_stats)
{
	lockdep_assert_held(&mvm->mutex);

	mvm->rx_stats = *rx_stats;
}
450
/* Context passed to iwl_mvm_stat_iterator() for each active interface */
struct iwl_mvm_stat_data {
	struct iwl_mvm *mvm;
	__le32 mac_id;	/* MAC the statistics notification refers to */
	u8 beacon_filter_average_energy;
	struct mvm_statistics_general_v8 *general;	/* may be NULL */
};
457
/*
 * Per-vif statistics handler: records beacon stats, then - for the
 * station vif the notification is about - feeds the average beacon
 * signal into the BT-coex and CQM RSSI threshold machinery.
 */
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct iwl_mvm_stat_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	/* fw reports energy as a positive value; negate to get dBm */
	int sig = -data->beacon_filter_average_energy;
	int last_event;
	int thold = vif->bss_conf.cqm_rssi_thold;
	int hyst = vif->bss_conf.cqm_rssi_hyst;
	u16 id = le32_to_cpu(data->mac_id);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* This doesn't need the MAC ID check since it's not taking the
	 * data copied into the "data" struct, but rather the data from
	 * the notification directly.
	 */
	if (data->general) {
		mvmvif->beacon_stats.num_beacons =
			le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
		mvmvif->beacon_stats.avg_signal =
			-data->general->beacon_average_energy[mvmvif->id];
	}

	/* everything below only applies to the vif the stats are for */
	if (mvmvif->id != id)
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	/* 0 energy means no usable measurement */
	if (sig == 0) {
		IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
		return;
	}

	mvmvif->bf_data.ave_beacon_signal = sig;

	/* BT Coex: fire high/low RSSI events when a threshold is crossed
	 * (last_bt_coex_event provides the hysteresis)
	 */
	if (mvmvif->bf_data.bt_coex_min_thold !=
	    mvmvif->bf_data.bt_coex_max_thold) {
		last_event = mvmvif->bf_data.last_bt_coex_event;
		if (sig > mvmvif->bf_data.bt_coex_max_thold &&
		    (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
		     last_event == 0)) {
			mvmvif->bf_data.last_bt_coex_event = sig;
			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
				     sig);
			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
		} else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
			   (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
			    last_event == 0)) {
			mvmvif->bf_data.last_bt_coex_event = sig;
			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
				     sig);
			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
		}
	}

	if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
		return;

	/* CQM Notification: report threshold crossings (with hysteresis)
	 * to userspace via mac80211
	 */
	last_event = mvmvif->bf_data.last_cqm_event;
	if (thold && sig < thold && (last_event == 0 ||
				     sig < last_event - hyst)) {
		mvmvif->bf_data.last_cqm_event = sig;
		IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
			     sig);
		ieee80211_cqm_rssi_notify(
			vif,
			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
			GFP_KERNEL);
	} else if (sig > thold &&
		   (last_event == 0 || sig > last_event + hyst)) {
		mvmvif->bf_data.last_cqm_event = sig;
		IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
			     sig);
		ieee80211_cqm_rssi_notify(
			vif,
			NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
			GFP_KERNEL);
	}
}
540
/*
 * Check the fw debug statistics trigger: if the configured counter in
 * the statistics packet crossed its threshold, collect fw debug data.
 */
static inline void
iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_stats *trig_stats;
	u32 trig_offset, trig_thold;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
	trig_stats = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	trig_offset = le32_to_cpu(trig_stats->stop_offset);
	trig_thold = le32_to_cpu(trig_stats->stop_threshold);

	/* the watched field must lie within the packet payload */
	if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
		return;

	if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
}
568
/*
 * Handle a fw statistics notification: validate its size, cache the rx
 * and radio statistics, check the debug trigger, and distribute the
 * per-vif data (beacon stats, RSSI) to all active interfaces.
 */
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
	struct iwl_mvm_stat_data data = {
		.mvm = mvm,
	};
	u32 temperature;

	/* reject notifications whose payload doesn't match the expected
	 * v10 statistics layout
	 */
	if (iwl_rx_packet_payload_len(pkt) != sizeof(*stats))
		goto invalid;

	temperature = le32_to_cpu(stats->general.radio_temperature);
	data.mac_id = stats->rx.general.mac_id;
	data.beacon_filter_average_energy =
		stats->general.beacon_filter_average_energy;

	iwl_mvm_update_rx_statistics(mvm, &stats->rx);

	mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
	mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
	mvm->radio_stats.on_time_rf =
		le64_to_cpu(stats->general.on_time_rf);
	mvm->radio_stats.on_time_scan =
		le64_to_cpu(stats->general.on_time_scan);

	data.general = &stats->general;

	iwl_mvm_rx_stats_check_trigger(mvm, pkt);

	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_stat_iterator,
					    &data);
	return;
 invalid:
	IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
		iwl_rx_packet_payload_len(pkt));
}
608
/* Notification wrapper: extract the rx packet from the rx buffer and
 * hand it to the common statistics handler.
 */
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
new file mode 100644
index 000000000000..d6e0c1b5c20c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -0,0 +1,1552 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/etherdevice.h>
67#include <net/mac80211.h>
68
69#include "mvm.h"
70#include "fw-api-scan.h"
71
72#define IWL_DENSE_EBS_SCAN_RATIO 5
73#define IWL_SPARSE_EBS_SCAN_RATIO 1
74
/*
 * Scan flavor, selected in iwl_mvm_get_scan_type() from association state,
 * traffic load and latency requirements; indexes the scan_timing[] table.
 */
enum iwl_mvm_scan_type {
	IWL_SCAN_TYPE_UNASSOC,		/* no active interface on a PHY */
	IWL_SCAN_TYPE_WILD,		/* low load: longest out-of-channel time */
	IWL_SCAN_TYPE_MILD,		/* medium load / low latency */
	IWL_SCAN_TYPE_FRAGMENTED,	/* high load: fragmented dwells */
};
81
/* Coarse traffic-load estimate used to pick the scan type */
enum iwl_mvm_traffic_load {
	IWL_MVM_TRAFFIC_LOW,
	IWL_MVM_TRAFFIC_MEDIUM,
	IWL_MVM_TRAFFIC_HIGH,
};
87
/*
 * Per-scan-type timing knobs copied into the LMAC/UMAC scan commands.
 * Values are presumably in TUs/firmware time units — confirm against the
 * scan command API definitions.
 */
struct iwl_mvm_scan_timing_params {
	u32 dwell_active;	/* dwell on an actively probed channel */
	u32 dwell_passive;	/* dwell on a passively listened channel */
	u32 dwell_fragmented;	/* dwell per fragment for fragmented scan */
	u32 suspend_time;	/* pause between out-of-channel bursts */
	u32 max_out_time;	/* max continuous time off the operating channel */
};
95
/*
 * Timing table indexed by enum iwl_mvm_scan_type. UNASSOC uses zero
 * suspend/max_out_time (no operating channel to return to); the other
 * entries trade scan speed against time spent away from the BSS channel.
 */
static struct iwl_mvm_scan_timing_params scan_timing[] = {
	[IWL_SCAN_TYPE_UNASSOC] = {
		.dwell_active = 10,
		.dwell_passive = 110,
		.dwell_fragmented = 44,
		.suspend_time = 0,
		.max_out_time = 0,
	},
	[IWL_SCAN_TYPE_WILD] = {
		.dwell_active = 10,
		.dwell_passive = 110,
		.dwell_fragmented = 44,
		.suspend_time = 30,
		.max_out_time = 120,
	},
	[IWL_SCAN_TYPE_MILD] = {
		.dwell_active = 10,
		.dwell_passive = 110,
		.dwell_fragmented = 44,
		.suspend_time = 120,
		.max_out_time = 120,
	},
	[IWL_SCAN_TYPE_FRAGMENTED] = {
		.dwell_active = 10,
		.dwell_passive = 110,
		.dwell_fragmented = 44,
		.suspend_time = 95,
		.max_out_time = 44,
	},
};
126
/*
 * Driver-internal, firmware-agnostic description of one scan request,
 * built from the cfg80211/mac80211 request and then translated into
 * either an LMAC or a UMAC scan command.
 */
struct iwl_mvm_scan_params {
	enum iwl_mvm_scan_type type;
	u32 n_channels;
	u16 delay;
	int n_ssids;
	struct cfg80211_ssid *ssids;
	struct ieee80211_channel **channels;
	u32 flags;			/* NL80211_SCAN_FLAG_* bits */
	u8 *mac_addr;			/* base address for randomization */
	u8 *mac_addr_mask;
	bool no_cck;
	bool pass_all;			/* report all results vs. matches only */
	int n_match_sets;
	struct iwl_scan_probe_req preq;	/* pre-built probe request template */
	struct cfg80211_match_set *match_sets;
	int n_scan_plans;
	struct cfg80211_sched_scan_plan *scan_plans;
};
145
146static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
147{
148 if (mvm->scan_rx_ant != ANT_NONE)
149 return mvm->scan_rx_ant;
150 return iwl_mvm_get_valid_rx_ant(mvm);
151}
152
153static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
154{
155 u16 rx_chain;
156 u8 rx_ant;
157
158 rx_ant = iwl_mvm_scan_rx_ant(mvm);
159 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
160 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
161 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
162 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
163 return cpu_to_le16(rx_chain);
164}
165
166static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
167{
168 if (band == IEEE80211_BAND_2GHZ)
169 return cpu_to_le32(PHY_BAND_24);
170 else
171 return cpu_to_le32(PHY_BAND_5);
172}
173
174static inline __le32
175iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
176 bool no_cck)
177{
178 u32 tx_ant;
179
180 mvm->scan_last_antenna_idx =
181 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
182 mvm->scan_last_antenna_idx);
183 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
184
185 if (band == IEEE80211_BAND_2GHZ && !no_cck)
186 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
187 tx_ant);
188 else
189 return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
190}
191
192static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
193 struct ieee80211_vif *vif)
194{
195 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
196 int *global_cnt = data;
197
198 if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
199 mvmvif->phy_ctxt->id < MAX_PHYS)
200 *global_cnt += 1;
201}
202
/*
 * Placeholder: always reports low traffic load. A real estimate would
 * need throughput/airtime accounting that is not implemented here.
 */
static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
	return IWL_MVM_TRAFFIC_LOW;
}
207
/*
 * Choose the scan type (and thereby the timing profile) for this request:
 * - no interface bound to a PHY -> UNASSOC (no channel to return to);
 * - high load or low latency, non-P2P, FW supports it -> FRAGMENTED;
 * - medium load or low latency -> MILD;
 * - otherwise -> WILD.
 */
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_mvm_scan_params *params)
{
	int global_cnt = 0;
	enum iwl_mvm_traffic_load load;
	bool low_latency;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_scan_condition_iterator,
						   &global_cnt);
	if (!global_cnt)
		return IWL_SCAN_TYPE_UNASSOC;

	load = iwl_mvm_get_traffic_load(mvm);
	low_latency = iwl_mvm_low_latency(mvm);

	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
		return IWL_SCAN_TYPE_FRAGMENTED;

	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
		return IWL_SCAN_TYPE_MILD;

	return IWL_SCAN_TYPE_WILD;
}
237
/* True when the firmware supports the DS parameter set IE (radio
 * resource measurement scan); we then always insert that element.
 */
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
	/* require rrm scan whenever the fw supports it */
	return fw_has_capa(&mvm->fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
}
244
245static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
246{
247 int max_probe_len;
248
249 max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
250
251 /* we create the 802.11 header and SSID element */
252 max_probe_len -= 24 + 2;
253
254 /* DS parameter set element is added on 2.4GHZ band if required */
255 if (iwl_mvm_rrm_scan_needed(mvm))
256 max_probe_len -= 3;
257
258 return max_probe_len;
259}
260
/*
 * Maximum scan IE length reported to mac80211. Knowingly over-reports —
 * see the TODO below for why the correct halved value is not returned.
 */
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
{
	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);

	/* TODO: [BUG] This function should return the maximum allowed size of
	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
	 * in the same command. So the correct implementation of this function
	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
	 * command has only 512 bytes and it would leave us with about 240
	 * bytes for scan IEs, which is clearly not enough. So meanwhile
	 * we will report an incorrect value. This may result in a failure to
	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
	 * functions with -ENOBUFS, if a large enough probe will be provided.
	 */
	return max_ie_len;
}
277
278static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
279 int num_res, u8 *buf, size_t buf_size)
280{
281 int i;
282 u8 *pos = buf, *end = buf + buf_size;
283
284 for (i = 0; pos < end && i < num_res; i++)
285 pos += snprintf(pos, end - pos, " %u", res[i].channel);
286
287 /* terminate the string in case the buffer was too short */
288 *(buf + buf_size - 1) = '\0';
289
290 return buf;
291}
292
/* Debug-log an LMAC "scan iteration complete" notification, including the
 * list of channels scanned during the iteration.
 */
void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
					      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
	u8 buf[256];

	IWL_DEBUG_SCAN(mvm,
		       "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
		       notif->status, notif->scanned_channels,
		       iwl_mvm_dump_channel_list(notif->results,
						 notif->scanned_channels, buf,
						 sizeof(buf)));
}
307
/* Firmware found a sched-scan match: forward the event to mac80211 */
void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
	ieee80211_sched_scan_results(mvm->hw);
}
314
315static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
316{
317 switch (status) {
318 case IWL_SCAN_EBS_SUCCESS:
319 return "successful";
320 case IWL_SCAN_EBS_INACTIVE:
321 return "inactive";
322 case IWL_SCAN_EBS_FAILED:
323 case IWL_SCAN_EBS_CHAN_NOT_FOUND:
324 default:
325 return "failed";
326 }
327}
328
/*
 * Handle an LMAC scan/sched-scan completion notification: clear the
 * matching bit in mvm->scan_status and, for firmware-initiated stops,
 * notify mac80211. Also records whether the last EBS attempt worked so
 * iwl_mvm_scan_use_ebs() can decide for the next scan.
 */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
					 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
	bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);

	/* scan status must be locked for proper checking */
	lockdep_assert_held(&mvm->mutex);

	/* We first check if we were stopping a scan, in which case we
	 * just clear the stopping flag. Then we check if it was a
	 * firmware initiated stop, in which case we need to inform
	 * mac80211.
	 * Note that we can have a stopping and a running scan
	 * simultaneously, but we can't have two different types of
	 * scans stopping or running at the same time (since LMAC
	 * doesn't support it).
	 */

	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);

		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
		IWL_DEBUG_SCAN(mvm,
			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
			       scan_notif->last_schedule_line,
			       scan_notif->last_schedule_iteration,
			       __le32_to_cpu(scan_notif->time_after_last_iter));

		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));

		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
		/* firmware-initiated stop of a running sched scan */
		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);

		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
		IWL_DEBUG_SCAN(mvm,
			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
			       scan_notif->last_schedule_line,
			       scan_notif->last_schedule_iteration,
			       __le32_to_cpu(scan_notif->time_after_last_iter));

		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
		ieee80211_sched_scan_stopped(mvm->hw);
	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
		/* firmware-initiated completion of a regular scan */
		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));

		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
		ieee80211_scan_completed(mvm->hw,
				scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
	}

	/* INACTIVE counts as success: EBS wasn't used, not that it failed */
	mvm->last_ebs_successful =
			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
}
397
398static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
399{
400 int i;
401
402 for (i = 0; i < PROBE_OPTION_MAX; i++) {
403 if (!ssid_list[i].len)
404 break;
405 if (ssid_list[i].len == ssid_len &&
406 !memcmp(ssid_list->ssid, ssid, ssid_len))
407 return i;
408 }
409 return -1;
410}
411
/* We insert the SSIDs in an inverted order, because the FW will
 * invert it back.
 */
static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
				 struct iwl_ssid_ie *ssids,
				 u32 *ssid_bitmap)
{
	int i, j;
	int index;

	/*
	 * copy SSIDs from match list.
	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
	 * config match list.
	 */
	for (i = 0, j = params->n_match_sets - 1;
	     j >= 0 && i < PROBE_OPTION_MAX;
	     i++, j--) {
		/* skip empty SSID matchsets */
		if (!params->match_sets[j].ssid.ssid_len)
			continue;
		ssids[i].id = WLAN_EID_SSID;
		ssids[i].len = params->match_sets[j].ssid.ssid_len;
		memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
		       ssids[i].len);
	}

	/* add SSIDs from scan SSID list; the bitmap marks which entries
	 * came from the directed-scan list (deduplicated against the
	 * match-set entries copied above)
	 */
	*ssid_bitmap = 0;
	for (j = params->n_ssids - 1;
	     j >= 0 && i < PROBE_OPTION_MAX;
	     i++, j--) {
		index = iwl_ssid_exist(params->ssids[j].ssid,
				       params->ssids[j].ssid_len,
				       ssids);
		if (index < 0) {
			/* not seen yet — append and mark the new slot */
			ssids[i].id = WLAN_EID_SSID;
			ssids[i].len = params->ssids[j].ssid_len;
			memcpy(ssids[i].ssid, params->ssids[j].ssid,
			       ssids[i].len);
			*ssid_bitmap |= BIT(i);
		} else {
			/* duplicate — just mark the existing slot */
			*ssid_bitmap |= BIT(index);
		}
	}
}
458
459static int
460iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
461 struct cfg80211_sched_scan_request *req)
462{
463 struct iwl_scan_offload_profile *profile;
464 struct iwl_scan_offload_profile_cfg *profile_cfg;
465 struct iwl_scan_offload_blacklist *blacklist;
466 struct iwl_host_cmd cmd = {
467 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
468 .len[1] = sizeof(*profile_cfg),
469 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
470 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
471 };
472 int blacklist_len;
473 int i;
474 int ret;
475
476 if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
477 return -EIO;
478
479 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
480 blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
481 else
482 blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
483
484 blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
485 if (!blacklist)
486 return -ENOMEM;
487
488 profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
489 if (!profile_cfg) {
490 ret = -ENOMEM;
491 goto free_blacklist;
492 }
493
494 cmd.data[0] = blacklist;
495 cmd.len[0] = sizeof(*blacklist) * blacklist_len;
496 cmd.data[1] = profile_cfg;
497
498 /* No blacklist configuration */
499
500 profile_cfg->num_profiles = req->n_match_sets;
501 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
502 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
503 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
504 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
505 profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
506
507 for (i = 0; i < req->n_match_sets; i++) {
508 profile = &profile_cfg->profiles[i];
509 profile->ssid_index = i;
510 /* Support any cipher and auth algorithm */
511 profile->unicast_cipher = 0xff;
512 profile->auth_alg = 0xff;
513 profile->network_type = IWL_NETWORK_TYPE_ANY;
514 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
515 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
516 }
517
518 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
519
520 ret = iwl_mvm_send_cmd(mvm, &cmd);
521 kfree(profile_cfg);
522free_blacklist:
523 kfree(blacklist);
524
525 return ret;
526}
527
528static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
529 struct cfg80211_sched_scan_request *req)
530{
531 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
532 IWL_DEBUG_SCAN(mvm,
533 "Sending scheduled scan with filtering, n_match_sets %d\n",
534 req->n_match_sets);
535 return false;
536 }
537
538 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
539 return true;
540}
541
/*
 * Ask the firmware to abort the current LMAC offload scan. Returns 0 on
 * success, -ENOENT if no scan could be aborted, or a send error.
 */
static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_ABORT_CMD,
	};
	u32 status;

	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
	if (ret)
		return ret;

	if (status != CAN_ABORT_STATUS) {
		/*
		 * The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is completed.
		 */
		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
		ret = -ENOENT;
	}

	return ret;
}
568
569static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
570 struct iwl_scan_req_tx_cmd *tx_cmd,
571 bool no_cck)
572{
573 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
574 TX_CMD_FLG_BT_DIS);
575 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
576 IEEE80211_BAND_2GHZ,
577 no_cck);
578 tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
579
580 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
581 TX_CMD_FLG_BT_DIS);
582 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
583 IEEE80211_BAND_5GHZ,
584 no_cck);
585 tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
586}
587
588static void
589iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
590 struct ieee80211_channel **channels,
591 int n_channels, u32 ssid_bitmap,
592 struct iwl_scan_req_lmac *cmd)
593{
594 struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
595 int i;
596
597 for (i = 0; i < n_channels; i++) {
598 channel_cfg[i].channel_num =
599 cpu_to_le16(channels[i]->hw_value);
600 channel_cfg[i].iter_count = cpu_to_le16(1);
601 channel_cfg[i].iter_interval = 0;
602 channel_cfg[i].flags =
603 cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
604 ssid_bitmap);
605 }
606}
607
/*
 * Copy @len bytes of IEs into @pos, inserting a zeroed DS Parameter Set
 * placeholder at its standards-mandated position when RRM scan is needed.
 * Returns the new write pointer (one past the copied data).
 */
static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
					   size_t len, u8 *const pos)
{
	/* elements that must precede DS Parameter Set in a probe request */
	static const u8 before_ds_params[] = {
		    WLAN_EID_SSID,
		    WLAN_EID_SUPP_RATES,
		    WLAN_EID_REQUEST,
		    WLAN_EID_EXT_SUPP_RATES,
	};
	size_t offs;
	u8 *newpos = pos;

	/* no DS element needed — plain copy */
	if (!iwl_mvm_rrm_scan_needed(mvm)) {
		memcpy(newpos, ies, len);
		return newpos + len;
	}

	/* find the split point after the elements listed above */
	offs = ieee80211_ie_split(ies, len,
				  before_ds_params,
				  ARRAY_SIZE(before_ds_params),
				  0);

	memcpy(newpos, ies, offs);
	newpos += offs;

	/* Add a placeholder for DS Parameter Set element; the channel
	 * number (0 here) is presumably filled in later — confirm where.
	 */
	*newpos++ = WLAN_EID_DS_PARAMS;
	*newpos++ = 1;
	*newpos++ = 0;

	memcpy(newpos, ies + offs, len - offs);
	newpos += len - offs;

	return newpos;
}
643
/*
 * Build the probe request template in params->preq: 802.11 header with an
 * empty SSID element, then 2.4 GHz band IEs (with DS element inserted),
 * 5 GHz band IEs, and finally the common IEs, recording the offset and
 * length of each section for the firmware.
 */
static void
iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct ieee80211_scan_ies *ies,
			 struct iwl_mvm_scan_params *params)
{
	struct ieee80211_mgmt *frame = (void *)params->preq.buf;
	u8 *pos, *newpos;
	const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
		params->mac_addr : NULL;

	/*
	 * Unfortunately, right now the offload scan doesn't support randomising
	 * within the firmware, so until the firmware API is ready we implement
	 * it in the driver. This means that the scan iterations won't really be
	 * random, only when it's restarted, but at least that helps a bit.
	 */
	if (mac_addr)
		get_random_mask_addr(frame->sa, mac_addr,
				     params->mac_addr_mask);
	else
		memcpy(frame->sa, vif->addr, ETH_ALEN);

	/* broadcast probe request header */
	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	/* wildcard (zero-length) SSID element */
	pos = frame->u.probe_req.variable;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	/* 24-byte header + 2-byte empty SSID element */
	params->preq.mac_header.offset = 0;
	params->preq.mac_header.len = cpu_to_le16(24 + 2);

	/* Insert ds parameter set element on 2.4 GHz band */
	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
						 ies->ies[IEEE80211_BAND_2GHZ],
						 ies->len[IEEE80211_BAND_2GHZ],
						 pos);
	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
	pos = newpos;

	memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
	       ies->len[IEEE80211_BAND_5GHZ]);
	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
	params->preq.band_data[1].len =
		cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
	pos += ies->len[IEEE80211_BAND_5GHZ];

	memcpy(pos, ies->common_ies, ies->common_ie_len);
	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
	params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
}
698
699static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
700 enum iwl_scan_priority_ext prio)
701{
702 if (fw_has_api(&mvm->fw->ucode_capa,
703 IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
704 return cpu_to_le32(prio);
705
706 if (prio <= IWL_SCAN_PRIORITY_EXT_2)
707 return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
708
709 if (prio <= IWL_SCAN_PRIORITY_EXT_4)
710 return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
711
712 return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
713}
714
715static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
716 struct iwl_scan_req_lmac *cmd,
717 struct iwl_mvm_scan_params *params)
718{
719 cmd->active_dwell = scan_timing[params->type].dwell_active;
720 cmd->passive_dwell = scan_timing[params->type].dwell_passive;
721 cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
722 cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
723 cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
724 cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
725}
726
727static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
728 struct ieee80211_scan_ies *ies,
729 int n_channels)
730{
731 return ((n_ssids <= PROBE_OPTION_MAX) &&
732 (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
733 (ies->common_ie_len +
734 ies->len[NL80211_BAND_2GHZ] +
735 ies->len[NL80211_BAND_5GHZ] <=
736 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
737}
738
static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;

	/* We can only use EBS if:
	 *	1. the feature is supported;
	 *	2. the last EBS was successful;
	 *	3. if only single scan, the single scan EBS API is supported;
	 *	4. it's not a p2p find operation.
	 *
	 * NOTE(review): condition 3 above is not actually checked by the
	 * code below — confirm whether the single-scan EBS API check was
	 * intentionally dropped or is handled elsewhere.
	 */
	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
		mvm->last_ebs_successful &&
		vif->type != NL80211_IFTYPE_P2P_DEVICE);
}
754
755static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
756 struct iwl_mvm_scan_params *params)
757{
758 int flags = 0;
759
760 if (params->n_ssids == 0)
761 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
762
763 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
764 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
765
766 if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
767 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
768
769 if (iwl_mvm_rrm_scan_needed(mvm))
770 flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
771
772 if (params->pass_all)
773 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
774 else
775 flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
776
777#ifdef CONFIG_IWLWIFI_DEBUGFS
778 if (mvm->scan_iter_notif_enabled)
779 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
780#endif
781
782 return flags;
783}
784
/*
 * Fill mvm->scan_cmd with a complete LMAC scan request built from @params:
 * dwell times, flags, TX commands, SSIDs, scan plans, EBS options, channel
 * list and the probe request template. Returns 0 or -EINVAL when too many
 * scan plans were requested.
 */
static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct iwl_mvm_scan_params *params)
{
	struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
	/* probe request template lives right after the channel config array */
	struct iwl_scan_probe_req *preq =
		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
			 mvm->fw->ucode_capa.n_scan_channels);
	u32 ssid_bitmap = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	memset(cmd, 0, ksize(cmd));

	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
		return -EINVAL;

	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);

	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
	cmd->iter_num = cpu_to_le32(1);
	cmd->n_channels = (u8)params->n_channels;

	cmd->delay = cpu_to_le32(params->delay);

	cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));

	/* band flag taken from the first channel in the request */
	cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
	iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);

	/* this API uses bits 1-20 instead of 0-19 */
	ssid_bitmap <<= 1;

	for (i = 0; i < params->n_scan_plans; i++) {
		struct cfg80211_sched_scan_plan *scan_plan =
			&params->scan_plans[i];

		cmd->schedule[i].delay =
			cpu_to_le16(scan_plan->interval);
		cmd->schedule[i].iterations = scan_plan->iterations;
		cmd->schedule[i].full_scan_mul = 1;
	}

	/*
	 * If the number of iterations of the last scan plan is set to
	 * zero, it should run infinitely. However, this is not always the case.
	 * For example, when regular scan is requested the driver sets one scan
	 * plan with one iteration.
	 *
	 * NOTE(review): with n_scan_plans == 0 the access below would index
	 * schedule[-1]; callers presumably always pass at least one plan —
	 * confirm, or guard like the upper bound above.
	 */
	if (!cmd->schedule[i - 1].iterations)
		cmd->schedule[i - 1].iterations = 0xff;

	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
		/* channel_opt[0] is 2.4 GHz (dense), [1] is 5 GHz (sparse) */
		cmd->channel_opt[0].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[0].non_ebs_ratio =
			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
		cmd->channel_opt[1].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[1].non_ebs_ratio =
			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
	}

	iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
				       params->n_channels, ssid_bitmap, cmd);

	*preq = params->preq;

	return 0;
}
862
863static int rate_to_scan_rate_flag(unsigned int rate)
864{
865 static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
866 [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
867 [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
868 [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
869 [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
870 [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
871 [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
872 [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
873 [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
874 [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
875 [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
876 [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
877 [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
878 };
879
880 return rate_to_scan_rate[rate];
881}
882
883static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
884{
885 struct ieee80211_supported_band *band;
886 unsigned int rates = 0;
887 int i;
888
889 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
890 for (i = 0; i < band->n_bitrates; i++)
891 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
892 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
893 for (i = 0; i < band->n_bitrates; i++)
894 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
895
896 /* Set both basic rates and supported rates */
897 rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
898
899 return cpu_to_le32(rates);
900}
901
/*
 * Send the one-time UMAC scan configuration to the firmware: chains,
 * legacy rates, dwell times, our MAC address, broadcast station id,
 * channel flags and the full channel list from NVM data.
 * Returns 0 on success or a negative errno.
 */
int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
	struct iwl_scan_config *scan_config;
	struct ieee80211_supported_band *band;
	int num_channels =
		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
	int ret, i, j = 0, cmd_size;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
	};

	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	/* trailing channel_array[] holds one byte per supported channel */
	cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;

	scan_config = kzalloc(cmd_size, GFP_KERNEL);
	if (!scan_config)
		return -ENOMEM;

	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
					 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
					 SCAN_CONFIG_N_CHANNELS(num_channels));
	scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
	scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
	scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
	scan_config->out_of_channel_time = cpu_to_le32(170);
	scan_config->suspend_time = cpu_to_le32(30);
	scan_config->dwell_active = 20;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 20;

	memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);

	scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
	scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
				     IWL_CHANNEL_FLAG_ACCURATE_EBS |
				     IWL_CHANNEL_FLAG_EBS_ADD |
				     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* 2.4 GHz channels first, then 5 GHz, matching num_channels above */
	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
	for (i = 0; i < band->n_channels; i++, j++)
		scan_config->channel_array[j] = band->channels[i].hw_value;
	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_channels; i++, j++)
		scan_config->channel_array[j] = band->channels[i].hw_value;

	cmd.data[0] = scan_config;
	cmd.len[0] = cmd_size;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;

	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);

	kfree(scan_config);
	return ret;
}
967
968static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
969{
970 int i;
971
972 for (i = 0; i < mvm->max_scans; i++)
973 if (mvm->scan_uid_status[i] == status)
974 return i;
975
976 return -ENOENT;
977}
978
/* A regular (non-scheduled) scan is encoded as one plan of one iteration */
static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
{
	return params->n_scan_plans == 1 &&
		params->scan_plans[0].iterations == 1;
}
984
985static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
986 struct iwl_scan_req_umac *cmd,
987 struct iwl_mvm_scan_params *params)
988{
989 cmd->active_dwell = scan_timing[params->type].dwell_active;
990 cmd->passive_dwell = scan_timing[params->type].dwell_passive;
991 cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
992 cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
993 cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
994 cmd->scan_priority =
995 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
996
997 if (iwl_mvm_is_regular_scan(params))
998 cmd->ooc_priority =
999 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1000 else
1001 cmd->ooc_priority =
1002 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
1003}
1004
1005static void
1006iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1007 struct ieee80211_channel **channels,
1008 int n_channels, u32 ssid_bitmap,
1009 struct iwl_scan_req_umac *cmd)
1010{
1011 struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
1012 int i;
1013
1014 for (i = 0; i < n_channels; i++) {
1015 channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
1016 channel_cfg[i].channel_num = channels[i]->hw_value;
1017 channel_cfg[i].iter_count = 1;
1018 channel_cfg[i].iter_interval = 0;
1019 }
1020}
1021
1022static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1023 struct iwl_mvm_scan_params *params)
1024{
1025 int flags = 0;
1026
1027 if (params->n_ssids == 0)
1028 flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1029
1030 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
1031 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1032
1033 if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
1034 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1035
1036 if (iwl_mvm_rrm_scan_needed(mvm))
1037 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
1038
1039 if (params->pass_all)
1040 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1041 else
1042 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
1043
1044 if (!iwl_mvm_is_regular_scan(params))
1045 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
1046
1047#ifdef CONFIG_IWLWIFI_DEBUGFS
1048 if (mvm->scan_iter_notif_enabled)
1049 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1050#endif
1051 return flags;
1052}
1053
/*
 * Build a UMAC scan request command in mvm->scan_cmd.
 *
 * Allocates a free scan UID slot, fills dwell times, general and
 * channel flags, the per-channel configs and the scheduling plans, and
 * stores the probe request template in the command tail. The command
 * itself is sent by the caller.
 *
 * @type: IWL_MVM_SCAN_REGULAR, IWL_MVM_SCAN_SCHED or
 *	  IWL_MVM_SCAN_NETDETECT, recorded in the UID slot.
 * Returns 0 on success, -EINVAL if there are too many scan plans, or a
 * negative error if no free UID slot is available.
 */
static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct iwl_mvm_scan_params *params,
			     int type)
{
	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
	/* the tail (SSIDs, schedule, probe req) follows the channel configs */
	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
		sizeof(struct iwl_scan_channel_cfg_umac) *
			mvm->fw->ucode_capa.n_scan_channels;
	int uid, i;
	u32 ssid_bitmap = 0;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
		return -EINVAL;

	/* status 0 means a free UID slot */
	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
	if (uid < 0)
		return uid;

	memset(cmd, 0, ksize(cmd));

	iwl_mvm_scan_umac_dwell(mvm, cmd, params);

	/* mark the slot as running this scan type */
	mvm->scan_uid_status[uid] = type;

	cmd->uid = cpu_to_le32(uid);
	cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));

	if (type == IWL_MVM_SCAN_SCHED)
		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);

	if (iwl_mvm_scan_use_ebs(mvm, vif))
		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;

	cmd->n_channels = params->n_channels;

	iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);

	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
				       params->n_channels, ssid_bitmap, cmd);

	for (i = 0; i < params->n_scan_plans; i++) {
		struct cfg80211_sched_scan_plan *scan_plan =
			&params->scan_plans[i];

		sec_part->schedule[i].iter_count = scan_plan->iterations;
		sec_part->schedule[i].interval =
			cpu_to_le16(scan_plan->interval);
	}

	/*
	 * If the number of iterations of the last scan plan is set to
	 * zero, it should run infinitely. However, this is not always the case.
	 * For example, when regular scan is requested the driver sets one scan
	 * plan with one iteration.
	 */
	if (!sec_part->schedule[i - 1].iter_count)
		sec_part->schedule[i - 1].iter_count = 0xff;

	sec_part->delay = cpu_to_le16(params->delay);
	sec_part->preq = params->preq;

	return 0;
}
1121
1122static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1123{
1124 return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
1125}
1126
1127static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1128{
1129 /* This looks a bit arbitrary, but the idea is that if we run
1130 * out of possible simultaneous scans and the userspace is
1131 * trying to run a scan type that is already running, we
1132 * return -EBUSY. But if the userspace wants to start a
1133 * different type of scan, we stop the opposite type to make
1134 * space for the new request. The reason is backwards
1135 * compatibility with old wpa_supplicant that wouldn't stop a
1136 * scheduled scan before starting a normal scan.
1137 */
1138
1139 if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
1140 return 0;
1141
1142 /* Use a switch, even though this is a bitmask, so that more
1143 * than one bits set will fall in default and we will warn.
1144 */
1145 switch (type) {
1146 case IWL_MVM_SCAN_REGULAR:
1147 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1148 return -EBUSY;
1149 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1150 case IWL_MVM_SCAN_SCHED:
1151 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1152 return -EBUSY;
1153 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
1154 case IWL_MVM_SCAN_NETDETECT:
1155 /* No need to stop anything for net-detect since the
1156 * firmware is restarted anyway. This way, any sched
1157 * scans that were running will be restarted when we
1158 * resume.
1159 */
1160 return 0;
1161 default:
1162 WARN_ON(1);
1163 break;
1164 }
1165
1166 return -EIO;
1167}
1168
/*
 * Start a regular (one-shot) scan on behalf of mac80211.
 *
 * Builds either a UMAC or an LMAC scan command (depending on firmware
 * capabilities) in the preallocated mvm->scan_cmd buffer and sends it.
 * On success, marks the regular scan as running and takes a device
 * reference that is released when the scan completes or is stopped.
 *
 * Returns 0 on success; -EBUSY while the LAR regdomain is unset or a
 * regular scan is already running; -ENOBUFS if the request does not
 * fit in the command buffer; or the command-build/send error.
 */
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   struct cfg80211_scan_request *req,
			   struct ieee80211_scan_ies *ies)
{
	struct iwl_host_cmd hcmd = {
		.len = { iwl_mvm_scan_size(mvm), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_mvm_scan_params params = {};
	int ret;
	/* a regular scan is modelled as one scan plan with one iteration */
	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };

	lockdep_assert_held(&mvm->mutex);

	/* with LAR, scanning before the regdomain is set is not allowed */
	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
		return -EBUSY;
	}

	/* may stop a running sched scan to free a slot for this one */
	ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
	if (ret)
		return ret;

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(!mvm->scan_cmd))
		return -ENOMEM;

	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
		return -ENOBUFS;

	params.n_ssids = req->n_ssids;
	params.flags = req->flags;
	params.n_channels = req->n_channels;
	params.delay = 0;
	params.ssids = req->ssids;
	params.channels = req->channels;
	params.mac_addr = req->mac_addr;
	params.mac_addr_mask = req->mac_addr_mask;
	params.no_cck = req->no_cck;
	params.pass_all = true;		/* report all results, no matching */
	params.n_match_sets = 0;
	params.match_sets = NULL;

	params.scan_plans = &scan_plan;
	params.n_scan_plans = 1;

	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);

	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
		ret = iwl_mvm_scan_umac(mvm, vif, &params,
					IWL_MVM_SCAN_REGULAR);
	} else {
		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
	}

	if (ret)
		return ret;

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (ret) {
		/* If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
		return ret;
	}

	IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
	/* released by the completion notification or by iwl_mvm_scan_stop() */
	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);

	return 0;
}
1248
/*
 * Start a scheduled (or net-detect) scan on behalf of mac80211.
 *
 * @type: IWL_MVM_SCAN_SCHED or IWL_MVM_SCAN_NETDETECT.
 *
 * Builds either a UMAC or an LMAC scan command (depending on firmware
 * capabilities) in mvm->scan_cmd and sends it; on success the scan
 * type is marked as running in mvm->scan_status.
 *
 * Returns 0 on success; -EBUSY while the LAR regdomain is unset or a
 * scan of the same type is already running; -EINVAL for a request with
 * no scan plans; -ENOBUFS if the request does not fit in the command
 * buffer; or the profile-config/build/send error.
 */
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct cfg80211_sched_scan_request *req,
			     struct ieee80211_scan_ies *ies,
			     int type)
{
	struct iwl_host_cmd hcmd = {
		.len = { iwl_mvm_scan_size(mvm), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_mvm_scan_params params = {};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* with LAR, scanning before the regdomain is set is not allowed */
	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
		return -EBUSY;
	}

	/* may stop a running regular scan to free a slot for this one */
	ret = iwl_mvm_check_running_scans(mvm, type);
	if (ret)
		return ret;

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(!mvm->scan_cmd))
		return -ENOMEM;

	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
		return -ENOBUFS;

	params.n_ssids = req->n_ssids;
	params.flags = req->flags;
	params.n_channels = req->n_channels;
	params.ssids = req->ssids;
	params.channels = req->channels;
	params.mac_addr = req->mac_addr;
	params.mac_addr_mask = req->mac_addr_mask;
	params.no_cck = false;
	params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
	params.n_match_sets = req->n_match_sets;
	params.match_sets = req->match_sets;
	if (!req->n_scan_plans)
		return -EINVAL;

	params.n_scan_plans = req->n_scan_plans;
	params.scan_plans = req->scan_plans;

	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);

	/* In theory, LMAC scans can handle a 32-bit delay, but since
	 * waiting for over 18 hours to start the scan is a bit silly
	 * and to keep it aligned with UMAC scans (which only support
	 * 16-bit delays), trim it down to 16-bits.
	 */
	if (req->delay > U16_MAX) {
		IWL_DEBUG_SCAN(mvm,
			       "delay value is > 16-bits, set to max possible\n");
		params.delay = U16_MAX;
	} else {
		params.delay = req->delay;
	}

	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
	if (ret)
		return ret;

	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
		ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
	} else {
		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
	}

	if (ret)
		return ret;

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Sched scan request was sent successfully\n");
		mvm->scan_status |= type;
	} else {
		/* If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
	}

	return ret;
}
1345
1346void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1347 struct iwl_rx_cmd_buffer *rxb)
1348{
1349 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1350 struct iwl_umac_scan_complete *notif = (void *)pkt->data;
1351 u32 uid = __le32_to_cpu(notif->uid);
1352 bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
1353
1354 if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
1355 return;
1356
1357 /* if the scan is already stopping, we don't need to notify mac80211 */
1358 if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
1359 ieee80211_scan_completed(mvm->hw, aborted);
1360 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1361 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
1362 ieee80211_sched_scan_stopped(mvm->hw);
1363 }
1364
1365 mvm->scan_status &= ~mvm->scan_uid_status[uid];
1366 IWL_DEBUG_SCAN(mvm,
1367 "Scan completed, uid %u type %u, status %s, EBS status %s\n",
1368 uid, mvm->scan_uid_status[uid],
1369 notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
1370 "completed" : "aborted",
1371 iwl_mvm_ebs_status_str(notif->ebs_status));
1372 IWL_DEBUG_SCAN(mvm,
1373 "Last line %d, Last iteration %d, Time from last iteration %d\n",
1374 notif->last_schedule, notif->last_iter,
1375 __le32_to_cpu(notif->time_from_last_iter));
1376
1377 if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
1378 notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
1379 mvm->last_ebs_successful = false;
1380
1381 mvm->scan_uid_status[uid] = 0;
1382}
1383
/*
 * Handle the UMAC scan-iteration-complete notification: purely a
 * debug-print of the status and the list of channels scanned so far.
 */
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
					      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
	u8 buf[256];	/* scratch buffer for the formatted channel list */

	IWL_DEBUG_SCAN(mvm,
		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
		       notif->status, notif->scanned_channels,
		       iwl_mvm_dump_channel_list(notif->results,
						 notif->scanned_channels, buf,
						 sizeof(buf)));
}
1398
/*
 * Send a SCAN_ABORT_UMAC command for the running scan of @type.
 *
 * On success the UID slot is marked as "stopping" so the completion
 * notification handler knows not to notify mac80211 about it.
 * Returns 0 on success or a negative error.
 */
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
{
	struct iwl_umac_scan_abort cmd = {};
	int uid, ret;

	lockdep_assert_held(&mvm->mutex);

	/* We should always get a valid index here, because we already
	 * checked that this type of scan was running in the generic
	 * code.
	 */
	uid = iwl_mvm_scan_uid_by_status(mvm, type);
	if (WARN_ON_ONCE(uid < 0))
		return uid;

	cmd.uid = cpu_to_le32(uid);

	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   iwl_cmd_id(SCAN_ABORT_UMAC,
					      IWL_ALWAYS_LONG_GROUP, 0),
				   0, sizeof(cmd), &cmd);
	if (!ret)
		/* mark "stopping" so the Rx handler skips mac80211 notify */
		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;

	return ret;
}
1427
/*
 * Abort the running scan of @type and wait (up to one second) for the
 * firmware's scan-complete notification.
 *
 * Returns 0 on success, or a negative error if the abort command
 * failed or the notification did not arrive in time.
 */
static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
{
	struct iwl_notification_wait wait_scan_done;
	/* either completion may arrive, depending on UMAC vs LMAC FW */
	static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
					       SCAN_OFFLOAD_COMPLETE, };
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* register the waiter before sending the abort to avoid racing
	 * with a notification that arrives immediately
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
				   scan_done_notif,
				   ARRAY_SIZE(scan_done_notif),
				   NULL, NULL);

	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		ret = iwl_mvm_umac_scan_abort(mvm, type);
	else
		ret = iwl_mvm_lmac_scan_abort(mvm);

	if (ret) {
		IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
		/* abort failed: no notification will come, unregister */
		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
		return ret;
	}

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);

	return ret;
}
1459
1460int iwl_mvm_scan_size(struct iwl_mvm *mvm)
1461{
1462 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
1463 return sizeof(struct iwl_scan_req_umac) +
1464 sizeof(struct iwl_scan_channel_cfg_umac) *
1465 mvm->fw->ucode_capa.n_scan_channels +
1466 sizeof(struct iwl_scan_req_umac_tail);
1467
1468 return sizeof(struct iwl_scan_req_lmac) +
1469 sizeof(struct iwl_scan_channel_cfg_lmac) *
1470 mvm->fw->ucode_capa.n_scan_channels +
1471 sizeof(struct iwl_scan_probe_req);
1472}
1473
/*
 * This function is used in nic restart flow, to inform mac80211 about scans
 * that was aborted by restart flow or by an assert.
 */
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
{
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		int uid, i;

		/* report the aborted regular scan, if any */
		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
		if (uid >= 0) {
			ieee80211_scan_completed(mvm->hw, true);
			mvm->scan_uid_status[uid] = 0;
		}
		/* sched scan: only report stopped when the FW will NOT be
		 * restarted (mac80211 restarts it itself in restart_hw)
		 */
		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
		if (uid >= 0 && !mvm->restart_fw) {
			ieee80211_sched_scan_stopped(mvm->hw);
			mvm->scan_uid_status[uid] = 0;
		}

		/* We shouldn't have any UIDs still set. Loop over all the
		 * UIDs to make sure there's nothing left there and warn if
		 * any is found.
		 */
		for (i = 0; i < mvm->max_scans; i++) {
			if (WARN_ONCE(mvm->scan_uid_status[i],
				      "UMAC scan UID %d status was not cleaned\n",
				      i))
				mvm->scan_uid_status[i] = 0;
		}
	} else {
		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
			ieee80211_scan_completed(mvm->hw, true);

		/* Sched scan will be restarted by mac80211 in
		 * restart_hw, so do not report if FW is about to be
		 * restarted.
		 */
		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
			ieee80211_sched_scan_stopped(mvm->hw);
	}
}
1516
/*
 * Stop a running scan of @type, waiting for the firmware confirmation.
 *
 * @notify: if true, tell mac80211 that the scan was stopped.
 * Returns 0 if no such scan was running or the stop succeeded, else
 * the error from the stop/wait sequence (status is cleared anyway).
 */
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
{
	int ret;

	/* nothing to do if that scan type isn't running */
	if (!(mvm->scan_status & type))
		return 0;

	/* in rfkill the firmware is (being) stopped anyway */
	if (iwl_mvm_is_radio_killed(mvm)) {
		ret = 0;
		goto out;
	}

	ret = iwl_mvm_scan_stop_wait(mvm, type);
	if (!ret)
		mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
out:
	/* Clear the scan status so the next scan requests will
	 * succeed and mark the scan as stopping, so that the Rx
	 * handler doesn't do anything, as the scan was stopped from
	 * above.
	 */
	mvm->scan_status &= ~type;

	if (type == IWL_MVM_SCAN_REGULAR) {
		/* Since the rx handler won't do anything now, we have
		 * to release the scan reference here.
		 */
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		if (notify)
			ieee80211_scan_completed(mvm->hw, true);
	} else if (notify) {
		ieee80211_sched_scan_stopped(mvm->hw);
	}

	return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
new file mode 100644
index 000000000000..b0f59fdd287c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -0,0 +1,340 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include "mvm.h"
66
/* For counting bound interfaces */
struct iwl_mvm_active_iface_iterator_data {
	struct ieee80211_vif *ignore_vif;	/* vif to exclude from the count */
	u8 sta_vif_ap_sta_id;		/* AP sta id of a bound station vif */
	enum iwl_sf_state sta_vif_state;	/* SF_FULL_ON if that vif is associated,
						 * SF_INIT_OFF otherwise */
	int num_active_macs;		/* bound, non-p2p vifs counted */
};
74
75/*
76 * Count bound interfaces which are not p2p, besides data->ignore_vif.
77 * data->station_vif will point to one bound vif of type station, if exists.
78 */
79static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
80 struct ieee80211_vif *vif)
81{
82 struct iwl_mvm_active_iface_iterator_data *data = _data;
83 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
84
85 if (vif == data->ignore_vif || !mvmvif->phy_ctxt ||
86 vif->type == NL80211_IFTYPE_P2P_DEVICE)
87 return;
88
89 data->num_active_macs++;
90
91 if (vif->type == NL80211_IFTYPE_STATION) {
92 data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
93 if (vif->bss_conf.assoc)
94 data->sta_vif_state = SF_FULL_ON;
95 else
96 data->sta_vif_state = SF_INIT_OFF;
97 }
98}
99
/*
 * Aging and idle timeouts for the different possible scenarios
 * in default configuration
 *
 * Rows are indexed by SF scenario (single unicast, aggregated unicast,
 * multicast, block-ack, TX response); each row holds
 * { aging timer, idle timer } in that order.
 */
static const
__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
	{
		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
		cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
		cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		cpu_to_le32(SF_BA_AGING_TIMER_DEF),
		cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
	},
	{
		cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
		cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
	},
};
127
/*
 * Aging and idle timeouts for the different possible scenarios
 * in single BSS MAC configuration.
 *
 * Same layout as sf_full_timeout_def: rows indexed by SF scenario,
 * each row holding { aging timer, idle timer }.
 */
static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
	{
		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
		cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		cpu_to_le32(SF_MCAST_AGING_TIMER),
		cpu_to_le32(SF_MCAST_IDLE_TIMER)
	},
	{
		cpu_to_le32(SF_BA_AGING_TIMER),
		cpu_to_le32(SF_BA_IDLE_TIMER)
	},
	{
		cpu_to_le32(SF_TX_RE_AGING_TIMER),
		cpu_to_le32(SF_TX_RE_IDLE_TIMER)
	},
};
154
155static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
156 struct iwl_sf_cfg_cmd *sf_cmd,
157 struct ieee80211_sta *sta)
158{
159 int i, j, watermark;
160
161 sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
162
163 /*
164 * If we are in association flow - check antenna configuration
165 * capabilities of the AP station, and choose the watermark accordingly.
166 */
167 if (sta) {
168 if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
169 switch (sta->rx_nss) {
170 case 1:
171 watermark = SF_W_MARK_SISO;
172 break;
173 case 2:
174 watermark = SF_W_MARK_MIMO2;
175 break;
176 default:
177 watermark = SF_W_MARK_MIMO3;
178 break;
179 }
180 } else {
181 watermark = SF_W_MARK_LEGACY;
182 }
183 /* default watermark value for unassociated mode. */
184 } else {
185 watermark = SF_W_MARK_MIMO2;
186 }
187 sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
188
189 for (i = 0; i < SF_NUM_SCENARIO; i++) {
190 for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
191 sf_cmd->long_delay_timeouts[i][j] =
192 cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
193 }
194 }
195
196 if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
197 BUILD_BUG_ON(sizeof(sf_full_timeout) !=
198 sizeof(__le32) * SF_NUM_SCENARIO *
199 SF_NUM_TIMEOUT_TYPES);
200
201 memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
202 sizeof(sf_full_timeout));
203 } else {
204 BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
205 sizeof(__le32) * SF_NUM_SCENARIO *
206 SF_NUM_TIMEOUT_TYPES);
207
208 memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
209 sizeof(sf_full_timeout_def));
210 }
211
212}
213
/*
 * Send a Smart Fifo configuration command for @new_state and, on
 * success, record the new state in mvm->sf_state.
 *
 * @sta_id: AP station id; only meaningful for SF_FULL_ON, where
 *	    IWL_MVM_STATION_COUNT means "no station" and is an error.
 * Returns 0 on success or a negative error.
 */
static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
			     enum iwl_sf_state new_state)
{
	struct iwl_sf_cfg_cmd sf_cmd = {
		.state = cpu_to_le32(SF_FULL_ON),
	};
	struct ieee80211_sta *sta;
	int ret = 0;

	/* older FW API takes the requested state in the command itself */
	if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
		sf_cmd.state = cpu_to_le32(new_state);

	if (mvm->cfg->disable_dummy_notification)
		sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);

	/*
	 * If an associated AP sta changed its antenna configuration, the state
	 * will remain FULL_ON but SF parameters need to be reconsidered.
	 */
	if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
		return 0;

	switch (new_state) {
	case SF_UNINIT:
		if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
			iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
		break;
	case SF_FULL_ON:
		if (sta_id == IWL_MVM_STATION_COUNT) {
			IWL_ERR(mvm,
				"No station: Cannot switch SF to FULL_ON\n");
			return -EINVAL;
		}
		/* the station pointer is RCU-protected; only dereference
		 * it inside the read-side critical section
		 */
		rcu_read_lock();
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			rcu_read_unlock();
			return -EINVAL;
		}
		iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
		rcu_read_unlock();
		break;
	case SF_INIT_OFF:
		iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
		break;
	default:
		WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
			  new_state);
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
				   sizeof(sf_cmd), &sf_cmd);
	if (!ret)
		mvm->sf_state = new_state;

	return ret;
}
273
/*
 * Update Smart fifo:
 * Count bound interfaces that are not to be removed, ignoring p2p devices,
 * and set new state accordingly.
 *
 * @changed_vif: the vif being added/changed/removed, or NULL.
 * @remove_vif: true when @changed_vif is being removed.
 * Returns 0 on success (including when the update is skipped during
 * HW restart or for p2p devices) or a negative error.
 */
int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
		      bool remove_vif)
{
	enum iwl_sf_state new_state;
	u8 sta_id = IWL_MVM_STATION_COUNT;	/* "no station" by default */
	struct iwl_mvm_vif *mvmvif = NULL;
	struct iwl_mvm_active_iface_iterator_data data = {
		.ignore_vif = changed_vif,
		.sta_vif_state = SF_UNINIT,
		.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
	};

	/*
	 * Ignore the call if we are in HW Restart flow, or if the handled
	 * vif is a p2p device.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
		return 0;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_bound_iface_iterator,
						   &data);

	/* If changed_vif exists and is not to be removed, add to the count */
	if (changed_vif && !remove_vif)
		data.num_active_macs++;

	switch (data.num_active_macs) {
	case 0:
		/* If there are no active macs - change state to SF_INIT_OFF */
		new_state = SF_INIT_OFF;
		break;
	case 1:
		if (remove_vif) {
			/* The one active mac left is of type station
			 * and we filled the relevant data during iteration
			 */
			new_state = data.sta_vif_state;
			sta_id = data.sta_vif_ap_sta_id;
		} else {
			if (WARN_ON(!changed_vif))
				return -EINVAL;
			if (changed_vif->type != NL80211_IFTYPE_STATION) {
				new_state = SF_UNINIT;
			} else if (changed_vif->bss_conf.assoc &&
				   changed_vif->bss_conf.dtim_period) {
				mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
				sta_id = mvmvif->ap_sta_id;
				new_state = SF_FULL_ON;
			} else {
				new_state = SF_INIT_OFF;
			}
		}
		break;
	default:
		/* If there are multiple active macs - change to SF_UNINIT */
		new_state = SF_UNINIT;
	}
	return iwl_mvm_sf_config(mvm, sta_id, new_state);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
new file mode 100644
index 000000000000..300a249486e4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -0,0 +1,1810 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <net/mac80211.h>
66
67#include "mvm.h"
68#include "sta.h"
69#include "rs.h"
70
/*
 * Find the first free entry in mvm->fw_id_to_mac_id[].
 *
 * Returns a free station id, or IWL_MVM_STATION_COUNT when the table is
 * full.  Caller must hold mvm->mutex; the table itself is walked with
 * rcu_dereference_protected() under that mutex, no rcu_read_lock needed.
 */
71static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
72				    enum nl80211_iftype iftype)
73{
74	int sta_id;
75	u32 reserved_ids = 0;
76
	/* the reserved_ids bitmap below only works if ids fit in a u32 */
77	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	/* during HW restart ids are reused, not allocated - see iwl_mvm_add_sta() */
78	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
79
80	lockdep_assert_held(&mvm->mutex);
81
82	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
83	if (iftype != NL80211_IFTYPE_STATION)
84		reserved_ids = BIT(0);
85
86	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
87	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
88		if (BIT(sta_id) & reserved_ids)
89			continue;
90
91		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
92					       lockdep_is_held(&mvm->mutex)))
93			return sta_id;
94	}
	/* sentinel: no free slot found */
95	return IWL_MVM_STATION_COUNT;
96}
97
98/* send station add/update command to firmware */
/*
 * Build and send an ADD_STA command for @sta.
 *
 * @update: false for an initial add (queue mask and MAC address are
 *          included), true for a modify of an existing fw station.
 *
 * Translates mac80211 station state (bandwidth, NSS, SMPS, HT/VHT
 * aggregation parameters) into STA_FLG_* firmware flags.
 * Returns 0 on ADD_STA_SUCCESS, -EIO on any other fw status, or the
 * error from sending the command.
 */
99int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
100			   bool update)
101{
102	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
103	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
104		.sta_id = mvm_sta->sta_id,
105		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
106		.add_modify = update ? 1 : 0,
107		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
108						 STA_FLG_MIMO_EN_MSK),
109	};
110	int ret;
111	u32 status;
112	u32 agg_size = 0, mpdu_dens = 0;
113
114	if (!update) {
115		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
116		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
117	}
118
	/* intentional cascade: each bandwidth implies all the narrower ones */
119	switch (sta->bandwidth) {
120	case IEEE80211_STA_RX_BW_160:
121		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
122		/* fall through */
123	case IEEE80211_STA_RX_BW_80:
124		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
125		/* fall through */
126	case IEEE80211_STA_RX_BW_40:
127		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
128		/* fall through */
129	case IEEE80211_STA_RX_BW_20:
130		if (sta->ht_cap.ht_supported)
131			add_sta_cmd.station_flags |=
132				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
133		break;
134	}
135
136	switch (sta->rx_nss) {
137	case 1:
138		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
139		break;
140	case 2:
141		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
142		break;
143	case 3 ... 8:
		/* fw caps at 3 streams regardless of the peer's NSS */
144		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
145		break;
146	}
147
148	switch (sta->smps_mode) {
149	case IEEE80211_SMPS_AUTOMATIC:
150	case IEEE80211_SMPS_NUM_MODES:
151		WARN_ON(1);
152		break;
153	case IEEE80211_SMPS_STATIC:
154		/* override NSS */
155		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
156		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
157		break;
158	case IEEE80211_SMPS_DYNAMIC:
159		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
160		break;
161	case IEEE80211_SMPS_OFF:
162		/* nothing */
163		break;
164	}
165
166	if (sta->ht_cap.ht_supported) {
167		add_sta_cmd.station_flags_msk |=
168			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
169				    STA_FLG_AGG_MPDU_DENS_MSK);
170
171		mpdu_dens = sta->ht_cap.ampdu_density;
172	}
173
	/* VHT A-MPDU exponent takes precedence over the HT factor */
174	if (sta->vht_cap.vht_supported) {
175		agg_size = sta->vht_cap.cap &
176			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
177		agg_size >>=
178			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
179	} else if (sta->ht_cap.ht_supported) {
180		agg_size = sta->ht_cap.ampdu_factor;
181	}
182
183	add_sta_cmd.station_flags |=
184		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
185	add_sta_cmd.station_flags |=
186		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
187
188	status = ADD_STA_SUCCESS;
189	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
190					  &add_sta_cmd, &status);
191	if (ret)
192		return ret;
193
194	switch (status) {
195	case ADD_STA_SUCCESS:
196		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
197		break;
198	default:
199		ret = -EIO;
200		IWL_ERR(mvm, "ADD_STA failed\n");
201		break;
202	}
203
204	return ret;
205}
206
/*
 * Allocate dedicated HW TX queues for a TDLS station, one per AC,
 * from the non-aggregation queue range, and enable them.
 *
 * Returns 0 on success or -EBUSY if no free queue is available.
 * Caller must hold mvm->mutex.
 */
207static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
208				 struct ieee80211_sta *sta)
209{
210	unsigned long used_hw_queues;
211	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
212	unsigned int wdg_timeout =
213		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
214	u32 ac;
215
216	lockdep_assert_held(&mvm->mutex);
217
218	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
219
220	/* Find available queues, and allocate them to the ACs */
221	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
222		u8 queue = find_first_zero_bit(&used_hw_queues,
223					       mvm->first_agg_queue);
224
225		if (queue >= mvm->first_agg_queue) {
226			IWL_ERR(mvm, "Failed to allocate STA queue\n");
227			return -EBUSY;
228		}
229
		/* mark taken locally so the next AC gets a different queue */
230		__set_bit(queue, &used_hw_queues);
231		mvmsta->hw_queue[ac] = queue;
232	}
233
234	/* Found a place for all queues - enable them */
235	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
236		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
237				      mvmsta->hw_queue[ac],
238				      iwl_mvm_ac_to_tx_fifo[ac], 0,
239				      wdg_timeout);
240		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
241	}
242
243	return 0;
244}
245
/*
 * Disable all HW TX queues previously allocated to a TDLS station
 * (the inverse of iwl_mvm_tdls_sta_init()).  Caller must hold mvm->mutex.
 */
246static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
247				    struct ieee80211_sta *sta)
248{
249	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
250	unsigned long sta_msk;
251	int i;
252
253	lockdep_assert_held(&mvm->mutex);
254
255	/* disable the TDLS STA-specific queues */
256	sta_msk = mvmsta->tfd_queue_msk;
257	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
258		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
259}
260
/*
 * Add a mac80211 station to the driver and firmware.
 *
 * Allocates a station id (or reuses the old one during HW restart),
 * initializes the driver-private iwl_mvm_sta, assigns TX queues (dedicated
 * ones for TDLS peers, the vif's queues otherwise), sends ADD_STA to fw,
 * and finally publishes the station in fw_id_to_mac_id[] under RCU.
 *
 * Returns 0, -ENOSPC when the station table is full, or an fw error.
 * Caller must hold mvm->mutex.
 */
261int iwl_mvm_add_sta(struct iwl_mvm *mvm,
262		    struct ieee80211_vif *vif,
263		    struct ieee80211_sta *sta)
264{
265	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
266	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
267	int i, ret, sta_id;
268
269	lockdep_assert_held(&mvm->mutex);
270
271	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
272		sta_id = iwl_mvm_find_free_sta_id(mvm,
273						  ieee80211_vif_type_p2p(vif));
274	else
		/* HW restart: keep the id the station had before */
275		sta_id = mvm_sta->sta_id;
276
277	if (sta_id == IWL_MVM_STATION_COUNT)
278		return -ENOSPC;
279
280	if (vif->type == NL80211_IFTYPE_AP) {
281		mvmvif->ap_assoc_sta_count++;
282		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
283	}
284
285	spin_lock_init(&mvm_sta->lock);
286
287	mvm_sta->sta_id = sta_id;
288	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
289						      mvmvif->color);
290	mvm_sta->vif = vif;
291	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
292	mvm_sta->tx_protection = 0;
293	mvm_sta->tt_tx_protection = false;
294
295	/* HW restart, don't assume the memory has been zeroed */
296	atomic_set(&mvm->pending_frames[sta_id], 0);
297	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
298	mvm_sta->tfd_queue_msk = 0;
299
300	/* allocate new queues for a TDLS station */
301	if (sta->tdls) {
302		ret = iwl_mvm_tdls_sta_init(mvm, sta);
303		if (ret)
304			return ret;
305	} else {
306		for (i = 0; i < IEEE80211_NUM_ACS; i++)
307			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
308				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
309	}
310
311	/* for HW restart - reset everything but the sequence number */
312	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
313		u16 seq = mvm_sta->tid_data[i].seq_number;
314		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
315		mvm_sta->tid_data[i].seq_number = seq;
316	}
317	mvm_sta->agg_tids = 0;
318
319	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
320	if (ret)
321		goto err;
322
323	if (vif->type == NL80211_IFTYPE_STATION) {
324		if (!sta->tdls) {
325			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
326			mvmvif->ap_sta_id = sta_id;
327		} else {
			/* TDLS requires us to already be associated to an AP */
328			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
329		}
330	}
331
	/* publish the station - readers use RCU on fw_id_to_mac_id */
332	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
333
334	return 0;
335
336err:
	/*
	 * NOTE(review): this error path runs for non-TDLS stations too, yet
	 * iwl_mvm_tdls_sta_deinit() disables every queue in tfd_queue_msk,
	 * which for non-TDLS are the vif's shared AC queues - verify that
	 * disabling them here is intended.
	 */
337	iwl_mvm_tdls_sta_deinit(mvm, sta);
338	return ret;
339}
340
/* Push updated mac80211 station parameters to fw (modify, not add). */
341int iwl_mvm_update_sta(struct iwl_mvm *mvm,
342		       struct ieee80211_vif *vif,
343		       struct ieee80211_sta *sta)
344{
345	return iwl_mvm_sta_send_to_fw(mvm, sta, true);
346}
347
/*
 * Ask fw to start (@drain = true) or stop draining all pending frames
 * of @mvmsta, via an ADD_STA modify with the STA_FLG_DRAIN_FLOW flag.
 * Returns 0 on success, -EIO on fw failure.  Caller must hold mvm->mutex.
 */
348int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
349		      bool drain)
350{
351	struct iwl_mvm_add_sta_cmd cmd = {};
352	int ret;
353	u32 status;
354
355	lockdep_assert_held(&mvm->mutex);
356
357	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
358	cmd.sta_id = mvmsta->sta_id;
359	cmd.add_modify = STA_MODE_MODIFY;
360	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
361	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
362
363	status = ADD_STA_SUCCESS;
364	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
365					  &cmd, &status);
366	if (ret)
367		return ret;
368
369	switch (status) {
370	case ADD_STA_SUCCESS:
371		IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
372			       mvmsta->sta_id);
373		break;
374	default:
375		ret = -EIO;
376		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
377			mvmsta->sta_id);
378		break;
379	}
380
381	return ret;
382}
383
384/*
385 * Remove a station from the FW table. Before sending the command to remove
386 * the station validate that the station is indeed known to the driver (sanity
387 * only).
 *
 * Does NOT clear fw_id_to_mac_id[] - callers do that themselves.
 * Returns 0, -EINVAL if the id maps to no station, or the send error.
388 */
389static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
390{
391	struct ieee80211_sta *sta;
392	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
393		.sta_id = sta_id,
394	};
395	int ret;
396
397	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
398					lockdep_is_held(&mvm->mutex));
399
400	/* Note: internal stations are marked as error values */
401	if (!sta) {
402		IWL_ERR(mvm, "Invalid station id\n");
403		return -EINVAL;
404	}
405
406	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
407				   sizeof(rm_sta_cmd), &rm_sta_cmd);
408	if (ret) {
409		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
410		return ret;
411	}
412
413	return 0;
414}
415
/*
 * Deferred-removal worker: finish removing stations whose frames were
 * still in flight when iwl_mvm_rm_sta() ran.
 *
 * A slot in fw_id_to_mac_id[] encodes station state as an ERR_PTR:
 * -EBUSY = removed but waiting for drain (handled here), -ENOENT =
 * already reclaimed by mac80211, -EINVAL = internal station.
 */
416void iwl_mvm_sta_drained_wk(struct work_struct *wk)
417{
418	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
419	u8 sta_id;
420
421	/*
422	 * The mutex is needed because of the SYNC cmd, but not only: if the
423	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
424	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
425	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
426	 * that later.
427	 */
428	mutex_lock(&mvm->mutex);
429
430	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
431		int ret;
432		struct ieee80211_sta *sta =
433			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
434						  lockdep_is_held(&mvm->mutex));
435
436		/*
437		 * This station is in use or RCU-removed; the latter happens in
438		 * managed mode, where mac80211 removes the station before we
439		 * can remove it from firmware (we can only do that after the
440		 * MAC is marked unassociated), and possibly while the deauth
441		 * frame to disconnect from the AP is still queued. Then, the
442		 * station pointer is -ENOENT when the last skb is reclaimed.
443		 */
444		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
445			continue;
446
447		if (PTR_ERR(sta) == -EINVAL) {
448			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
449				sta_id);
450			continue;
451		}
452
453		if (!sta) {
454			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
455				sta_id);
456			continue;
457		}
458
		/* only -EBUSY (pending drain) should remain at this point */
459		WARN_ON(PTR_ERR(sta) != -EBUSY);
460		/* This station was removed and we waited until it got drained,
461		 * we can now proceed and remove it.
462		 */
463		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
464		if (ret) {
465			IWL_ERR(mvm,
466				"Couldn't remove sta %d after it was drained\n",
467				sta_id);
468			continue;
469		}
470		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
471		clear_bit(sta_id, mvm->sta_drained);
472
		/* TDLS stations parked their dedicated queues for deferred disable */
473		if (mvm->tfd_drained[sta_id]) {
474			unsigned long i, msk = mvm->tfd_drained[sta_id];
475
476			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
477				iwl_mvm_disable_txq(mvm, i, i,
478						    IWL_MAX_TID_COUNT, 0);
479
480			mvm->tfd_drained[sta_id] = 0;
481			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
482				       sta_id, msk);
483		}
484	}
485
486	mutex_unlock(&mvm->mutex);
487}
488
/*
 * Remove a mac80211 station from driver and firmware.
 *
 * For the AP station of a client vif, first drain+flush its TX queues.
 * If frames are still pending, mark the slot ERR_PTR(-EBUSY) and let
 * iwl_mvm_sta_drained_wk() finish the removal; otherwise remove from fw
 * immediately and clear the RCU slot.  Caller must hold mvm->mutex.
 */
489int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
490		   struct ieee80211_vif *vif,
491		   struct ieee80211_sta *sta)
492{
493	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
494	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
495	int ret;
496
497	lockdep_assert_held(&mvm->mutex);
498
499	if (vif->type == NL80211_IFTYPE_STATION &&
500	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
501		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
502		if (ret)
503			return ret;
504		/* flush its queues here since we are freeing mvm_sta */
505		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
506		if (ret)
507			return ret;
508		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
509						    mvm_sta->tfd_queue_msk);
510		if (ret)
511			return ret;
512		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
513
514		/* if we are associated - we can't remove the AP STA now */
515		if (vif->bss_conf.assoc)
516			return ret;
517
518		/* unassoc - go ahead - remove the AP STA now */
519		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
520
521		/* clear d0i3_ap_sta_id if no longer relevant */
522		if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
523			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
524	}
525
526	/*
527	 * This shouldn't happen - the TDLS channel switch should be canceled
528	 * before the STA is removed.
529	 */
530	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
531		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
532		cancel_delayed_work(&mvm->tdls_cs.dwork);
533	}
534
535	/*
536	 * Make sure that the tx response code sees the station as -EBUSY and
537	 * calls the drain worker.
538	 */
539	spin_lock_bh(&mvm_sta->lock);
540	/*
541	 * There are frames pending on the AC queues for this station.
542	 * We need to wait until all the frames are drained...
543	 */
544	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
545		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
546				   ERR_PTR(-EBUSY));
547		spin_unlock_bh(&mvm_sta->lock);
548
549		/* disable TDLS sta queues on drain complete */
550		if (sta->tdls) {
551			mvm->tfd_drained[mvm_sta->sta_id] =
552							mvm_sta->tfd_queue_msk;
553			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
554				       mvm_sta->sta_id);
555		}
556
557		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
558	} else {
559		spin_unlock_bh(&mvm_sta->lock);
560
561		if (sta->tdls)
562			iwl_mvm_tdls_sta_deinit(mvm, sta);
563
564		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
565		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
566	}
567
568	return ret;
569}
570
/*
 * Remove a station from fw by raw station id and clear the driver's
 * fw_id_to_mac_id[] slot, regardless of the fw removal result.
 *
 * Returns the result of iwl_mvm_rm_sta_common() (0, -EINVAL or the
 * command-send error).  Caller must hold mvm->mutex.
 */
571int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
572		      struct ieee80211_vif *vif,
573		      u8 sta_id)
574{
575	int ret;
576
	/*
	 * Assert lock ownership *before* touching the fw station table;
	 * the original code sent the command in the declaration
	 * initializer, ahead of this check.
	 */
577	lockdep_assert_held(&mvm->mutex);
578
	ret = iwl_mvm_rm_sta_common(mvm, sta_id);
	/* clear the slot even on failure - the driver forgets this sta */
579	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
580	return ret;
581}
582
/*
 * Allocate a station id and queue mask for an internal (driver-only)
 * station (aux/broadcast).  During HW restart the previous id is kept.
 * The RCU slot is set to ERR_PTR(-EINVAL) so iterators see it occupied.
 * Returns 0 or -ENOSPC.
 */
583static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
584				    struct iwl_mvm_int_sta *sta,
585				    u32 qmask, enum nl80211_iftype iftype)
586{
587	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
588		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
589		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
590			return -ENOSPC;
591	}
592
593	sta->tfd_queue_msk = qmask;
594
595	/* put a non-NULL value so iterating over the stations won't stop */
596	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
597	return 0;
598}
599
/*
 * Release an internal station: clear its RCU slot, zero the struct and
 * reset sta_id to the invalid sentinel.
 */
600static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
601				    struct iwl_mvm_int_sta *sta)
602{
603	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
604	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
605	sta->sta_id = IWL_MVM_STATION_COUNT;
606}
607
/*
 * Send ADD_STA for an internal station.
 *
 * @addr: MAC address to program, or NULL to leave it zeroed (aux sta).
 * @mac_id/@color: fw MAC context the station is bound to.
 * Returns 0 on ADD_STA_SUCCESS, -EIO otherwise.  Caller must hold
 * mvm->mutex.
 */
608static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
609				      struct iwl_mvm_int_sta *sta,
610				      const u8 *addr,
611				      u16 mac_id, u16 color)
612{
613	struct iwl_mvm_add_sta_cmd cmd;
614	int ret;
615	u32 status;
616
617	lockdep_assert_held(&mvm->mutex);
618
619	memset(&cmd, 0, sizeof(cmd));
620	cmd.sta_id = sta->sta_id;
621	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
622							     color));
623
624	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
625
626	if (addr)
627		memcpy(cmd.addr, addr, ETH_ALEN);
628
629	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
630					  &cmd, &status);
631	if (ret)
632		return ret;
633
634	switch (status) {
635	case ADD_STA_SUCCESS:
636		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
637		return 0;
638	default:
639		ret = -EIO;
640		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
641			status);
642		break;
643	}
644	return ret;
645}
646
/*
 * Create the auxiliary station used for off-channel activity: map the
 * aux queue to the MCAST fifo, allocate the internal station and add it
 * to fw bound to the aux MAC context.  Caller must hold mvm->mutex.
 */
647int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
648{
	/* watchdog disabled entirely when queue-hang detection is off */
649	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
650					mvm->cfg->base_params->wd_timeout :
651					IWL_WATCHDOG_DISABLED;
652	int ret;
653
654	lockdep_assert_held(&mvm->mutex);
655
656	/* Map Aux queue to fifo - needs to happen before adding Aux station */
657	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
658			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
659
660	/* Allocate aux station and assign to it the aux queue */
661	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
662				       NL80211_IFTYPE_UNSPECIFIED);
663	if (ret)
664		return ret;
665
666	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
667					 MAC_INDEX_AUX, 0);
668
669	if (ret)
670		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
671	return ret;
672}
673
/* Free the aux station's driver-side resources.  Needs mvm->mutex. */
674void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
675{
676	lockdep_assert_held(&mvm->mutex);
677
678	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
679}
680
681/*
682 * Send the add station command for the vif's broadcast station.
683 * Assumes that the station was already allocated.
684 *
685 * @mvm: the mvm component
686 * @vif: the interface to which the broadcast station is added
687 * @bsta: the broadcast station to add.
 *
 * For ADHOC the BSSID is used instead of the all-ones broadcast address.
 * Returns 0, -ENOSPC if no station id was allocated, or an fw error.
688 */
689int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
690{
691	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
692	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
693	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
694	const u8 *baddr = _baddr;
695
696	lockdep_assert_held(&mvm->mutex);
697
698	if (vif->type == NL80211_IFTYPE_ADHOC)
699		baddr = vif->bss_conf.bssid;
700
701	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
702		return -ENOSPC;
703
704	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
705					  mvmvif->id, mvmvif->color);
706}
707
708/* Send the FW a request to remove the station from it's internal data
709 * structures, but DO NOT remove the entry from the local data structures. */
/* Needs mvm->mutex; returns the fw removal result. */
710int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
711{
712	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
713	int ret;
714
715	lockdep_assert_held(&mvm->mutex);
716
717	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
718	if (ret)
719		IWL_WARN(mvm, "Failed sending remove station\n");
720	return ret;
721}
722
/*
 * Allocate the driver-side broadcast station for @vif, with a queue
 * mask derived from the vif's queues (minus the AP CAB queue, which fw
 * treats as multicast-only).  Needs mvm->mutex.
 */
723int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
724{
725	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
726	u32 qmask;
727
728	lockdep_assert_held(&mvm->mutex);
729
730	qmask = iwl_mvm_mac_get_queues_mask(vif);
731
732	/*
733	 * The firmware defines the TFD queue mask to only be relevant
734	 * for *unicast* queues, so the multicast (CAB) queue shouldn't
735	 * be included.
736	 */
737	if (vif->type == NL80211_IFTYPE_AP)
738		qmask &= ~BIT(vif->cab_queue);
739
740	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
741					ieee80211_vif_type_p2p(vif));
742}
743
744/* Allocate a new station entry for the broadcast station to the given vif,
745 * and send it to the FW.
746 * Note that each P2P mac should have its own broadcast station.
747 *
748 * @mvm: the mvm component
749 * @vif: the interface to which the broadcast station is added
750 * @bsta: the broadcast station to add. */
/* On fw failure the locally-allocated station is rolled back. */
751int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
752{
753	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
754	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
755	int ret;
756
757	lockdep_assert_held(&mvm->mutex);
758
759	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
760	if (ret)
761		return ret;
762
763	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
764
765	if (ret)
766		iwl_mvm_dealloc_int_sta(mvm, bsta);
767
768	return ret;
769}
770
/* Free the vif's broadcast station driver-side state. */
771void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
772{
773	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
774
775	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
776}
777
778/*
779 * Send the FW a request to remove the station from it's internal data
780 * structures, and in addition remove it from the local data structure.
 *
 * Local state is freed even if the fw removal fails; the fw result is
 * returned.  Needs mvm->mutex.
781 */
782int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
783{
784	int ret;
785
786	lockdep_assert_held(&mvm->mutex);
787
788	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
789
790	iwl_mvm_dealloc_bcast_sta(mvm, vif);
791
792	return ret;
793}
794
795#define IWL_MAX_RX_BA_SESSIONS 16
796
/*
 * Start or stop an RX block-ack (aggregation) session for @tid in fw,
 * enforcing the IWL_MAX_RX_BA_SESSIONS limit and keeping the
 * mvm->rx_ba_sessions counter in sync.
 *
 * @ssn: starting sequence number (start only).
 * Returns 0, -ENOSPC (limit reached or fw refused), or -EIO.
 * Caller must hold mvm->mutex.
 */
797int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
798		       int tid, u16 ssn, bool start)
799{
800	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
801	struct iwl_mvm_add_sta_cmd cmd = {};
802	int ret;
803	u32 status;
804
805	lockdep_assert_held(&mvm->mutex);
806
807	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
808		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
809		return -ENOSPC;
810	}
811
812	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
813	cmd.sta_id = mvm_sta->sta_id;
814	cmd.add_modify = STA_MODE_MODIFY;
815	if (start) {
816		cmd.add_immediate_ba_tid = (u8) tid;
817		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
818	} else {
819		cmd.remove_immediate_ba_tid = (u8) tid;
820	}
821	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
822				  STA_MODIFY_REMOVE_BA_TID;
823
824	status = ADD_STA_SUCCESS;
825	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
826					  &cmd, &status);
827	if (ret)
828		return ret;
829
830	switch (status) {
831	case ADD_STA_SUCCESS:
832		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
833			       start ? "start" : "stopp");
834		break;
835	case ADD_STA_IMMEDIATE_BA_FAILURE:
836		IWL_WARN(mvm, "RX BA Session refused by fw\n");
837		ret = -ENOSPC;
838		break;
839	default:
840		ret = -EIO;
841		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
842			start ? "start" : "stopp", status);
843		break;
844	}
845
	/* keep the session counter consistent with what fw accepted */
846	if (!ret) {
847		if (start)
848			mvm->rx_ba_sessions++;
849		else if (mvm->rx_ba_sessions > 0)
850			/* check that restart flow didn't zero the counter */
851			mvm->rx_ba_sessions--;
852	}
853
854	return ret;
855}
856
/*
 * Enable (@start) or disable TX aggregation for @tid on @queue: update
 * the station's queue mask and per-tid agg-disable mask locally, then
 * push both to fw in a single ADD_STA modify.
 * Returns 0 or -EIO.  Caller must hold mvm->mutex.
 */
857static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
858			      int tid, u8 queue, bool start)
859{
860	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
861	struct iwl_mvm_add_sta_cmd cmd = {};
862	int ret;
863	u32 status;
864
865	lockdep_assert_held(&mvm->mutex);
866
867	if (start) {
868		mvm_sta->tfd_queue_msk |= BIT(queue);
869		mvm_sta->tid_disable_agg &= ~BIT(tid);
870	} else {
871		mvm_sta->tfd_queue_msk &= ~BIT(queue);
872		mvm_sta->tid_disable_agg |= BIT(tid);
873	}
874
875	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
876	cmd.sta_id = mvm_sta->sta_id;
877	cmd.add_modify = STA_MODE_MODIFY;
878	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
879	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
880	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
881
882	status = ADD_STA_SUCCESS;
883	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
884					  &cmd, &status);
885	if (ret)
886		return ret;
887
888	switch (status) {
889	case ADD_STA_SUCCESS:
890		break;
891	default:
892		ret = -EIO;
893		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
894			start ? "start" : "stopp", status);
895		break;
896	}
897
898	return ret;
899}
900
/* TID (802.11 user priority, index) -> mac80211 access category */
901const u8 tid_to_mac80211_ac[] = {
902	IEEE80211_AC_BE,
903	IEEE80211_AC_BK,
904	IEEE80211_AC_BK,
905	IEEE80211_AC_BE,
906	IEEE80211_AC_VI,
907	IEEE80211_AC_VI,
908	IEEE80211_AC_VO,
909	IEEE80211_AC_VO,
910};
911
/* TID -> firmware (ucode) access category, same UP mapping */
912static const u8 tid_to_ucode_ac[] = {
913	AC_BE,
914	AC_BK,
915	AC_BK,
916	AC_BE,
917	AC_VI,
918	AC_VI,
919	AC_VO,
920	AC_VO,
921};
922
/*
 * mac80211 ampdu-action START handler: reserve an aggregation queue for
 * @tid and record the starting SSN in *@ssn.
 *
 * If frames queued before the request are still pending, wait in
 * IWL_EMPTYING_HW_QUEUE_ADDBA; otherwise go straight to IWL_AGG_STARTING
 * and notify mac80211.  Takes mvmsta->lock and queue_info_lock under
 * mvm->mutex.  Returns 0, -ENXIO (bad state), -EIO (in D0i3), or a
 * queue-allocation error.
 */
923int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
924			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
925{
926	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
927	struct iwl_mvm_tid_data *tid_data;
928	int txq_id;
929	int ret;
930
931	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
932		return -EINVAL;
933
934	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
935		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
936			mvmsta->tid_data[tid].state);
937		return -ENXIO;
938	}
939
940	lockdep_assert_held(&mvm->mutex);
941
942	spin_lock_bh(&mvmsta->lock);
943
944	/* possible race condition - we entered D0i3 while starting agg */
945	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
946		spin_unlock_bh(&mvmsta->lock);
947		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
948		return -EIO;
949	}
950
951	spin_lock_bh(&mvm->queue_info_lock);
952
953	txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
954					 mvm->last_agg_queue);
955	if (txq_id < 0) {
956		ret = txq_id;
957		spin_unlock_bh(&mvm->queue_info_lock);
958		IWL_ERR(mvm, "Failed to allocate agg queue\n");
959		goto release_locks;
960	}
	/* reserve the queue until ..._tx_agg_oper() actually enables it */
961	mvm->queue_info[txq_id].setup_reserved = true;
962	spin_unlock_bh(&mvm->queue_info_lock);
963
964	tid_data = &mvmsta->tid_data[tid];
965	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
966	tid_data->txq_id = txq_id;
967	*ssn = tid_data->ssn;
968
969	IWL_DEBUG_TX_QUEUES(mvm,
970			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
971			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
972			    tid_data->next_reclaimed);
973
	/* no un-aggregated frames in flight - session can start right away */
974	if (tid_data->ssn == tid_data->next_reclaimed) {
975		tid_data->state = IWL_AGG_STARTING;
976		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
977	} else {
978		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
979	}
980
981	ret = 0;
982
983release_locks:
984	spin_unlock_bh(&mvmsta->lock);
985
986	return ret;
987}
988
/*
 * mac80211 ampdu-action OPERATIONAL handler: activate the TX agg session
 * negotiated for @tid.
 *
 * Enables the reserved agg queue, tells fw via ADD_STA, clears the
 * queue reservation, clamps @buf_size into the station's global agg
 * frame limit, and pushes an updated link-quality command.
 * Returns 0, -EIO, or the LQ-command result.
 */
989int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
990			    struct ieee80211_sta *sta, u16 tid, u8 buf_size)
991{
992	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
993	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
994	unsigned int wdg_timeout =
995		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
996	int queue, fifo, ret;
997	u16 ssn;
998
	/* agg_tids must have one bit per TID */
999	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
1000		     != IWL_MAX_TID_COUNT);
1001
1002	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
1003
1004	spin_lock_bh(&mvmsta->lock);
1005	ssn = tid_data->ssn;
1006	queue = tid_data->txq_id;
1007	tid_data->state = IWL_AGG_ON;
1008	mvmsta->agg_tids |= BIT(tid);
	/* ssn sentinel: no longer tracking a pending start */
1009	tid_data->ssn = 0xffff;
1010	spin_unlock_bh(&mvmsta->lock);
1011
1012	fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
1013
1014	iwl_mvm_enable_agg_txq(mvm, queue,
1015			       vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
1016			       mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
1017
1018	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1019	if (ret)
1020		return -EIO;
1021
1022	/* No need to mark as reserved */
1023	spin_lock_bh(&mvm->queue_info_lock);
1024	mvm->queue_info[queue].setup_reserved = false;
1025	spin_unlock_bh(&mvm->queue_info_lock);
1026
1027	/*
1028	 * Even though in theory the peer could have different
1029	 * aggregation reorder buffer sizes for different sessions,
1030	 * our ucode doesn't allow for that and has a global limit
1031	 * for each station. Therefore, use the minimum of all the
1032	 * aggregation sessions and our default value.
1033	 */
1034	mvmsta->max_agg_bufsize =
1035		min(mvmsta->max_agg_bufsize, buf_size);
1036	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
1037
1038	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
1039		     sta->addr, tid);
1040
1041	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
1042}
1043
/*
 * mac80211 ampdu-action STOP handler for @tid.
 *
 * Behavior depends on the session state: an active session (IWL_AGG_ON)
 * is torn down immediately if the HW queue is empty, otherwise it
 * transitions to IWL_EMPTYING_HW_QUEUE_DELBA and teardown is deferred;
 * a not-yet-operational session is simply cancelled.  During HW restart
 * the callback is acked without any work since state was already wiped.
 * Returns 0 or -EINVAL on an unexpected state.
 */
1044int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1045			    struct ieee80211_sta *sta, u16 tid)
1046{
1047	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1048	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1049	u16 txq_id;
1050	int err;
1051
1052
1053	/*
1054	 * If mac80211 is cleaning its state, then say that we finished since
1055	 * our state has been cleared anyway.
1056	 */
1057	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1058		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1059		return 0;
1060	}
1061
1062	spin_lock_bh(&mvmsta->lock);
1063
1064	txq_id = tid_data->txq_id;
1065
1066	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
1067			    mvmsta->sta_id, tid, txq_id, tid_data->state);
1068
1069	mvmsta->agg_tids &= ~BIT(tid);
1070
1071	/* No need to mark as reserved anymore */
1072	spin_lock_bh(&mvm->queue_info_lock);
1073	mvm->queue_info[txq_id].setup_reserved = false;
1074	spin_unlock_bh(&mvm->queue_info_lock);
1075
1076	switch (tid_data->state) {
1077	case IWL_AGG_ON:
1078		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1079
1080		IWL_DEBUG_TX_QUEUES(mvm,
1081				    "ssn = %d, next_recl = %d\n",
1082				    tid_data->ssn, tid_data->next_reclaimed);
1083
1084		/* There are still packets for this RA / TID in the HW */
1085		if (tid_data->ssn != tid_data->next_reclaimed) {
1086			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
1087			err = 0;
1088			break;
1089		}
1090
1091		tid_data->ssn = 0xffff;
1092		tid_data->state = IWL_AGG_OFF;
		/* note: this path unlocks and returns directly below */
1093		spin_unlock_bh(&mvmsta->lock);
1094
1095		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1096
1097		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1098
1099		iwl_mvm_disable_txq(mvm, txq_id,
1100				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
1101				    0);
1102		return 0;
1103	case IWL_AGG_STARTING:
1104	case IWL_EMPTYING_HW_QUEUE_ADDBA:
1105		/*
1106		 * The agg session has been stopped before it was set up. This
1107		 * can happen when the AddBA timer times out for example.
1108		 */
1109
1110		/* No barriers since we are under mutex */
1111		lockdep_assert_held(&mvm->mutex);
1112
1113		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1114		tid_data->state = IWL_AGG_OFF;
1115		err = 0;
1116		break;
1117	default:
1118		IWL_ERR(mvm,
1119			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
1120			mvmsta->sta_id, tid, tid_data->state);
1121		IWL_ERR(mvm,
1122			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
1123		err = -EINVAL;
1124	}
1125
1126	spin_unlock_bh(&mvmsta->lock);
1127
1128	return err;
1129}
1130
1131int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1132 struct ieee80211_sta *sta, u16 tid)
1133{
1134 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1135 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1136 u16 txq_id;
1137 enum iwl_mvm_agg_state old_state;
1138
1139 /*
1140 * First set the agg state to OFF to avoid calling
1141 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
1142 */
1143 spin_lock_bh(&mvmsta->lock);
1144 txq_id = tid_data->txq_id;
1145 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
1146 mvmsta->sta_id, tid, txq_id, tid_data->state);
1147 old_state = tid_data->state;
1148 tid_data->state = IWL_AGG_OFF;
1149 mvmsta->agg_tids &= ~BIT(tid);
1150 spin_unlock_bh(&mvmsta->lock);
1151
1152 /* No need to mark as reserved */
1153 spin_lock_bh(&mvm->queue_info_lock);
1154 mvm->queue_info[txq_id].setup_reserved = false;
1155 spin_unlock_bh(&mvm->queue_info_lock);
1156
1157 if (old_state >= IWL_AGG_ON) {
1158 iwl_mvm_drain_sta(mvm, mvmsta, true);
1159 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
1160 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
1161 iwl_trans_wait_tx_queue_empty(mvm->trans,
1162 mvmsta->tfd_queue_msk);
1163 iwl_mvm_drain_sta(mvm, mvmsta, false);
1164
1165 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1166
1167 iwl_mvm_disable_txq(mvm, tid_data->txq_id,
1168 vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
1169 0);
1170 }
1171
1172 return 0;
1173}
1174
1175static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1176{
1177 int i, max = -1, max_offs = -1;
1178
1179 lockdep_assert_held(&mvm->mutex);
1180
1181 /* Pick the unused key offset with the highest 'deleted'
1182 * counter. Every time a key is deleted, all the counters
1183 * are incremented and the one that was just deleted is
1184 * reset to zero. Thus, the highest counter is the one
1185 * that was deleted longest ago. Pick that one.
1186 */
1187 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
1188 if (test_bit(i, mvm->fw_key_table))
1189 continue;
1190 if (mvm->fw_key_deleted[i] > max) {
1191 max = mvm->fw_key_deleted[i];
1192 max_offs = i;
1193 }
1194 }
1195
1196 if (max_offs < 0)
1197 return STA_KEY_IDX_INVALID;
1198
1199 __set_bit(max_offs, mvm->fw_key_table);
1200
1201 return max_offs;
1202}
1203
1204static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1205 struct ieee80211_sta *sta)
1206{
1207 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1208
1209 if (sta) {
1210 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1211
1212 return mvm_sta->sta_id;
1213 }
1214
1215 /*
1216 * The device expects GTKs for station interfaces to be
1217 * installed as GTKs for the AP station. If we have no
1218 * station ID, then use AP's station ID.
1219 */
1220 if (vif->type == NL80211_IFTYPE_STATION &&
1221 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
1222 return mvmvif->ap_sta_id;
1223
1224 return IWL_MVM_STATION_COUNT;
1225}
1226
/*
 * Upload one station key to the firmware via ADD_STA_KEY.
 *
 * @mcast: install as the multicast (group) key rather than unicast.
 * @tkip_iv32/@tkip_p1k: TKIP RX phase-1 key material; ignored for other
 *	ciphers.
 * @cmd_flags: with CMD_ASYNC the command status cannot be read back, so
 *	the pre-initialized ADD_STA_SUCCESS stands unless the send fails.
 *
 * Returns 0 on success, -EIO if the firmware rejected the key, or the
 * error from sending the command.
 */
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		/* WEP key material starts 3 bytes into the command buffer */
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	default:
		/* unknown cipher: hand the key to the fw as an "ext" key */
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = keyconf->hw_key_idx;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
1296
1297static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
1298 struct ieee80211_key_conf *keyconf,
1299 u8 sta_id, bool remove_key)
1300{
1301 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
1302
1303 /* verify the key details match the required command's expectations */
1304 if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
1305 (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
1306 (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
1307 return -EINVAL;
1308
1309 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
1310 igtk_cmd.sta_id = cpu_to_le32(sta_id);
1311
1312 if (remove_key) {
1313 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
1314 } else {
1315 struct ieee80211_key_seq seq;
1316 const u8 *pn;
1317
1318 memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
1319 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1320 pn = seq.aes_cmac.pn;
1321 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
1322 ((u64) pn[4] << 8) |
1323 ((u64) pn[3] << 16) |
1324 ((u64) pn[2] << 24) |
1325 ((u64) pn[1] << 32) |
1326 ((u64) pn[0] << 40));
1327 }
1328
1329 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
1330 remove_key ? "removing" : "installing",
1331 igtk_cmd.sta_id);
1332
1333 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
1334 sizeof(igtk_cmd), &igtk_cmd);
1335}
1336
1337
1338static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
1339 struct ieee80211_vif *vif,
1340 struct ieee80211_sta *sta)
1341{
1342 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1343
1344 if (sta)
1345 return sta->addr;
1346
1347 if (vif->type == NL80211_IFTYPE_STATION &&
1348 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1349 u8 sta_id = mvmvif->ap_sta_id;
1350 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1351 lockdep_is_held(&mvm->mutex));
1352 return sta->addr;
1353 }
1354
1355
1356 return NULL;
1357}
1358
1359static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1360 struct ieee80211_vif *vif,
1361 struct ieee80211_sta *sta,
1362 struct ieee80211_key_conf *keyconf,
1363 bool mcast)
1364{
1365 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1366 int ret;
1367 const u8 *addr;
1368 struct ieee80211_key_seq seq;
1369 u16 p1k[5];
1370
1371 switch (keyconf->cipher) {
1372 case WLAN_CIPHER_SUITE_TKIP:
1373 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
1374 /* get phase 1 key from mac80211 */
1375 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1376 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1377 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1378 seq.tkip.iv32, p1k, 0);
1379 break;
1380 case WLAN_CIPHER_SUITE_CCMP:
1381 case WLAN_CIPHER_SUITE_WEP40:
1382 case WLAN_CIPHER_SUITE_WEP104:
1383 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1384 0, NULL, 0);
1385 break;
1386 default:
1387 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1388 0, NULL, 0);
1389 }
1390
1391 return ret;
1392}
1393
1394static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
1395 struct ieee80211_key_conf *keyconf,
1396 bool mcast)
1397{
1398 struct iwl_mvm_add_sta_key_cmd cmd = {};
1399 __le16 key_flags;
1400 int ret;
1401 u32 status;
1402
1403 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
1404 STA_KEY_FLG_KEYID_MSK);
1405 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
1406 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
1407
1408 if (mcast)
1409 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1410
1411 cmd.key_flags = key_flags;
1412 cmd.key_offset = keyconf->hw_key_idx;
1413 cmd.sta_id = sta_id;
1414
1415 status = ADD_STA_SUCCESS;
1416 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
1417 &cmd, &status);
1418
1419 switch (status) {
1420 case ADD_STA_SUCCESS:
1421 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
1422 break;
1423 default:
1424 ret = -EIO;
1425 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
1426 break;
1427 }
1428
1429 return ret;
1430}
1431
/*
 * Install a key for a station (or the AP station, for GTKs on a client
 * interface).
 *
 * @have_key_offset: when true the caller pre-set keyconf->hw_key_idx
 *	(the D3 firmware hardcodes the PTK offset to 0 — see d3.c);
 *	otherwise a free offset is allocated here.
 *
 * For WEP the same key is uploaded twice — once unicast, once multicast
 * — both pointing at the same slot. Returns 0 on success or a negative
 * errno.
 */
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			bool have_key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	sta_id = iwl_mvm_get_key_sta_id(vif, sta);
	if (sta_id == IWL_MVM_STATION_COUNT) {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	/* the IGTK uses a dedicated command, not ADD_STA_KEY */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	if (!have_key_offset) {
		/*
		 * The D3 firmware hardcodes the PTK offset to 0, so we have to
		 * configure it there. As a result, this workaround exists to
		 * let the caller set the key offset (hw_key_idx), see d3.c.
		 */
		keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
		if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
			return -ENOSPC;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
	if (ret) {
		/* release the offset we (possibly) allocated above */
		__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
		goto end;
	}

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
		if (ret) {
			__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
		}
	}

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
1511
/*
 * Remove a station key from the firmware and release its key offset.
 *
 * The offset bookkeeping (clearing fw_key_table, aging fw_key_deleted)
 * is done even when the station is already gone, so the slot is always
 * reclaimed. Returns 0 on success or a negative errno.
 */
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	u8 sta_id;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	sta_id = iwl_mvm_get_key_sta_id(vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	/* the IGTK uses a dedicated command and no key-table offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	/* station already gone: the offset was freed above, nothing to send */
	if (sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table,
	 * for example when a GTK is removed (where the sta_id will then be
	 * the AP ID, and no station was passed by mac80211.)
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (!sta) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
1579
1580void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1581 struct ieee80211_vif *vif,
1582 struct ieee80211_key_conf *keyconf,
1583 struct ieee80211_sta *sta, u32 iv32,
1584 u16 *phase1key)
1585{
1586 struct iwl_mvm_sta *mvm_sta;
1587 u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1588 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1589
1590 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
1591 return;
1592
1593 rcu_read_lock();
1594
1595 if (!sta) {
1596 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1597 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
1598 rcu_read_unlock();
1599 return;
1600 }
1601 }
1602
1603 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1604 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1605 iv32, phase1key, CMD_ASYNC);
1606 rcu_read_unlock();
1607}
1608
1609void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1610 struct ieee80211_sta *sta)
1611{
1612 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1613 struct iwl_mvm_add_sta_cmd cmd = {
1614 .add_modify = STA_MODE_MODIFY,
1615 .sta_id = mvmsta->sta_id,
1616 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
1617 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1618 };
1619 int ret;
1620
1621 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1622 if (ret)
1623 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1624}
1625
/*
 * Configure the firmware to release @cnt buffered frames from @tids for
 * a sleeping station (uAPSD / PS-Poll frame release).
 *
 * When @agg is set, the counts are checked against what is actually
 * queued on the aggregation queues and the command is adjusted; if
 * nothing can be released the service period is ended immediately via
 * ieee80211_sta_eosp().
 */
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if the
	 * all queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				/* end the service period right away */
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
		if (WARN_ON(cnt - remaining == 0)) {
			/* nothing queued at all: nothing to release */
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		/* the next TX status for this station ends the service period */
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
1707
1708void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
1709 struct iwl_rx_cmd_buffer *rxb)
1710{
1711 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1712 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
1713 struct ieee80211_sta *sta;
1714 u32 sta_id = le32_to_cpu(notif->sta_id);
1715
1716 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
1717 return;
1718
1719 rcu_read_lock();
1720 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1721 if (!IS_ERR_OR_NULL(sta))
1722 ieee80211_sta_eosp(sta);
1723 rcu_read_unlock();
1724}
1725
1726void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
1727 struct iwl_mvm_sta *mvmsta, bool disable)
1728{
1729 struct iwl_mvm_add_sta_cmd cmd = {
1730 .add_modify = STA_MODE_MODIFY,
1731 .sta_id = mvmsta->sta_id,
1732 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
1733 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
1734 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1735 };
1736 int ret;
1737
1738 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1739 if (ret)
1740 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1741}
1742
1743void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
1744 struct ieee80211_sta *sta,
1745 bool disable)
1746{
1747 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1748
1749 spin_lock_bh(&mvm_sta->lock);
1750
1751 if (mvm_sta->disable_tx == disable) {
1752 spin_unlock_bh(&mvm_sta->lock);
1753 return;
1754 }
1755
1756 mvm_sta->disable_tx = disable;
1757
1758 /*
1759 * Tell mac80211 to start/stop queuing tx for this station,
1760 * but don't stop queuing if there are still pending frames
1761 * for this station.
1762 */
1763 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
1764 ieee80211_sta_block_awake(mvm->hw, sta, disable);
1765
1766 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
1767
1768 spin_unlock_bh(&mvm_sta->lock);
1769}
1770
1771void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
1772 struct iwl_mvm_vif *mvmvif,
1773 bool disable)
1774{
1775 struct ieee80211_sta *sta;
1776 struct iwl_mvm_sta *mvm_sta;
1777 int i;
1778
1779 lockdep_assert_held(&mvm->mutex);
1780
1781 /* Block/unblock all the stations of the given mvmvif */
1782 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
1783 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
1784 lockdep_is_held(&mvm->mutex));
1785 if (IS_ERR_OR_NULL(sta))
1786 continue;
1787
1788 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1789 if (mvm_sta->mac_id_n_color !=
1790 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
1791 continue;
1792
1793 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
1794 }
1795}
1796
1797void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1798{
1799 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1800 struct iwl_mvm_sta *mvmsta;
1801
1802 rcu_read_lock();
1803
1804 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
1805
1806 if (!WARN_ON(!mvmsta))
1807 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
1808
1809 rcu_read_unlock();
1810}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
new file mode 100644
index 000000000000..eedb215eba3f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -0,0 +1,426 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __sta_h__
67#define __sta_h__
68
69#include <linux/spinlock.h>
70#include <net/mac80211.h>
71#include <linux/wait.h>
72
73#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
74#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
75#include "rs.h"
76
77struct iwl_mvm;
78struct iwl_mvm_vif;
79
80/**
81 * DOC: station table - introduction
82 *
 * The station table is a list of data structures that represent the stations.
84 * In STA/P2P client mode, the driver will hold one station for the AP/ GO.
85 * In GO/AP mode, the driver will have as many stations as associated clients.
86 * All these stations are reflected in the fw's station table. The driver
87 * keeps the fw's station table up to date with the ADD_STA command. Stations
88 * can be removed by the REMOVE_STA command.
89 *
90 * All the data related to a station is held in the structure %iwl_mvm_sta
 * which is embedded in mac80211's %ieee80211_sta (in the drv_priv) area.
92 * This data includes the index of the station in the fw, per tid information
93 * (sequence numbers, Block-ack state machine, etc...). The stations are
94 * created and deleted by the %sta_state callback from %ieee80211_ops.
95 *
96 * The driver holds a map: %fw_id_to_mac_id that allows to fetch a
97 * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
98 * station index. That way, the driver is able to get the tid related data in
99 * O(1) in time sensitive paths (Tx / Tx response / BA notification). These
100 * paths are triggered by the fw, and the driver needs to get a pointer to the
101 * %ieee80211 structure. This map helps to get that pointer quickly.
102 */
103
104/**
105 * DOC: station table - locking
106 *
107 * As stated before, the station is created / deleted by mac80211's %sta_state
 * callback from %ieee80211_ops which can sleep. The next paragraph explains
 * the locking of a single station, the next ones relate to the station
 * table.
111 *
112 * The station holds the sequence number per tid. So this data needs to be
113 * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
114 * information (the state machine / and the logic that checks if the queues
115 * were drained), so it also needs to be accessible from the Tx response flow.
 * In short, the station needs to be accessed from sleepable context as well as
117 * from tasklets, so the station itself needs a spinlock.
118 *
119 * The writers of %fw_id_to_mac_id map are serialized by the global mutex of
120 * the mvm op_mode. This is possible since %sta_state can sleep.
121 * The pointers in this map are RCU protected, hence we won't replace the
122 * station while we have Tx / Tx response / BA notification running.
123 *
124 * If a station is deleted while it still has packets in its A-MPDU queues,
125 * then the reclaim flow will notice that there is no station in the map for
126 * sta_id and it will dump the responses.
127 */
128
129/**
130 * DOC: station table - internal stations
131 *
132 * The FW needs a few internal stations that are not reflected in
133 * mac80211, such as broadcast station in AP / GO mode, or AUX sta for
134 * scanning and P2P device (during the GO negotiation).
135 * For these kind of stations we have %iwl_mvm_int_sta struct which holds the
136 * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
137 * Usually the data for these stations is static, so no locking is required,
138 * and no TID data as this is also not needed.
139 * One thing to note, is that these stations have an ID in the fw, but not
140 * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id
141 * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of
142 * pointers from this mapping need to check that the value is not error
143 * or NULL.
144 *
145 * Currently there is only one auxiliary station for scanning, initialized
146 * on init.
147 */
148
149/**
150 * DOC: station table - AP Station in STA mode
151 *
152 * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
 * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
154 * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
155 * the AP station from the fw before setting the MAC context as unassociated.
156 * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
157 * removed by mac80211, but the station won't be removed in the fw until the
158 * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
159 */
160
161/**
162 * DOC: station table - Drain vs. Flush
163 *
164 * Flush means that all the frames in the SCD queue are dumped regardless the
165 * station to which they were sent. We do that when we disassociate and before
166 * we remove the STA of the AP. The flush can be done synchronously against the
167 * fw.
168 * Drain means that the fw will drop all the frames sent to a specific station.
169 * This is useful when a client (if we are IBSS / GO or AP) disassociates. In
170 * that case, we need to drain all the frames for that client from the AC queues
171 * that are shared with the other clients. Only then, we can remove the STA in
172 * the fw. In order to do so, we track the non-AMPDU packets for each station.
173 * If mac80211 removes a STA and if it still has non-AMPDU packets pending in
174 * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all
175 * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped
176 * (we know about it with its Tx response), we remove the station in fw and set
177 * it as %NULL in %fw_id_to_mac_id: this is the purpose of
178 * %iwl_mvm_sta_drained_wk.
179 */
180
181/**
182 * DOC: station table - fw restart
183 *
184 * When the fw asserts, or we have any other issue that requires to reset the
185 * driver, we require mac80211 to reconfigure the driver. Since the private
186 * data of the stations is embed in mac80211's %ieee80211_sta, that data will
187 * not be zeroed and needs to be reinitialized manually.
188 * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and that will hint us
189 * that we must not allocate a new sta_id but reuse the previous one. This
190 * means that the stations being re-added after the reset will have the same
191 * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
192 * map, since the stations aren't in the fw any more. Internal stations that
193 * are not added by mac80211 will be re-added in the init flow that is called
 * after the restart: mac80211 calls %iwl_mvm_mac_start which calls
195 * %iwl_mvm_up.
196 */
197
198/**
199 * DOC: AP mode - PS
200 *
201 * When a station is asleep, the fw will set it as "asleep". All frames on
202 * shared queues (i.e. non-aggregation queues) to that station will be dropped
203 * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
204 *
205 * AMPDUs are in a separate queue that is stopped by the fw. We just need to
206 * let mac80211 know when there are frames in these queues so that it can
207 * properly handle trigger frames.
208 *
209 * When a trigger frame is received, mac80211 tells the driver to send frames
210 * from the AMPDU queues or sends frames to non-aggregation queues itself,
211 * depending on which ACs are delivery-enabled and what TID has frames to
212 * transmit. Note that mac80211 has all the knowledge since all the non-agg
213 * frames are buffered / filtered, and the driver tells mac80211 about agg
214 * frames). The driver needs to tell the fw to let frames out even if the
215 * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
216 *
217 * When we receive a frame from that station with PM bit unset, the driver
218 * needs to let the fw know that this station isn't asleep any more. This is
219 * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
220 * station's wakeup.
221 *
222 * For a GO, the Service Period might be cut short due to an absence period
223 * of the GO. In this (and all other cases) the firmware notifies us with the
224 * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
225 * already sent to the device will be rejected again.
226 *
227 * See also "AP support for powersaving clients" in mac80211.h.
228 */
229
/**
 * enum iwl_mvm_agg_state
 *
 * The state machine of the BA agreement establishment / tear down.
 * These states relate to a specific RA / TID.
 *
 * @IWL_AGG_OFF: aggregation is not used
 * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
 * @IWL_AGG_ON: aggregation session is up
 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
 *	HW queue to be empty from packets for this RA / TID.
 * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
 *	HW queue to be empty from packets for this RA / TID.
 */
enum iwl_mvm_agg_state {
	IWL_AGG_OFF = 0,
	IWL_AGG_STARTING,
	IWL_AGG_ON,
	IWL_EMPTYING_HW_QUEUE_ADDBA,
	IWL_EMPTYING_HW_QUEUE_DELBA,
};
251
/**
 * struct iwl_mvm_tid_data - holds the states for each RA / TID
 * @seq_number: the next WiFi sequence number to use, kept in the frame's
 *	seq_ctrl format (converted to an SN via IEEE80211_SEQ_TO_SN, see
 *	iwl_mvm_tid_queued() below)
 * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
 *	This is basically (last acked packet++).
 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
 *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
 * @reduced_tpc: Reduced tx power. Holds the data between the
 *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
 * @state: state of the BA agreement establishment / tear down.
 * @txq_id: Tx queue used by the BA session
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
 *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
 *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
 *	we are ready to finish the Tx AGG stop / start flow.
 * @tx_time: medium time consumed by this A-MPDU
 */
struct iwl_mvm_tid_data {
	u16 seq_number;
	u16 next_reclaimed;
	/* The rest is Tx AGG related */
	u32 rate_n_flags;
	u8 reduced_tpc;
	enum iwl_mvm_agg_state state;
	u16 txq_id;
	u16 ssn;
	u16 tx_time;
};
280
281static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
282{
283 return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
284 tid_data->next_reclaimed);
285}
286
/**
 * struct iwl_mvm_sta - representation of a station in the driver
 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
 * @tfd_queue_msk: the tfd queues used by the station
 * @hw_queue: per-AC mapping of the TFD queues used by station
 * @mac_id_n_color: the MAC context this station is linked to
 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
 *	tid.
 * @max_agg_bufsize: the maximal size of the AGG buffer for this station
 * @bt_reduced_txpower: is reduced tx power enabled for this station
 * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
 *	we need to signal the EOSP
 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
 *	and from Tx response flow, it needs a spinlock.
 * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
 * @lq_sta: rate scaling data for this station (see %iwl_lq_sta in rs.h)
 * @vif: the interface this station belongs to
 * @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: is thermal throttling enable Tx protection?
 * @disable_tx: is tx to this STA disabled?
 * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
 * space.
 *
 */
struct iwl_mvm_sta {
	u32 sta_id;
	u32 tfd_queue_msk;
	u8 hw_queue[IEEE80211_NUM_ACS];
	u32 mac_id_n_color;
	u16 tid_disable_agg;
	u8 max_agg_bufsize;
	bool bt_reduced_txpower;
	bool next_status_eosp;
	spinlock_t lock;
	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
	struct iwl_lq_sta lq_sta;
	struct ieee80211_vif *vif;

	/* Temporary, until the new TLC will control the Tx protection */
	s8 tx_protection;
	bool tt_tx_protection;

	bool disable_tx;
	u8 agg_tids;
};
333
334static inline struct iwl_mvm_sta *
335iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
336{
337 return (void *)sta->drv_priv;
338}
339
/**
 * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
 * broadcast)
 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
 * @tfd_queue_msk: the tfd queues used by the station
 *
 * Internal stations exist only in the fw - there is no mac80211 station or
 * per-TID data attached to them, so this minimal struct suffices.
 */
struct iwl_mvm_int_sta {
	u32 sta_id;
	u32 tfd_queue_msk;
};
350
/* Station table manipulation - add/update/remove stations in the fw */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update);
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta);
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta);
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id);

/* Security keys */
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *key,
			bool have_key_offset);
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf);

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key);

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb);

/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid);
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid);

/* Internal (aux / broadcast) stations - see DOC section above */
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);

/* Drain, power-save and Tx-blocking helpers */
void iwl_mvm_sta_drained_wk(struct work_struct *wk);
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta);
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain);
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable);
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable);
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
425
426#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
new file mode 100644
index 000000000000..fe2fa5650443
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -0,0 +1,732 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Mobile Communications GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2014 Intel Mobile Communications GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/etherdevice.h>
65#include "mvm.h"
66#include "time-event.h"
67#include "iwl-io.h"
68#include "iwl-prph.h"
69
/* Convert 802.11 Time Units (1 TU = 1024 usec) to usec / msec.
 * The argument is parenthesized so expressions like TU_TO_US(a + b)
 * expand correctly.
 */
#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
72
73void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
74{
75 struct ieee80211_sta *sta;
76 struct iwl_mvm_sta *mvmsta;
77 int i;
78
79 lockdep_assert_held(&mvm->mutex);
80
81 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
82 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
83 lockdep_is_held(&mvm->mutex));
84 if (!sta || IS_ERR(sta) || !sta->tdls)
85 continue;
86
87 mvmsta = iwl_mvm_sta_from_mac80211(sta);
88 ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
89 NL80211_TDLS_TEARDOWN,
90 WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
91 GFP_KERNEL);
92 }
93}
94
95int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
96{
97 struct ieee80211_sta *sta;
98 struct iwl_mvm_sta *mvmsta;
99 int count = 0;
100 int i;
101
102 lockdep_assert_held(&mvm->mutex);
103
104 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
105 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
106 lockdep_is_held(&mvm->mutex));
107 if (!sta || IS_ERR(sta) || !sta->tdls)
108 continue;
109
110 if (vif) {
111 mvmsta = iwl_mvm_sta_from_mac80211(sta);
112 if (mvmsta->vif != vif)
113 continue;
114 }
115
116 count++;
117 }
118
119 return count;
120}
121
/*
 * Send TDLS_CONFIG_CMD to the FW: syncs the FW's view of the current TDLS
 * peers (one sta_info entry per TDLS station in the fw station table).
 */
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		/*
		 * NOTE(review): assumes cnt never exceeds the capacity of
		 * tdls_cfg_cmd.sta_info[] - confirm mac80211 caps the number
		 * of concurrent TDLS peers accordingly.
		 */
		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	/* sanity-check the response length against the expected struct */
	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}
179
180void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
181 bool sta_added)
182{
183 int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
184
185 /* when the first peer joins, send a power update first */
186 if (tdls_sta_cnt == 1 && sta_added)
187 iwl_mvm_power_update_mac(mvm);
188
189 /* configure the FW with TDLS peer info */
190 iwl_mvm_tdls_config(mvm, vif);
191
192 /* when the last peer leaves, send a power update last */
193 if (tdls_sta_cnt == 0 && !sta_added)
194 iwl_mvm_power_update_mac(mvm);
195}
196
197void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
198 struct ieee80211_vif *vif)
199{
200 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
201 u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
202
203 /*
204 * iwl_mvm_protect_session() reads directly from the device
205 * (the system time), so make sure it is available.
206 */
207 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
208 return;
209
210 mutex_lock(&mvm->mutex);
211 /* Protect the session to hear the TDLS setup response on the channel */
212 iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
213 mutex_unlock(&mvm->mutex);
214
215 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
216}
217
218static const char *
219iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
220{
221 switch (state) {
222 case IWL_MVM_TDLS_SW_IDLE:
223 return "IDLE";
224 case IWL_MVM_TDLS_SW_REQ_SENT:
225 return "REQ SENT";
226 case IWL_MVM_TDLS_SW_RESP_RCVD:
227 return "RESP RECEIVED";
228 case IWL_MVM_TDLS_SW_REQ_RCVD:
229 return "REQ RECEIVED";
230 case IWL_MVM_TDLS_SW_ACTIVE:
231 return "ACTIVE";
232 }
233
234 return NULL;
235}
236
237static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
238 enum iwl_mvm_tdls_cs_state state)
239{
240 if (mvm->tdls_cs.state == state)
241 return;
242
243 IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
244 iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
245 iwl_mvm_tdls_cs_state_str(state));
246 mvm->tdls_cs.state = state;
247
248 /* we only send requests to our switching peer - update sent time */
249 if (state == IWL_MVM_TDLS_SW_REQ_SENT)
250 mvm->tdls_cs.peer.sent_timestamp =
251 iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
252
253 if (state == IWL_MVM_TDLS_SW_IDLE)
254 mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
255}
256
/*
 * Handle the TDLS channel-switch notification from the FW: on success move
 * the state machine to ACTIVE and schedule the next switch attempt one DTIM
 * interval later; on failure go back to IDLE.
 */
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	/* sta_id comes from the FW - validate before indexing the table */
	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}
297
/*
 * Validate a requested TDLS channel-switch action against the current
 * channel-switch state machine and the identity of the requesting peer.
 *
 * Returns 0 when the action may proceed, -EBUSY when another switch flow
 * owns the state machine, or -EINVAL for a stale / spurious request.
 */
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
			lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}
380
381static int
382iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
383 struct ieee80211_vif *vif,
384 enum iwl_tdls_channel_switch_type type,
385 const u8 *peer, bool peer_initiator,
386 u8 oper_class,
387 struct cfg80211_chan_def *chandef,
388 u32 timestamp, u16 switch_time,
389 u16 switch_timeout, struct sk_buff *skb,
390 u32 ch_sw_tm_ie)
391{
392 struct ieee80211_sta *sta;
393 struct iwl_mvm_sta *mvmsta;
394 struct ieee80211_tx_info *info;
395 struct ieee80211_hdr *hdr;
396 struct iwl_tdls_channel_switch_cmd cmd = {0};
397 int ret;
398
399 lockdep_assert_held(&mvm->mutex);
400
401 ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
402 timestamp);
403 if (ret)
404 return ret;
405
406 if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
407 ret = -EINVAL;
408 goto out;
409 }
410
411 cmd.switch_type = type;
412 cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
413 cmd.timing.switch_time = cpu_to_le32(switch_time);
414 cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
415
416 rcu_read_lock();
417 sta = ieee80211_find_sta(vif, peer);
418 if (!sta) {
419 rcu_read_unlock();
420 ret = -ENOENT;
421 goto out;
422 }
423 mvmsta = iwl_mvm_sta_from_mac80211(sta);
424 cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
425
426 if (!chandef) {
427 if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
428 mvm->tdls_cs.peer.chandef.chan) {
429 /* actually moving to the channel */
430 chandef = &mvm->tdls_cs.peer.chandef;
431 } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
432 type == TDLS_MOVE_CH) {
433 /* we need to return to base channel */
434 struct ieee80211_chanctx_conf *chanctx =
435 rcu_dereference(vif->chanctx_conf);
436
437 if (WARN_ON_ONCE(!chanctx)) {
438 rcu_read_unlock();
439 goto out;
440 }
441
442 chandef = &chanctx->def;
443 }
444 }
445
446 if (chandef) {
447 cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
448 PHY_BAND_24 : PHY_BAND_5);
449 cmd.ci.channel = chandef->chan->hw_value;
450 cmd.ci.width = iwl_mvm_get_channel_width(chandef);
451 cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
452 }
453
454 /* keep quota calculation simple for now - 50% of DTIM for TDLS */
455 cmd.timing.max_offchan_duration =
456 cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
457 vif->bss_conf.beacon_int) / 2);
458
459 /* Switch time is the first element in the switch-timing IE. */
460 cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
461
462 info = IEEE80211_SKB_CB(skb);
463 hdr = (void *)skb->data;
464 if (info->control.hw_key) {
465 if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
466 rcu_read_unlock();
467 ret = -EINVAL;
468 goto out;
469 }
470 iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
471 }
472
473 iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
474 mvmsta->sta_id);
475
476 iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
477 hdr->frame_control);
478 rcu_read_unlock();
479
480 memcpy(cmd.frame.data, skb->data, skb->len);
481
482 ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
483 sizeof(cmd), &cmd);
484 if (ret) {
485 IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
486 ret);
487 goto out;
488 }
489
490 /* channel switch has started, update state */
491 if (type != TDLS_MOVE_CH) {
492 mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
493 iwl_mvm_tdls_update_cs_state(mvm,
494 type == TDLS_SEND_CHAN_SW_REQ ?
495 IWL_MVM_TDLS_SW_REQ_SENT :
496 IWL_MVM_TDLS_SW_REQ_RCVD);
497 } else {
498 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
499 }
500
501out:
502
503 /* channel switch failed - we are idle */
504 if (ret)
505 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
506
507 return ret;
508}
509
/*
 * Delayed work that re-attempts an outgoing TDLS channel-switch request.
 *
 * Runs after an active channel switch has finished or timed out. The
 * channel-switch state machine is reset to idle, and if a channel-switch
 * peer is still registered, a new TDLS_SEND_CHAN_SW_REQ is configured
 * from the stored peer parameters. The work then re-arms itself one DTIM
 * interval later (acting as a retry timer when the send failed).
 */
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	/* mvm->mutex serializes against writers of fw_id_to_mac_id */
	sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
			lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			   msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}
557
/*
 * mac80211 callback: start a TDLS channel switch with @sta.
 *
 * Sends a TDLS_SEND_CHAN_SW_REQ to the firmware and records the peer's
 * switch parameters so iwl_mvm_tdls_ch_switch_work() can re-issue the
 * request later. Only a single channel-switch peer is supported at a
 * time.
 *
 * Returns 0 on success, -EBUSY if a channel-switch peer already exists,
 * -ENOMEM if the template frame cannot be copied, or an error from the
 * firmware command.
 */
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 *
	 * NOTE(review): if skb_copy() fails here the firmware has already
	 * been told to start the switch but no peer is recorded - confirm
	 * whether the state machine should be rolled back in that case.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}
622
/*
 * mac80211 callback: cancel an ongoing TDLS channel switch with @sta.
 *
 * Forgets the registered channel-switch peer and frees the stored
 * template frame. If the phy is currently off-channel because of this
 * peer, sleep for one DTIM interval (deliberately outside the mutex) to
 * let it return to the base channel, then flush the delayed
 * channel-switch work.
 */
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	/* mvm->mutex serializes against writers of fw_id_to_mac_id */
	cur_sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
			lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}
674
675void
676iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
677 struct ieee80211_vif *vif,
678 struct ieee80211_tdls_ch_sw_params *params)
679{
680 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
681 enum iwl_tdls_channel_switch_type type;
682 unsigned int delay;
683 const char *action_str =
684 params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
685 "REQ" : "RESP";
686
687 mutex_lock(&mvm->mutex);
688
689 IWL_DEBUG_TDLS(mvm,
690 "Received TDLS ch switch action %s from %pM status %d\n",
691 action_str, params->sta->addr, params->status);
692
693 /*
694 * we got a non-zero status from a peer we were switching to - move to
695 * the idle state and retry again later
696 */
697 if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
698 params->status != 0 &&
699 mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
700 mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
701 struct ieee80211_sta *cur_sta;
702
703 /* make sure it's the same peer */
704 cur_sta = rcu_dereference_protected(
705 mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
706 lockdep_is_held(&mvm->mutex));
707 if (cur_sta == params->sta) {
708 iwl_mvm_tdls_update_cs_state(mvm,
709 IWL_MVM_TDLS_SW_IDLE);
710 goto retry;
711 }
712 }
713
714 type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
715 TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
716
717 iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
718 params->sta->tdls_initiator, 0,
719 params->chandef, params->timestamp,
720 params->switch_time,
721 params->switch_timeout,
722 params->tmpl_skb,
723 params->ch_sw_tm_ie);
724
725retry:
726 /* register a timeout in case we don't succeed in switching */
727 delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
728 1024 / 1000;
729 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
730 msecs_to_jiffies(delay));
731 mutex_unlock(&mvm->mutex);
732}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
new file mode 100644
index 000000000000..79ab6beb6b26
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
@@ -0,0 +1,97 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __IWL_MVM_TESTMODE_H__
67#define __IWL_MVM_TESTMODE_H__
68
/**
 * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
 * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
 * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
 * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
 * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
 * @NUM_IWL_MVM_TM_ATTRS: number of attributes (internal, must stay last)
 * @IWL_MVM_TM_ATTR_MAX: highest valid attribute number
 */
enum iwl_mvm_testmode_attrs {
	IWL_MVM_TM_ATTR_UNSPEC,
	IWL_MVM_TM_ATTR_CMD,
	IWL_MVM_TM_ATTR_NOA_DURATION,
	IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,

	/* keep last */
	NUM_IWL_MVM_TM_ATTRS,
	IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
};
86
/**
 * enum iwl_mvm_testmode_commands - MVM testmode commands
 *
 * The command value is carried in the %IWL_MVM_TM_ATTR_CMD attribute.
 *
 * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
 * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
 */
enum iwl_mvm_testmode_commands {
	IWL_MVM_TM_CMD_SET_NOA,
	IWL_MVM_TM_CMD_SET_BEACON_FILTER,
};
96
97#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
new file mode 100644
index 000000000000..7530eb23035d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -0,0 +1,872 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#include <linux/jiffies.h>
67#include <net/mac80211.h>
68
69#include "iwl-notif-wait.h"
70#include "iwl-trans.h"
71#include "fw-api.h"
72#include "time-event.h"
73#include "mvm.h"
74#include "iwl-io.h"
75#include "iwl-prph.h"
76
77/*
78 * For the high priority TE use a time event type that has similar priority to
79 * the FW's action scan priority.
80 */
81#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
82#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
83
/*
 * Reset a time event's bookkeeping and unlink it from its list.
 *
 * A non-NULL @te_data->vif marks the entry as in-use; entries that are
 * already clear are left untouched. Must be called with
 * mvm->time_event_lock held.
 */
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (!te_data->vif)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;	/* TE_MAX acts as the "slot free" sentinel */
	te_data->vif = NULL;
}
98
/*
 * Work item run when a remain-on-channel (ROC) time event ends or is
 * canceled: clears the running-state bits, drops the corresponding
 * references and flushes any frames still queued for the off-channel
 * period.
 */
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
	u32 queues = 0;

	/*
	 * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
	}
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
		queues |= BIT(mvm->aux_queue);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
	}

	/* let in-flight TX observe the cleared status bits before flushing */
	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
}
134
/*
 * Called when a remain-on-channel period is over (or canceled); defers
 * the actual cleanup to iwl_mvm_roc_done_wk() on the global workqueue.
 */
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}
146
/*
 * Handle the start of the CSA (channel switch announcement) NoA period on
 * an AP vif: if the CSA countdown is already complete, finalize the
 * switch via ieee80211_csa_finish() and drop the csa_vif reference.
 */
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	/*
	 * NOTE(review): csa_vif is cleared after the read-side unlock -
	 * presumably the writer side is serialized elsewhere; confirm.
	 */
	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}
181
182static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
183 struct ieee80211_vif *vif,
184 const char *errmsg)
185{
186 if (vif->type != NL80211_IFTYPE_STATION)
187 return false;
188 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
189 return false;
190 if (errmsg)
191 IWL_ERR(mvm, "%s\n", errmsg);
192
193 iwl_mvm_connection_loss(mvm, vif, errmsg);
194 return true;
195}
196
/*
 * Handle a time-event notification for a channel-switch time event.
 *
 * For an AP vif a failed start is remembered in csa_failed and the NoA
 * handling runs regardless; for a station vif a failed start triggers a
 * connection loss, while a successful start completes the switch. The
 * time event data is cleared in all cases.
 */
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}
232
/*
 * Collect firmware debug data if a FW_DBG_TRIGGER_TIME_EVENT trigger is
 * configured whose entry matches this notification's time event id,
 * action bitmap and status bitmap.
 */
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
	te_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig))
		return;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		/* id must match exactly; action and status match by bits */
		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "Time event %d Action 0x%x received status: %d",
					    te_data->id,
					    le32_to_cpu(notif->action),
					    le32_to_cpu(notif->status));
		break;
	}
}
270
/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases that the scheduler cannot
	 * find a schedule that can handle the event (for example requesting a
	 * P2P Device discoverability, while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		/* on a station vif this may mean the connection is lost */
		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				"No association and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}
352
/*
 * Handle an Aux ROC time event.
 *
 * Returns 0 if the notification matched an entry on the aux ROC list and
 * was handled; -EINVAL otherwise (not an aux ROC event, failed status, or
 * unknown action).
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
					   struct iwl_time_event_notif *notif)
{
	struct iwl_mvm_time_event_data *te_data, *tmp;
	bool aux_roc_te = false;

	/* match the notification to a pending aux ROC time event by UID */
	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
			aux_roc_te = true;
			break;
		}
	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
		return -EINVAL;

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	if (!le32_to_cpu(notif->status)) {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Aux ROC Time Event %s notification failure\n",
			     (le32_to_cpu(notif->action) &
			      TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end");
		return -EINVAL;
	}

	IWL_DEBUG_TE(mvm,
		     "Aux ROC time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	/*
	 * NOTE(review): the action is compared with == here, while
	 * iwl_mvm_te_handle_notif() tests the same bits with & - confirm
	 * that aux ROC notifications always carry exactly one action bit.
	 */
	if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_roc_finished(mvm); /* flush aux queue */
		list_del(&te_data->list); /* remove from list */
		te_data->running = false;
		te_data->vif = NULL;
		te_data->uid = 0;
		te_data->id = TE_MAX;
	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
		te_data->running = true;
		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	} else {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
			     le32_to_cpu(notif->action));
		return -EINVAL;
	}

	return 0;
}
409
/*
 * The Rx handler for time event notifications.
 *
 * First offers the notification to the aux ROC handler; if it is not an
 * aux ROC event, it is matched by UID against the regular time event
 * list. All handling runs under mvm->time_event_lock.
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	/* This time event is triggered for Aux ROC request */
	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
		goto unlock;

	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
unlock:
	spin_unlock_bh(&mvm->time_event_lock);
}
436
/*
 * Notification-wait callback for TIME_EVENT_NOTIFICATION.
 *
 * Returns true to stop waiting (matching UID processed, or a malformed
 * packet that was already warned about); false to keep waiting for the
 * notification with the expected UID.
 */
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}
468
/*
 * Notification-wait callback for the TIME_EVENT_CMD response: captures
 * the firmware-assigned unique id (UID) into te_data->uid.
 *
 * Returns true once the response for this time event has been processed.
 */
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}
497
/*
 * Register @te_data and send the TIME_EVENT_CMD to the firmware.
 *
 * A notification wait is used (rather than CMD_WANT_SKB) so the UID from
 * the command response is captured inside the RX path, before the "time
 * event started" notification can race with it. On failure the time
 * event data is cleared again. Must be called with mvm->mutex held.
 *
 * Returns 0 on success, -EIO if the slot is already in use, or an error
 * from sending the command.
 */
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	/* id == TE_MAX means the slot is free (see iwl_mvm_te_clear_data) */
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	/*
	 * the out_clear_te label sits inside this if so the cleanup runs
	 * both via the goto above and on a (never expected) wait failure
	 */
	if (ret) {
		out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}
558
/*
 * Start (or extend) session protection: a BSS_STA_AGGRESSIVE_ASSOC time
 * event that keeps the fw on channel for @duration TU. If a protection is
 * already running with more than @min_duration TU left, nothing is done.
 * When @wait_for_notif is set, block until the fw reports the event started.
 */
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	/* Current protection still has enough time left - keep it */
	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * we don't have enough time
		 * cancel the current TE and issue a new one
		 * Of course it would be better to remove the old one only
		 * when the new one is added, but we don't care if we are off
		 * channel for a bit. All we need to do, is not to return
		 * before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	/* Build the ADD command for a one-shot, non-fragmented time event */
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* apply_time == 0: schedule as soon as possible */
	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	/* Fire-and-forget path: just issue the command and return */
	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}
634
635static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
636 struct iwl_mvm_time_event_data *te_data,
637 u32 *uid)
638{
639 u32 id;
640
641 /*
642 * It is possible that by the time we got to this point the time
643 * event was already removed.
644 */
645 spin_lock_bh(&mvm->time_event_lock);
646
647 /* Save time event uid before clearing its data */
648 *uid = te_data->uid;
649 id = te_data->id;
650
651 /*
652 * The clear_data function handles time events that were already removed
653 */
654 iwl_mvm_te_clear_data(mvm, te_data);
655 spin_unlock_bh(&mvm->time_event_lock);
656
657 /*
658 * It is possible that by the time we try to remove it, the time event
659 * has already ended and removed. In such a case there is no need to
660 * send a removal command.
661 */
662 if (id == TE_MAX) {
663 IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
664 return false;
665 }
666
667 return true;
668}
669
670/*
671 * Explicit request to remove a aux roc time event. The removal of a time
672 * event needs to be synchronized with the flow of a time event's end
673 * notification, which also removes the time event from the op mode
674 * data structures.
675 */
676static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
677 struct iwl_mvm_vif *mvmvif,
678 struct iwl_mvm_time_event_data *te_data)
679{
680 struct iwl_hs20_roc_req aux_cmd = {};
681 u32 uid;
682 int ret;
683
684 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
685 return;
686
687 aux_cmd.event_unique_id = cpu_to_le32(uid);
688 aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
689 aux_cmd.id_and_color =
690 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
691 IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
692 le32_to_cpu(aux_cmd.event_unique_id));
693 ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
694 sizeof(aux_cmd), &aux_cmd);
695
696 if (WARN_ON(ret))
697 return;
698}
699
700/*
701 * Explicit request to remove a time event. The removal of a time event needs to
702 * be synchronized with the flow of a time event's end notification, which also
703 * removes the time event from the op mode data structures.
704 */
705void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
706 struct iwl_mvm_vif *mvmvif,
707 struct iwl_mvm_time_event_data *te_data)
708{
709 struct iwl_time_event_cmd time_cmd = {};
710 u32 uid;
711 int ret;
712
713 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
714 return;
715
716 /* When we remove a TE, the UID is to be set in the id field */
717 time_cmd.id = cpu_to_le32(uid);
718 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
719 time_cmd.id_and_color =
720 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
721
722 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
723 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
724 sizeof(time_cmd), &time_cmd);
725 if (WARN_ON(ret))
726 return;
727}
728
729void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
730 struct ieee80211_vif *vif)
731{
732 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
733 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
734
735 lockdep_assert_held(&mvm->mutex);
736 iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
737}
738
/*
 * Issue a remain-on-channel time event for a P2P_DEVICE vif.
 * @duration is in milliseconds (converted to TU below); @type selects the
 * fw TE id. Returns 0 on success or a negative errno; -EBUSY if a ROC is
 * already running on this vif.
 */
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	/* map the mac80211 ROC type to the fw time event id */
	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	/* apply_time == 0: schedule as soon as possible */
	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus it might not be
	 * scheduled. To improve the chances of it being scheduled, allow them
	 * to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
793
/*
 * Cancel an ongoing ROC session: find its time event (P2P device TE in
 * time_event_list, or the single AUX ROC TE) and remove it via the
 * appropriate fw command, then signal ROC completion to mac80211.
 */
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif = NULL;
	struct iwl_mvm_time_event_data *te_data;
	bool is_p2p = false;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and this time event corresponds to a ROC
	 * request
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
			is_p2p = true;
			goto remove_te;
		}
	}

	/* There can only be at most one AUX ROC time event, we just use the
	 * list to simplify/unify code. Remove it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
	if (te_data)
		mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

remove_te:
	spin_unlock_bh(&mvm->time_event_lock);

	/* mvmvif still NULL means neither kind of ROC TE was found */
	if (!mvmvif) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	/* P2P ROCs are removed via TIME_EVENT_CMD, AUX ROCs via HOT_SPOT_CMD */
	if (is_p2p)
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	else
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);

	iwl_mvm_roc_finished(mvm);
}
843
844int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
845 struct ieee80211_vif *vif,
846 u32 duration, u32 apply_time)
847{
848 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
849 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
850 struct iwl_time_event_cmd time_cmd = {};
851
852 lockdep_assert_held(&mvm->mutex);
853
854 if (te_data->running) {
855 IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
856 return -EBUSY;
857 }
858
859 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
860 time_cmd.id_and_color =
861 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
862 time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
863 time_cmd.apply_time = cpu_to_le32(apply_time);
864 time_cmd.max_frags = TE_V2_FRAG_NONE;
865 time_cmd.duration = cpu_to_le32(duration);
866 time_cmd.repeat = 1;
867 time_cmd.interval = cpu_to_le32(1);
868 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
869 TE_V2_ABSENCE);
870
871 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
872}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
new file mode 100644
index 000000000000..cbdf8e52a5f1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -0,0 +1,249 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __time_event_h__
67#define __time_event_h__
68
69#include "fw-api.h"
70
71#include "mvm.h"
72
73/**
74 * DOC: Time Events - what is it?
75 *
76 * Time Events are a fw feature that allows the driver to control the presence
77 * of the device on the channel. Since the fw supports multiple channels
78 * concurrently, the fw may choose to jump to another channel at any time.
79 * In order to make sure that the fw is on a specific channel at a certain time
80 * and for a certain duration, the driver needs to issue a time event.
81 *
82 * The simplest example is for BSS association. The driver issues a time event,
83 * waits for it to start, and only then tells mac80211 that we can start the
84 * association. This way, we make sure that the association will be done
85 * smoothly and won't be interrupted by channel switch decided within the fw.
86 */
87
88 /**
89 * DOC: The flow against the fw
90 *
91 * When the driver needs to make sure we are in a certain channel, at a certain
92 * time and for a certain duration, it sends a Time Event. The flow against the
93 * fw goes like this:
94 * 1) Driver sends a TIME_EVENT_CMD to the fw
95 * 2) Driver gets the response for that command. This response contains the
96 * Unique ID (UID) of the event.
97 * 3) The fw sends notification when the event starts.
98 *
99 * Of course the API provides various options that allow to cover parameters
100 * of the flow.
101 * What is the duration of the event?
102 * What is the start time of the event?
103 * Is there an end-time for the event?
104 * How much can the event be delayed?
105 * Can the event be split?
106 * If yes what is the maximal number of chunks?
107 * etc...
108 */
109
110/**
111 * DOC: Abstraction to the driver
112 *
113 * In order to simplify the use of time events to the rest of the driver,
114 * we abstract the use of time events. This component provides the functions
115 * needed by the driver.
116 */
117
118#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
119#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
120
121/**
122 * iwl_mvm_protect_session - start / extend the session protection.
123 * @mvm: the mvm component
124 * @vif: the virtual interface for which the session is issued
125 * @duration: the duration of the session in TU.
126 * @min_duration: will start a new session if the current session will end
127 * in less than min_duration.
128 * @max_delay: maximum delay before starting the time event (in TU)
129 * @wait_for_notif: true if it is required that a time event notification be
130 * waited for (that the time event has been scheduled before returning)
131 *
132 * This function can be used to start a session protection which means that the
133 * fw will stay on the channel for %duration_ms milliseconds. This function
134 * can block (sleep) until the session starts. This function can also be used
135 * to extend a currently running session.
136 * This function is meant to be used for BSS association for example, where we
137 * want to make sure that the fw stays on the channel during the association.
138 */
139void iwl_mvm_protect_session(struct iwl_mvm *mvm,
140 struct ieee80211_vif *vif,
141 u32 duration, u32 min_duration,
142 u32 max_delay, bool wait_for_notif);
143
144/**
145 * iwl_mvm_stop_session_protection - cancel the session protection.
146 * @mvm: the mvm component
147 * @vif: the virtual interface for which the session is issued
148 *
 * This function cancels the session protection, which is an act of good
 * citizenship. If it is not needed any more it should be canceled because
 * the other bindings wait for the medium during that time.
 * This function doesn't sleep.
153 */
154void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
155 struct ieee80211_vif *vif);
156
157/*
158 * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
159 */
160void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
161 struct iwl_rx_cmd_buffer *rxb);
162
163/**
164 * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
165 * @mvm: the mvm component
166 * @vif: the virtual interface for which the roc is requested. It is assumed
167 * that the vif type is NL80211_IFTYPE_P2P_DEVICE
168 * @duration: the requested duration in millisecond for the fw to be on the
169 * channel that is bound to the vif.
170 * @type: the remain on channel request type
171 *
172 * This function can be used to issue a remain on channel session,
173 * which means that the fw will stay in the channel for the request %duration
174 * milliseconds. The function is async, meaning that it only issues the ROC
175 * request but does not wait for it to start. Once the FW is ready to serve the
176 * ROC request, it will issue a notification to the driver that it is on the
177 * requested channel. Once the FW completes the ROC request it will issue
178 * another notification to the driver.
179 */
180int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
181 int duration, enum ieee80211_roc_type type);
182
183/**
184 * iwl_mvm_stop_roc - stop remain on channel functionality
185 * @mvm: the mvm component
186 *
187 * This function can be used to cancel an ongoing ROC session.
188 * The function is async, it will instruct the FW to stop serving the ROC
189 * session, but will not wait for the actual stopping of the session.
190 */
191void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
192
193/**
194 * iwl_mvm_remove_time_event - general function to clean up of time event
195 * @mvm: the mvm component
196 * @vif: the vif to which the time event belongs
197 * @te_data: the time event data that corresponds to that time event
198 *
199 * This function can be used to cancel a time event regardless its type.
200 * It is useful for cleaning up time events running before removing an
201 * interface.
202 */
203void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
204 struct iwl_mvm_vif *mvmvif,
205 struct iwl_mvm_time_event_data *te_data);
206
207/**
208 * iwl_mvm_te_clear_data - remove time event from list
209 * @mvm: the mvm component
210 * @te_data: the time event data to remove
211 *
212 * This function is mostly internal, it is made available here only
213 * for firmware restart purposes.
214 */
215void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
216 struct iwl_mvm_time_event_data *te_data);
217
218void iwl_mvm_roc_done_wk(struct work_struct *wk);
219
220/**
221 * iwl_mvm_schedule_csa_period - request channel switch absence period
222 * @mvm: the mvm component
223 * @vif: the virtual interface for which the channel switch is issued
224 * @duration: the duration of the NoA in TU.
225 * @apply_time: NoA start time in GP2.
226 *
227 * This function is used to schedule NoA time event and is used to perform
228 * the channel switch flow.
229 */
230int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
231 struct ieee80211_vif *vif,
232 u32 duration, u32 apply_time);
233
234/**
235 * iwl_mvm_te_scheduled - check if the fw received the TE cmd
236 * @te_data: the time event data that corresponds to that time event
237 *
238 * This function returns true iff this TE is added to the fw.
239 */
240static inline bool
241iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
242{
243 if (!te_data)
244 return false;
245
246 return !!te_data->uid;
247}
248
249#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
new file mode 100644
index 000000000000..4007f1d421dd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
@@ -0,0 +1,306 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Deutschland GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2015 Intel Deutschland GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include "mvm.h"
64#include "fw-api-tof.h"
65
66#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
67
/*
 * Initialize the ToF (time of flight / ranging) state: zero it and fill in
 * the default sub-group command ids and request parameters. No-op when the
 * fw does not advertise ToF support.
 */
void iwl_mvm_tof_init(struct iwl_mvm *mvm)
{
	struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
		return;

	memset(tof_data, 0, sizeof(*tof_data));

	tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (IWL_MVM_TOF_IS_RESPONDER) {
		tof_data->responder_cfg.sub_grp_cmd_id =
			cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
		/* placeholder; overwritten in iwl_mvm_tof_responder_cmd() */
		tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
	}
#endif

	tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
	/* default request parameters; exact semantics defined by the fw API */
	tof_data->range_req.req_timeout = 1;
	tof_data->range_req.initiator = 1;
	tof_data->range_req.report_policy = 3;

	tof_data->range_req_ext.sub_grp_cmd_id =
		cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);

	/* MAX_ID serves as the "no range request in flight" marker */
	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
}
97
98void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
99{
100 struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
101
102 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
103 return;
104
105 memset(tof_data, 0, sizeof(*tof_data));
106 mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
107}
108
109static void iwl_tof_iterator(void *_data, u8 *mac,
110 struct ieee80211_vif *vif)
111{
112 bool *enabled = _data;
113
114 /* non bss vif exists */
115 if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
116 *enabled = false;
117}
118
119int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
120{
121 struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
122 bool enabled;
123
124 lockdep_assert_held(&mvm->mutex);
125
126 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
127 return -EINVAL;
128
129 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
130 IEEE80211_IFACE_ITER_NORMAL,
131 iwl_tof_iterator, &enabled);
132 if (!enabled) {
133 IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
134 return -EINVAL;
135 }
136
137 mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
138 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
139 IWL_ALWAYS_LONG_GROUP, 0),
140 0, sizeof(*cmd), cmd);
141}
142
/*
 * Abort the currently active range request. @id must match the active
 * request id; returns -EINVAL on mismatch or missing fw support.
 */
int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
		.request_id = id,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
		return -EINVAL;

	/* Only the request that is actually in flight may be aborted */
	if (id != mvm->tof_data.active_range_request) {
		IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
			id, mvm->tof_data.active_range_request);
		return -EINVAL;
	}

	/* after abort is sent there's no active request anymore */
	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(cmd), &cmd);
}
168
169#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * Send the ToF responder configuration to the fw (debugfs-only path; this
 * function is compiled under CONFIG_IWLWIFI_DEBUGFS). The vif must be an
 * active, non-P2P AP; its broadcast station id and BSSID are filled into
 * the cached responder_cfg before sending.
 */
int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif)
{
	struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
		return -EINVAL;

	if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
	    !mvmvif->ap_ibss_active) {
		IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
		return -EIO;
	}

	cmd->sta_id = mvmvif->bcast_sta.sta_id;
	memcpy(cmd->bssid, vif->addr, ETH_ALEN);
	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(*cmd), cmd);
}
193#endif
194
/*
 * Send the cached range request to the fw. Only one request may be in
 * flight at a time; its id is tracked in active_range_request and cleared
 * again when the response arrives (iwl_mvm_tof_range_resp) or on abort.
 */
int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
				  struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.len = { sizeof(mvm->tof_data.range_req), },
		/* no copy because of the command size */
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};

	lockdep_assert_held(&mvm->mutex);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
		return -EINVAL;

	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
		IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
		return -EIO;
	}

	/* nesting of range requests is not supported in FW */
	if (mvm->tof_data.active_range_request !=
	    IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
		IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
			mvm->tof_data.active_range_request);
		return -EIO;
	}

	mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;

	/* NOCOPY: points at the long-lived buffer embedded in mvm->tof_data */
	cmd.data[0] = &mvm->tof_data.range_req;
	return iwl_mvm_send_cmd(mvm, &cmd);
}
228
229int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
230 struct ieee80211_vif *vif)
231{
232 lockdep_assert_held(&mvm->mutex);
233
234 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
235 return -EINVAL;
236
237 if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
238 IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
239 return -EIO;
240 }
241
242 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
243 IWL_ALWAYS_LONG_GROUP, 0),
244 0, sizeof(mvm->tof_data.range_req_ext),
245 &mvm->tof_data.range_req_ext);
246}
247
/*
 * Handle a range response notification: cache it and mark the active
 * request as completed. Responses for an unexpected request id are
 * rejected with -EIO.
 */
static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
{
	struct iwl_tof_range_rsp_ntfy *resp = (void *)data;

	if (resp->request_id != mvm->tof_data.active_range_request) {
		IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
			resp->request_id, mvm->tof_data.active_range_request);
		return -EIO;
	}

	/* keep the response for later retrieval (e.g. via debugfs) */
	memcpy(&mvm->tof_data.range_resp, resp,
	       sizeof(struct iwl_tof_range_rsp_ntfy));
	/* the request is done; MAX_ID means "none in flight" */
	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;

	return 0;
}
264
265static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
266{
267 struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
268
269 IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
270 return 0;
271}
272
273static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
274{
275 struct iwl_tof_neighbor_report *report =
276 (struct iwl_tof_neighbor_report *)data;
277
278 IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
279 report->bssid, report->request_token, report->status);
280 return 0;
281}
282
283void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
284 struct iwl_rx_cmd_buffer *rxb)
285{
286 struct iwl_rx_packet *pkt = rxb_addr(rxb);
287 struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
288
289 lockdep_assert_held(&mvm->mutex);
290
291 switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
292 case TOF_RANGE_RESPONSE_NOTIF:
293 iwl_mvm_tof_range_resp(mvm, resp->data);
294 break;
295 case TOF_MCSI_DEBUG_NOTIF:
296 iwl_mvm_tof_mcsi_notif(mvm, resp->data);
297 break;
298 case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
299 iwl_mvm_tof_nb_report_notif(mvm, resp->data);
300 break;
301 default:
302 IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
303 resp->sub_grp_cmd_id);
304 break;
305 }
306}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
new file mode 100644
index 000000000000..9beebc33cb8d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
@@ -0,0 +1,94 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Deutschland GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2015 Intel Deutschland GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __tof_h__
64#define __tof_h__
65
66#include "fw-api-tof.h"
67
/**
 * struct iwl_mvm_tof_data - runtime state for Time-of-Flight (ranging)
 * @tof_cfg: ToF configuration command sent to the firmware
 * @range_req: range request command state
 * @range_req_ext: extended range request parameters
 * @responder_cfg: responder-side configuration (debugfs builds only)
 * @range_resp: last range response notification received from the firmware
 * @last_abort_id: id used for the most recent range-abort command
 * @active_range_request: id of the in-flight range request;
 *	IWL_MVM_TOF_RANGE_REQ_MAX_ID when no request is pending
 */
struct iwl_mvm_tof_data {
	struct iwl_tof_config_cmd tof_cfg;
	struct iwl_tof_range_req_cmd range_req;
	struct iwl_tof_range_req_ext_cmd range_req_ext;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct iwl_tof_responder_config_cmd responder_cfg;
#endif
	struct iwl_tof_range_rsp_ntfy range_resp;
	u8 last_abort_id;
	u16 active_range_request;
};
79
/* Set up / tear down the per-mvm ToF state */
void iwl_mvm_tof_init(struct iwl_mvm *mvm);
void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
/* Send the current ToF configuration to the firmware */
int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
/* Abort the range request identified by @id */
int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
				  struct ieee80211_vif *vif);
/* Dispatch ToF notifications (range response, MCSI, neighbor report) */
void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Configure this device as a ToF responder (debugfs only) */
int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif);
#endif
94#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
new file mode 100644
index 000000000000..cadfc0460597
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -0,0 +1,460 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * Copyright(c) 2015 Intel Deutschland GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66
67#include "mvm.h"
68
69#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
70
71static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
72{
73 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
74 u32 duration = tt->params.ct_kill_duration;
75
76 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
77 return;
78
79 IWL_ERR(mvm, "Enter CT Kill\n");
80 iwl_mvm_set_hw_ctkill_state(mvm, true);
81
82 tt->throttle = false;
83 tt->dynamic_smps = false;
84
85 /* Don't schedule an exit work if we're in test mode, since
86 * the temperature will not change unless we manually set it
87 * again (or disable testing).
88 */
89 if (!mvm->temperature_test)
90 schedule_delayed_work(&tt->ct_kill_exit,
91 round_jiffies_relative(duration * HZ));
92}
93
/* Leave CT-kill (re-enable the hardware) if we are currently in it. */
static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
{
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
		return;

	IWL_ERR(mvm, "Exit CT Kill\n");
	iwl_mvm_set_hw_ctkill_state(mvm, false);
}
102
103void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
104{
105 /* ignore the notification if we are in test mode */
106 if (mvm->temperature_test)
107 return;
108
109 if (mvm->temperature == temp)
110 return;
111
112 mvm->temperature = temp;
113 iwl_mvm_tt_handler(mvm);
114}
115
116static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
117 struct iwl_rx_packet *pkt)
118{
119 struct iwl_dts_measurement_notif *notif;
120 int len = iwl_rx_packet_payload_len(pkt);
121 int temp;
122
123 if (WARN_ON_ONCE(len != sizeof(*notif))) {
124 IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
125 return -EINVAL;
126 }
127
128 notif = (void *)pkt->data;
129
130 temp = le32_to_cpu(notif->temp);
131
132 /* shouldn't be negative, but since it's s32, make sure it isn't */
133 if (WARN_ON_ONCE(temp < 0))
134 temp = 0;
135
136 IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
137
138 return temp;
139}
140
141static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
142 struct iwl_rx_packet *pkt, void *data)
143{
144 struct iwl_mvm *mvm =
145 container_of(notif_wait, struct iwl_mvm, notif_wait);
146 int *temp = data;
147 int ret;
148
149 ret = iwl_mvm_temp_notif_parse(mvm, pkt);
150 if (ret < 0)
151 return true;
152
153 *temp = ret;
154
155 return true;
156}
157
158void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
159{
160 struct iwl_rx_packet *pkt = rxb_addr(rxb);
161 int temp;
162
163 /* the notification is handled synchronously in ctkill, so skip here */
164 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
165 return;
166
167 temp = iwl_mvm_temp_notif_parse(mvm, pkt);
168 if (temp < 0)
169 return;
170
171 iwl_mvm_tt_temp_changed(mvm, temp);
172}
173
174static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
175{
176 struct iwl_dts_measurement_cmd cmd = {
177 .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
178 };
179 struct iwl_ext_dts_measurement_cmd extcmd = {
180 .control_mode = cpu_to_le32(DTS_AUTOMATIC),
181 };
182 u32 cmdid;
183
184 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
185 cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
186 PHY_OPS_GROUP, 0);
187 else
188 cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
189
190 if (!fw_has_capa(&mvm->fw->ucode_capa,
191 IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
192 return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
193
194 return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
195}
196
/*
 * iwl_mvm_get_temp - synchronously read the NIC temperature
 *
 * Triggers a DTS measurement and blocks (up to
 * IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT) for the resulting notification.
 * Must be called with mvm->mutex held.
 *
 * Returns the temperature on success, or a negative error code on
 * command failure or timeout.
 */
int iwl_mvm_get_temp(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait wait_temp_notif;
	/* default to the wide notification id used by newer firmware */
	static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
					    DTS_MEASUREMENT_NOTIF_WIDE) };
	int ret, temp;

	/* older firmware without wide cmd headers uses the legacy id */
	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
		temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;

	lockdep_assert_held(&mvm->mutex);

	/* register the waiter BEFORE triggering, so the notification
	 * cannot race past us
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
				   temp_notif, ARRAY_SIZE(temp_notif),
				   iwl_mvm_temp_notif_wait, &temp);

	ret = iwl_mvm_get_temp_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret);
		/* the trigger never went out - undo the registration */
		iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
		return ret;
	}

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
				    IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
	if (ret) {
		IWL_ERR(mvm, "Getting the temperature timed out\n");
		return ret;
	}

	/* temp was filled in by iwl_mvm_temp_notif_wait() */
	return temp;
}
229
/*
 * check_exit_ctkill - delayed work that polls the temperature in CT-kill
 *
 * While in CT-kill the NIC is down, so the device is brought up just long
 * enough to read the temperature and then stopped again.  If the reading
 * dropped to or below the exit threshold, CT-kill is left; on any failure
 * or while still too hot, the work rearms itself after ct_kill_duration
 * seconds.
 */
static void check_exit_ctkill(struct work_struct *work)
{
	struct iwl_mvm_tt_mgmt *tt;
	struct iwl_mvm *mvm;
	u32 duration;
	s32 temp;

	tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
	mvm = container_of(tt, struct iwl_mvm, thermal_throttle);

	duration = tt->params.ct_kill_duration;

	mutex_lock(&mvm->mutex);

	/* bring the hardware up just long enough to measure */
	if (__iwl_mvm_mac_start(mvm))
		goto reschedule;

	/* make sure the device is available for direct read/writes */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) {
		__iwl_mvm_mac_stop(mvm);
		goto reschedule;
	}

	temp = iwl_mvm_get_temp(mvm);

	iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);

	/* device not needed anymore - shut it down again */
	__iwl_mvm_mac_stop(mvm);

	/* negative value means the read failed - try again later */
	if (temp < 0)
		goto reschedule;

	IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);

	if (temp <= tt->params.ct_kill_exit) {
		mutex_unlock(&mvm->mutex);
		iwl_mvm_exit_ctkill(mvm);
		return;
	}

reschedule:
	mutex_unlock(&mvm->mutex);
	schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
			      round_jiffies(duration * HZ));
}
275
276static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
277 struct ieee80211_vif *vif)
278{
279 struct iwl_mvm *mvm = _data;
280 enum ieee80211_smps_mode smps_mode;
281
282 lockdep_assert_held(&mvm->mutex);
283
284 if (mvm->thermal_throttle.dynamic_smps)
285 smps_mode = IEEE80211_SMPS_DYNAMIC;
286 else
287 smps_mode = IEEE80211_SMPS_AUTOMATIC;
288
289 if (vif->type != NL80211_IFTYPE_STATION)
290 return;
291
292 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode);
293}
294
/*
 * iwl_mvm_tt_tx_protection - toggle thermal Tx protection on all stations
 * @enable: true to enable protection, false to disable it
 *
 * Walks every station entry and flips its thermal Tx protection state,
 * skipping entries that are unused or already in the requested state.
 * Caller must hold mvm->mutex (protects the fw_id_to_mac_id deref).
 */
static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i, err;

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		/* unused or error-marked entry */
		if (IS_ERR_OR_NULL(sta))
			continue;
		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		/* already in the requested state - nothing to do */
		if (enable == mvmsta->tt_tx_protection)
			continue;
		err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
		if (err) {
			/* keep the old state on failure */
			IWL_ERR(mvm, "Failed to %s Tx protection\n",
				enable ? "enable" : "disable");
		} else {
			IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
				       enable ? "Enable" : "Disable");
			mvmsta->tt_tx_protection = enable;
		}
	}
}
320
321void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
322{
323 struct iwl_host_cmd cmd = {
324 .id = REPLY_THERMAL_MNG_BACKOFF,
325 .len = { sizeof(u32), },
326 .data = { &backoff, },
327 };
328
329 backoff = max(backoff, mvm->thermal_throttle.min_backoff);
330
331 if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
332 IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
333 backoff);
334 mvm->thermal_throttle.tx_backoff = backoff;
335 } else {
336 IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
337 }
338}
339
/*
 * iwl_mvm_tt_handler - thermal throttling state machine
 *
 * Compares the current temperature (mvm->temperature) against the active
 * iwl_tt_params thresholds and, in order of severity:
 *   - enters/exits CT kill (and returns immediately in either case),
 *   - enables/disables dynamic SMPS on station interfaces,
 *   - enables/disables Tx protection,
 *   - selects the Tx backoff for the current temperature step.
 * Also tracks the aggregate tt->throttle flag for the log messages.
 */
void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
{
	struct iwl_tt_params *params = &mvm->thermal_throttle.params;
	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
	s32 temperature = mvm->temperature;
	bool throttle_enable = false;
	int i;
	u32 tx_backoff;

	IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);

	/* CT kill overrides all other throttling mechanisms */
	if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
		iwl_mvm_enter_ctkill(mvm);
		return;
	}

	if (params->support_ct_kill &&
	    temperature <= params->ct_kill_exit) {
		iwl_mvm_exit_ctkill(mvm);
		return;
	}

	if (params->support_dynamic_smps) {
		if (!tt->dynamic_smps &&
		    temperature >= params->dynamic_smps_entry) {
			IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
			tt->dynamic_smps = true;
			ieee80211_iterate_active_interfaces_atomic(
					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
					iwl_mvm_tt_smps_iterator, mvm);
			throttle_enable = true;
		} else if (tt->dynamic_smps &&
			   temperature <= params->dynamic_smps_exit) {
			IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
			tt->dynamic_smps = false;
			ieee80211_iterate_active_interfaces_atomic(
					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
					iwl_mvm_tt_smps_iterator, mvm);
		}
	}

	if (params->support_tx_protection) {
		if (temperature >= params->tx_protection_entry) {
			iwl_mvm_tt_tx_protection(mvm, true);
			throttle_enable = true;
		} else if (temperature <= params->tx_protection_exit) {
			iwl_mvm_tt_tx_protection(mvm, false);
		}
	}

	if (params->support_tx_backoff) {
		/* pick the backoff of the highest step already crossed */
		tx_backoff = tt->min_backoff;
		for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
			if (temperature < params->tx_backoff[i].temperature)
				break;
			tx_backoff = max(tt->min_backoff,
					 params->tx_backoff[i].backoff);
		}
		if (tx_backoff != tt->min_backoff)
			throttle_enable = true;
		/* only send a command when the value actually changes */
		if (tt->tx_backoff != tx_backoff)
			iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
	}

	/* log transitions of the aggregate throttle state */
	if (!tt->throttle && throttle_enable) {
		IWL_WARN(mvm,
			 "Due to high temperature thermal throttling initiated\n");
		tt->throttle = true;
	} else if (tt->throttle && !tt->dynamic_smps &&
		   tt->tx_backoff == tt->min_backoff &&
		   temperature <= params->tx_protection_exit) {
		IWL_WARN(mvm,
			 "Temperature is back to normal thermal throttling stopped\n");
		tt->throttle = false;
	}
}
416
/*
 * Default thermal throttling parameters, used when the device config does
 * not supply its own (mvm->cfg->thermal_params == NULL in
 * iwl_mvm_tt_initialize()).  Threshold values are temperatures
 * (presumably degrees Celsius - TODO confirm against iwl_tt_params docs);
 * ct_kill_duration is in seconds (multiplied by HZ when scheduling);
 * each tx_backoff step maps a temperature to the backoff applied once
 * that temperature is reached.
 */
static const struct iwl_tt_params iwl_mvm_default_tt_params = {
	.ct_kill_entry = 118,
	.ct_kill_exit = 96,
	.ct_kill_duration = 5,
	.dynamic_smps_entry = 114,
	.dynamic_smps_exit = 110,
	.tx_protection_entry = 114,
	.tx_protection_exit = 108,
	.tx_backoff = {
		{.temperature = 112, .backoff = 200},
		{.temperature = 113, .backoff = 600},
		{.temperature = 114, .backoff = 1200},
		{.temperature = 115, .backoff = 2000},
		{.temperature = 116, .backoff = 4000},
		{.temperature = 117, .backoff = 10000},
	},
	.support_ct_kill = true,
	.support_dynamic_smps = true,
	.support_tx_protection = true,
	.support_tx_backoff = true,
};
438
439void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
440{
441 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
442
443 IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
444
445 if (mvm->cfg->thermal_params)
446 tt->params = *mvm->cfg->thermal_params;
447 else
448 tt->params = iwl_mvm_default_tt_params;
449
450 tt->throttle = false;
451 tt->dynamic_smps = false;
452 tt->min_backoff = min_backoff;
453 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
454}
455
/* Tear down thermal throttling: make sure the CT-kill poller is gone. */
void iwl_mvm_tt_exit(struct iwl_mvm *mvm)
{
	cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
	IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
new file mode 100644
index 000000000000..c652a66be803
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -0,0 +1,1115 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/ieee80211.h>
66#include <linux/etherdevice.h>
67
68#include "iwl-trans.h"
69#include "iwl-eeprom-parse.h"
70#include "mvm.h"
71#include "sta.h"
72
73static void
74iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
75 u16 tid, u16 ssn)
76{
77 struct iwl_fw_dbg_trigger_tlv *trig;
78 struct iwl_fw_dbg_trigger_ba *ba_trig;
79
80 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
81 return;
82
83 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
84 ba_trig = (void *)trig->data;
85
86 if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
87 return;
88
89 if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
90 return;
91
92 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
93 "BAR sent to %pM, tid %d, ssn %d",
94 addr, tid, ssn);
95}
96
/*
 * Sets most of the Tx cmd's fields
 *
 * Fills flags, TID, BT-coex priority, PM frame timeout and length fields
 * of @tx_cmd from the frame in @skb and mac80211's @info.  Rate and
 * crypto fields are filled by the dedicated helpers.
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	/* include the FCS in the length used for the RTS threshold check */
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: TID comes from the QoS control field */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	} else if (ieee80211_is_back_req(fc)) {
		/* BAR: TID and SSN come from the BAR control fields */
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
			TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	/* request RTS/CTS protection for long unicast data frames */
	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->next_frame_len = 0;
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;
}
188
189/*
190 * Sets the fields in the Tx cmd that are rate related
191 */
192void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
193 struct ieee80211_tx_info *info,
194 struct ieee80211_sta *sta, __le16 fc)
195{
196 u32 rate_flags;
197 int rate_idx;
198 u8 rate_plcp;
199
200 /* Set retry limit on RTS packets */
201 tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
202
203 /* Set retry limit on DATA packets and Probe Responses*/
204 if (ieee80211_is_probe_resp(fc)) {
205 tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
206 tx_cmd->rts_retry_limit =
207 min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
208 } else if (ieee80211_is_back_req(fc)) {
209 tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
210 } else {
211 tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
212 }
213
214 /*
215 * for data packets, rate info comes from the table inside the fw. This
216 * table is controlled by LINK_QUALITY commands
217 */
218
219 if (ieee80211_is_data(fc) && sta) {
220 tx_cmd->initial_rate_index = 0;
221 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
222 return;
223 } else if (ieee80211_is_back_req(fc)) {
224 tx_cmd->tx_flags |=
225 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
226 }
227
228 /* HT rate doesn't make sense for a non data frame */
229 WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
230 "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
231 info->control.rates[0].flags,
232 info->control.rates[0].idx,
233 le16_to_cpu(fc));
234
235 rate_idx = info->control.rates[0].idx;
236 /* if the rate isn't a well known legacy rate, take the lowest one */
237 if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
238 rate_idx = rate_lowest_index(
239 &mvm->nvm_data->bands[info->band], sta);
240
241 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
242 if (info->band == IEEE80211_BAND_5GHZ)
243 rate_idx += IWL_FIRST_OFDM_RATE;
244
245 /* For 2.4 GHZ band, check that there is no need to remap */
246 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
247
248 /* Get PLCP rate for tx_cmd->rate_n_flags */
249 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
250
251 mvm->mgmt_last_antenna_idx =
252 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
253 mvm->mgmt_last_antenna_idx);
254
255 if (info->band == IEEE80211_BAND_2GHZ &&
256 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
257 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
258 else
259 rate_flags =
260 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
261
262 /* Set CCK flag as needed */
263 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
264 rate_flags |= RATE_MCS_CCK_MSK;
265
266 /* Set the rate in the TX cmd */
267 tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
268}
269
/*
 * Sets the fields in the Tx cmd that are crypto related
 *
 * Programs tx_cmd->sec_ctl/key for the frame's cipher and, for CCMP,
 * writes the packet number into the frame's crypto header located
 * @hdrlen bytes into @skb_frag.
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		pn = atomic64_inc_return(&keyconf->tx_pn);
		/* CCMP header layout: PN0, PN1, rsvd, key-id/ExtIV,
		 * PN2..PN5 - note bytes are not written in order below
		 */
		crypto_hdr[0] = pn;
		crypto_hdr[2] = 0;
		/* 0x20 sets the ExtIV bit alongside the key index */
		crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
		crypto_hdr[1] = pn >> 8;
		crypto_hdr[4] = pn >> 16;
		crypto_hdr[5] = pn >> 24;
		crypto_hdr[6] = pn >> 32;
		crypto_hdr[7] = pn >> 40;
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* TKIP phase-2 key is derived per packet */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			  TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	default:
		/* unknown cipher - mark the key as externally handled */
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}
317
318/*
319 * Allocates and sets the Tx cmd the driver data pointers in the skb
320 */
321static struct iwl_device_cmd *
322iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
323 int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
324{
325 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
326 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
327 struct iwl_device_cmd *dev_cmd;
328 struct iwl_tx_cmd *tx_cmd;
329
330 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
331
332 if (unlikely(!dev_cmd))
333 return NULL;
334
335 memset(dev_cmd, 0, sizeof(*dev_cmd));
336 dev_cmd->hdr.cmd = TX_CMD;
337 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
338
339 if (info->control.hw_key)
340 iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
341
342 iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
343
344 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
345
346 memset(&info->status, 0, sizeof(info->status));
347
348 info->driver_data[0] = NULL;
349 info->driver_data[1] = dev_cmd;
350
351 return dev_cmd;
352}
353
/*
 * iwl_mvm_tx_skb_non_sta - transmit a frame that has no station entry
 *
 * Picks the station id (broadcast, AP, or auxiliary) and hardware queue
 * for the frame, builds the Tx command and hands the frame to the
 * transport.  Returns 0 on success and -1 on any failure (mac80211-style
 * Tx return convention).
 */
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* aggregation requires a station entry - reject */
	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	/* after-DTIM frames must go to the vif's multicast (cab) queue */
	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
			 (!info->control.vif ||
			  info->hw_queue != info->control.vif->cab_queue)))
		return -1;

	/*
	 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
	 * and hence needs to be sent on the aux queue
	 */
	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    info->control.vif->type == NL80211_IFTYPE_STATION)
		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;

	/*
	 * If the interface on which the frame is sent is the P2P_DEVICE
	 * or an AP/GO interface use the broadcast station associated
	 * with it; otherwise if the interface is a managed interface
	 * use the AP station associated with it for multicast traffic
	 * (this is not possible for unicast packets as a TDLS discovery
	 * response are sent without a station entry); otherwise use the
	 * AUX station.
	 */
	sta_id = mvm->aux_sta.sta_id;
	if (info->control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info->control.vif);

		if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info->control.vif->type == NL80211_IFTYPE_AP)
			sta_id = mvmvif->bcast_sta.sta_id;
		else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
			 is_multicast_ether_addr(hdr->addr1)) {
			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);

			/* only valid while actually associated */
			if (ap_sta_id != IWL_MVM_STATION_COUNT)
				sta_id = ap_sta_id;
		}
	}

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
		/* transport rejected the frame - give the command back */
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	return 0;
}
426
427/*
428 * Prepare a Tx command for a frame destined to a known station and hand it
429 * to the transport (the crypto-related comment previously here was stale)
 */
/*
 * Tx a frame for a known station: assigns the driver-maintained QoS
 * sequence number, selects the Tx queue (TDLS / aggregation / default)
 * and hands the frame to the transport, all under the station spinlock.
 * Returns 0 on success, -1 if the frame was dropped.
 */
430int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
431 struct ieee80211_sta *sta)
432{
433 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
434 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
435 struct iwl_mvm_sta *mvmsta;
436 struct iwl_device_cmd *dev_cmd;
437 struct iwl_tx_cmd *tx_cmd;
438 __le16 fc;
439 u16 seq_number = 0;
440 u8 tid = IWL_MAX_TID_COUNT;
441 u8 txq_id = info->hw_queue;
442 bool is_data_qos = false, is_ampdu = false;
443 int hdrlen;
444
445 mvmsta = iwl_mvm_sta_from_mac80211(sta);
446 fc = hdr->frame_control;
447 hdrlen = ieee80211_hdrlen(fc);
448
449 if (WARN_ON_ONCE(!mvmsta))
450 return -1;
451
452 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
453 return -1;
454
455 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
456 if (!dev_cmd)
457 goto drop;
458
459 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
460 /* From now on, we cannot access info->control */
461
462 /*
463 * we handle that entirely ourselves -- for uAPSD the firmware
464 * will always send a notification, and for PS-Poll responses
465 * we'll notify mac80211 when getting frame status
466 */
467 info->flags &= ~IEEE80211_TX_STATUS_EOSP;
468
469 spin_lock(&mvmsta->lock);
470
471 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
472 u8 *qc = NULL;
473 qc = ieee80211_get_qos_ctl(hdr);
474 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
475 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
476 goto drop_unlock_sta;
477
 /* driver (not mac80211) owns the QoS seq counter per TID;
  * patch the frame's sequence control accordingly */
478 seq_number = mvmsta->tid_data[tid].seq_number;
479 seq_number &= IEEE80211_SCTL_SEQ;
480 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
481 hdr->seq_ctrl |= cpu_to_le16(seq_number);
482 is_data_qos = true;
483 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
484 }
485
486 /* Copy MAC header from skb into command buffer */
487 memcpy(tx_cmd->hdr, hdr, hdrlen);
488
489 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
490
491 if (sta->tdls) {
492 /* default to TID 0 for non-QoS packets */
493 u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
494
495 txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
496 }
497
498 if (is_ampdu) {
499 if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
500 goto drop_unlock_sta;
501 txq_id = mvmsta->tid_data[tid].txq_id;
502 }
503
504 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
505 tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
506
507 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
508 goto drop_unlock_sta;
509
 /* advance the seq counter only after the frame was queued */
510 if (is_data_qos && !ieee80211_has_morefrags(fc))
511 mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
512
513 spin_unlock(&mvmsta->lock);
514
515 if (txq_id < mvm->first_agg_queue)
516 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
517
518 return 0;
519
520drop_unlock_sta:
521 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
522 spin_unlock(&mvmsta->lock);
523drop:
524 return -1;
525}
526
/*
 * Called (with the station lock held) after frames for this RA/TID have
 * been reclaimed: updates mac80211's buffered-frames state and, once the
 * HW queue has fully drained (ssn caught up with next_reclaimed), resumes
 * a pending addBA or delBA transition.
 */
527static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
528 struct ieee80211_sta *sta, u8 tid)
529{
530 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
531 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
532 struct ieee80211_vif *vif = mvmsta->vif;
533
534 lockdep_assert_held(&mvmsta->lock);
535
536 if ((tid_data->state == IWL_AGG_ON ||
537 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
538 iwl_mvm_tid_queued(tid_data) == 0) {
539 /*
540 * Now that this aggregation queue is empty tell mac80211 so it
541 * knows we no longer have frames buffered for the station on
542 * this TID (for the TIM bitmap calculation.)
543 */
544 ieee80211_sta_set_buffered(sta, tid, false);
545 }
546
 /* not fully drained yet - nothing more to do */
547 if (tid_data->ssn != tid_data->next_reclaimed)
548 return;
549
550 switch (tid_data->state) {
551 case IWL_EMPTYING_HW_QUEUE_ADDBA:
552 IWL_DEBUG_TX_QUEUES(mvm,
553 "Can continue addBA flow ssn = next_recl = %d\n",
554 tid_data->next_reclaimed);
555 tid_data->state = IWL_AGG_STARTING;
556 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
557 break;
558
559 case IWL_EMPTYING_HW_QUEUE_DELBA:
560 IWL_DEBUG_TX_QUEUES(mvm,
561 "Can continue DELBA flow ssn = next_recl = %d\n",
562 tid_data->next_reclaimed);
563 iwl_mvm_disable_txq(mvm, tid_data->txq_id,
564 vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
565 CMD_ASYNC);
566 tid_data->state = IWL_AGG_OFF;
567 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
568 break;
569
570 default:
571 break;
572 }
573}
574
575#ifdef CONFIG_IWLWIFI_DEBUG
576const char *iwl_mvm_get_tx_fail_reason(u32 status)
577{
578#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
579#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
580
581 switch (status & TX_STATUS_MSK) {
582 case TX_STATUS_SUCCESS:
583 return "SUCCESS";
584 TX_STATUS_POSTPONE(DELAY);
585 TX_STATUS_POSTPONE(FEW_BYTES);
586 TX_STATUS_POSTPONE(BT_PRIO);
587 TX_STATUS_POSTPONE(QUIET_PERIOD);
588 TX_STATUS_POSTPONE(CALC_TTAK);
589 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
590 TX_STATUS_FAIL(SHORT_LIMIT);
591 TX_STATUS_FAIL(LONG_LIMIT);
592 TX_STATUS_FAIL(UNDERRUN);
593 TX_STATUS_FAIL(DRAIN_FLOW);
594 TX_STATUS_FAIL(RFKILL_FLUSH);
595 TX_STATUS_FAIL(LIFE_EXPIRE);
596 TX_STATUS_FAIL(DEST_PS);
597 TX_STATUS_FAIL(HOST_ABORTED);
598 TX_STATUS_FAIL(BT_RETRY);
599 TX_STATUS_FAIL(STA_INVALID);
600 TX_STATUS_FAIL(FRAG_DROPPED);
601 TX_STATUS_FAIL(TID_DISABLE);
602 TX_STATUS_FAIL(FIFO_FLUSHED);
603 TX_STATUS_FAIL(SMALL_CF_POLL);
604 TX_STATUS_FAIL(FW_DROP);
605 TX_STATUS_FAIL(STA_COLOR_MISMATCH);
606 }
607
608 return "UNKNOWN";
609
610#undef TX_STATUS_FAIL
611#undef TX_STATUS_POSTPONE
612}
613#endif /* CONFIG_IWLWIFI_DEBUG */
614
615void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
616 enum ieee80211_band band,
617 struct ieee80211_tx_rate *r)
618{
619 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
620 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
621 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
622 case RATE_MCS_CHAN_WIDTH_20:
623 break;
624 case RATE_MCS_CHAN_WIDTH_40:
625 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
626 break;
627 case RATE_MCS_CHAN_WIDTH_80:
628 r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
629 break;
630 case RATE_MCS_CHAN_WIDTH_160:
631 r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
632 break;
633 }
634 if (rate_n_flags & RATE_MCS_SGI_MSK)
635 r->flags |= IEEE80211_TX_RC_SHORT_GI;
636 if (rate_n_flags & RATE_MCS_HT_MSK) {
637 r->flags |= IEEE80211_TX_RC_MCS;
638 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
639 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
640 ieee80211_rate_set_vht(
641 r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
642 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
643 RATE_VHT_MCS_NSS_POS) + 1);
644 r->flags |= IEEE80211_TX_RC_VHT_MCS;
645 } else {
646 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
647 band);
648 }
649}
650
651/**
652 * translate ucode response to mac80211 tx status control values
653 */
654static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
655 struct ieee80211_tx_info *info)
656{
657 struct ieee80211_tx_rate *r = &info->status.rates[0];
658
659 info->status.antenna =
660 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
661 iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
662}
663
/*
 * Handle a Tx response that carries a single frame status: reclaim
 * frame(s) up to the reported SSN, report status/rate to mac80211,
 * update per-TID reclaim bookkeeping and the pending-frames counter
 * used for station draining and AP power-save handling.
 */
664static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
665 struct iwl_rx_packet *pkt)
666{
667 struct ieee80211_sta *sta;
668 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
669 int txq_id = SEQ_TO_QUEUE(sequence);
670 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
671 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
672 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
673 u32 status = le16_to_cpu(tx_resp->status.status);
674 u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
675 struct iwl_mvm_sta *mvmsta;
676 struct sk_buff_head skbs;
677 u8 skb_freed = 0;
678 u16 next_reclaimed, seq_ctl;
679
680 __skb_queue_head_init(&skbs);
681
682 seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
683
684 /* we can free until ssn % q.n_bd not inclusive */
685 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
686
687 while (!skb_queue_empty(&skbs)) {
688 struct sk_buff *skb = __skb_dequeue(&skbs);
689 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
690
691 skb_freed++;
692
 /* free the Tx command stashed at Tx time in driver_data[1] */
693 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
694
695 memset(&info->status, 0, sizeof(info->status));
696
697 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
698
699 /* inform mac80211 about what happened with the frame */
700 switch (status & TX_STATUS_MSK) {
701 case TX_STATUS_SUCCESS:
702 case TX_STATUS_DIRECT_DONE:
703 info->flags |= IEEE80211_TX_STAT_ACK;
704 break;
705 case TX_STATUS_FAIL_DEST_PS:
706 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
707 break;
708 default:
709 break;
710 }
711
712 info->status.rates[0].count = tx_resp->failure_frame + 1;
713 iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
714 info);
715 info->status.status_driver_data[1] =
716 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
717
718 /* Single frame failure in an AMPDU queue => send BAR */
719 if (txq_id >= mvm->first_agg_queue &&
720 !(info->flags & IEEE80211_TX_STAT_ACK) &&
721 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
722 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
723
724 /* W/A FW bug: seq_ctl is wrong when the status isn't success */
725 if (status != TX_STATUS_SUCCESS) {
726 struct ieee80211_hdr *hdr = (void *)skb->data;
727 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
728 }
729
730 /*
731 * TODO: this is not accurate if we are freeing more than one
732 * packet.
733 */
734 info->status.tx_time =
735 le16_to_cpu(tx_resp->wireless_media_time);
736 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
737 info->status.status_driver_data[0] =
738 (void *)(uintptr_t)tx_resp->reduced_tpc;
739
740 ieee80211_tx_status(mvm->hw, skb);
741 }
742
743 if (txq_id >= mvm->first_agg_queue) {
744 /* If this is an aggregation queue, we use the ssn since:
745 * ssn = wifi seq_num % 256.
746 * The seq_ctl is the sequence control of the packet to which
747 * this Tx response relates. But if there is a hole in the
748 * bitmap of the BA we received, this Tx response may allow to
749 * reclaim the hole and all the subsequent packets that were
750 * already acked. In that case, seq_ctl != ssn, and the next
751 * packet to be reclaimed will be ssn and not seq_ctl. In that
752 * case, several packets will be reclaimed even if
753 * frame_count = 1.
754 *
755 * The ssn is the index (% 256) of the latest packet that has
756 * treated (acked / dropped) + 1.
757 */
758 next_reclaimed = ssn;
759 } else {
760 /* The next packet to be reclaimed is the one after this one */
761 next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
762 }
763
764 IWL_DEBUG_TX_REPLY(mvm,
765 "TXQ %d status %s (0x%08x)\n",
766 txq_id, iwl_mvm_get_tx_fail_reason(status), status);
767
768 IWL_DEBUG_TX_REPLY(mvm,
769 "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
770 le32_to_cpu(tx_resp->initial_rate),
771 tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
772 ssn, next_reclaimed, seq_ctl);
773
774 rcu_read_lock();
775
776 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
777 /*
778 * sta can't be NULL otherwise it'd mean that the sta has been freed in
779 * the firmware while we still have packets for it in the Tx queues.
780 */
781 if (WARN_ON_ONCE(!sta))
782 goto out;
783
 /* an ERR_PTR here means the station is being drained/removed */
784 if (!IS_ERR(sta)) {
785 mvmsta = iwl_mvm_sta_from_mac80211(sta);
786
787 if (tid != IWL_TID_NON_QOS) {
788 struct iwl_mvm_tid_data *tid_data =
789 &mvmsta->tid_data[tid];
790
791 spin_lock_bh(&mvmsta->lock);
792 tid_data->next_reclaimed = next_reclaimed;
793 IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
794 next_reclaimed);
795 iwl_mvm_check_ratid_empty(mvm, sta, tid);
796 spin_unlock_bh(&mvmsta->lock);
797 }
798
799 if (mvmsta->next_status_eosp) {
800 mvmsta->next_status_eosp = false;
801 ieee80211_sta_eosp(sta);
802 }
803 } else {
804 mvmsta = NULL;
805 }
806
807 /*
808 * If the txq is not an AMPDU queue, there is no chance we freed
809 * several skbs. Check that out...
810 */
811 if (txq_id >= mvm->first_agg_queue)
812 goto out;
813
814 /* We can't free more than one frame at once on a shared queue */
815 WARN_ON(skb_freed > 1);
816
817 /* If we have still frames for this STA nothing to do here */
818 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
819 goto out;
820
821 if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
822
823 /*
824 * If there are no pending frames for this STA and
825 * the tx to this station is not disabled, notify
826 * mac80211 that this station can now wake up in its
827 * STA table.
828 * If mvmsta is not NULL, sta is valid.
829 */
830
831 spin_lock_bh(&mvmsta->lock);
832
833 if (!mvmsta->disable_tx)
834 ieee80211_sta_block_awake(mvm->hw, sta, false);
835
836 spin_unlock_bh(&mvmsta->lock);
837 }
838
839 if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
840 /*
841 * We are draining and this was the last packet - pre_rcu_remove
842 * has been called already. We might be after the
843 * synchronize_net already.
844 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
845 */
846 set_bit(sta_id, mvm->sta_drained);
847 schedule_work(&mvm->sta_drained_wk);
848 }
849
850out:
851 rcu_read_unlock();
852}
853
854#ifdef CONFIG_IWLWIFI_DEBUG
855#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
/*
 * Map a per-frame aggregation Tx state (AGG_TX_STATE_STATUS_MSK part of
 * the status word) to its printable name; unknown values -> "UNKNOWN".
 * The macro above expands each entry into a "case ...: return ..." pair.
 */
856static const char *iwl_get_agg_tx_status(u16 status)
857{
858 switch (status & AGG_TX_STATE_STATUS_MSK) {
859 AGG_TX_STATE_(TRANSMITTED);
860 AGG_TX_STATE_(UNDERRUN);
861 AGG_TX_STATE_(BT_PRIO);
862 AGG_TX_STATE_(FEW_BYTES);
863 AGG_TX_STATE_(ABORT);
864 AGG_TX_STATE_(LAST_SENT_TTL);
865 AGG_TX_STATE_(LAST_SENT_TRY_CNT);
866 AGG_TX_STATE_(LAST_SENT_BT_KILL);
867 AGG_TX_STATE_(SCD_QUERY);
868 AGG_TX_STATE_(TEST_BAD_CRC32);
869 AGG_TX_STATE_(RESPONSE);
870 AGG_TX_STATE_(DUMP_TX);
871 AGG_TX_STATE_(DELAY_TX);
872 }
873
874 return "UNKNOWN";
875}
876
877static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
878 struct iwl_rx_packet *pkt)
879{
880 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
881 struct agg_tx_status *frame_status = &tx_resp->status;
882 int i;
883
884 for (i = 0; i < tx_resp->frame_count; i++) {
885 u16 fstatus = le16_to_cpu(frame_status[i].status);
886
887 IWL_DEBUG_TX_REPLY(mvm,
888 "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
889 iwl_get_agg_tx_status(fstatus),
890 fstatus & AGG_TX_STATE_STATUS_MSK,
891 (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
892 AGG_TX_STATE_TRY_CNT_POS,
893 le16_to_cpu(frame_status[i].sequence));
894 }
895}
896#else
/* No-op stub used when CONFIG_IWLWIFI_DEBUG is not set. */
897static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
898 struct iwl_rx_packet *pkt)
899{}
900#endif /* CONFIG_IWLWIFI_DEBUG */
901
902static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
903 struct iwl_rx_packet *pkt)
904{
905 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
906 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
907 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
908 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
909 struct ieee80211_sta *sta;
910
911 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
912 return;
913
914 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
915 return;
916
917 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
918
919 rcu_read_lock();
920
921 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
922
923 if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
924 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
925 mvmsta->tid_data[tid].rate_n_flags =
926 le32_to_cpu(tx_resp->initial_rate);
927 mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
928 mvmsta->tid_data[tid].tx_time =
929 le16_to_cpu(tx_resp->wireless_media_time);
930 }
931
932 rcu_read_unlock();
933}
934
935void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
936{
937 struct iwl_rx_packet *pkt = rxb_addr(rxb);
938 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
939
940 if (tx_resp->frame_count == 1)
941 iwl_mvm_rx_tx_cmd_single(mvm, pkt);
942 else
943 iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
944}
945
946static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
947 struct iwl_mvm_ba_notif *ba_notif,
948 struct iwl_mvm_tid_data *tid_data)
949{
950 info->flags |= IEEE80211_TX_STAT_AMPDU;
951 info->status.ampdu_ack_len = ba_notif->txed_2_done;
952 info->status.ampdu_len = ba_notif->txed;
953 iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
954 info);
955 /* TODO: not accounted if the whole A-MPDU failed */
956 info->status.tx_time = tid_data->tx_time;
957 info->status.status_driver_data[0] =
958 (void *)(uintptr_t)tid_data->reduced_tpc;
959 info->status.status_driver_data[1] =
960 (void *)(uintptr_t)tid_data->rate_n_flags;
961}
962
/*
 * Handler for the BA notification: reclaims all frames before the start
 * of the reported block-ack window, reports A-MPDU tx status to mac80211
 * (via the first reclaimed skb, or directly to rate scaling when nothing
 * was reclaimed) and advances the per-TID reclaim pointer.
 */
963void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
964{
965 struct iwl_rx_packet *pkt = rxb_addr(rxb);
966 struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
967 struct sk_buff_head reclaimed_skbs;
968 struct iwl_mvm_tid_data *tid_data;
969 struct ieee80211_sta *sta;
970 struct iwl_mvm_sta *mvmsta;
971 struct sk_buff *skb;
972 int sta_id, tid, freed;
973 /* "flow" corresponds to Tx queue */
974 u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
975 /* "ssn" is start of block-ack Tx window, corresponds to index
976 * (in Tx queue's circular buffer) of first TFD/frame in window */
977 u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
978
979 sta_id = ba_notif->sta_id;
980 tid = ba_notif->tid;
981
982 if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
983 tid >= IWL_MAX_TID_COUNT,
984 "sta_id %d tid %d", sta_id, tid))
985 return;
986
987 rcu_read_lock();
988
989 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
990
991 /* Reclaiming frames for a station that has been deleted ? */
992 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
993 rcu_read_unlock();
994 return;
995 }
996
997 mvmsta = iwl_mvm_sta_from_mac80211(sta);
998 tid_data = &mvmsta->tid_data[tid];
999
1000 if (tid_data->txq_id != scd_flow) {
1001 IWL_ERR(mvm,
1002 "invalid BA notification: Q %d, tid %d, flow %d\n",
1003 tid_data->txq_id, tid, scd_flow);
1004 rcu_read_unlock();
1005 return;
1006 }
1007
1008 spin_lock_bh(&mvmsta->lock);
1009
1010 __skb_queue_head_init(&reclaimed_skbs);
1011
1012 /*
1013 * Release all TFDs before the SSN, i.e. all TFDs in front of
1014 * block-ack window (we assume that they've been successfully
1015 * transmitted ... if not, it's too late anyway).
1016 */
1017 iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
1018 &reclaimed_skbs);
1019
1020 IWL_DEBUG_TX_REPLY(mvm,
1021 "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
1022 (u8 *)&ba_notif->sta_addr_lo32,
1023 ba_notif->sta_id);
1024 IWL_DEBUG_TX_REPLY(mvm,
1025 "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
1026 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
1027 (unsigned long long)le64_to_cpu(ba_notif->bitmap),
1028 scd_flow, ba_resp_scd_ssn, ba_notif->txed,
1029 ba_notif->txed_2_done);
1030
1031 tid_data->next_reclaimed = ba_resp_scd_ssn;
1032
 /* may resume a pending addBA/delBA flow now that frames drained */
1033 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1034
1035 freed = 0;
1036
1037 skb_queue_walk(&reclaimed_skbs, skb) {
1038 struct ieee80211_hdr *hdr = (void *)skb->data;
1039 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1040
1041 if (ieee80211_is_data_qos(hdr->frame_control))
1042 freed++;
1043 else
1044 WARN_ON_ONCE(1);
1045
 /* free the Tx command stashed at Tx time in driver_data[1] */
1046 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1047
1048 memset(&info->status, 0, sizeof(info->status));
1049 /* Packet was transmitted successfully, failures come as single
1050 * frames because before failing a frame the firmware transmits
1051 * it without aggregation at least once.
1052 */
1053 info->flags |= IEEE80211_TX_STAT_ACK;
1054
1055 /* this is the first skb we deliver in this batch */
1056 /* put the rate scaling data there */
1057 if (freed == 1)
1058 iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
1059 }
1060
1061 spin_unlock_bh(&mvmsta->lock);
1062
1063 /* We got a BA notif with 0 acked or scd_ssn didn't progress which is
1064 * possible (i.e. first MPDU in the aggregation wasn't acked)
1065 * Still it's important to update RS about sent vs. acked.
1066 */
1067 if (skb_queue_empty(&reclaimed_skbs)) {
1068 struct ieee80211_tx_info ba_info = {};
1069 struct ieee80211_chanctx_conf *chanctx_conf = NULL;
1070
1071 if (mvmsta->vif)
1072 chanctx_conf =
1073 rcu_dereference(mvmsta->vif->chanctx_conf);
1074
1075 if (WARN_ON_ONCE(!chanctx_conf))
1076 goto out;
1077
1078 ba_info.band = chanctx_conf->def.chan->band;
1079 iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
1080
1081 IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
1082 iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
1083 }
1084
1085out:
1086 rcu_read_unlock();
1087
 /* status is delivered outside the RCU critical section */
1088 while (!skb_queue_empty(&reclaimed_skbs)) {
1089 skb = __skb_dequeue(&reclaimed_skbs);
1090 ieee80211_tx_status(mvm->hw, skb);
1091 }
1092}
1093
1094/*
1095 * Note that there are transports that buffer frames before they reach
1096 * the firmware. This means that after flush_tx_path is called, the
1097 * queue might not be empty. The race-free way to handle this is to:
1098 * 1) set the station as draining
1099 * 2) flush the Tx path
1100 * 3) wait for the transport queues to be empty
1101 */
1102int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
1103{
1104 int ret;
1105 struct iwl_tx_path_flush_cmd flush_cmd = {
1106 .queues_ctl = cpu_to_le32(tfd_msk),
1107 .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
1108 };
1109
1110 ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
1111 sizeof(flush_cmd), &flush_cmd);
1112 if (ret)
1113 IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
1114 return ret;
1115}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
new file mode 100644
index 000000000000..ad0f16909e2e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -0,0 +1,1083 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright (C) 2015 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <ilw@linux.intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66#include <net/mac80211.h>
67
68#include "iwl-debug.h"
69#include "iwl-io.h"
70#include "iwl-prph.h"
71
72#include "mvm.h"
73#include "fw-api-rs.h"
74
75/*
76 * Will return 0 even if the cmd failed when RFKILL is asserted unless
77 * CMD_WANT_SKB is set in cmd->flags.
78 */
79int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
80{
81 int ret;
82
83#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
84 if (WARN_ON(mvm->d3_test_active))
85 return -EIO;
86#endif
87
88 /*
89 * Synchronous commands from this op-mode must hold
90 * the mutex, this ensures we don't try to send two
91 * (or more) synchronous commands at a time.
92 */
93 if (!(cmd->flags & CMD_ASYNC))
94 lockdep_assert_held(&mvm->mutex);
95
96 ret = iwl_trans_send_cmd(mvm->trans, cmd);
97
98 /*
99 * If the caller wants the SKB, then don't hide any problems, the
100 * caller might access the response buffer which will be NULL if
101 * the command failed.
102 */
103 if (cmd->flags & CMD_WANT_SKB)
104 return ret;
105
106 /* Silently ignore failures if RFKILL is asserted */
107 if (!ret || ret == -ERFKILL)
108 return 0;
109 return ret;
110}
111
112int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
113 u32 flags, u16 len, const void *data)
114{
115 struct iwl_host_cmd cmd = {
116 .id = id,
117 .len = { len, },
118 .data = { data, },
119 .flags = flags,
120 };
121
122 return iwl_mvm_send_cmd(mvm, &cmd);
123}
124
125/*
126 * We assume that the caller set the status to the success value
127 */
/*
 * Send a synchronous command and extract the 32-bit status word from its
 * response. *status is written only when a well-formed response arrives;
 * on RFKILL the command "succeeds" (returns 0) and *status keeps the
 * success value the caller is assumed to have preset.
 */
128int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
129 u32 *status)
130{
131 struct iwl_rx_packet *pkt;
132 struct iwl_cmd_response *resp;
133 int ret, resp_len;
134
135 lockdep_assert_held(&mvm->mutex);
136
137#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
138 if (WARN_ON(mvm->d3_test_active))
139 return -EIO;
140#endif
141
142 /*
143 * Only synchronous commands can wait for status,
144 * we use WANT_SKB so the caller can't.
145 */
146 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
147 "cmd flags %x", cmd->flags))
148 return -EINVAL;
149
150 cmd->flags |= CMD_WANT_SKB;
151
152 ret = iwl_trans_send_cmd(mvm->trans, cmd);
153 if (ret == -ERFKILL) {
154 /*
155 * The command failed because of RFKILL, don't update
156 * the status, leave it as success and return 0.
157 */
158 return 0;
159 } else if (ret) {
160 return ret;
161 }
162
163 pkt = cmd->resp_pkt;
164 /* Can happen if RFKILL is asserted */
165 if (!pkt) {
166 ret = 0;
167 goto out_free_resp;
168 }
169
170 resp_len = iwl_rx_packet_payload_len(pkt);
171 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
172 ret = -EIO;
173 goto out_free_resp;
174 }
175
176 resp = (void *)pkt->data;
177 *status = le32_to_cpu(resp->status);
 /* the response buffer is ours to free since we set CMD_WANT_SKB */
178 out_free_resp:
179 iwl_free_resp(cmd);
180 return ret;
181}
182
183/*
184 * We assume that the caller set the status to the success value
185 */
186int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
187 const void *data, u32 *status)
188{
189 struct iwl_host_cmd cmd = {
190 .id = id,
191 .len = { len, },
192 .data = { data, },
193 };
194
195 return iwl_mvm_send_cmd_status(mvm, &cmd, status);
196}
197
198#define IWL_DECLARE_RATE_INFO(r) \
199 [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
200
201/*
202 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
203 */
/* NOTE(review): CCK rates (1-11 Mbps) appear to come first, then OFDM
 * (6-54 Mbps); iwl_mvm_legacy_rate_to_mac80211_idx() below relies on the
 * OFDM entries starting at IWL_FIRST_OFDM_RATE - confirm before reordering.
 */
204static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
205 IWL_DECLARE_RATE_INFO(1),
206 IWL_DECLARE_RATE_INFO(2),
207 IWL_DECLARE_RATE_INFO(5),
208 IWL_DECLARE_RATE_INFO(11),
209 IWL_DECLARE_RATE_INFO(6),
210 IWL_DECLARE_RATE_INFO(9),
211 IWL_DECLARE_RATE_INFO(12),
212 IWL_DECLARE_RATE_INFO(18),
213 IWL_DECLARE_RATE_INFO(24),
214 IWL_DECLARE_RATE_INFO(36),
215 IWL_DECLARE_RATE_INFO(48),
216 IWL_DECLARE_RATE_INFO(54),
217};
218
219int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
220 enum ieee80211_band band)
221{
222 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
223 int idx;
224 int band_offset = 0;
225
226 /* Legacy rate format, search for match in table */
227 if (band == IEEE80211_BAND_5GHZ)
228 band_offset = IWL_FIRST_OFDM_RATE;
229 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
230 if (fw_rate_idx_to_plcp[idx] == rate)
231 return idx - band_offset;
232
233 return -1;
234}
235
236u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
237{
238 /* Get PLCP rate for tx_cmd->rate_n_flags */
239 return fw_rate_idx_to_plcp[rate_idx];
240}
241
242void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
243{
244 struct iwl_rx_packet *pkt = rxb_addr(rxb);
245 struct iwl_error_resp *err_resp = (void *)pkt->data;
246
247 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
248 le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
249 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
250 le16_to_cpu(err_resp->bad_cmd_seq_num),
251 le32_to_cpu(err_resp->error_service));
252 IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
253 le64_to_cpu(err_resp->timestamp));
254}
255
/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0); /* arbitrary fallback: antenna A */
	return BIT(ffs(mask) - 1); /* ffs is 1-based, BIT() wants 0-based */
}
267
268/*
269 * Toggles between TX antennas to send the probe request on.
270 * Receives the bitmask of valid TX antennas and the *index* used
271 * for the last TX, and returns the next valid *index* to use.
272 * In order to set it in the tx_cmd, must do BIT(idx).
273 */
274u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
275{
276 u8 ind = last_idx;
277 int i;
278
279 for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
280 ind = (ind + 1) % RATE_MCS_ANT_NUM;
281 if (valid & BIT(ind))
282 return ind;
283 }
284
285 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
286 return last_idx;
287}
288
/*
 * Lookup table mapping firmware SYSASSERT error ids to printable
 * names. The final entry (num == 0, "ADVANCED_SYSASSERT") is the
 * catch-all that desc_lookup() falls back to for unknown ids.
 */
static const struct {
	const char *name;
	u8 num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
310
311static const char *desc_lookup(u32 num)
312{
313 int i;
314
315 for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
316 if (advanced_lookup[i].num == num)
317 return advanced_lookup[i].name;
318
319 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
320 return advanced_lookup[i].name;
321}
322
/*
 * Driver-side mirror of the firmware error event table, v1 layout
 * (LOG_ERROR_TABLE_API_S_VER_1), with a single combined ucode_ver word.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_error_event_table_v1 {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 ucode_ver;		/* uCode version */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
375
/*
 * Current firmware error event table layout
 * (LOG_ERROR_TABLE_API_S_VER_2). Same as v1, except the single
 * ucode_ver word is split into separate major/minor words.
 * Like v1, this is read from the device with u32-sized IO accesses
 * that already perform the endian conversion.
 */
struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 major;		/* uCode version major */
	u32 minor;		/* uCode version minor */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
423
/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_umac_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 umac_major;		/* UMAC uCode version major */
	u32 umac_minor;		/* UMAC uCode version minor */
	u32 frame_pointer;	/* core register 27*/
	u32 stack_pointer;	/* core register 28 */
	u32 cmd_header;		/* latest host cmd sent to UMAC */
	u32 nic_isr_pref;	/* ISR status register */
} __packed;
448
/* Event-log geometry: one leading validity word, then 7-word entries */
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
451
/*
 * Dump the UMAC error event table (relevant from family 8000 on)
 * to the kernel log. Reads the table from device memory at
 * mvm->umac_error_event_table; bails out if that pointer does not
 * look like a valid SRAM address.
 */
static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_umac_error_event_table table;
	u32 base;

	base = mvm->umac_error_event_table;

	/* addresses below 0x800000 cannot hold the error table */
	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	/* print the dump header whenever the log is non-empty
	 * (table.valid is the entry count; zero means empty) */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
493
/*
 * Dump the LMAC error event table in its old (v1) layout, used by
 * firmware images that lack IWL_UCODE_TLV_API_NEW_VERSION. Falls back
 * to the per-image error log pointer from the firmware file when the
 * runtime pointer has not been set yet.
 */
static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table_v1 table;
	u32 base;

	base = mvm->error_event_table;
	/* fall back to the pointer embedded in the firmware image */
	if (mvm->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	/* addresses below 0x800000 cannot hold the error table */
	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	/* print the dump header whenever the log is non-empty
	 * (table.valid is the entry count; zero means empty) */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver, 0,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);

	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}
574
/*
 * Dump the LMAC error event table to the kernel log (current v2
 * layout). Delegates to iwl_mvm_dump_nic_error_log_old() for firmware
 * images without IWL_UCODE_TLV_API_NEW_VERSION, and additionally dumps
 * the UMAC table when supported.
 */
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table table;
	u32 base;

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
		iwl_mvm_dump_nic_error_log_old(mvm);
		return;
	}

	base = mvm->error_event_table;
	/* fall back to the pointer embedded in the firmware image */
	if (mvm->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	/* addresses below 0x800000 cannot hold the error table */
	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	/* print the dump header whenever the log is non-empty
	 * (table.valid is the entry count; zero means empty) */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.major,
				      table.minor, table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);

	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}
661
662int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
663{
664 int i;
665
666 lockdep_assert_held(&mvm->queue_info_lock);
667
668 for (i = minq; i <= maxq; i++)
669 if (mvm->queue_info[i].hw_queue_refcount == 0 &&
670 !mvm->queue_info[i].setup_reserved)
671 return i;
672
673 return -ENOSPC;
674}
675
/*
 * Enable a TX queue for the given TID and map it to a mac80211 queue.
 * Bumps the queue's refcount under queue_info_lock; the HW/FW
 * configuration (transport enable + SCD_QUEUE_CFG command) is only
 * sent for the first user of the queue.
 */
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
			unsigned int wdg_timeout)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
			cfg->tid);
		return;
	}

	/* Update mappings and refcounts */
	mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
	mvm->queue_info[queue].hw_queue_refcount++;
	/* queue already active - only bookkeeping was needed */
	if (mvm->queue_info[queue].hw_queue_refcount > 1)
		enable_queue = false;
	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].hw_queue_refcount,
			    mvm->queue_info[queue].hw_queue_to_mac80211);

	spin_unlock_bh(&mvm->queue_info_lock);

	/* Send the enabling command if we need to */
	if (enable_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.enable = 1,
			.window = cfg->frame_limit,
			.sta_id = cfg->sta_id,
			.ssn = cpu_to_le16(ssn),
			.tx_fifo = cfg->fifo,
			.aggregate = cfg->aggregate,
			.tid = cfg->tid,
		};

		/* NOTE(review): cfg is deliberately not passed to the
		 * transport here; the scheduler config goes to the FW
		 * via the SCD_QUEUE_CFG command below - confirm */
		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
					 wdg_timeout);
		WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					  &cmd),
		     "Failed to configure queue %d on FIFO %d\n", queue,
		     cfg->fifo);
	}
}
727
/*
 * Disable a TID on a TX queue and drop the mac80211 queue mapping.
 * Decrements the queue's refcount under queue_info_lock; the HW/FW
 * teardown (transport disable + SCD_QUEUE_CFG with enable=0) is only
 * performed once the last user is gone.
 */
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			 u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.enable = 0,
	};
	bool remove_mac_queue = true;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);

	/* disabling a queue that was never enabled is a driver bug */
	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->queue_info[queue].hw_queue_to_mac80211 &=
			~BIT(mac80211_queue);
	mvm->queue_info[queue].hw_queue_refcount--;

	/* keep the queue enabled in FW as long as someone still uses it */
	cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].hw_queue_refcount,
			    mvm->queue_info[queue].hw_queue_to_mac80211);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.enable) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return;
	}

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].hw_queue_refcount ||
	     mvm->queue_info[queue].tid_bitmap ||
	     mvm->queue_info[queue].hw_queue_to_mac80211,
	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
	     queue, mvm->queue_info[queue].hw_queue_refcount,
	     mvm->queue_info[queue].hw_queue_to_mac80211,
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* lock must be dropped before sending commands / touching the trans */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
}
805
806/**
807 * iwl_mvm_send_lq_cmd() - Send link quality command
808 * @init: This command is sent as part of station initialization right
809 * after station has been added.
810 *
811 * The link quality command is sent as the last step of station creation.
812 * This is the special case in which init is set and we call a callback in
813 * this case to clear the state indicating that station creation is in
814 * progress.
815 */
816int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
817{
818 struct iwl_host_cmd cmd = {
819 .id = LQ_CMD,
820 .len = { sizeof(struct iwl_lq_cmd), },
821 .flags = init ? 0 : CMD_ASYNC,
822 .data = { lq, },
823 };
824
825 if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
826 return -EINVAL;
827
828 return iwl_mvm_send_cmd(mvm, &cmd);
829}
830
831/**
832 * iwl_mvm_update_smps - Get a request to change the SMPS mode
833 * @req_type: The part of the driver who call for a change.
834 * @smps_requests: The request to change the SMPS mode.
835 *
836 * Get a requst to change the SMPS mode,
837 * and change it according to all other requests in the driver.
838 */
839void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
840 enum iwl_mvm_smps_type_request req_type,
841 enum ieee80211_smps_mode smps_request)
842{
843 struct iwl_mvm_vif *mvmvif;
844 enum ieee80211_smps_mode smps_mode;
845 int i;
846
847 lockdep_assert_held(&mvm->mutex);
848
849 /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
850 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
851 return;
852
853 if (vif->type == NL80211_IFTYPE_AP)
854 smps_mode = IEEE80211_SMPS_OFF;
855 else
856 smps_mode = IEEE80211_SMPS_AUTOMATIC;
857
858 mvmvif = iwl_mvm_vif_from_mac80211(vif);
859 mvmvif->smps_requests[req_type] = smps_request;
860 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
861 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
862 smps_mode = IEEE80211_SMPS_STATIC;
863 break;
864 }
865 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
866 smps_mode = IEEE80211_SMPS_DYNAMIC;
867 }
868
869 ieee80211_request_smps(vif, smps_mode);
870}
871
/*
 * Request a statistics dump from the firmware and feed the response
 * into the driver's RX-statistics handling. When @clear is set, the
 * firmware is told to clear its counters and the radio statistics are
 * folded into the accumulated totals afterwards.
 * Returns 0 on success or a negative error code from the command.
 */
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};
	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
		.flags = CMD_WANT_SKB, /* we need the response packet */
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
	iwl_free_resp(&cmd);

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}
897
/*
 * Fold the current radio statistics snapshot into the accumulated
 * totals (rx/tx time, RF on-time, scan on-time).
 */
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}
905
906static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
907 struct ieee80211_vif *vif)
908{
909 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
910 bool *result = _data;
911 int i;
912
913 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
914 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
915 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
916 *result = false;
917 }
918}
919
920bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
921{
922 bool result = true;
923
924 lockdep_assert_held(&mvm->mutex);
925
926 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
927 return false;
928
929 if (mvm->cfg->rx_with_siso_diversity)
930 return false;
931
932 ieee80211_iterate_active_interfaces_atomic(
933 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
934 iwl_mvm_diversity_iter, &result);
935
936 return result;
937}
938
939int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
940 bool value)
941{
942 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
943 int res;
944
945 lockdep_assert_held(&mvm->mutex);
946
947 if (mvmvif->low_latency == value)
948 return 0;
949
950 mvmvif->low_latency = value;
951
952 res = iwl_mvm_update_quotas(mvm, false, NULL);
953 if (res)
954 return res;
955
956 iwl_mvm_bt_coex_vif_change(mvm);
957
958 return iwl_mvm_power_update_mac(mvm);
959}
960
961static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
962{
963 bool *result = _data;
964
965 if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
966 *result = true;
967}
968
969bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
970{
971 bool result = false;
972
973 ieee80211_iterate_active_interfaces_atomic(
974 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
975 iwl_mvm_ll_iter, &result);
976
977 return result;
978}
979
/* Accumulator for iwl_mvm_bss_iface_iterator() */
struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;	/* the managed vif found, if any */
	bool error;			/* more than one managed vif seen */
};
984
985static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
986 struct ieee80211_vif *vif)
987{
988 struct iwl_bss_iter_data *data = _data;
989
990 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
991 return;
992
993 if (data->vif) {
994 data->error = true;
995 return;
996 }
997
998 data->vif = vif;
999}
1000
1001struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
1002{
1003 struct iwl_bss_iter_data bss_iter_data = {};
1004
1005 ieee80211_iterate_active_interfaces_atomic(
1006 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1007 iwl_mvm_bss_iface_iterator, &bss_iter_data);
1008
1009 if (bss_iter_data.error) {
1010 IWL_ERR(mvm, "More than one managed interface active!\n");
1011 return ERR_PTR(-EINVAL);
1012 }
1013
1014 return bss_iter_data.vif;
1015}
1016
/*
 * Return the TX queue watchdog timeout to use for the given queue.
 * When the FW debug TXQ-timers trigger is enabled, per-interface-type
 * values from the trigger take precedence; otherwise the config
 * default is used (or the watchdog is disabled when TFD queue hang
 * detection is off).
 */
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout =
		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;

	/* no trigger configured - use module-param-controlled default */
	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
		return iwlmvm_mod_params.tfd_q_hang_detect ?
			default_timeout : IWL_WATCHDOG_DISABLED;

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	/* TDLS and the command queue have dedicated values */
	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	/* from here on a vif is required to pick the per-type value */
	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	default:
		WARN_ON(1);
		return mvm->cfg->base_params->wd_timeout;
	}
}
1060
/*
 * Report a connection loss to mac80211, optionally collecting FW debug
 * data first when the MLME debug trigger is armed for connection loss.
 * The trigger's stop_connection_loss counter is decremented per event
 * and collection only fires once it reaches zero.
 */
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		goto out;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		goto out;

	/* countdown: skip collection until the counter hits zero */
	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg);

out:
	/* always tell mac80211, regardless of debug collection */
	ieee80211_connection_loss(vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
new file mode 100644
index 000000000000..644b58bc5226
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -0,0 +1,685 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
67
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/pci-aspm.h>
71#include <linux/acpi.h>
72
73#include "iwl-trans.h"
74#include "iwl-drv.h"
75#include "internal.h"
76
/*
 * Build one pci_device_id table entry: match an Intel device/subdevice
 * pair (any subsystem vendor) and stash a pointer to the corresponding
 * iwl_cfg in driver_data for iwl_pci_probe() to retrieve.
 */
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
	.driver_data = (kernel_ulong_t)&(cfg)
81
82/* Hardware specific file defines the PCI IDs table for that hardware module */
83static const struct pci_device_id iwl_hw_card_ids[] = {
84#if IS_ENABLED(CONFIG_IWLDVM)
85 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
86 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
87 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
88 {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
89 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
90 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
91 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
92 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
93 {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
94 {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
95 {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
96 {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
97 {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
98 {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
99 {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
100 {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
101 {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
102 {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
103 {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
104 {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
105 {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
106 {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
107 {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
108 {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
109
110/* 5300 Series WiFi */
111 {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
112 {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
113 {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
114 {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
115 {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
116 {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
117 {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
118 {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
119 {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
120 {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
121 {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
122 {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
123
124/* 5350 Series WiFi/WiMax */
125 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
126 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
127 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
128
/* 5150 Series WiFi/WiMax */
130 {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
131 {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
132 {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
133 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
134 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
135 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
136 {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
137
138 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
139 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
140 {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
141 {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
142
143/* 6x00 Series */
144 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
145 {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
146 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
147 {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
148 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
149 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
150 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
151 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
152 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
153 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
154 {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
155 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
156 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
157
158/* 6x05 Series */
159 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
160 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
161 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
163 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
164 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
165 {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
166 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
167 {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
168 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
169 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
170 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
171 {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
172 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
173 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
174 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
175
176/* 6x30 Series */
177 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
178 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
179 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
180 {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
181 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
182 {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
183 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
184 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
185 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
186 {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
187 {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
188 {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
189 {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
190 {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
191 {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
192 {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
193
194/* 6x50 WiFi/WiMax Series */
195 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
196 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
197 {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
198 {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
199 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
200 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
201
202/* 6150 WiFi/WiMax Series */
203 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
204 {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
205 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
206 {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
207 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
208 {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
209
210/* 1000 Series WiFi */
211 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
212 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
213 {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
214 {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
215 {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
216 {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
217 {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
218 {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
219 {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
220 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
221 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
222 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
223
224/* 100 Series WiFi */
225 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
226 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
227 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
228 {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
229 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
230 {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
231
232/* 130 Series WiFi */
233 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
234 {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
235 {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
236 {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
237 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
238 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
239
240/* 2x00 Series */
241 {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
242 {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
243 {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
244 {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
245
246/* 2x30 Series */
247 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
248 {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
249 {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
250
251/* 6x35 Series */
252 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
253 {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
254 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
255 {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
256 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
257 {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
258 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
259 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
260
261/* 105 Series */
262 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
263 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
264 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
265 {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
266
267/* 135 Series */
268 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
269 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
270 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
271#endif /* CONFIG_IWLDVM */
272
273#if IS_ENABLED(CONFIG_IWLMVM)
274/* 7260 Series */
275 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
277 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
278 {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
279 {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
280 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
281 {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
282 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
283 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
284 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
285 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
286 {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
287 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
288 {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
289 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
290 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
291 {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
292 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
293 {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
294 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
295 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
296 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
297 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
298 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
299 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
300 {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
301 {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
302 {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
303 {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
304 {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
306 {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
307 {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
308 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
309 {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
310 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
311 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
312 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
313 {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
314 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
315 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
316 {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
317 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
318 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
319 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
320 {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
321 {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
322 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
323 {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
324 {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
325 {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
326 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
327 {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
328 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
329 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
330 {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
331 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
332 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
333 {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
334 {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
335 {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
336 {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
337 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
338 {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
339 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
340 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
341
342/* 3160 Series */
343 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
344 {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
345 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
346 {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
347 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
348 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
349 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
350 {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
351 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
352 {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
353 {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
354 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
355 {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
356 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
357 {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
358 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
359 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
360 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
365 {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
366 {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
367
368/* 3165 Series */
369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
377 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
379
380/* 7265 Series */
381 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
385 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
386 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
387 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
388 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
389 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
390 {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
391 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
392 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
393 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
394 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
395 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
396 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
397 {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
398 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
399 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
400 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
401 {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
402 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
403 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
404 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
405 {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
406 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
407 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
408 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
409 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
410 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
411 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
412 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
413 {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
414 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
415 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
416 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
417 {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
418 {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
419 {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
420 {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
421 {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
422
423/* 8000 Series */
424 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
425 {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
426 {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
435 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
436 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
437 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
438 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
439 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
440 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
441 {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
442 {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
443 {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
444 {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
445 {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
446 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
447 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
448 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
449 {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
450 {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
451 {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
452 {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
453#endif /* CONFIG_IWLMVM */
454
455 {0}
456};
457MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
458
459#ifdef CONFIG_ACPI
460#define SPL_METHOD "SPLC"
461#define SPL_DOMAINTYPE_MODULE BIT(0)
462#define SPL_DOMAINTYPE_WIFI BIT(1)
463#define SPL_DOMAINTYPE_WIGIG BIT(2)
464#define SPL_DOMAINTYPE_RFEM BIT(3)
465
466static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
467{
468 union acpi_object *limits, *domain_type, *power_limit;
469
470 if (splx->type != ACPI_TYPE_PACKAGE ||
471 splx->package.count != 2 ||
472 splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
473 splx->package.elements[0].integer.value != 0) {
474 IWL_ERR(trans, "Unsupported splx structure\n");
475 return 0;
476 }
477
478 limits = &splx->package.elements[1];
479 if (limits->type != ACPI_TYPE_PACKAGE ||
480 limits->package.count < 2 ||
481 limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
482 limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
483 IWL_ERR(trans, "Invalid limits element\n");
484 return 0;
485 }
486
487 domain_type = &limits->package.elements[0];
488 power_limit = &limits->package.elements[1];
489 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
490 IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
491 return 0;
492 }
493
494 return power_limit->integer.value;
495}
496
/*
 * Query the platform's SPLC ACPI method (on the device's root port) and
 * record the resulting default WiFi power limit in
 * trans->dflt_pwr_limit.  Silently does nothing when the handle or
 * method is absent; only an actual SPLC evaluation failure is logged as
 * an error.
 */
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
{
	acpi_handle pxsx_handle;
	acpi_handle handle;
	/* ACPICA allocates the result buffer; we must kfree it below */
	struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;

	pxsx_handle = ACPI_HANDLE(&pdev->dev);
	if (!pxsx_handle) {
		IWL_DEBUG_INFO(trans,
			       "Could not retrieve root port ACPI handle\n");
		return;
	}

	/* Get the method's handle */
	status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_INFO(trans, "SPL method not found\n");
		return;
	}

	/* Call SPLC with no arguments */
	status = acpi_evaluate_object(handle, NULL, NULL, &splx);
	if (ACPI_FAILURE(status)) {
		IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
		return;
	}

	/* splx_get_pwr_limit() returns 0 (no limit) on any parse failure */
	trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
	IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
		       trans->dflt_pwr_limit);
	kfree(splx.pointer);
}
530
#else /* CONFIG_ACPI */
/* No ACPI support: leave trans->dflt_pwr_limit untouched (no SPLC limit). */
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
#endif
534
535/* PCI registers */
536#define PCI_CFG_RETRY_TIMEOUT 0x041
537
/*
 * PCI probe hook: allocate the PCIe transport for the matched device,
 * apply the 7265D config override where needed, start the driver core,
 * read the platform power limit and register transport debugfs.
 *
 * On failure the teardown mirrors the setup order: stop the driver
 * (out_free_drv), then free the transport (out_free_trans).
 * Returns 0 on success or a negative errno.
 */
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* cfg was stashed in driver_data by the IWL_PCI_DEVICE() entries */
	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
	struct iwl_trans *iwl_trans;
	struct iwl_trans_pcie *trans_pcie;
	int ret;

	iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
	if (IS_ERR(iwl_trans))
		return PTR_ERR(iwl_trans);

#if IS_ENABLED(CONFIG_IWLMVM)
	/*
	 * special-case 7265D, it has the same PCI IDs.
	 *
	 * Note that because we already pass the cfg to the transport above,
	 * all the parameters that the transport uses must, until that is
	 * changed, be identical to the ones in the 7265D configuration.
	 */
	if (cfg == &iwl7265_2ac_cfg)
		cfg_7265d = &iwl7265d_2ac_cfg;
	else if (cfg == &iwl7265_2n_cfg)
		cfg_7265d = &iwl7265d_2n_cfg;
	else if (cfg == &iwl7265_n_cfg)
		cfg_7265d = &iwl7265d_n_cfg;
	/* Only switch cfg when the hardware revision says this is a D step */
	if (cfg_7265d &&
	    (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
		cfg = cfg_7265d;
		iwl_trans->cfg = cfg_7265d;
	}
#endif

	/* Make the transport retrievable from the pdev (see iwl_pci_remove) */
	pci_set_drvdata(pdev, iwl_trans);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
	trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);

	if (IS_ERR(trans_pcie->drv)) {
		ret = PTR_ERR(trans_pcie->drv);
		goto out_free_trans;
	}

	set_dflt_pwr_limit(iwl_trans, pdev);

	/* register transport layer debugfs here */
	ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
	if (ret)
		goto out_free_drv;

	return 0;

out_free_drv:
	iwl_drv_stop(trans_pcie->drv);
out_free_trans:
	iwl_trans_pcie_free(iwl_trans);
	return ret;
}
596
597static void iwl_pci_remove(struct pci_dev *pdev)
598{
599 struct iwl_trans *trans = pci_get_drvdata(pdev);
600 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
601
602 iwl_drv_stop(trans_pcie->drv);
603 iwl_trans_pcie_free(trans);
604}
605
606#ifdef CONFIG_PM_SLEEP
607
/*
 * System suspend hook — deliberately a no-op.
 *
 * Before you put code here, think about WoWLAN: you cannot check here
 * whether WoWLAN is enabled or not, and anything added will run even
 * when it is — so don't kill the NIC, someone may need it in Sx.
 */
static int iwl_pci_suspend(struct device *device)
{
	return 0;
}
617
/*
 * System resume hook: restore the PCI retry-timeout quirk and
 * resynchronize the rfkill state with the op mode.  Always returns 0.
 */
static int iwl_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_trans *trans = pci_get_drvdata(pdev);
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;

	/* Before you put code here, think about WoWLAN. You cannot check here
	 * whether WoWLAN is enabled or not, and your code will run even if
	 * WoWLAN is enabled - the NIC may be alive.
	 */

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	/* No op mode attached yet — nothing more to resynchronize */
	if (!trans->op_mode)
		return 0;

	/*
	 * Enable rfkill interrupt (in order to keep track of
	 * the rfkill status)
	 */
	iwl_enable_rfkill_int(trans);

	/* Read the current hardware rfkill state ... */
	hw_rfkill = iwl_is_rfkill_set(trans);

	/* ... and report it under the pcie mutex */
	mutex_lock(&trans_pcie->mutex);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	mutex_unlock(&trans_pcie->mutex);

	return 0;
}
653
654static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
655
656#define IWL_PM_OPS (&iwl_dev_pm_ops)
657
658#else
659
660#define IWL_PM_OPS NULL
661
662#endif
663
/* The iwlwifi PCI driver: device-ID table plus probe/remove/PM hooks. */
static struct pci_driver iwl_pci_driver = {
	.name = DRV_NAME,
	.id_table = iwl_hw_card_ids,
	.probe = iwl_pci_probe,
	.remove = iwl_pci_remove,
	.driver.pm = IWL_PM_OPS,
};
671
672int __must_check iwl_pci_register_driver(void)
673{
674 int ret;
675 ret = pci_register_driver(&iwl_pci_driver);
676 if (ret)
677 pr_err("Unable to initialize PCI module\n");
678
679 return ret;
680}
681
/* Detach the PCI driver; the core calls iwl_pci_remove() for each bound
 * device. */
void iwl_pci_unregister_driver(void)
{
	pci_unregister_driver(&iwl_pci_driver);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
new file mode 100644
index 000000000000..feb2f7e81134
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -0,0 +1,569 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 *
6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 *
22 * The full GNU General Public License is included in this distribution in the
23 * file called LICENSE.
24 *
25 * Contact Information:
26 * Intel Linux Wireless <ilw@linux.intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *
29 *****************************************************************************/
30#ifndef __iwl_trans_int_pcie_h__
31#define __iwl_trans_int_pcie_h__
32
33#include <linux/spinlock.h>
34#include <linux/interrupt.h>
35#include <linux/skbuff.h>
36#include <linux/wait.h>
37#include <linux/pci.h>
38#include <linux/timer.h>
39
40#include "iwl-fh.h"
41#include "iwl-csr.h"
42#include "iwl-trans.h"
43#include "iwl-debug.h"
44#include "iwl-io.h"
45#include "iwl-op-mode.h"
46
47/* We need 2 entries for the TX command and header, and another one might
48 * be needed for potential data in the SKB's head. The remaining ones can
49 * be used for frags.
50 */
51#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
52
53/*
54 * RX related structures and functions
55 */
56#define RX_NUM_QUEUES 1
57#define RX_POST_REQ_ALLOC 2
58#define RX_CLAIM_REQ_ALLOC 8
59#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
60#define RX_LOW_WATERMARK 8
61
62struct iwl_host_cmd;
63
64/* This file includes the declarations that are internal to the
65 * trans_pcie layer */
66
	/* Driver state for one Rx buffer descriptor: the backing page (NULL
	 * while none is attached), its DMA address, and the node linking it
	 * into the rx_free/rx_used/rbd_* lists.
	 */
67struct iwl_rx_mem_buffer {
68	dma_addr_t page_dma;
69	struct page *page;
70	struct list_head list;
71};
72
73/**
74 * struct isr_statistics - interrupt statistics
75 *
 * Per-cause interrupt counters kept by the ISR; each field counts
 * occurrences of the correspondingly-named interrupt cause (used for
 * debug/statistics reporting).
76 */
77struct isr_statistics {
78	u32 hw;
79	u32 sw;
80	u32 err_code;
81	u32 sch;
82	u32 alive;
83	u32 rfkill;
84	u32 ctkill;
85	u32 wakeup;
86	u32 rx;
87	u32 tx;
88	u32 unhandled;
89};
90
91/**
92 * struct iwl_rxq - Rx queue
93 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
94 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
95 * @read: Shared index to newest available Rx buffer
96 * @write: Shared index to oldest written Rx packet
97 * @free_count: Number of pre-allocated buffers in rx_free
98 * @used_count: Number of RBDs handed to the allocator to use for allocation
99 * @write_actual: last value of @write that was written to the HW, rounded
 *	down to a multiple of 8 (see iwl_pcie_rxq_inc_wr_ptr)
100 * @rx_free: list of RBDs with allocated RB ready for use
101 * @rx_used: list of RBDs with no RB attached
102 * @need_update: flag to indicate we need to update read/write index
103 * @rb_stts: driver's pointer to receive buffer status
104 * @rb_stts_dma: bus address of receive buffer status
105 * @lock: protects the indices, counts and the rx_free/rx_used lists
106 * @pool: initial pool of iwl_rx_mem_buffer for the queue
107 * @queue: actual rx queue
108 *
109 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
110 */
111struct iwl_rxq {
112	__le32 *bd;
113	dma_addr_t bd_dma;
114	u32 read;
115	u32 write;
116	u32 free_count;
117	u32 used_count;
118	u32 write_actual;
119	struct list_head rx_free;
120	struct list_head rx_used;
121	bool need_update;
122	struct iwl_rb_status *rb_stts;
123	dma_addr_t rb_stts_dma;
124	spinlock_t lock;
125	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
126	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
127};
128
129/**
130 * struct iwl_rb_allocator - Rx allocator
131 * @pool: initial pool of allocator
132 * @req_pending: number of requests the allocator had not processed yet
133 * @req_ready: number of requests honored and ready for claiming
134 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
135 *	the queue. This is a list of &struct iwl_rx_mem_buffer
136 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
137 *	of &struct iwl_rx_mem_buffer
138 * @lock: protects the rbd_allocated and rbd_empty lists
139 * @alloc_wq: work queue for background calls
140 * @rx_alloc: work struct for background calls
141 */
142struct iwl_rb_allocator {
143	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
144	atomic_t req_pending;
145	atomic_t req_ready;
146	struct list_head rbd_allocated;
147	struct list_head rbd_empty;
148	spinlock_t lock;
149	struct workqueue_struct *alloc_wq;
150	struct work_struct rx_alloc;
151};
152
	/* A host-mapped DMA buffer: bus address, CPU address and size */
153struct iwl_dma_ptr {
154	dma_addr_t dma;
155	void *addr;
156	size_t size;
157};
158
159/**
160 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
161 * @index -- current index
 *
 * Relies on TFD_QUEUE_SIZE_MAX being a power of two (mask arithmetic).
162 */
163static inline int iwl_queue_inc_wrap(int index)
164{
165	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
166}
167
168/**
169 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
170 * @index -- current index
 *
 * Relies on TFD_QUEUE_SIZE_MAX being a power of two (mask arithmetic).
171 */
172static inline int iwl_queue_dec_wrap(int index)
173{
174	return --index & (TFD_QUEUE_SIZE_MAX - 1);
175}
176
	/* Per-command driver state kept alongside each queue entry */
177struct iwl_cmd_meta {
178	/* only for SYNC commands, iff the reply skb is wanted */
179	struct iwl_host_cmd *source;
180	u32 flags;	/* NOTE(review): presumably CMD_* flags - confirm */
181};
182
183/*
184 * Generic queue structure
185 *
186 * Contains common data for Rx and Tx queues.
187 *
188 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
189 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
190 * there might be HW changes in the future). For the normal TX
191 * queues, n_window, which is the size of the software queue data
192 * is also 256; however, for the command queue, n_window is only
193 * 32 since we don't need so many commands pending. Since the HW
194 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
195 * the software buffers (in the variables @meta, @txb in struct
196 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
197 * the same struct) have 256.
198 * This means that we end up with the following:
199 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
200 * SW entries: | 0 | ... | 31 |
201 * where N is a number between 0 and 7. This means that the SW
202 * data is a window overlayed over the HW queue.
 *
 * read_ptr/write_ptr wrap at TFD_QUEUE_SIZE_MAX via
 * iwl_queue_inc_wrap()/iwl_queue_dec_wrap().
203 */
204struct iwl_queue {
205	int write_ptr;       /* 1-st empty entry (index) host_w*/
206	int read_ptr;         /* last used entry (index) host_r*/
207	/* use for monitoring and recovering the stuck queue */
208	dma_addr_t dma_addr;   /* physical addr for BD's */
209	int n_window;	       /* safe queue window */
210	u32 id;		       /* HW queue number (bit index in the
				* queue_{used,stopped} bitmaps) */
211	int low_mark;	       /* low watermark, resume queue if free
212				* space more than this */
213	int high_mark;	       /* high watermark, stop queue if free
214				* space less than this */
215};
216
217#define TFD_TX_CMD_SLOTS 256
218#define TFD_CMD_SLOTS 32
219
220/*
221 * The FH will write back to the first TB only, so we need
222 * to copy some data into the buffer regardless of whether
223 * it should be mapped or not. This indicates how big the
224 * first TB must be to include the scratch buffer. Since
225 * the scratch is 4 bytes at offset 12, it's 16 now. If we
226 * make it bigger then allocations will be bigger and copy
227 * slower, so that's probably not useful.
228 */
229#define IWL_HCMD_SCRATCHBUF_SIZE 16
230
	/* Driver bookkeeping for one slot of a TX queue */
231struct iwl_pcie_txq_entry {
232	struct iwl_device_cmd *cmd;
233	struct sk_buff *skb;
234	/* buffer to free after command completes */
235	const void *free_buf;
236	struct iwl_cmd_meta meta;
237};
238
	/* First-TB copy area written back by the FH; laid out so that
	 * @scratch sits at offset 12 and the struct totals
	 * IWL_HCMD_SCRATCHBUF_SIZE (16) bytes - see the comment above
	 * that define.
	 */
239struct iwl_pcie_txq_scratch_buf {
240	struct iwl_cmd_header hdr;
241	u8 buf[8];
242	__le32 scratch;
243};
244
245/**
246 * struct iwl_txq - Tx Queue for DMA
247 * @q: generic Rx/Tx queue descriptor
248 * @tfds: transmit frame descriptors (DMA memory)
249 * @scratchbufs: start of command headers, including scratch buffers, for
250 *	the writeback -- this is DMA memory and an array holding one buffer
251 *	for each command on the queue
252 * @scratchbufs_dma: DMA address for the scratchbufs start
253 * @entries: transmit entries (driver state)
254 * @lock: queue lock
255 * @stuck_timer: timer that fires if queue gets stuck
256 * @trans_pcie: pointer back to transport (for timer)
257 * @need_update: indicates need to update read/write index
258 * @active: stores if queue is active
259 * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
260 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
261 * @frozen: tx stuck queue timer is frozen
262 * @frozen_expiry_remainder: remember how long until the timer fires
263 *
264 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
265 * descriptors) and required locking structures.
266 */
267struct iwl_txq {
268	struct iwl_queue q;
269	struct iwl_tfd *tfds;
270	struct iwl_pcie_txq_scratch_buf *scratchbufs;
271	dma_addr_t scratchbufs_dma;
272	struct iwl_pcie_txq_entry *entries;
273	spinlock_t lock;
274	unsigned long frozen_expiry_remainder;
275	struct timer_list stuck_timer;
276	struct iwl_trans_pcie *trans_pcie;
277	bool need_update;
278	bool frozen;
279	u8 active;
280	bool ampdu;
281	unsigned long wd_timeout;
282};
283
	/* DMA address of the idx-th scratch buffer inside the queue's
	 * contiguous scratchbufs region (plain array indexing).
	 */
284static inline dma_addr_t
285iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
286{
287	return txq->scratchbufs_dma +
288	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
289}
290
291/**
292 * struct iwl_trans_pcie - PCIe transport specific data
293 * @rxq: all the RX queue data
294 * @rba: allocator for RX replenishing
295 * @drv - pointer to iwl_drv
296 * @trans: pointer to the generic transport area
297 * @scd_base_addr: scheduler sram base address in SRAM
298 * @scd_bc_tbls: pointer to the byte count table of the scheduler
299 * @kw: keep warm address
300 * @pci_dev: basic pci-network driver stuff
301 * @hw_base: pci hardware address support
302 * @ucode_write_complete: indicates that the ucode has been copied.
303 * @ucode_write_waitq: wait queue for uCode load
304 * @cmd_queue - command queue number
305 * @rx_buf_size_8k: 8 kB RX buffer size
306 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
307 * @scd_set_active: should the transport configure the SCD for HCMD queue
308 * @wide_cmd_header: true when ucode supports wide command header format
309 * @rx_page_order: page order for receive buffer size
310 * @reg_lock: protect hw register access
311 * @mutex: to protect stop_device / start_fw / start_hw
312 * @cmd_in_flight: true when we have a host command in flight
313 * @fw_mon_phys: physical address of the buffer for the firmware monitor
314 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
315 * @fw_mon_size: size of the buffer for the firmware monitor
 *
 * Members without kernel-doc above (descriptions partly inferred,
 * NOTE(review) - confirm where marked):
 * @napi_dev, @napi: NAPI context used for Rx
 * @ict_tbl, @ict_tbl_dma, @ict_index, @use_ict: ICT (interrupt cause
 *	table) state for the interrupt path
 * @is_down: set while the transport is stopped - confirm
 * @isr_stats: per-cause interrupt counters (debug)
 * @irq_lock: protects interrupt/ICT handling state
 * @inta_mask: interrupt mask last written to CSR_INT_MASK
 * @txq: array of TX queue state
 * @queue_used/@queue_stopped: per-HW-queue bitmaps, indexed by q.id
 *	(see iwl_wake_queue/iwl_stop_queue)
 * @wait_command_queue: waitqueue for synchronous host commands
 * @cmd_fifo: FIFO number used by the command queue
 * @cmd_q_wdg_timeout: watchdog timeout of the command queue
 * @n_no_reclaim_cmds/@no_reclaim_cmds: command ids excluded from reclaim
 * @command_names: command-id -> name table used by get_cmd_string()
 * @cmd_hold_nic_awake, @ref_cmd_in_flight: NIC-awake/reference state for
 *	in-flight commands - confirm exact semantics
 * @ref_lock: protects @ref_count
 * @ref_count: transport reference count (iwl_trans_pcie_ref/unref)
316 */
317struct iwl_trans_pcie {
318	struct iwl_rxq rxq;
319	struct iwl_rb_allocator rba;
320	struct iwl_trans *trans;
321	struct iwl_drv *drv;
322
323	struct net_device napi_dev;
324	struct napi_struct napi;
325
326	/* INT ICT Table */
327	__le32 *ict_tbl;
328	dma_addr_t ict_tbl_dma;
329	int ict_index;
330	bool use_ict;
331	bool is_down;
332	struct isr_statistics isr_stats;
333
334	spinlock_t irq_lock;
335	struct mutex mutex;
336	u32 inta_mask;
337	u32 scd_base_addr;
338	struct iwl_dma_ptr scd_bc_tbls;
339	struct iwl_dma_ptr kw;
340
341	struct iwl_txq *txq;
342	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
343	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
344
345	/* PCI bus related data */
346	struct pci_dev *pci_dev;
347	void __iomem *hw_base;
348
349	bool ucode_write_complete;
350	wait_queue_head_t ucode_write_waitq;
351	wait_queue_head_t wait_command_queue;
352
353	u8 cmd_queue;
354	u8 cmd_fifo;
355	unsigned int cmd_q_wdg_timeout;
356	u8 n_no_reclaim_cmds;
357	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
358
359	bool rx_buf_size_8k;
360	bool bc_table_dword;
361	bool scd_set_active;
362	bool wide_cmd_header;
363	u32 rx_page_order;
364
365	const char *const *command_names;
366
367	/*protect hw register */
368	spinlock_t reg_lock;
369	bool cmd_hold_nic_awake;
370	bool ref_cmd_in_flight;
371
372	/* protect ref counter */
373	spinlock_t ref_lock;
374	u32 ref_count;
375
376	dma_addr_t fw_mon_phys;
377	struct page *fw_mon_page;
378	u32 fw_mon_size;
379};
380
	/* The PCIe-private data lives in the trans_specific member of
	 * struct iwl_trans; these two convert between the two views.
	 */
381#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans)	\
382	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
383
384static inline struct iwl_trans *
385iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
386{
	/* Inverse of IWL_TRANS_GET_PCIE_TRANS: recover the containing
	 * iwl_trans from its trans_specific member.
	 */
387	return container_of((void *)trans_pcie, struct iwl_trans,
388			    trans_specific);
389}
390
391/*
392 * Convention: trans API functions: iwl_trans_pcie_XXX
393 * Other functions: iwl_pcie_XXX
394 */
395struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
396 const struct pci_device_id *ent,
397 const struct iwl_cfg *cfg);
398void iwl_trans_pcie_free(struct iwl_trans *trans);
399
400/*****************************************************
401* RX
402******************************************************/
403int iwl_pcie_rx_init(struct iwl_trans *trans);
404irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
405int iwl_pcie_rx_stop(struct iwl_trans *trans);
406void iwl_pcie_rx_free(struct iwl_trans *trans);
407
408/*****************************************************
409* ICT - interrupt handling
410******************************************************/
411irqreturn_t iwl_pcie_isr(int irq, void *data);
412int iwl_pcie_alloc_ict(struct iwl_trans *trans);
413void iwl_pcie_free_ict(struct iwl_trans *trans);
414void iwl_pcie_reset_ict(struct iwl_trans *trans);
415void iwl_pcie_disable_ict(struct iwl_trans *trans);
416
417/*****************************************************
418* TX / HCMD
419******************************************************/
420int iwl_pcie_tx_init(struct iwl_trans *trans);
421void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
422int iwl_pcie_tx_stop(struct iwl_trans *trans);
423void iwl_pcie_tx_free(struct iwl_trans *trans);
424void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
425 const struct iwl_trans_txq_scd_cfg *cfg,
426 unsigned int wdg_timeout);
427void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
428 bool configure_scd);
429int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
430 struct iwl_device_cmd *dev_cmd, int txq_id);
431void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
432int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
433void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
434 struct iwl_rx_cmd_buffer *rxb);
435void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
436 struct sk_buff_head *skbs);
437void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
438
439void iwl_trans_pcie_ref(struct iwl_trans *trans);
440void iwl_trans_pcie_unref(struct iwl_trans *trans);
441
	/* Length in bytes of TB @idx: hi_n_len packs the length in the upper
	 * 12 bits (low 4 bits presumably hold address high bits - confirm).
	 */
442static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
443{
444	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
445
446	return le16_to_cpu(tb->hi_n_len) >> 4;
447}
448
449/*****************************************************
450* Error handling
451******************************************************/
452void iwl_pcie_dump_csr(struct iwl_trans *trans);
453
454/*****************************************************
455* Helpers
456******************************************************/
	/* Mask all interrupts towards the host and ack anything pending */
457static inline void iwl_disable_interrupts(struct iwl_trans *trans)
458{
459	clear_bit(STATUS_INT_ENABLED, &trans->status);
460
461	/* disable interrupts from uCode/NIC to host */
462	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
463
464	/* acknowledge/clear/reset any interrupts still pending
465	 * from uCode or flow handler (Rx/Tx DMA) */
466	iwl_write32(trans, CSR_INT, 0xffffffff);
467	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
468	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
469}
470
	/* Re-enable the full interrupt set (CSR_INI_SET_MASK) */
471static inline void iwl_enable_interrupts(struct iwl_trans *trans)
472{
473	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
474
475	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
476	set_bit(STATUS_INT_ENABLED, &trans->status);
477	trans_pcie->inta_mask = CSR_INI_SET_MASK;
478	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
479}
480
	/* Leave only the RF-kill interrupt enabled (e.g. across suspend,
	 * see iwl_pci_resume) so rfkill changes are still noticed.
	 */
481static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
482{
483	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
484
485	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
486	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
487	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
488}
489
	/* Tell the op_mode a HW queue has room again; the test_and_clear
	 * makes the notification fire only on a stopped->running edge.
	 */
490static inline void iwl_wake_queue(struct iwl_trans *trans,
491				  struct iwl_txq *txq)
492{
493	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
494
495	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
496		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
497		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
498	}
499}
500
	/* Tell the op_mode a HW queue is full; idempotent thanks to the
	 * test_and_set on the queue_stopped bitmap.
	 */
501static inline void iwl_stop_queue(struct iwl_trans *trans,
502				  struct iwl_txq *txq)
503{
504	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
505
506	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
507		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
508		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
509	} else
510		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
511				    txq->q.id);
512}
513
	/* Is entry index @i inside the in-use window [read_ptr, write_ptr)?
	 * Both orderings of the pointers (wrapped and unwrapped) are handled.
	 */
514static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
515{
516	return q->write_ptr >= q->read_ptr ?
517	       (i >= q->read_ptr && i < q->write_ptr) :
518	       !(i < q->read_ptr && i >= q->write_ptr);
519}
520
	/* Map a wrapping HW index onto the SW window; relies on n_window
	 * being a power of two.
	 */
521static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
522{
523	return index & (q->n_window - 1);
524}
525
	/* Debug name for a host command id; "UNKNOWN" when no table or name */
526static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
527					 u8 cmd)
528{
529	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
530		return "UNKNOWN";
531	return trans_pcie->command_names[cmd];
532}
533
	/* The HW_RF_KILL_SW bit is active-low: bit clear = RF-kill asserted */
534static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
535{
536	return !(iwl_read32(trans, CSR_GP_CNTRL) &
537		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
538}
539
	/* Read-modify-write of a CSR: clear @mask bits, then set @value bits.
	 * Not atomic; NOTE(review): callers appear to rely on external
	 * serialization (e.g. reg_lock) - confirm.
	 */
540static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
541						  u32 reg, u32 mask, u32 value)
542{
543	u32 v;
544
545#ifdef CONFIG_IWLWIFI_DEBUG
546	WARN_ON_ONCE(value & ~mask);
547#endif
548
549	v = iwl_read32(trans, reg);
550	v &= ~mask;
551	v |= value;
552	iwl_write32(trans, reg, v);
553}
554
	/* Clear @mask bits in @reg */
555static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
556					      u32 reg, u32 mask)
557{
558	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
559}
560
	/* Set @mask bits in @reg */
561static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
562					    u32 reg, u32 mask)
563{
564	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
565}
566
567void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
568
569#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
new file mode 100644
index 000000000000..e06591f625c4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -0,0 +1,1548 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 *
6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 *
22 * The full GNU General Public License is included in this distribution in the
23 * file called LICENSE.
24 *
25 * Contact Information:
26 * Intel Linux Wireless <ilw@linux.intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *
29 *****************************************************************************/
30#include <linux/sched.h>
31#include <linux/wait.h>
32#include <linux/gfp.h>
33
34#include "iwl-prph.h"
35#include "iwl-io.h"
36#include "internal.h"
37#include "iwl-op-mode.h"
38
39/******************************************************************************
40 *
41 * RX path functions
42 *
43 ******************************************************************************/
44
45/*
46 * Rx theory of operation
47 *
48 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
49 * each of which point to Receive Buffers to be filled by the NIC. These get
50 * used not only for Rx frames, but for any command response or notification
51 * from the NIC. The driver and NIC manage the Rx buffers by means
52 * of indexes into the circular buffer.
53 *
54 * Rx Queue Indexes
55 * The host/firmware share two index registers for managing the Rx buffers.
56 *
57 * The READ index maps to the first position that the firmware may be writing
58 * to -- the driver can read up to (but not including) this position and get
59 * good data.
60 * The READ index is managed by the firmware once the card is enabled.
61 *
62 * The WRITE index maps to the last position the driver has read from -- the
63 * position preceding WRITE is the last slot the firmware can place a packet.
64 *
65 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
66 * WRITE = READ.
67 *
68 * During initialization, the host sets up the READ queue position to the first
69 * INDEX position, and WRITE to the last (READ - 1 wrapped)
70 *
71 * When the firmware places a packet in a buffer, it will advance the READ index
72 * and fire the RX interrupt. The driver can then query the READ index and
73 * process as many packets as possible, moving the WRITE index forward as it
74 * resets the Rx queue buffers with new memory.
75 *
76 * The management in the driver is as follows:
77 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
78 * When the interrupt handler is called, the request is processed.
79 * The page is either stolen - transferred to the upper layer
80 * or reused - added immediately to the iwl->rxq->rx_free list.
81 * + When the page is stolen - the driver updates the matching queue's used
82 * count, detaches the RBD and transfers it to the queue used list.
83 * When there are two used RBDs - they are transferred to the allocator empty
84 * list. Work is then scheduled for the allocator to start allocating
85 * eight buffers.
86 * When there are another 6 used RBDs - they are transferred to the allocator
87 * empty list and the driver tries to claim the pre-allocated buffers and
88 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
89 * until ready.
90 * When there are 8+ buffers in the free list - either from allocation or from
91 * 8 reused unstolen pages - restock is called to update the FW and indexes.
92 * + In order to make sure the allocator always has RBDs to use for allocation
93 * the allocator has initial pool in the size of num_queues*(8-2) - the
94 * maximum missing RBDs per allocation request (request posted with 2
95 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
96 * The queues supply the recycling of the rest of the RBDs.
97 * + A received packet is processed and handed to the kernel network stack,
98 * detached from the iwl->rxq. The driver 'processed' index is updated.
99 * + If there are no allocated buffers in iwl->rxq->rx_free,
100 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
101 * If there were enough free buffers and RX_STALLED is set it is cleared.
102 *
103 *
104 * Driver sequence:
105 *
106 * iwl_rxq_alloc() Allocates rx_free
107 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
108 * iwl_pcie_rxq_restock.
109 * Used only during initialization.
110 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
111 * queue, updates firmware pointers, and updates
112 * the WRITE index.
113 * iwl_pcie_rx_allocator() Background work for allocating pages.
114 *
115 * -- enable interrupts --
116 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
117 * READ INDEX, detaching the SKB from the pool.
118 * Moves the packet buffer from queue to rx_used.
119 * Posts and claims requests to the allocator.
120 * Calls iwl_pcie_rxq_restock to refill any empty
121 * slots.
122 *
123 * RBD life-cycle:
124 *
125 * Init:
126 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
127 *
128 * Regular Receive interrupt:
129 * Page Stolen:
130 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
131 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
132 * Page not Stolen:
133 * rxq.queue -> rxq.rx_free -> rxq.queue
134 * ...
135 *
136 */
137
138/*
139 * iwl_rxq_space - Return number of free slots available in queue.
140 */
141static int iwl_rxq_space(const struct iwl_rxq *rxq)
142{
143	/* Make sure RX_QUEUE_SIZE is a power of 2 */
144	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
145
146	/*
147	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
148	 * between empty and completely full queues.
149	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
150	 * defined for negative dividends.
151	 */
152	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
153}
154
155/*
156 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 *
 * The HW RBD holds the page DMA address shifted right by 8; addresses
 * must be 256-byte aligned (see the BUG_ONs in iwl_pcie_rxq_alloc_rbs).
157 */
158static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
159{
160	return cpu_to_le32((u32)(dma_addr >> 8));
161}
162
163/*
164 * iwl_pcie_rx_stop - stops the Rx DMA
 *
 * Writes 0 to the channel-0 Rx config register, then polls for the
 * channel-idle status bit and returns the poll result (timeout of
 * 1000 - units defined by iwl_poll_direct_bit, confirm).
165 */
166int iwl_pcie_rx_stop(struct iwl_trans *trans)
167{
168	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
169	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
170				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
171}
172
173/*
174 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 *
 * Must be called with rxq->lock held (lockdep-asserted). If the NIC may
 * be asleep, only a wakeup is requested and the register write is
 * deferred via rxq->need_update (flushed by iwl_pcie_rxq_check_wrptr()).
175 */
176static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
177{
178	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
179	struct iwl_rxq *rxq = &trans_pcie->rxq;
180	u32 reg;
181
182	lockdep_assert_held(&rxq->lock);
183
184	/*
185	 * explicitly wake up the NIC if:
186	 * 1. shadow registers aren't enabled
187	 * 2. there is a chance that the NIC is asleep
188	 */
189	if (!trans->cfg->base_params->shadow_reg_enable &&
190	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
191		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
192
193		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
194			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
195				       reg);
196			iwl_set_bit(trans, CSR_GP_CNTRL,
197				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
198			rxq->need_update = true;
199			return;
200		}
201	}
202
	/* the write pointer is always written rounded down to a multiple of 8 */
203	rxq->write_actual = round_down(rxq->write, 8);
204	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
205}
206
	/* Flush a write-pointer update deferred by iwl_pcie_rxq_inc_wr_ptr() */
207static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
208{
209	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
210	struct iwl_rxq *rxq = &trans_pcie->rxq;
211
212	spin_lock(&rxq->lock);
213
214	if (!rxq->need_update)
215		goto exit_unlock;
216
217	iwl_pcie_rxq_inc_wr_ptr(trans);
218	rxq->need_update = false;
219
220 exit_unlock:
221	spin_unlock(&rxq->lock);
222}
223
224/*
225 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
226 *
227 * If there are slots in the RX queue that need to be restocked,
228 * and we have free pre-allocated buffers, fill the ranks as much
229 * as we can, pulling from rx_free.
230 *
231 * This moves the 'write' index forward to catch up with 'processed', and
232 * also updates the memory address in the firmware to reference the new
233 * target buffer.
234 */
235static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
236{
237	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
238	struct iwl_rxq *rxq = &trans_pcie->rxq;
239	struct iwl_rx_mem_buffer *rxb;
240
241	/*
242	 * If the device isn't enabled - no need to try to add buffers...
243	 * This can happen when we stop the device and still have an interrupt
244	 * pending. We stop the APM before we sync the interrupts because we
245	 * have to (see comment there). On the other hand, since the APM is
246	 * stopped, we cannot access the HW (in particular not prph).
247	 * So don't try to restock if the APM has been already stopped.
248	 */
249	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
250		return;
251
252	spin_lock(&rxq->lock);
253	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
254		/* The overwritten rxb must be a used one */
255		rxb = rxq->queue[rxq->write];
256		BUG_ON(rxb && rxb->page);
257
258		/* Get next free Rx buffer, remove from free list */
259		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
260				       list);
261		list_del(&rxb->list);
262
263		/* Point to Rx buffer via next RBD in circular buffer */
264		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
265		rxq->queue[rxq->write] = rxb;
266		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
267		rxq->free_count--;
268	}
269	spin_unlock(&rxq->lock);
270
271	/* If we've added more space for the firmware to place data, tell it.
272	 * Increment device's write pointer in multiples of 8. */
273	if (rxq->write_actual != (rxq->write & ~0x7)) {
274		spin_lock(&rxq->lock);
275		iwl_pcie_rxq_inc_wr_ptr(trans);
276		spin_unlock(&rxq->lock);
277	}
278}
279
280/*
281 * iwl_pcie_rx_alloc_page - allocates and returns a page.
282 *
 * Returns NULL on allocation failure (with rate-limited logging).
283 */
284static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
285					   gfp_t priority)
286{
287	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
288	struct iwl_rxq *rxq = &trans_pcie->rxq;
289	struct page *page;
290	gfp_t gfp_mask = priority;
291
	/* plenty of free buffers left - don't warn on a failed allocation */
292	if (rxq->free_count > RX_LOW_WATERMARK)
293		gfp_mask |= __GFP_NOWARN;
294
295	if (trans_pcie->rx_page_order > 0)
296		gfp_mask |= __GFP_COMP;
297
298	/* Alloc a new receive buffer */
299	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
300	if (!page) {
301		if (net_ratelimit())
302			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
303				       trans_pcie->rx_page_order);
304		/* Issue an error if the hardware has consumed more than half
305		 * of its free buffer list and we don't have enough
306		 * pre-allocated buffers.
307		 */
308		if (rxq->free_count <= RX_LOW_WATERMARK &&
309		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
310		    net_ratelimit())
311			IWL_CRIT(trans,
312				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
313				 rxq->free_count);
314		return NULL;
315	}
316	return page;
317}
318
319/*
320 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
321 *
322 * A used RBD is an Rx buffer that has been given to the stack. To use it again
323 * a page must be allocated and the RBD must point to the page. This function
324 * doesn't change the HW pointer but handles the list of pages that is used by
325 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
326 * allocated buffers.
327 */
328static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
329{
330	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
331	struct iwl_rxq *rxq = &trans_pcie->rxq;
332	struct iwl_rx_mem_buffer *rxb;
333	struct page *page;
334
335	while (1) {
336		spin_lock(&rxq->lock);
337		if (list_empty(&rxq->rx_used)) {
338			spin_unlock(&rxq->lock);
339			return;
340		}
341		spin_unlock(&rxq->lock);
342
		/* page allocation may sleep (depending on @priority), so it
		 * is done with the lock dropped
		 */
343		/* Alloc a new receive buffer */
344		page = iwl_pcie_rx_alloc_page(trans, priority);
345		if (!page)
346			return;
347
348		spin_lock(&rxq->lock);
349
		/* rx_used may have been drained while the lock was dropped */
350		if (list_empty(&rxq->rx_used)) {
351			spin_unlock(&rxq->lock);
352			__free_pages(page, trans_pcie->rx_page_order);
353			return;
354		}
355		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
356				       list);
357		list_del(&rxb->list);
358		spin_unlock(&rxq->lock);
359
360		BUG_ON(rxb->page);
361		rxb->page = page;
362		/* Get physical address of the RB */
363		rxb->page_dma =
364			dma_map_page(trans->dev, page, 0,
365				     PAGE_SIZE << trans_pcie->rx_page_order,
366				     DMA_FROM_DEVICE);
367		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
368			rxb->page = NULL;
369			spin_lock(&rxq->lock);
370			list_add(&rxb->list, &rxq->rx_used);
371			spin_unlock(&rxq->lock);
372			__free_pages(page, trans_pcie->rx_page_order);
373			return;
374		}
375		/* dma address must be no more than 36 bits */
376		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
377		/* and also 256 byte aligned! */
378		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
379
380		spin_lock(&rxq->lock);
381
382		list_add_tail(&rxb->list, &rxq->rx_free);
383		rxq->free_count++;
384
385		spin_unlock(&rxq->lock);
386	}
387}
388
389static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
390{
391 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
392 struct iwl_rxq *rxq = &trans_pcie->rxq;
393 int i;
394
395 lockdep_assert_held(&rxq->lock);
396
397 for (i = 0; i < RX_QUEUE_SIZE; i++) {
398 if (!rxq->pool[i].page)
399 continue;
400 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
401 PAGE_SIZE << trans_pcie->rx_page_order,
402 DMA_FROM_DEVICE);
403 __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
404 rxq->pool[i].page = NULL;
405 }
406}
407
408/*
409 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
410 *
411 * When moving to rx_free an page is allocated for the slot.
412 *
413 * Also restock the Rx queue via iwl_pcie_rxq_restock.
414 * This is called only during initialization
415 */
416static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
417{
418 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
419
420 iwl_pcie_rxq_restock(trans);
421}
422
423/*
424 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
425 *
426 * Allocates for each received request 8 pages
427 * Called as a scheduled work item.
428 */
429static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
430{
431 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
432 struct iwl_rb_allocator *rba = &trans_pcie->rba;
433 struct list_head local_empty;
434 int pending = atomic_xchg(&rba->req_pending, 0);
435
436 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
437
438 /* If we were scheduled - there is at least one request */
439 spin_lock(&rba->lock);
440 /* swap out the rba->rbd_empty to a local list */
441 list_replace_init(&rba->rbd_empty, &local_empty);
442 spin_unlock(&rba->lock);
443
444 while (pending) {
445 int i;
446 struct list_head local_allocated;
447
448 INIT_LIST_HEAD(&local_allocated);
449
450 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
451 struct iwl_rx_mem_buffer *rxb;
452 struct page *page;
453
454 /* List should never be empty - each reused RBD is
455 * returned to the list, and initial pool covers any
456 * possible gap between the time the page is allocated
457 * to the time the RBD is added.
458 */
459 BUG_ON(list_empty(&local_empty));
460 /* Get the first rxb from the rbd list */
461 rxb = list_first_entry(&local_empty,
462 struct iwl_rx_mem_buffer, list);
463 BUG_ON(rxb->page);
464
465 /* Alloc a new receive buffer */
466 page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
467 if (!page)
468 continue;
469 rxb->page = page;
470
471 /* Get physical address of the RB */
472 rxb->page_dma = dma_map_page(trans->dev, page, 0,
473 PAGE_SIZE << trans_pcie->rx_page_order,
474 DMA_FROM_DEVICE);
475 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
476 rxb->page = NULL;
477 __free_pages(page, trans_pcie->rx_page_order);
478 continue;
479 }
480 /* dma address must be no more than 36 bits */
481 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
482 /* and also 256 byte aligned! */
483 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
484
485 /* move the allocated entry to the out list */
486 list_move(&rxb->list, &local_allocated);
487 i++;
488 }
489
490 pending--;
491 if (!pending) {
492 pending = atomic_xchg(&rba->req_pending, 0);
493 IWL_DEBUG_RX(trans,
494 "Pending allocation requests = %d\n",
495 pending);
496 }
497
498 spin_lock(&rba->lock);
499 /* add the allocated rbds to the allocator allocated list */
500 list_splice_tail(&local_allocated, &rba->rbd_allocated);
501 /* get more empty RBDs for current pending requests */
502 list_splice_tail_init(&rba->rbd_empty, &local_empty);
503 spin_unlock(&rba->lock);
504
505 atomic_inc(&rba->req_ready);
506 }
507
508 spin_lock(&rba->lock);
509 /* return unused rbds to the allocator empty list */
510 list_splice_tail(&local_empty, &rba->rbd_empty);
511 spin_unlock(&rba->lock);
512}
513
514/*
515 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
516.*
517.* Called by queue when the queue posted allocation request and
518 * has freed 8 RBDs in order to restock itself.
519 */
static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				     struct iwl_rx_mem_buffer
				     *out[RX_CLAIM_REQ_ALLOC])
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return -ENOMEM, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return -ENOMEM;

	spin_lock(&rba->lock);
	/* One ready request guarantees RX_CLAIM_REQ_ALLOC entries on
	 * rbd_allocated, so this loop cannot underrun the list */
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		out[i] = list_first_entry(&rba->rbd_allocated,
					  struct iwl_rx_mem_buffer, list);
		list_del(&out[i]->list);
	}
	spin_unlock(&rba->lock);

	return 0;
}
550
551static void iwl_pcie_rx_allocator_work(struct work_struct *data)
552{
553 struct iwl_rb_allocator *rba_p =
554 container_of(data, struct iwl_rb_allocator, rx_alloc);
555 struct iwl_trans_pcie *trans_pcie =
556 container_of(rba_p, struct iwl_trans_pcie, rba);
557
558 iwl_pcie_rx_allocator(trans_pcie->trans);
559}
560
561static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
562{
563 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
564 struct iwl_rxq *rxq = &trans_pcie->rxq;
565 struct iwl_rb_allocator *rba = &trans_pcie->rba;
566 struct device *dev = trans->dev;
567
568 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
569
570 spin_lock_init(&rxq->lock);
571 spin_lock_init(&rba->lock);
572
573 if (WARN_ON(rxq->bd || rxq->rb_stts))
574 return -EINVAL;
575
576 /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
577 rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
578 &rxq->bd_dma, GFP_KERNEL);
579 if (!rxq->bd)
580 goto err_bd;
581
582 /*Allocate the driver's pointer to receive buffer status */
583 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
584 &rxq->rb_stts_dma, GFP_KERNEL);
585 if (!rxq->rb_stts)
586 goto err_rb_stts;
587
588 return 0;
589
590err_rb_stts:
591 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
592 rxq->bd, rxq->bd_dma);
593 rxq->bd_dma = 0;
594 rxq->bd = NULL;
595err_bd:
596 return -ENOMEM;
597}
598
/* Program the device's RX DMA engine: stop it, reset all pointers,
 * point it at the RBD ring and status area, then re-enable it with the
 * configured buffer size. The write order follows the FH programming
 * sequence (stop before reprogramming base addresses). */
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 * the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
651
/* Reset the rx queue's buffer lists: both lists emptied, counters
 * zeroed, and every pool entry placed on rx_used (i.e. pageless and
 * awaiting allocation). Caller must hold rxq->lock. */
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}
666
/* Reset the background allocator's lists: rbd_allocated emptied and the
 * whole allocator pool queued on rbd_empty, ready to serve allocation
 * requests. Caller must hold rba->lock. */
static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
{
	int i;

	lockdep_assert_held(&rba->lock);

	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);

	for (i = 0; i < RX_POOL_SIZE; i++)
		list_add(&rba->pool[i].list, &rba->rbd_empty);
}
679
680static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
681{
682 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
683 struct iwl_rb_allocator *rba = &trans_pcie->rba;
684 int i;
685
686 lockdep_assert_held(&rba->lock);
687
688 for (i = 0; i < RX_POOL_SIZE; i++) {
689 if (!rba->pool[i].page)
690 continue;
691 dma_unmap_page(trans->dev, rba->pool[i].page_dma,
692 PAGE_SIZE << trans_pcie->rx_page_order,
693 DMA_FROM_DEVICE);
694 __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
695 rba->pool[i].page = NULL;
696 }
697}
698
/* Bring the RX path up: allocate DMA structures on first use, reset the
 * allocator and queue state (freeing any stale buffers from a previous
 * configuration), replenish the buffer pool and program the HW.
 * Returns 0 on success or a negative errno from the allocation step. */
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err;

	/* First call: DMA rings not yet allocated */
	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rx_free_rba(trans);
	iwl_pcie_rx_init_rba(rba);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	/* Publish the initial write pointer to the device */
	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}
750
/* Tear down the RX path: stop the allocator work, free all buffer
 * pages, then release the DMA-coherent rings. The allocator work is
 * cancelled before any freeing so it cannot race with the teardown. */
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/*if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	spin_lock(&rba->lock);
	iwl_pcie_rx_free_rba(trans);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
792
793/*
794 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
795 *
796 * Called when a RBD can be reused. The RBD is transferred to the allocator.
797 * When there are 2 empty RBDs - a request for allocation is posted
798 */
799static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
800 struct iwl_rx_mem_buffer *rxb,
801 struct iwl_rxq *rxq, bool emergency)
802{
803 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
804 struct iwl_rb_allocator *rba = &trans_pcie->rba;
805
806 /* Move the RBD to the used list, will be moved to allocator in batches
807 * before claiming or posting a request*/
808 list_add_tail(&rxb->list, &rxq->rx_used);
809
810 if (unlikely(emergency))
811 return;
812
813 /* Count the allocator owned RBDs */
814 rxq->used_count++;
815
816 /* If we have RX_POST_REQ_ALLOC new released rx buffers -
817 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
818 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
819 * after but we still need to post another request.
820 */
821 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
822 /* Move the 2 RBDs to the allocator ownership.
823 Allocator has another 6 from pool for the request completion*/
824 spin_lock(&rba->lock);
825 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
826 spin_unlock(&rba->lock);
827
828 atomic_inc(&rba->req_pending);
829 queue_work(rba->alloc_wq, &rba->rx_alloc);
830 }
831}
832
/* Process one received buffer: unmap it, walk every packet it contains
 * and hand each to the op mode, then either remap the page for reuse or
 * (if an op-mode handler stole the page) recycle the bare RBD. */
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	/* Give the CPU ownership of the page before reading packets */
	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	/* A single RB can hold several packets back-to-back; iterate until
	 * we run past the buffer or hit an invalid frame marker */
	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			/* Some commands are fire-and-forget; never reclaim
			 * those even without SEQ_RX_FRAME */
			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
958
959/*
960 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
961 */
962static void iwl_pcie_rx_handle(struct iwl_trans *trans)
963{
964 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
965 struct iwl_rxq *rxq = &trans_pcie->rxq;
966 u32 r, i, j, count = 0;
967 bool emergency = false;
968
969restart:
970 spin_lock(&rxq->lock);
971 /* uCode's read index (stored in shared DRAM) indicates the last Rx
972 * buffer that the driver may process (last buffer filled by ucode). */
973 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
974 i = rxq->read;
975
976 /* Rx interrupt, but nothing sent from uCode */
977 if (i == r)
978 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
979
980 while (i != r) {
981 struct iwl_rx_mem_buffer *rxb;
982
983 if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
984 emergency = true;
985
986 rxb = rxq->queue[i];
987 rxq->queue[i] = NULL;
988
989 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
990 r, i, rxb);
991 iwl_pcie_rx_handle_rb(trans, rxb, emergency);
992
993 i = (i + 1) & RX_QUEUE_MASK;
994
995 /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
996 * try to claim the pre-allocated buffers from the allocator */
997 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
998 struct iwl_rb_allocator *rba = &trans_pcie->rba;
999 struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
1000
1001 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
1002 !emergency) {
1003 /* Add the remaining 6 empty RBDs
1004 * for allocator use
1005 */
1006 spin_lock(&rba->lock);
1007 list_splice_tail_init(&rxq->rx_used,
1008 &rba->rbd_empty);
1009 spin_unlock(&rba->lock);
1010 }
1011
1012 /* If not ready - continue, will try to reclaim later.
1013 * No need to reschedule work - allocator exits only on
1014 * success */
1015 if (!iwl_pcie_rx_allocator_get(trans, out)) {
1016 /* If success - then RX_CLAIM_REQ_ALLOC
1017 * buffers were retrieved and should be added
1018 * to free list */
1019 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
1020 for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
1021 list_add_tail(&out[j]->list,
1022 &rxq->rx_free);
1023 rxq->free_count++;
1024 }
1025 }
1026 }
1027 if (emergency) {
1028 count++;
1029 if (count == 8) {
1030 count = 0;
1031 if (rxq->used_count < RX_QUEUE_SIZE / 3)
1032 emergency = false;
1033 spin_unlock(&rxq->lock);
1034 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
1035 spin_lock(&rxq->lock);
1036 }
1037 }
1038 /* handle restock for three cases, can be all of them at once:
1039 * - we just pulled buffers from the allocator
1040 * - we have 8+ unstolen pages accumulated
1041 * - we are in emergency and allocated buffers
1042 */
1043 if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
1044 rxq->read = i;
1045 spin_unlock(&rxq->lock);
1046 iwl_pcie_rxq_restock(trans);
1047 goto restart;
1048 }
1049 }
1050
1051 /* Backtrack one entry */
1052 rxq->read = i;
1053 spin_unlock(&rxq->lock);
1054
1055 /*
1056 * handle a case where in emergency there are some unallocated RBDs.
1057 * those RBDs are in the used list, but are not tracked by the queue's
1058 * used_count which counts allocator owned RBDs.
1059 * unallocated emergency RBDs must be allocated on exit, otherwise
1060 * when called again the function may not be in emergency mode and
1061 * they will be handed to the allocator with no tracking in the RBD
1062 * allocator counters, which will lead to them never being claimed back
1063 * by the queue.
1064 * by allocating them here, they are now in the queue free list, and
1065 * will be restocked by the next call of iwl_pcie_rxq_restock.
1066 */
1067 if (unlikely(emergency && count))
1068 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
1069
1070 if (trans_pcie->napi.poll)
1071 napi_gro_flush(&trans_pcie->napi, false);
1072}
1073
1074/*
1075 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1076 */
1077static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1078{
1079 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1080 int i;
1081
1082 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1083 if (trans->cfg->internal_wimax_coex &&
1084 !trans->cfg->apmg_not_supported &&
1085 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1086 APMS_CLK_VAL_MRB_FUNC_MODE) ||
1087 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1088 APMG_PS_CTRL_VAL_RESET_REQ))) {
1089 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1090 iwl_op_mode_wimax_active(trans->op_mode);
1091 wake_up(&trans_pcie->wait_command_queue);
1092 return;
1093 }
1094
1095 iwl_pcie_dump_csr(trans);
1096 iwl_dump_fh(trans, NULL);
1097
1098 local_bh_disable();
1099 /* The STATUS_FW_ERROR bit is set in this function. This must happen
1100 * before we wake up the command caller, to ensure a proper cleanup. */
1101 iwl_trans_fw_error(trans);
1102 local_bh_enable();
1103
1104 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
1105 del_timer(&trans_pcie->txq[i].stuck_timer);
1106
1107 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1108 wake_up(&trans_pcie->wait_command_queue);
1109}
1110
1111static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1112{
1113 u32 inta;
1114
1115 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1116
1117 trace_iwlwifi_dev_irq(trans->dev);
1118
1119 /* Discover which interrupts are active/pending */
1120 inta = iwl_read32(trans, CSR_INT);
1121
1122 /* the thread will service interrupts and re-enable them */
1123 return inta;
1124}
1125
1126/* a device (PCI-E) page is 4096 bytes long */
1127#define ICT_SHIFT 12
1128#define ICT_SIZE (1 << ICT_SHIFT)
1129#define ICT_COUNT (ICT_SIZE / sizeof(u32))
1130
/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index, read);
		/* Clear the consumed entry and advance (table wraps) */
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	/* Expand the packed ICT format back into CSR_INT bit positions:
	 * low byte stays put, high byte moves to bits 24-31 */
	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
1190
1191irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1192{
1193 struct iwl_trans *trans = dev_id;
1194 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1195 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1196 u32 inta = 0;
1197 u32 handled = 0;
1198
1199 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1200
1201 spin_lock(&trans_pcie->irq_lock);
1202
1203 /* dram interrupt table not set yet,
1204 * use legacy interrupt.
1205 */
1206 if (likely(trans_pcie->use_ict))
1207 inta = iwl_pcie_int_cause_ict(trans);
1208 else
1209 inta = iwl_pcie_int_cause_non_ict(trans);
1210
1211 if (iwl_have_debug_level(IWL_DL_ISR)) {
1212 IWL_DEBUG_ISR(trans,
1213 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1214 inta, trans_pcie->inta_mask,
1215 iwl_read32(trans, CSR_INT_MASK),
1216 iwl_read32(trans, CSR_FH_INT_STATUS));
1217 if (inta & (~trans_pcie->inta_mask))
1218 IWL_DEBUG_ISR(trans,
1219 "We got a masked interrupt (0x%08x)\n",
1220 inta & (~trans_pcie->inta_mask));
1221 }
1222
1223 inta &= trans_pcie->inta_mask;
1224
1225 /*
1226 * Ignore interrupt if there's nothing in NIC to service.
1227 * This may be due to IRQ shared with another device,
1228 * or due to sporadic interrupts thrown from our NIC.
1229 */
1230 if (unlikely(!inta)) {
1231 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1232 /*
1233 * Re-enable interrupts here since we don't
1234 * have anything to service
1235 */
1236 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1237 iwl_enable_interrupts(trans);
1238 spin_unlock(&trans_pcie->irq_lock);
1239 lock_map_release(&trans->sync_cmd_lockdep_map);
1240 return IRQ_NONE;
1241 }
1242
1243 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1244 /*
1245 * Hardware disappeared. It might have
1246 * already raised an interrupt.
1247 */
1248 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1249 spin_unlock(&trans_pcie->irq_lock);
1250 goto out;
1251 }
1252
1253 /* Ack/clear/reset pending uCode interrupts.
1254 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1255 */
1256 /* There is a hardware bug in the interrupt mask function that some
1257 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1258 * they are disabled in the CSR_INT_MASK register. Furthermore the
1259 * ICT interrupt handling mechanism has another bug that might cause
1260 * these unmasked interrupts fail to be detected. We workaround the
1261 * hardware bugs here by ACKing all the possible interrupts so that
1262 * interrupt coalescing can still be achieved.
1263 */
1264 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1265
1266 if (iwl_have_debug_level(IWL_DL_ISR))
1267 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1268 inta, iwl_read32(trans, CSR_INT_MASK));
1269
1270 spin_unlock(&trans_pcie->irq_lock);
1271
1272 /* Now service all interrupt bits discovered above. */
1273 if (inta & CSR_INT_BIT_HW_ERR) {
1274 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1275
1276 /* Tell the device to stop sending interrupts */
1277 iwl_disable_interrupts(trans);
1278
1279 isr_stats->hw++;
1280 iwl_pcie_irq_handle_error(trans);
1281
1282 handled |= CSR_INT_BIT_HW_ERR;
1283
1284 goto out;
1285 }
1286
1287 if (iwl_have_debug_level(IWL_DL_ISR)) {
1288 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1289 if (inta & CSR_INT_BIT_SCD) {
1290 IWL_DEBUG_ISR(trans,
1291 "Scheduler finished to transmit the frame/frames.\n");
1292 isr_stats->sch++;
1293 }
1294
1295 /* Alive notification via Rx interrupt will do the real work */
1296 if (inta & CSR_INT_BIT_ALIVE) {
1297 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1298 isr_stats->alive++;
1299 }
1300 }
1301
1302 /* Safely ignore these bits for debug checks below */
1303 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1304
1305 /* HW RF KILL switch toggled */
1306 if (inta & CSR_INT_BIT_RF_KILL) {
1307 bool hw_rfkill;
1308
1309 hw_rfkill = iwl_is_rfkill_set(trans);
1310 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1311 hw_rfkill ? "disable radio" : "enable radio");
1312
1313 isr_stats->rfkill++;
1314
1315 mutex_lock(&trans_pcie->mutex);
1316 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1317 mutex_unlock(&trans_pcie->mutex);
1318 if (hw_rfkill) {
1319 set_bit(STATUS_RFKILL, &trans->status);
1320 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1321 &trans->status))
1322 IWL_DEBUG_RF_KILL(trans,
1323 "Rfkill while SYNC HCMD in flight\n");
1324 wake_up(&trans_pcie->wait_command_queue);
1325 } else {
1326 clear_bit(STATUS_RFKILL, &trans->status);
1327 }
1328
1329 handled |= CSR_INT_BIT_RF_KILL;
1330 }
1331
1332 /* Chip got too hot and stopped itself */
1333 if (inta & CSR_INT_BIT_CT_KILL) {
1334 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1335 isr_stats->ctkill++;
1336 handled |= CSR_INT_BIT_CT_KILL;
1337 }
1338
1339 /* Error detected by uCode */
1340 if (inta & CSR_INT_BIT_SW_ERR) {
1341 IWL_ERR(trans, "Microcode SW error detected. "
1342 " Restarting 0x%X.\n", inta);
1343 isr_stats->sw++;
1344 iwl_pcie_irq_handle_error(trans);
1345 handled |= CSR_INT_BIT_SW_ERR;
1346 }
1347
1348 /* uCode wakes up after power-down sleep */
1349 if (inta & CSR_INT_BIT_WAKEUP) {
1350 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1351 iwl_pcie_rxq_check_wrptr(trans);
1352 iwl_pcie_txq_check_wrptrs(trans);
1353
1354 isr_stats->wakeup++;
1355
1356 handled |= CSR_INT_BIT_WAKEUP;
1357 }
1358
1359 /* All uCode command responses, including Tx command responses,
1360 * Rx "responses" (frame-received notification), and other
1361 * notifications from uCode come through here*/
1362 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1363 CSR_INT_BIT_RX_PERIODIC)) {
1364 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1365 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1366 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1367 iwl_write32(trans, CSR_FH_INT_STATUS,
1368 CSR_FH_INT_RX_MASK);
1369 }
1370 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1371 handled |= CSR_INT_BIT_RX_PERIODIC;
1372 iwl_write32(trans,
1373 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1374 }
1375 /* Sending RX interrupt require many steps to be done in the
1376 * the device:
1377 * 1- write interrupt to current index in ICT table.
1378 * 2- dma RX frame.
1379 * 3- update RX shared data to indicate last write index.
1380 * 4- send interrupt.
1381 * This could lead to RX race, driver could receive RX interrupt
1382 * but the shared data changes does not reflect this;
1383 * periodic interrupt will detect any dangling Rx activity.
1384 */
1385
1386 /* Disable periodic interrupt; we use it as just a one-shot. */
1387 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1388 CSR_INT_PERIODIC_DIS);
1389
1390 /*
1391 * Enable periodic interrupt in 8 msec only if we received
1392 * real RX interrupt (instead of just periodic int), to catch
1393 * any dangling Rx interrupt. If it was just the periodic
1394 * interrupt, there was no dangling Rx activity, and no need
1395 * to extend the periodic interrupt; one-shot is enough.
1396 */
1397 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1398 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1399 CSR_INT_PERIODIC_ENA);
1400
1401 isr_stats->rx++;
1402
1403 local_bh_disable();
1404 iwl_pcie_rx_handle(trans);
1405 local_bh_enable();
1406 }
1407
1408 /* This "Tx" DMA channel is used only for loading uCode */
1409 if (inta & CSR_INT_BIT_FH_TX) {
1410 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1411 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1412 isr_stats->tx++;
1413 handled |= CSR_INT_BIT_FH_TX;
1414 /* Wake up uCode load routine, now that load is complete */
1415 trans_pcie->ucode_write_complete = true;
1416 wake_up(&trans_pcie->ucode_write_waitq);
1417 }
1418
1419 if (inta & ~handled) {
1420 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1421 isr_stats->unhandled++;
1422 }
1423
1424 if (inta & ~(trans_pcie->inta_mask)) {
1425 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1426 inta & ~trans_pcie->inta_mask);
1427 }
1428
1429 /* Re-enable all interrupts */
1430 /* only Re-enable if disabled by irq */
1431 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1432 iwl_enable_interrupts(trans);
1433 /* Re-enable RF_KILL if it occurred */
1434 else if (handled & CSR_INT_BIT_RF_KILL)
1435 iwl_enable_rfkill_int(trans);
1436
1437out:
1438 lock_map_release(&trans->sync_cmd_lockdep_map);
1439 return IRQ_HANDLED;
1440}
1441
1442/******************************************************************************
1443 *
1444 * ICT functions
1445 *
1446 ******************************************************************************/
1447
1448/* Free dram table */
1449void iwl_pcie_free_ict(struct iwl_trans *trans)
1450{
1451 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1452
1453 if (trans_pcie->ict_tbl) {
1454 dma_free_coherent(trans->dev, ICT_SIZE,
1455 trans_pcie->ict_tbl,
1456 trans_pcie->ict_tbl_dma);
1457 trans_pcie->ict_tbl = NULL;
1458 trans_pcie->ict_tbl_dma = 0;
1459 }
1460}
1461
1462/*
1463 * allocate dram shared table, it is an aligned memory
1464 * block of ICT_SIZE.
1465 * also reset all data related to ICT table interrupt.
1466 */
1467int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1468{
1469 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1470
1471 trans_pcie->ict_tbl =
1472 dma_zalloc_coherent(trans->dev, ICT_SIZE,
1473 &trans_pcie->ict_tbl_dma,
1474 GFP_KERNEL);
1475 if (!trans_pcie->ict_tbl)
1476 return -ENOMEM;
1477
1478 /* just an API sanity check ... it is guaranteed to be aligned */
1479 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
1480 iwl_pcie_free_ict(trans);
1481 return -EINVAL;
1482 }
1483
1484 IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
1485 (unsigned long long)trans_pcie->ict_tbl_dma,
1486 trans_pcie->ict_tbl);
1487
1488 return 0;
1489}
1490
/* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 *
 * Programs the table's DMA base into CSR_DRAM_INT_TBL_REG and switches
 * the driver to ICT mode; no-op if the table was never allocated.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	/* quiesce interrupts while the table pointer is being switched */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	/* table base address in units of 2^ICT_SHIFT (table is aligned) */
	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	/* ack anything still pending before interrupts are re-enabled */
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
1522
1523/* Device is going down disable ict interrupt usage */
1524void iwl_pcie_disable_ict(struct iwl_trans *trans)
1525{
1526 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1527
1528 spin_lock(&trans_pcie->irq_lock);
1529 trans_pcie->use_ict = false;
1530 spin_unlock(&trans_pcie->irq_lock);
1531}
1532
1533irqreturn_t iwl_pcie_isr(int irq, void *data)
1534{
1535 struct iwl_trans *trans = data;
1536
1537 if (!trans)
1538 return IRQ_NONE;
1539
1540 /* Disable (but don't clear!) interrupts here to avoid
1541 * back-to-back ISRs and sporadic interrupts from our NIC.
1542 * If we have something to service, the tasklet will re-enable ints.
1543 * If we *don't* have something, we'll re-enable before leaving here.
1544 */
1545 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1546
1547 return IRQ_WAKE_THREAD;
1548}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
new file mode 100644
index 000000000000..90283453073c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -0,0 +1,2825 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/pci.h>
66#include <linux/pci-aspm.h>
67#include <linux/interrupt.h>
68#include <linux/debugfs.h>
69#include <linux/sched.h>
70#include <linux/bitops.h>
71#include <linux/gfp.h>
72#include <linux/vmalloc.h>
73
74#include "iwl-drv.h"
75#include "iwl-trans.h"
76#include "iwl-csr.h"
77#include "iwl-prph.h"
78#include "iwl-scd.h"
79#include "iwl-agn-hw.h"
80#include "iwl-fw-error-dump.h"
81#include "internal.h"
82#include "iwl-fh.h"
83
84/* extended range in FW SRAM */
85#define IWL_FW_MEM_EXTENDED_START 0x40000
86#define IWL_FW_MEM_EXTENDED_END 0x57FFF
87
88static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
89{
90 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
91
92 if (!trans_pcie->fw_mon_page)
93 return;
94
95 dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
96 trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
97 __free_pages(trans_pcie->fw_mon_page,
98 get_order(trans_pcie->fw_mon_size));
99 trans_pcie->fw_mon_page = NULL;
100 trans_pcie->fw_mon_phys = 0;
101 trans_pcie->fw_mon_size = 0;
102}
103
104static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
105{
106 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
107 struct page *page = NULL;
108 dma_addr_t phys;
109 u32 size = 0;
110 u8 power;
111
112 if (!max_power) {
113 /* default max_power is maximum */
114 max_power = 26;
115 } else {
116 max_power += 11;
117 }
118
119 if (WARN(max_power > 26,
120 "External buffer size for monitor is too big %d, check the FW TLV\n",
121 max_power))
122 return;
123
124 if (trans_pcie->fw_mon_page) {
125 dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
126 trans_pcie->fw_mon_size,
127 DMA_FROM_DEVICE);
128 return;
129 }
130
131 phys = 0;
132 for (power = max_power; power >= 11; power--) {
133 int order;
134
135 size = BIT(power);
136 order = get_order(size);
137 page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
138 order);
139 if (!page)
140 continue;
141
142 phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
143 DMA_FROM_DEVICE);
144 if (dma_mapping_error(trans->dev, phys)) {
145 __free_pages(page, order);
146 page = NULL;
147 continue;
148 }
149 IWL_INFO(trans,
150 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
151 size, order);
152 break;
153 }
154
155 if (WARN_ON_ONCE(!page))
156 return;
157
158 if (power != max_power)
159 IWL_ERR(trans,
160 "Sorry - debug buffer is only %luK while you requested %luK\n",
161 (unsigned long)BIT(power - 10),
162 (unsigned long)BIT(max_power - 10));
163
164 trans_pcie->fw_mon_page = page;
165 trans_pcie->fw_mon_phys = phys;
166 trans_pcie->fw_mon_size = size;
167}
168
169static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
170{
171 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
172 ((reg & 0x0000ffff) | (2 << 28)));
173 return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
174}
175
176static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
177{
178 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
179 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
180 ((reg & 0x0000ffff) | (3 << 28)));
181}
182
183static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
184{
185 if (trans->cfg->apmg_not_supported)
186 return;
187
188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
189 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
190 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
191 ~APMG_PS_CTRL_MSK_PWR_SRC);
192 else
193 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
194 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
195 ~APMG_PS_CTRL_MSK_PWR_SRC);
196}
197
198/* PCI registers */
199#define PCI_CFG_RETRY_TIMEOUT 0x041
200
201static void iwl_pcie_apm_config(struct iwl_trans *trans)
202{
203 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
204 u16 lctl;
205 u16 cap;
206
207 /*
208 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
209 * Check if BIOS (or OS) enabled L1-ASPM on this device.
210 * If so (likely), disable L0S, so device moves directly L0->L1;
211 * costs negligible amount of power savings.
212 * If not (unlikely), enable L0S, so there is at least some
213 * power savings, even without L1.
214 */
215 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
216 if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
217 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
218 else
219 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
220 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
221
222 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
223 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
224 dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
225 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
226 trans->ltr_enabled ? "En" : "Dis");
227}
228
229/*
230 * Start up NIC's basic functionality after it has been reset
231 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
232 * NOTE: This does not load uCode nor start the embedded processor
233 */
234static int iwl_pcie_apm_init(struct iwl_trans *trans)
235{
236 int ret = 0;
237 IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
238
239 /*
240 * Use "set_bit" below rather than "write", to preserve any hardware
241 * bits already set by default after reset.
242 */
243
244 /* Disable L0S exit timer (platform NMI Work/Around) */
245 if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
246 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
247 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
248
249 /*
250 * Disable L0s without affecting L1;
251 * don't wait for ICH L0s (ICH bug W/A)
252 */
253 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
254 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
255
256 /* Set FH wait threshold to maximum (HW error during stress W/A) */
257 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
258
259 /*
260 * Enable HAP INTA (interrupt from management bus) to
261 * wake device's PCI Express link L1a -> L0s
262 */
263 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
264 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
265
266 iwl_pcie_apm_config(trans);
267
268 /* Configure analog phase-lock-loop before activating to D0A */
269 if (trans->cfg->base_params->pll_cfg_val)
270 iwl_set_bit(trans, CSR_ANA_PLL_CFG,
271 trans->cfg->base_params->pll_cfg_val);
272
273 /*
274 * Set "initialization complete" bit to move adapter from
275 * D0U* --> D0A* (powered-up active) state.
276 */
277 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
278
279 /*
280 * Wait for clock stabilization; once stabilized, access to
281 * device-internal resources is supported, e.g. iwl_write_prph()
282 * and accesses to uCode SRAM.
283 */
284 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
285 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
286 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
287 if (ret < 0) {
288 IWL_DEBUG_INFO(trans, "Failed to init the card\n");
289 goto out;
290 }
291
292 if (trans->cfg->host_interrupt_operation_mode) {
293 /*
294 * This is a bit of an abuse - This is needed for 7260 / 3160
295 * only check host_interrupt_operation_mode even if this is
296 * not related to host_interrupt_operation_mode.
297 *
298 * Enable the oscillator to count wake up time for L1 exit. This
299 * consumes slightly more power (100uA) - but allows to be sure
300 * that we wake up from L1 on time.
301 *
302 * This looks weird: read twice the same register, discard the
303 * value, set a bit, and yet again, read that same register
304 * just to discard the value. But that's the way the hardware
305 * seems to like it.
306 */
307 iwl_read_prph(trans, OSC_CLK);
308 iwl_read_prph(trans, OSC_CLK);
309 iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
310 iwl_read_prph(trans, OSC_CLK);
311 iwl_read_prph(trans, OSC_CLK);
312 }
313
314 /*
315 * Enable DMA clock and wait for it to stabilize.
316 *
317 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
318 * bits do not disable clocks. This preserves any hardware
319 * bits already set by default in "CLK_CTRL_REG" after reset.
320 */
321 if (!trans->cfg->apmg_not_supported) {
322 iwl_write_prph(trans, APMG_CLK_EN_REG,
323 APMG_CLK_VAL_DMA_CLK_RQT);
324 udelay(20);
325
326 /* Disable L1-Active */
327 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
328 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
329
330 /* Clear the interrupt in APMG if the NIC is in RFKILL */
331 iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
332 APMG_RTC_INT_STT_RFKILL);
333 }
334
335 set_bit(STATUS_DEVICE_ENABLED, &trans->status);
336
337out:
338 return ret;
339}
340
341/*
342 * Enable LP XTAL to avoid HW bug where device may consume much power if
343 * FW is not loaded after device reset. LP XTAL is disabled by default
344 * after device HW reset. Do it only if XTAL is fed by internal source.
345 * Configure device's "persistence" mode to avoid resetting XTAL again when
346 * SHRD_HW_RST occurs in S3.
347 */
348static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
349{
350 int ret;
351 u32 apmg_gp1_reg;
352 u32 apmg_xtal_cfg_reg;
353 u32 dl_cfg_reg;
354
355 /* Force XTAL ON */
356 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
357 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
358
359 /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
360 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
361
362 udelay(10);
363
364 /*
365 * Set "initialization complete" bit to move adapter from
366 * D0U* --> D0A* (powered-up active) state.
367 */
368 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
369
370 /*
371 * Wait for clock stabilization; once stabilized, access to
372 * device-internal resources is possible.
373 */
374 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
375 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
376 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
377 25000);
378 if (WARN_ON(ret < 0)) {
379 IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
380 /* Release XTAL ON request */
381 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
382 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
383 return;
384 }
385
386 /*
387 * Clear "disable persistence" to avoid LP XTAL resetting when
388 * SHRD_HW_RST is applied in S3.
389 */
390 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
391 APMG_PCIDEV_STT_VAL_PERSIST_DIS);
392
393 /*
394 * Force APMG XTAL to be active to prevent its disabling by HW
395 * caused by APMG idle state.
396 */
397 apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
398 SHR_APMG_XTAL_CFG_REG);
399 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
400 apmg_xtal_cfg_reg |
401 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
402
403 /*
404 * Reset entire device again - do controller reset (results in
405 * SHRD_HW_RST). Turn MAC off before proceeding.
406 */
407 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
408
409 udelay(10);
410
411 /* Enable LP XTAL by indirect access through CSR */
412 apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
413 iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
414 SHR_APMG_GP1_WF_XTAL_LP_EN |
415 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
416
417 /* Clear delay line clock power up */
418 dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
419 iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
420 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
421
422 /*
423 * Enable persistence mode to avoid LP XTAL resetting when
424 * SHRD_HW_RST is applied in S3.
425 */
426 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
427 CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
428
429 /*
430 * Clear "initialization complete" bit to move adapter from
431 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
432 */
433 iwl_clear_bit(trans, CSR_GP_CNTRL,
434 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
435
436 /* Activates XTAL resources monitor */
437 __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
438 CSR_MONITOR_XTAL_RESOURCES);
439
440 /* Release XTAL ON request */
441 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
442 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
443 udelay(10);
444
445 /* Release APMG XTAL */
446 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
447 apmg_xtal_cfg_reg &
448 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
449}
450
451static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
452{
453 int ret = 0;
454
455 /* stop device's busmaster DMA activity */
456 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
457
458 ret = iwl_poll_bit(trans, CSR_RESET,
459 CSR_RESET_REG_FLAG_MASTER_DISABLED,
460 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
461 if (ret < 0)
462 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
463
464 IWL_DEBUG_INFO(trans, "stop master\n");
465
466 return ret;
467}
468
/*
 * Shut the card down into a low power state.
 * @op_mode_leave: true when the op-mode is detaching for good; the device
 *	is (re)initialized if needed and ME is informed before shutdown.
 * The shutdown sequence below is order-sensitive.
 */
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		/* APM must be running to deliver the wake-ME notification */
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* HW-bug W/A: keep LP XTAL alive instead of a plain reset */
	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
516
517static int iwl_pcie_nic_init(struct iwl_trans *trans)
518{
519 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
520
521 /* nic_init */
522 spin_lock(&trans_pcie->irq_lock);
523 iwl_pcie_apm_init(trans);
524
525 spin_unlock(&trans_pcie->irq_lock);
526
527 iwl_pcie_set_pwr(trans, false);
528
529 iwl_op_mode_nic_config(trans->op_mode);
530
531 /* Allocate the RX queue, or reset if it is already allocated */
532 iwl_pcie_rx_init(trans);
533
534 /* Allocate or reset and init all Tx and Command queues */
535 if (iwl_pcie_tx_init(trans))
536 return -ENOMEM;
537
538 if (trans->cfg->base_params->shadow_reg_enable) {
539 /* enable shadow regs in HW */
540 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
541 IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
542 }
543
544 return 0;
545}
546
547#define HW_READY_TIMEOUT (50)
548
549/* Note: returns poll_bit return value, which is >= 0 if success */
550static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
551{
552 int ret;
553
554 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
555 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
556
557 /* See if we got it */
558 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
559 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
560 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
561 HW_READY_TIMEOUT);
562
563 if (ret >= 0)
564 iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
565
566 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
567 return ret;
568}
569
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	msleep(1);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		/*
		 * NOTE(review): 't' is deliberately NOT reset per outer
		 * iteration - it accumulates, so only the first outer
		 * iteration polls for up to ~150 ms; later iterations make
		 * a single attempt each (do/while runs at least once).
		 */
		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
608
609/*
610 * ucode
611 */
612static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
613 dma_addr_t phy_addr, u32 byte_cnt)
614{
615 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
616 int ret;
617
618 trans_pcie->ucode_write_complete = false;
619
620 iwl_write_direct32(trans,
621 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
622 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
623
624 iwl_write_direct32(trans,
625 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
626 dst_addr);
627
628 iwl_write_direct32(trans,
629 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
630 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
631
632 iwl_write_direct32(trans,
633 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
634 (iwl_get_dma_hi_addr(phy_addr)
635 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
636
637 iwl_write_direct32(trans,
638 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
639 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
640 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
641 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
642
643 iwl_write_direct32(trans,
644 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
645 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
646 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
647 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
648
649 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
650 trans_pcie->ucode_write_complete, 5 * HZ);
651 if (!ret) {
652 IWL_ERR(trans, "Failed to load firmware chunk!\n");
653 return -ETIMEDOUT;
654 }
655
656 return 0;
657}
658
659static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
660 const struct fw_desc *section)
661{
662 u8 *v_addr;
663 dma_addr_t p_addr;
664 u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
665 int ret = 0;
666
667 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
668 section_num);
669
670 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
671 GFP_KERNEL | __GFP_NOWARN);
672 if (!v_addr) {
673 IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
674 chunk_sz = PAGE_SIZE;
675 v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
676 &p_addr, GFP_KERNEL);
677 if (!v_addr)
678 return -ENOMEM;
679 }
680
681 for (offset = 0; offset < section->len; offset += chunk_sz) {
682 u32 copy_size, dst_addr;
683 bool extended_addr = false;
684
685 copy_size = min_t(u32, chunk_sz, section->len - offset);
686 dst_addr = section->offset + offset;
687
688 if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
689 dst_addr <= IWL_FW_MEM_EXTENDED_END)
690 extended_addr = true;
691
692 if (extended_addr)
693 iwl_set_bits_prph(trans, LMPM_CHICK,
694 LMPM_CHICK_EXTENDED_ADDR_SPACE);
695
696 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
697 ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
698 copy_size);
699
700 if (extended_addr)
701 iwl_clear_bits_prph(trans, LMPM_CHICK,
702 LMPM_CHICK_EXTENDED_ADDR_SPACE);
703
704 if (ret) {
705 IWL_ERR(trans,
706 "Could not load the [%d] uCode section\n",
707 section_num);
708 break;
709 }
710 }
711
712 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
713 return ret;
714}
715
716/*
717 * Driver Takes the ownership on secure machine before FW load
718 * and prevent race with the BT load.
719 * W/A for ROM bug. (should be remove in the next Si step)
720 */
721static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
722{
723 u32 val, loop = 1000;
724
725 /*
726 * Check the RSA semaphore is accessible.
727 * If the HW isn't locked and the rsa semaphore isn't accessible,
728 * we are in trouble.
729 */
730 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
731 if (val & (BIT(1) | BIT(17))) {
732 IWL_INFO(trans,
733 "can't access the RSA semaphore it is write protected\n");
734 return 0;
735 }
736
737 /* take ownership on the AUX IF */
738 iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
739 iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
740
741 do {
742 iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
743 val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
744 if (val == 0x1) {
745 iwl_write_prph(trans, RSA_ENABLE, 0);
746 return 0;
747 }
748
749 udelay(10);
750 loop--;
751 } while (loop > 0);
752
753 IWL_ERR(trans, "Failed to take ownership on secure machine\n");
754 return -EIO;
755}
756
/*
 * Load the firmware sections for one CPU on family-8000 devices,
 * reporting per-section load status to the uCode via
 * FH_UCODE_LOAD_STATUS (CPU1 uses the low 16 bits, CPU2 the high 16).
 *
 * @cpu: 1 or 2; for CPU1 loading starts at section 0, for CPU2 it
 *	resumes one past *first_ucode_section.
 * @first_ucode_section: in/out - updated to the last section index read,
 *	so the caller can continue with the next CPU.
 * Returns 0 on success or the iwl_pcie_load_section() error.
 */
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		/* accumulate a one-bit per loaded section: 1, 11, 111, ... */
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	/* mark this CPU's half of the load-status register as complete */
	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

	return 0;
}
812
/*
 * Load one CPU's firmware sections into the device (non-secure path).
 *
 * Same section walk as the _8000 variant, but the ucode is only told
 * about completion via CSR_UCODE_LOAD_STATUS_ADDR (and only on the
 * 8000 family); there is no per-section status reporting here.
 * On return *first_ucode_section holds the last index examined.
 *
 * Returns 0 on success or the error from iwl_pcie_load_section().
 */
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		/* CPU2: status bits live in the upper half-word; skip the
		 * separator section itself */
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}
865
/*
 * Apply the firmware debug destination described by the dbg_dest TLV.
 *
 * Allocates the external monitor buffer when the TLV asks for it, then
 * replays the TLV's register-op list (plain writes and bit set/clear in
 * both CSR and periphery space).  A PRPH_BLOCKBIT op aborts the replay
 * early if the named bit is already set.  Finally, the monitor buffer
 * base/end addresses are programmed into the device.
 */
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	/* only version 0 of the TLV layout is handled here */
	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			/* stop replaying ops, but still program the
			 * monitor buffer addresses below */
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
			       (trans_pcie->fw_mon_phys +
				trans_pcie->fw_mon_size) >> dest->end_shift);
	}
}
932
/*
 * Load a complete (non-secure) firmware image to the device.
 *
 * Loads CPU1's sections, then — for dual-CPU images — programs the CPU2
 * header address and loads CPU2's sections.  Afterwards a firmware
 * monitor buffer is set up (module-parameter driven, 7000 family only)
 * or the dbg_dest TLV is applied.  Writing 0 to CSR_RESET at the end
 * releases the CPU so the ucode starts executing.
 *
 * Returns 0 on success or a negative errno from section loading.
 */
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
982
/*
 * Load a complete secured firmware image to the device (8000 family).
 *
 * Applies the debug destination first, works around the RSA semaphore
 * race, releases the CPU reset so the ucode is ready to accept the
 * secured image, then loads CPU1's and CPU2's sections in turn.
 *
 * Returns 0 on success or a negative errno.
 */
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}
1014
1015static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1016 const struct fw_img *fw, bool run_in_rfkill)
1017{
1018 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1019 bool hw_rfkill;
1020 int ret;
1021
1022 mutex_lock(&trans_pcie->mutex);
1023
1024 /* Someone called stop_device, don't try to start_fw */
1025 if (trans_pcie->is_down) {
1026 IWL_WARN(trans,
1027 "Can't start_fw since the HW hasn't been started\n");
1028 ret = EIO;
1029 goto out;
1030 }
1031
1032 /* This may fail if AMT took ownership of the device */
1033 if (iwl_pcie_prepare_card_hw(trans)) {
1034 IWL_WARN(trans, "Exit HW not ready\n");
1035 ret = -EIO;
1036 goto out;
1037 }
1038
1039 iwl_enable_rfkill_int(trans);
1040
1041 /* If platform's RF_KILL switch is NOT set to KILL */
1042 hw_rfkill = iwl_is_rfkill_set(trans);
1043 if (hw_rfkill)
1044 set_bit(STATUS_RFKILL, &trans->status);
1045 else
1046 clear_bit(STATUS_RFKILL, &trans->status);
1047 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1048 if (hw_rfkill && !run_in_rfkill) {
1049 ret = -ERFKILL;
1050 goto out;
1051 }
1052
1053 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1054
1055 ret = iwl_pcie_nic_init(trans);
1056 if (ret) {
1057 IWL_ERR(trans, "Unable to init nic\n");
1058 goto out;
1059 }
1060
1061 /* make sure rfkill handshake bits are cleared */
1062 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1063 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1064 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1065
1066 /* clear (again), then enable host interrupts */
1067 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1068 iwl_enable_interrupts(trans);
1069
1070 /* really make sure rfkill handshake bits are cleared */
1071 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1072 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1073
1074 /* Load the given image to the HW */
1075 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1076 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1077 else
1078 ret = iwl_pcie_load_given_ucode(trans, fw);
1079
1080out:
1081 mutex_unlock(&trans_pcie->mutex);
1082 return ret;
1083}
1084
/*
 * Called once the firmware has sent its ALIVE notification: re-arm the
 * ICT interrupt table and start the TX scheduler at @scd_addr.
 */
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
1090
/*
 * Stop the device and put it into low power state.
 *
 * Caller must hold trans_pcie->mutex; is_down makes the function
 * idempotent so a restart during firmware load can't run the teardown
 * twice.  The ordering below is deliberate: mask interrupts, stop
 * TX/RX DMA, power down busmaster clocks, stop the APM, reset the
 * on-board processor, then re-enable only the RF-kill interrupt and
 * re-check the RF-kill state (it may have flipped while interrupts
 * were masked).
 */
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain verions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);


	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rkfill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the deivce */
	iwl_pcie_prepare_card_hw(trans);
}
1191
/*
 * Public stop_device entry point: just takes the transport mutex and
 * delegates to _iwl_trans_pcie_stop_device() (which asserts it).
 */
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);
	_iwl_trans_pcie_stop_device(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);
}
1200
/*
 * Report an RF-kill state change to the op_mode.  If the op_mode says
 * the device should go down (returns true), stop it here.  Must be
 * called with trans_pcie->mutex held (lockdep-asserted).
 */
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		_iwl_trans_pcie_stop_device(trans, true);
}
1211
/*
 * Prepare the device for D3 (system suspend / WoWLAN).
 *
 * Enables persistence mode for d0i3-style wowlan so the device is not
 * reset, masks interrupts, and — unless running in test mode, where the
 * HW must stay untouched — disables ICT, drops the MAC access request
 * and init-done bits, resets the TX queues (their registers partly
 * reset during S3) and switches device power.
 */
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->wowlan_d0i3) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	/* make sure no in-flight IRQ handler still uses ICT state */
	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (!trans->wowlan_d0i3) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}
1251
/*
 * Resume the device from D3.
 *
 * In test mode just re-enables interrupts and reports ALIVE.  Otherwise
 * re-arms ICT, requests MAC access, waits for the MAC clock to become
 * ready (up to 25ms), restores device power and — unless in d0i3
 * wowlan — resets TX and re-initializes RX.  *status tells the caller
 * whether the firmware survived suspend (ALIVE) or the device was
 * reset (RESET, detected via the NEVO_RESET flag).
 *
 * Returns 0 on success or a negative errno from the clock-ready poll /
 * RX init.
 */
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* 8000 family needs a short settle time before polling */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (trans->wowlan_d0i3) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	/* NEVO_RESET set means the device went through a reset in S3 */
	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}
1311
/*
 * Bring the hardware up: take ownership, soft-reset the whole device,
 * start the APM clocks and begin reporting RF-kill to the op_mode.
 * Must be called with trans_pcie->mutex held (lockdep-asserted).
 *
 * Returns 0 on success or the errno from iwl_pcie_prepare_card_hw().
 */
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	/* ... rfkill can call stop_device and set it false if needed */
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}
1349
/*
 * Public start_hw entry point: takes the transport mutex and delegates
 * to _iwl_trans_pcie_start_hw() (which asserts it).
 */
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}
1361
/*
 * Detach the op_mode from the transport: mask interrupts (without
 * re-enabling the RF-kill interrupt), stop the APM, disable ICT and
 * wait out any IRQ handler still in flight.  Interrupts are disabled
 * a second time because iwl_pcie_apm_stop() touches the device and
 * could have caused new ones.
 */
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	synchronize_irq(trans_pcie->pci_dev->irq);
}
1385
1386static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1387{
1388 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1389}
1390
1391static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1392{
1393 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1394}
1395
1396static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1397{
1398 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1399}
1400
1401static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1402{
1403 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1404 ((reg & 0x000FFFFF) | (3 << 24)));
1405 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1406}
1407
1408static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1409 u32 val)
1410{
1411 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1412 ((addr & 0x000FFFFF) | (3 << 24)));
1413 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1414}
1415
/*
 * Placeholder poll callback for the NAPI instance registered in
 * iwl_trans_pcie_configure().  It is not expected to ever be scheduled;
 * the WARN_ON flags it if it does run.
 */
static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
1421
/*
 * Copy the op_mode-supplied configuration into the PCIe transport:
 * command queue/FIFO, no-reclaim command list, RX buffer size, command
 * header width, byte-count table layout, etc.  Also sets the initial
 * runtime-PM reference and registers the (dummy-poll) NAPI instance.
 * May be called more than once; NAPI setup is guarded against that.
 */
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	/* too many no-reclaim commands would overflow our fixed array */
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	/* RX buffers are either 8k or 4k pages */
	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
			       iwl_pcie_dummy_napi_poll, 64);
	}
}
1463
/*
 * Tear down the PCIe transport and release every resource it owns.
 * Order matters: quiesce the IRQ first, free TX/RX machinery, then
 * release the IRQ and ICT, and only then undo the PCI-level setup
 * (MSI, BAR mapping, regions, device) before freeing the trans itself.
 */
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	iwl_trans_free(trans);
}
1488
1489static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1490{
1491 if (state)
1492 set_bit(STATUS_TPOWER_PMI, &trans->status);
1493 else
1494 clear_bit(STATUS_TPOWER_PMI, &trans->status);
1495}
1496
/*
 * Wake the NIC and take exclusive register access (holds reg_lock on
 * success; the __release() below is only to appease sparse — the lock
 * stays held until iwl_trans_pcie_release_nic_access()).
 *
 * Sets MAC_ACCESS_REQ and polls for the MAC clock to be ready.  On
 * timeout, forces an NMI on the device and — unless @silent — warns
 * and returns false with the lock dropped.  If the command path is
 * already holding the NIC awake (cmd_hold_nic_awake) the wake sequence
 * is skipped entirely.
 */
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		/* kick the device with an NMI so its state can be captured */
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);
			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
1557
/*
 * Counterpart of grab_nic_access: let the NIC sleep again (unless the
 * command path holds it awake) and drop reg_lock.  The __acquire() is
 * only for sparse bookkeeping — the lock was really taken in grab.
 */
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquiring the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
1586
1587static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
1588 void *buf, int dwords)
1589{
1590 unsigned long flags;
1591 int offs, ret = 0;
1592 u32 *vals = buf;
1593
1594 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
1595 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
1596 for (offs = 0; offs < dwords; offs++)
1597 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1598 iwl_trans_release_nic_access(trans, &flags);
1599 } else {
1600 ret = -EBUSY;
1601 }
1602 return ret;
1603}
1604
1605static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
1606 const void *buf, int dwords)
1607{
1608 unsigned long flags;
1609 int offs, ret = 0;
1610 const u32 *vals = buf;
1611
1612 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
1613 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
1614 for (offs = 0; offs < dwords; offs++)
1615 iwl_write32(trans, HBUS_TARG_MEM_WDAT,
1616 vals ? vals[offs] : 0);
1617 iwl_trans_release_nic_access(trans, &flags);
1618 } else {
1619 ret = -EBUSY;
1620 }
1621 return ret;
1622}
1623
/*
 * Freeze or wake the stuck-queue watchdog timer for every TX queue in
 * the @txqs bitmap.
 *
 * Freezing a non-empty queue remembers how long the timer still had to
 * run (frozen_expiry_remainder) and deletes it; waking re-arms the
 * timer with that remainder.  Empty queues and queues already in the
 * requested state are skipped.  If the timer should already have fired
 * we leave it alone — it may be spinning on txq->lock right now.
 */
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		/* empty queue: nothing pending, no timer to adjust */
		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}
1677
/* How long to wait (per queue) for pending TX frames to drain */
#define IWL_FLUSH_WAIT_MS	2000

/*
 * Wait for every TX queue selected by @txq_bm to drain.
 *
 * For each used data queue (the command queue is skipped) we poll until
 * read_ptr catches up with write_ptr or IWL_FLUSH_WAIT_MS elapses; the
 * write pointer must not move while flushing (WARN + -ETIMEDOUT if it
 * does).  On failure, the scheduler status of the stuck queue and all
 * FH/SCD queue state is dumped to aid debugging.
 *
 * Returns 0 when all selected queues drained, -ETIMEDOUT otherwise.
 */
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			/* new frames must not be queued while we flush */
			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	/* timed out: dump state of the stuck queue and the scheduler */
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		/* translation table packs two queues per dword */
		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}
1767
/*
 * Read-modify-write @reg: clear the bits in @mask, then set @value —
 * all under reg_lock so it doesn't race other register accesses.
 */
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
1778
/*
 * Take a runtime-PM style reference on the transport (no-op when d0i3
 * is disabled by module parameter).  Balanced by iwl_trans_pcie_unref().
 */
void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}
1792
/*
 * Drop a reference taken by iwl_trans_pcie_ref() (no-op when d0i3 is
 * disabled).  A WARN fires on underflow and the count is left at zero.
 */
void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}
1810
1811static const char *get_csr_string(int cmd)
1812{
1813#define IWL_CMD(x) case x: return #x
1814 switch (cmd) {
1815 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1816 IWL_CMD(CSR_INT_COALESCING);
1817 IWL_CMD(CSR_INT);
1818 IWL_CMD(CSR_INT_MASK);
1819 IWL_CMD(CSR_FH_INT_STATUS);
1820 IWL_CMD(CSR_GPIO_IN);
1821 IWL_CMD(CSR_RESET);
1822 IWL_CMD(CSR_GP_CNTRL);
1823 IWL_CMD(CSR_HW_REV);
1824 IWL_CMD(CSR_EEPROM_REG);
1825 IWL_CMD(CSR_EEPROM_GP);
1826 IWL_CMD(CSR_OTP_GP_REG);
1827 IWL_CMD(CSR_GIO_REG);
1828 IWL_CMD(CSR_GP_UCODE_REG);
1829 IWL_CMD(CSR_GP_DRIVER_REG);
1830 IWL_CMD(CSR_UCODE_DRV_GP1);
1831 IWL_CMD(CSR_UCODE_DRV_GP2);
1832 IWL_CMD(CSR_LED_REG);
1833 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1834 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1835 IWL_CMD(CSR_ANA_PLL_CFG);
1836 IWL_CMD(CSR_HW_REV_WA_REG);
1837 IWL_CMD(CSR_MONITOR_STATUS_REG);
1838 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1839 default:
1840 return "UNKNOWN";
1841 }
1842#undef IWL_CMD
1843}
1844
1845void iwl_pcie_dump_csr(struct iwl_trans *trans)
1846{
1847 int i;
1848 static const u32 csr_tbl[] = {
1849 CSR_HW_IF_CONFIG_REG,
1850 CSR_INT_COALESCING,
1851 CSR_INT,
1852 CSR_INT_MASK,
1853 CSR_FH_INT_STATUS,
1854 CSR_GPIO_IN,
1855 CSR_RESET,
1856 CSR_GP_CNTRL,
1857 CSR_HW_REV,
1858 CSR_EEPROM_REG,
1859 CSR_EEPROM_GP,
1860 CSR_OTP_GP_REG,
1861 CSR_GIO_REG,
1862 CSR_GP_UCODE_REG,
1863 CSR_GP_DRIVER_REG,
1864 CSR_UCODE_DRV_GP1,
1865 CSR_UCODE_DRV_GP2,
1866 CSR_LED_REG,
1867 CSR_DRAM_INT_TBL_REG,
1868 CSR_GIO_CHICKEN_BITS,
1869 CSR_ANA_PLL_CFG,
1870 CSR_MONITOR_STATUS_REG,
1871 CSR_HW_REV_WA_REG,
1872 CSR_DBG_HPET_MEM_REG
1873 };
1874 IWL_ERR(trans, "CSR values:\n");
1875 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1876 "CSR_INT_PERIODIC_REG)\n");
1877 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1878 IWL_ERR(trans, " %25s: 0X%08x\n",
1879 get_csr_string(csr_tbl[i]),
1880 iwl_read32(trans, csr_tbl[i]));
1881 }
1882}
1883
#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * Helper macros for creating the transport's debugfs entries.
 *
 * DEBUGFS_ADD_FILE() registers one file under @parent with the trans
 * pointer as private data; on failure it jumps to the caller's local
 * "err" label, so it may only be used inside a function providing one.
 */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
	if (!debugfs_create_file(#name, mode, parent, trans, \
				 &iwl_dbgfs_##name##_ops)) \
		goto err; \
} while (0)

/*
 * The three macros below stamp out the file_operations for a debugfs
 * file that is read-only, write-only, or read-write respectively; the
 * handlers are expected to be named iwl_dbgfs_<name>_read/_write.
 */
#define DEBUGFS_READ_FILE_OPS(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
	.read = iwl_dbgfs_##name##_read, \
	.open = simple_open, \
	.llseek = generic_file_llseek, \
};

#define DEBUGFS_WRITE_FILE_OPS(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
	.write = iwl_dbgfs_##name##_write, \
	.open = simple_open, \
	.llseek = generic_file_llseek, \
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
	.write = iwl_dbgfs_##name##_write, \
	.read = iwl_dbgfs_##name##_read, \
	.open = simple_open, \
	.llseek = generic_file_llseek, \
};
1914
1915static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1916 char __user *user_buf,
1917 size_t count, loff_t *ppos)
1918{
1919 struct iwl_trans *trans = file->private_data;
1920 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1921 struct iwl_txq *txq;
1922 struct iwl_queue *q;
1923 char *buf;
1924 int pos = 0;
1925 int cnt;
1926 int ret;
1927 size_t bufsz;
1928
1929 bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
1930
1931 if (!trans_pcie->txq)
1932 return -EAGAIN;
1933
1934 buf = kzalloc(bufsz, GFP_KERNEL);
1935 if (!buf)
1936 return -ENOMEM;
1937
1938 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1939 txq = &trans_pcie->txq[cnt];
1940 q = &txq->q;
1941 pos += scnprintf(buf + pos, bufsz - pos,
1942 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
1943 cnt, q->read_ptr, q->write_ptr,
1944 !!test_bit(cnt, trans_pcie->queue_used),
1945 !!test_bit(cnt, trans_pcie->queue_stopped),
1946 txq->need_update, txq->frozen,
1947 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
1948 }
1949 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1950 kfree(buf);
1951 return ret;
1952}
1953
1954static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1955 char __user *user_buf,
1956 size_t count, loff_t *ppos)
1957{
1958 struct iwl_trans *trans = file->private_data;
1959 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1960 struct iwl_rxq *rxq = &trans_pcie->rxq;
1961 char buf[256];
1962 int pos = 0;
1963 const size_t bufsz = sizeof(buf);
1964
1965 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1966 rxq->read);
1967 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1968 rxq->write);
1969 pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
1970 rxq->write_actual);
1971 pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
1972 rxq->need_update);
1973 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1974 rxq->free_count);
1975 if (rxq->rb_stts) {
1976 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1977 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1978 } else {
1979 pos += scnprintf(buf + pos, bufsz - pos,
1980 "closed_rb_num: Not Allocated\n");
1981 }
1982 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1983}
1984
/*
 * debugfs read handler for "interrupt": report the accumulated ISR
 * statistics (hw/sw errors, rfkill/ctkill, wakeup, RX/TX and
 * unhandled interrupt counts).  The sch/alive counters are only
 * compiled in with CONFIG_IWLWIFI_DEBUG.
 */
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	/* only meaningful after at least one error interrupt */
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code: 0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
2042
2043static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2044 const char __user *user_buf,
2045 size_t count, loff_t *ppos)
2046{
2047 struct iwl_trans *trans = file->private_data;
2048 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2049 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2050
2051 char buf[8];
2052 int buf_size;
2053 u32 reset_flag;
2054
2055 memset(buf, 0, sizeof(buf));
2056 buf_size = min(count, sizeof(buf) - 1);
2057 if (copy_from_user(buf, user_buf, buf_size))
2058 return -EFAULT;
2059 if (sscanf(buf, "%x", &reset_flag) != 1)
2060 return -EFAULT;
2061 if (reset_flag == 0)
2062 memset(isr_stats, 0, sizeof(*isr_stats));
2063
2064 return count;
2065}
2066
/*
 * debugfs write handler for "csr": any decimal value written triggers
 * a full CSR dump to the log via iwl_pcie_dump_csr().
 *
 * NOTE(review): the parsed value @csr is never used — the input is
 * only validated to be a number before the (unconditional) dump.
 */
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	/* leave room for the NUL terminator */
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
2087
2088static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2089 char __user *user_buf,
2090 size_t count, loff_t *ppos)
2091{
2092 struct iwl_trans *trans = file->private_data;
2093 char *buf = NULL;
2094 ssize_t ret;
2095
2096 ret = iwl_dump_fh(trans, &buf);
2097 if (ret < 0)
2098 return ret;
2099 if (!buf)
2100 return -EINVAL;
2101 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2102 kfree(buf);
2103 return ret;
2104}
2105
/* instantiate the file_operations for each debugfs entry */
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
2111
2112/*
2113 * Create the debugfs files and directories
2114 *
2115 */
2116static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2117 struct dentry *dir)
2118{
2119 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2120 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
2121 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
2122 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
2123 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2124 return 0;
2125
2126err:
2127 IWL_ERR(trans, "failed to create the trans debugfs entry\n");
2128 return -ENOMEM;
2129}
2130#else
2131static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2132 struct dentry *dir)
2133{
2134 return 0;
2135}
2136#endif /*CONFIG_IWLWIFI_DEBUGFS */
2137
2138static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
2139{
2140 u32 cmdlen = 0;
2141 int i;
2142
2143 for (i = 0; i < IWL_NUM_OF_TBS; i++)
2144 cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
2145
2146 return cmdlen;
2147}
2148
/*
 * Periphery (PRPH) register ranges captured in a firmware error dump.
 * Both .start and .end are included in the dump (inclusive ranges),
 * stepping 4 bytes at a time.
 *
 * NOTE(review): the 0x00a01db8-0x00a01df4 and 0x00a01dc0-0x00a01dfc
 * entries overlap; presumably intentional (registers dumped twice) —
 * confirm against the hardware dump spec.
 */
static const struct {
	u32 start, end;
} iwl_prph_dump_addr[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};
2247
/*
 * Dump all PRPH ranges in iwl_prph_dump_addr[] into the error-dump
 * buffer at *data, advancing *data past each chunk written.
 *
 * The caller (iwl_trans_pcie_dump_data) has already sized the buffer
 * using the same table, so the writes here must mirror that length
 * computation exactly.  Returns the number of bytes written, or 0 if
 * NIC access could not be grabbed (in which case nothing is written).
 */
static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
				    struct iwl_fw_error_dump_data **data)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 prph_len = 0, i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;
		int reg;
		__le32 *val;

		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;

		/* chunk header: type, payload length, then start address */
		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		val = (void *)prph->data;

		/* read every dword in the inclusive [start, end] range */
		for (reg = iwl_prph_dump_addr[i].start;
		     reg <= iwl_prph_dump_addr[i].end;
		     reg += 4)
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      reg));
		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);

	return prph_len;
}
2286
/*
 * Dump the receive buffers the device has closed but the driver has
 * not yet processed (from rxq->read up to the device's closed_rb_num),
 * capped at @allocated_rb_nums entries.
 *
 * Each RB is DMA-unmapped so the CPU can read a coherent copy, then
 * remapped so normal RX processing can resume afterwards.
 * NOTE(review): the dma_map_page() return value is not checked with
 * dma_mapping_error() here — a remap failure would go unnoticed.
 *
 * Returns the number of bytes written into *data.
 */
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	/* device write pointer; low 12 bits are the RB index */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		/* hand the page back to the CPU before copying */
		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page for the free benefit */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
					     max_len,
					     DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}
/* number of CSR bytes captured in a firmware error dump */
#define IWL_CSR_TO_DUMP (0x250)

/*
 * Dump the first IWL_CSR_TO_DUMP bytes of CSR space into the error
 * dump, one dword at a time, and advance *data past the chunk.
 * Returns the number of bytes consumed (header + payload).
 */
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
2348
2349static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
2350 struct iwl_fw_error_dump_data **data)
2351{
2352 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
2353 unsigned long flags;
2354 __le32 *val;
2355 int i;
2356
2357 if (!iwl_trans_grab_nic_access(trans, false, &flags))
2358 return 0;
2359
2360 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
2361 (*data)->len = cpu_to_le32(fh_regs_len);
2362 val = (void *)(*data)->data;
2363
2364 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
2365 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2366
2367 iwl_trans_release_nic_access(trans, &flags);
2368
2369 *data = iwl_fw_error_next_data(*data);
2370
2371 return sizeof(**data) + fh_regs_len;
2372}
2373
2374static u32
2375iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
2376 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
2377 u32 monitor_len)
2378{
2379 u32 buf_size_in_dwords = (monitor_len >> 2);
2380 u32 *buffer = (u32 *)fw_mon_data->data;
2381 unsigned long flags;
2382 u32 i;
2383
2384 if (!iwl_trans_grab_nic_access(trans, false, &flags))
2385 return 0;
2386
2387 __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
2388 for (i = 0; i < buf_size_in_dwords; i++)
2389 buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
2390 __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
2391
2392 iwl_trans_release_nic_access(trans, &flags);
2393
2394 return monitor_len;
2395}
2396
/*
 * Dump the firmware monitor buffer (DRAM page, SMEM, or MARBH,
 * depending on configuration) into the error dump.
 *
 * The monitor is dumped when either a DRAM monitor page was allocated
 * on a 7000-family device, or a debug-destination TLV is present.
 * Inside the block, if fw_mon_page is NULL then dbg_dest_tlv must be
 * non-NULL (that is the only way the outer condition held), so the
 * dereferences in the else-if branches are safe.
 *
 * Returns the number of bytes written (0 if no monitor configured).
 */
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 len = 0;

	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		/* record the monitor's current pointers from PRPH space */
		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}
2474
/*
 * Build a complete transport error dump: host commands, PRPH and CSR
 * registers, FH registers, optionally the closed RX buffers (only on
 * firmware error), and the firmware monitor.
 *
 * The buffer length is pre-computed from the same inputs the dump
 * functions use, so each "len += ..." below must stay in sync with the
 * corresponding dump helper.  Returns a vzalloc'ed dump (caller frees)
 * or NULL on allocation failure.
 */
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs;
	u32 monitor_len;
	int i, ptr;
	/* RBs are only meaningful to dump after a firmware error */
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	/* monitor-only trigger: skip everything but the FW monitor */
	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
		dump_data = vzalloc(len);
		if (!dump_data)
			return NULL;

		data = (void *)dump_data->data;
		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
		dump_data->len = len;

		return dump_data;
	}

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
		       num_bytes_in_chunk;
	}

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* RBs */
		num_rbs = le16_to_cpu(ACCESS_ONCE(
				      trans_pcie->rxq.rb_stts->closed_rb_num))
			  & 0x0FFF;
		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	/* from here on, len tracks the bytes actually written */
	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	/* walk the command queue backwards from the write pointer */
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_prph(trans, &data);
	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}
2607
/* PCIe implementation of the transport-layer operations vtable */
static const struct iwl_trans_ops trans_ops_pcie = {
	/* device/firmware lifecycle */
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	/* power management */
	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	/* TX path */
	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,

	/* raw register/memory access */
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	/* d0i3 reference counting */
	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};
2649
/*
 * Allocate and initialize the PCIe transport for @pdev.
 *
 * Performs the full PCI bring-up sequence: enable the device, set DMA
 * masks (36-bit with a 32-bit fallback), map BAR0, enable MSI (with an
 * INTx workaround on failure), read and normalize the HW revision, and
 * install the threaded IRQ handler.  Error paths unwind in strict
 * reverse order via the labels at the bottom.
 *
 * Returns the new iwl_trans on success, or an ERR_PTR on failure.
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int ret;

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->ref_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	/* prefer a 36-bit DMA mask, fall back to 32-bit */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		ret = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		ret = iwl_pcie_prepare_card_hw(trans);
		if (ret) {
			IWL_WARN(trans, "Exit HW not ready\n");
			goto out_pci_disable_msi;
		}

		/*
		 * in-order to recognize C step driver should read chip version
		 * id located at the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_pci_disable_msi;
		}

		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
			u32 hw_step;

			/* read the chip step from the AUX bus MISC space */
			hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
			hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	ret = iwl_pcie_alloc_ict(trans);
	if (ret)
		goto out_pci_disable_msi;

	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (ret) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
new file mode 100644
index 000000000000..a8c8a4a7420b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -0,0 +1,1988 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
5 *
6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 *
22 * The full GNU General Public License is included in this distribution in the
23 * file called LICENSE.
24 *
25 * Contact Information:
26 * Intel Linux Wireless <ilw@linux.intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *
29 *****************************************************************************/
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <linux/sched.h>
33
34#include "iwl-debug.h"
35#include "iwl-csr.h"
36#include "iwl-prph.h"
37#include "iwl-io.h"
38#include "iwl-scd.h"
39#include "iwl-op-mode.h"
40#include "internal.h"
41/* FIXME: need to abstract out TX command (once we know what it looks like) */
42#include "dvm/commands.h"
43
44#define IWL_TX_CRC_SIZE 4
45#define IWL_TX_DELIMITER_SIZE 4
46
47/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
48 * DMA services
49 *
50 * Theory of operation
51 *
52 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
53 * of buffer descriptors, each of which points to one or more data buffers for
54 * the device to read from or fill. Driver and device exchange status of each
55 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
56 * entries in each circular buffer, to protect against confusing empty and full
57 * queue states.
58 *
59 * The device reads or writes the data in the queues via the device's several
60 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
61 *
62 * For Tx queue, there are low mark and high mark limits. If, after queuing
63 * the packet for Tx, free space become < low mark, Tx queue stopped. When
64 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
65 * Tx queue resumed.
66 *
67 ***************************************************/
68static int iwl_queue_space(const struct iwl_queue *q)
69{
70 unsigned int max;
71 unsigned int used;
72
73 /*
74 * To avoid ambiguity between empty and completely full queues, there
75 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
76 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
77 * to reserve any queue entries for this purpose.
78 */
79 if (q->n_window < TFD_QUEUE_SIZE_MAX)
80 max = q->n_window;
81 else
82 max = TFD_QUEUE_SIZE_MAX - 1;
83
84 /*
85 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
86 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
87 */
88 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
89
90 if (WARN_ON(used > max))
91 return 0;
92
93 return max - used;
94}
95
96/*
97 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
98 */
99static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
100{
101 q->n_window = slots_num;
102 q->id = id;
103
104 /* slots_num must be power-of-two size, otherwise
105 * get_cmd_index is broken. */
106 if (WARN_ON(!is_power_of_2(slots_num)))
107 return -EINVAL;
108
109 q->low_mark = q->n_window / 4;
110 if (q->low_mark < 4)
111 q->low_mark = 4;
112
113 q->high_mark = q->n_window / 8;
114 if (q->high_mark < 2)
115 q->high_mark = 2;
116
117 q->write_ptr = 0;
118 q->read_ptr = 0;
119
120 return 0;
121}
122
123static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
124 struct iwl_dma_ptr *ptr, size_t size)
125{
126 if (WARN_ON(ptr->addr))
127 return -EINVAL;
128
129 ptr->addr = dma_alloc_coherent(trans->dev, size,
130 &ptr->dma, GFP_KERNEL);
131 if (!ptr->addr)
132 return -ENOMEM;
133 ptr->size = size;
134 return 0;
135}
136
137static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
138 struct iwl_dma_ptr *ptr)
139{
140 if (unlikely(!ptr->addr))
141 return;
142
143 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
144 memset(ptr, 0, sizeof(*ptr));
145}
146
/*
 * iwl_pcie_txq_stuck_timer - TX queue watchdog expired.
 * @data: the struct iwl_txq * this timer belongs to (timer cookie)
 *
 * If the queue still has pending entries, dump the SW pointers, the
 * queue's scheduler SRAM status, the FH TRB registers and the per-queue
 * scheduler state, then force an NMI so the firmware produces an error
 * report.
 */
static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(txq->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	/* dump this queue's TX status bytes from scheduler SRAM */
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	/* per-queue scheduler state: active bit, FIFO, RA/TID mapping,
	 * HW read/write pointers */
	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		/* the translation table packs two queues per dword */
		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	iwl_force_nmi(trans);
}
202
/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 *
 * Records the frame length (plus crypto overhead) for the slot about to
 * be written at txq->q.write_ptr.  The first TFD_QUEUE_SIZE_BC_DUP
 * entries are mirrored past the end of the table as well.
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	/* account for the MIC/ICV/IV bytes appended per cipher */
	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	/* some devices expect the count in dwords rather than bytes */
	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	/* the length field is only 12 bits wide */
	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	/* station id occupies the top nibble of the entry */
	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
251
/*
 * iwl_pcie_txq_inval_byte_cnt_tbl - invalidate the byte-count entry for
 * the slot being reclaimed at txq->q.read_ptr (length forced to 1),
 * mirroring into the duplicate area like the update path does.
 */
static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	/* command queue entries carry no station id */
	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
277
/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 *
 * Must be called with txq->lock held.  If the NIC may be asleep, only a
 * wakeup is requested and the pointer update is deferred via
 * txq->need_update; iwl_pcie_txq_check_wrptrs() flushes it later.
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* defer; flushed by iwl_pcie_txq_check_wrptrs() */
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}
323
324void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
325{
326 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
327 int i;
328
329 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
330 struct iwl_txq *txq = &trans_pcie->txq[i];
331
332 spin_lock_bh(&txq->lock);
333 if (trans_pcie->txq[i].need_update) {
334 iwl_pcie_txq_inc_wr_ptr(trans, txq);
335 trans_pcie->txq[i].need_update = false;
336 }
337 spin_unlock_bh(&txq->lock);
338 }
339}
340
341static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
342{
343 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
344
345 dma_addr_t addr = get_unaligned_le32(&tb->lo);
346 if (sizeof(dma_addr_t) > sizeof(u32))
347 addr |=
348 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
349
350 return addr;
351}
352
353static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
354 dma_addr_t addr, u16 len)
355{
356 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
357 u16 hi_n_len = len << 4;
358
359 put_unaligned_le32(addr, &tb->lo);
360 if (sizeof(dma_addr_t) > sizeof(u32))
361 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
362
363 tb->hi_n_len = cpu_to_le16(hi_n_len);
364
365 tfd->num_tbs = idx + 1;
366}
367
368static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
369{
370 return tfd->num_tbs & 0x1f;
371}
372
/*
 * iwl_pcie_tfd_unmap - unmap every DMA chunk referenced by @tfd.
 * @meta: entry metadata; bit (i + CMD_TB_BITMAP_POS) set means TB i was
 *	mapped as a page, otherwise as a single buffer
 *
 * TB 0 is skipped - it points into the per-slot scratch buffer.
 * Clears tfd->num_tbs when done.
 */
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++) {
		/* unmap with the same primitive that mapped it */
		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
			dma_unmap_page(trans->dev,
				       iwl_pcie_tfd_tb_get_addr(tfd, i),
				       iwl_pcie_tfd_tb_get_len(tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_pcie_tfd_tb_get_addr(tfd, i),
					 iwl_pcie_tfd_tb_get_len(tfd, i),
					 DMA_TO_DEVICE);
	}
	tfd->num_tbs = 0;
}
405
/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
448
449static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
450 dma_addr_t addr, u16 len, bool reset)
451{
452 struct iwl_queue *q;
453 struct iwl_tfd *tfd, *tfd_tmp;
454 u32 num_tbs;
455
456 q = &txq->q;
457 tfd_tmp = txq->tfds;
458 tfd = &tfd_tmp[q->write_ptr];
459
460 if (reset)
461 memset(tfd, 0, sizeof(*tfd));
462
463 num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
464
465 /* Each TFD can point to a maximum 20 Tx buffers */
466 if (num_tbs >= IWL_NUM_OF_TBS) {
467 IWL_ERR(trans, "Error can not send more than %d chunks\n",
468 IWL_NUM_OF_TBS);
469 return -EINVAL;
470 }
471
472 if (WARN(addr & ~IWL_TX_DMA_MASK,
473 "Unaligned address = %llx\n", (unsigned long long)addr))
474 return -EINVAL;
475
476 iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
477
478 return num_tbs;
479}
480
481static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
482 struct iwl_txq *txq, int slots_num,
483 u32 txq_id)
484{
485 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
486 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
487 size_t scratchbuf_sz;
488 int i;
489
490 if (WARN_ON(txq->entries || txq->tfds))
491 return -EINVAL;
492
493 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
494 (unsigned long)txq);
495 txq->trans_pcie = trans_pcie;
496
497 txq->q.n_window = slots_num;
498
499 txq->entries = kcalloc(slots_num,
500 sizeof(struct iwl_pcie_txq_entry),
501 GFP_KERNEL);
502
503 if (!txq->entries)
504 goto error;
505
506 if (txq_id == trans_pcie->cmd_queue)
507 for (i = 0; i < slots_num; i++) {
508 txq->entries[i].cmd =
509 kmalloc(sizeof(struct iwl_device_cmd),
510 GFP_KERNEL);
511 if (!txq->entries[i].cmd)
512 goto error;
513 }
514
515 /* Circular buffer of transmit frame descriptors (TFDs),
516 * shared with device */
517 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
518 &txq->q.dma_addr, GFP_KERNEL);
519 if (!txq->tfds)
520 goto error;
521
522 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
523 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
524 sizeof(struct iwl_cmd_header) +
525 offsetof(struct iwl_tx_cmd, scratch));
526
527 scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
528
529 txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
530 &txq->scratchbufs_dma,
531 GFP_KERNEL);
532 if (!txq->scratchbufs)
533 goto err_free_tfds;
534
535 txq->q.id = txq_id;
536
537 return 0;
538err_free_tfds:
539 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
540error:
541 if (txq->entries && txq_id == trans_pcie->cmd_queue)
542 for (i = 0; i < slots_num; i++)
543 kfree(txq->entries[i].cmd);
544 kfree(txq->entries);
545 txq->entries = NULL;
546
547 return -ENOMEM;
548
549}
550
551static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
552 int slots_num, u32 txq_id)
553{
554 int ret;
555
556 txq->need_update = false;
557
558 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
559 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
560 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
561
562 /* Initialize queue's high/low-water marks, and head/tail indexes */
563 ret = iwl_queue_init(&txq->q, slots_num, txq_id);
564 if (ret)
565 return ret;
566
567 spin_lock_init(&txq->lock);
568
569 /*
570 * Tell nic where to find circular buffer of Tx Frame Descriptors for
571 * given Tx queue, and enable the DMA channel used for that queue.
572 * Circular buffer (TFD queue in DRAM) physical base address */
573 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
574 txq->q.dma_addr >> 8);
575
576 return 0;
577}
578
/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 *
 * Drains the ring from read_ptr to write_ptr, releasing every pending
 * TFD, marks the queue inactive and wakes it in case the stack had
 * stopped it.
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
	}
	txq->active = false;
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
601
/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			/* kzfree: command buffers may hold sensitive data */
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		/* scratch buffers are only allocated together with tfds */
		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
650
/*
 * iwl_pcie_tx_start - bring up the TX scheduler and FH DMA channels.
 * @scd_base_addr: expected scheduler SRAM base address, or 0 to skip
 *	the consistency check (after a device reset the SRAM register
 *	may contain garbage - see iwl_trans_pcie_tx_reset()).
 */
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
			    SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo,
				trans_pcie->cmd_q_wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
707
/*
 * iwl_trans_pcie_tx_reset - re-program the TX path after a device
 * reset (e.g. resume from WoWLAN): point every DMA channel back at its
 * TFD ring, drop all pending entries and restart the scheduler.
 */
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		/* ring base address in 256-byte units */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have be reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}
735
/*
 * iwl_pcie_tx_stop_fh - stop all FH TX DMA channels and poll for them
 * to go idle.  The register writes are skipped entirely when NIC
 * access cannot be obtained; an idle timeout is only logged.
 */
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		/* NOTE(review): ch == FH_TCSR_CHNL_NUM here (the loop above
		 * ran to completion), so the logged channel number does not
		 * identify the stalled channel - the status register value
		 * is the useful part of this message */
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}
766
/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 *
 * Deactivates the scheduler FIFOs, stops the FH DMA channels, marks
 * every queue stopped/unused and drains all rings.  Always returns 0.
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}
800
801/*
802 * iwl_trans_tx_free - Free TXQ Context
803 *
804 * Destroy all TX DMA queues and structures
805 */
806void iwl_pcie_tx_free(struct iwl_trans *trans)
807{
808 int txq_id;
809 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
810
811 /* Tx queues */
812 if (trans_pcie->txq) {
813 for (txq_id = 0;
814 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
815 iwl_pcie_txq_free(trans, txq_id);
816 }
817
818 kfree(trans_pcie->txq);
819 trans_pcie->txq = NULL;
820
821 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
822
823 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
824}
825
/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * Allocates the scheduler byte-count tables, the keep-warm buffer, the
 * queue array and each individual queue.  Returns 0 or a negative
 * errno; on failure everything allocated so far is released.
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* releases everything allocated so far, including partial queues */
	iwl_pcie_tx_free(trans);

	return ret;
}
/*
 * iwl_pcie_tx_init - (re)initialize the TX path for operation.
 *
 * Allocates the full TX context on first use, quiesces the scheduler,
 * programs the keep-warm buffer address and initializes every queue.
 * On error, TX resources are freed only if they were allocated here.
 */
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	/* HW with more than 20 queues needs the extended enable bit */
	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}
938
939static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
940{
941 lockdep_assert_held(&txq->lock);
942
943 if (!txq->wd_timeout)
944 return;
945
946 /*
947 * station is asleep and we send data - that must
948 * be uAPSD or PS-Poll. Don't rearm the timer.
949 */
950 if (txq->frozen)
951 return;
952
953 /*
954 * if empty delete timer, otherwise move timer forward
955 * since we're making progress on this queue
956 */
957 if (txq->q.read_ptr == txq->q.write_ptr)
958 del_timer(&txq->stuck_timer);
959 else
960 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
961}
962
/*
 * iwl_trans_pcie_reclaim - reclaim TFDs up to, but not including, @ssn.
 * @skbs: must be empty on entry; the reclaimed frames' skbs are queued
 *	here for the caller to consume
 *
 * Frees buffers until index _not_ inclusive.  Not valid for the command
 * queue (see iwl_pcie_cmdq_reclaim).  Wakes the queue when enough space
 * frees up, and drops the transport reference when the queue drains.
 */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		/* invalidate the scheduler byte-count entry for this slot */
		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	/* re-arm or cancel the stuck-queue watchdog */
	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);

	if (q->read_ptr == q->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
		iwl_trans_pcie_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}
1035
/*
 * iwl_pcie_set_cmd_in_flight - account for a host command in flight.
 *
 * Takes a transport reference (unless CMD_SEND_IN_IDLE) and, on NICs
 * with the apmg_wake_up_wa workaround, forces the MAC awake until
 * iwl_pcie_clear_cmd_in_flight() releases it.  Caller must hold
 * trans_pcie->reg_lock.  Returns 0, or -EIO if the NIC won't wake.
 */
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_pcie_ref(trans);
	}

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			/* roll back the access request before failing */
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}
1078
/*
 * iwl_pcie_clear_cmd_in_flight - undo iwl_pcie_set_cmd_in_flight()
 * @trans: transport context
 *
 * Called with trans_pcie->reg_lock held, once the command queue has
 * drained (read_ptr == write_ptr). Drops the in-flight reference and,
 * on NICs using the apmg_wake_up_wa workaround, releases the MAC
 * access request so the NIC may sleep again.
 *
 * Always returns 0.
 */
static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_pcie_unref(trans);
	}

	if (trans->cfg->base_params->apmg_wake_up_wa) {
		/* set/clear must stay balanced; warn on a stray clear */
		if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
			return 0;

		trans_pcie->cmd_hold_nic_awake = false;
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	}
	return 0;
}
1101
1102/*
1103 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1104 *
1105 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
1107 * enough free space (> low mark), wake the stack that feeds us.
1108 */
/*
 * @trans: transport context
 * @txq_id: the command queue id (checked by the caller)
 * @idx: index of the last command the firmware has consumed
 *
 * Must be called with txq->lock held. Advances read_ptr up to and
 * including @idx; command-queue entries are statically allocated, so
 * nothing is freed here - only the pointers move. Once the queue is
 * fully drained the in-flight accounting is cleared under reg_lock.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	/* reject an index outside the ring or one that was never used */
	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	/*
	 * Host commands are expected to complete one at a time; reclaiming
	 * more than a single entry means responses were skipped, which is
	 * escalated to a firmware NMI for debugging.
	 */
	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_force_nmi(trans);
		}
	}

	/* queue empty again: release the wake/reference taken at enqueue */
	if (q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(txq);
}
1145
1146static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
1147 u16 txq_id)
1148{
1149 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1150 u32 tbl_dw_addr;
1151 u32 tbl_dw;
1152 u16 scd_q2ratid;
1153
1154 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1155
1156 tbl_dw_addr = trans_pcie->scd_base_addr +
1157 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1158
1159 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
1160
1161 if (txq_id & 0x1)
1162 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1163 else
1164 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1165
1166 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
1167
1168 return 0;
1169}
1170
1171/* Receiver address (actually, Rx station's index into station table),
1172 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1173#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
1174
/*
 * iwl_trans_pcie_txq_enable - activate a Tx queue in HW and the scheduler
 * @trans: transport context
 * @txq_id: the queue to activate
 * @ssn: start sequence number; its low byte becomes the initial ring index
 * @cfg: scheduler configuration, or NULL to skip all SCD programming
 * @wdg_timeout: stuck-queue watchdog timeout in ms (converted to jiffies)
 */
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int fifo = -1;

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->cmd_queue)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			/* non-AGG: resume from the current read pointer */
			ssn = txq->q.read_ptr;
		}
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->q.read_ptr = (ssn & 0xff);
	txq->q.write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
					SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
					SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	txq->active = true;
}
1270
/*
 * iwl_trans_pcie_txq_disable - deactivate a Tx queue and drop its frames
 * @trans: transport context
 * @txq_id: the queue to disable
 * @configure_scd: when true, also stop the queue in the scheduler and
 *	zero its status area in SRAM; false skips HW programming (e.g.
 *	when the device is already stopped)
 */
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	/* reset watchdog freeze state for this queue */
	trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
	trans_pcie->txq[txq_id].frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* clear the queue's status area in SRAM */
		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	/* free/unmap any frames still sitting on the ring */
	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
1306
1307/*************** HOST COMMAND QUEUE FUNCTIONS *****/
1308
1309/*
1310 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer data (device private data pointer)
1312 * @cmd: a pointer to the ucode command structure
1313 *
1314 * The function returns < 0 values to indicate the operation
1315 * failed. On success, it returns the index (>= 0) of command in the
1316 * command queue.
1317 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	/* copy_size: bytes copied into the static command buffer;
	 * cmd_size: total command size including NOCOPY/DUP chunks */
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	/* per-chunk data/len after carving out the scratchbuf prefix */
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	/* wide (grouped) commands require op-mode opt-in */
	if (WARN(!trans_pcie->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	/*
	 * First pass: compute sizes and validate the chunk layout.
	 * The leading IWL_HCMD_SCRATCHBUF_SIZE bytes are always copied
	 * (into the scratch buffer), even out of NOCOPY/DUP chunks, so
	 * cmddata[]/cmdlen[] are adjusted to the remainder.
	 */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			/* NOCOPY and DUP are mutually exclusive per chunk */
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	/* sync commands need one slot, async keep one spare for sync */
	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
						 INDEX_TO_SEQ(q->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
						 INDEX_TO_SEQ(q->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
		 * in total (for the scratchbuf handling), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
		     sizeof(out_meta->flags) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	/* the entry should not still own a dup buffer from a prior use */
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
1590
1591/*
1592 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1593 * @rxb: Rx buffer to reclaim
1594 *
1595 * If an Rx buffer has an async callback associated with it the callback
1596 * will be executed. The attached skb (if present) will only be freed
1597 * if the callback returns 1
1598 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	/* the response sequence encodes the originating queue and index */
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	/* release the DMA mappings set up at enqueue time */
	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* steal the Rx page so the response outlives this handler;
		 * the command caller becomes responsible for freeing it */
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	/* for sync commands, clear the active flag and wake the waiter */
	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}
1659
1660#define HOST_COMPLETE_TIMEOUT (2 * HZ)
1661
1662static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1663 struct iwl_host_cmd *cmd)
1664{
1665 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1666 int ret;
1667
1668 /* An asynchronous command can not expect an SKB to be set. */
1669 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1670 return -EINVAL;
1671
1672 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
1673 if (ret < 0) {
1674 IWL_ERR(trans,
1675 "Error sending %s: enqueue_hcmd failed: %d\n",
1676 get_cmd_string(trans_pcie, cmd->id), ret);
1677 return ret;
1678 }
1679 return 0;
1680}
1681
/*
 * Synchronous host command submission: enqueues @cmd and sleeps until
 * iwl_pcie_hcmd_complete() clears STATUS_SYNC_HCMD_ACTIVE, or the
 * HOST_COMPLETE_TIMEOUT expires. On timeout an NMI is forced into the
 * firmware and a transport FW-error is raised.
 *
 * Returns 0 on success; -EIO, -ETIMEDOUT or -ERFKILL on failure.
 */
static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	/* only one sync command may be in flight at any time */
	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 get_cmd_string(trans_pcie, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			get_cmd_string(trans_pcie, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->id));
		ret = -ETIMEDOUT;

		/* escalate: NMI the firmware and report a FW error */
		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	/* RF-kill may have been asserted while we were waiting */
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	/* drop any response page that was already attached */
	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
1780
1781int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1782{
1783 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1784 test_bit(STATUS_RFKILL, &trans->status)) {
1785 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1786 cmd->id);
1787 return -ERFKILL;
1788 }
1789
1790 if (cmd->flags & CMD_ASYNC)
1791 return iwl_pcie_send_hcmd_async(trans, cmd);
1792
1793 /* We still can fail on RFKILL that can be asserted while we wait */
1794 return iwl_pcie_send_hcmd_sync(trans, cmd);
1795}
1796
/*
 * iwl_trans_pcie_tx - build and queue a TFD for one data frame
 * @trans: transport context
 * @skb: the frame to transmit (802.11 header in the skb head)
 * @dev_cmd: the prepared TX command for this frame
 * @txq_id: target HW queue
 *
 * Builds up to IWL_NUM_OF_TBS transfer buffers: TB0 from the per-entry
 * scratch buffer, TB1 for the rest of the TX command + MAC header, TB2
 * for the remaining skb head, and one TB per skb fragment.
 *
 * Returns 0 on success, -EINVAL for an unused queue, -ENOMEM if the
 * skb cannot be linearized, -1 on DMA mapping failure.
 */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	int i;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	/* too many fragments for the TFD: fall back to a linear skb */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirements to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* tell the firmware where to DMA its scratch data back to */
	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = ALIGN(len, 4);

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	tb2_len = skb_headlen(skb) - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			/* undo TB0/TB1 mappings before bailing out */
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);

		/* record which TBs carry frag mappings for later unmap */
		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  hdr_len, skb->len - hdr_len);

	/* defer the write-pointer update for fragmented bursts */
	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr) {
		if (txq->wd_timeout) {
			/*
			 * If the TXQ is active, then set the timer, if not,
			 * set the timer in remainder so that the timer will
			 * be armed with the right value when the station will
			 * wake up.
			 */
			if (!txq->frozen)
				mod_timer(&txq->stuck_timer,
					  jiffies + txq->wd_timeout);
			else
				txq->frozen_expiry_remainder = txq->wd_timeout;
		}
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
		iwl_trans_pcie_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr)
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		else
			iwl_stop_queue(trans, txq);
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}