aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/pcie
diff options
context:
space:
mode:
authorJohn W. Linville <linville@tuxdriver.com>2012-06-12 14:25:04 -0400
committerJohn W. Linville <linville@tuxdriver.com>2012-06-12 14:25:04 -0400
commit0440507bbc44149e63bbfb9df730ba3820371904 (patch)
tree7275e41aa1aa7e4d19d0503f1c15f07991c1a120 /drivers/net/wireless/iwlwifi/pcie
parent8d242488ce4627dd7e6333caab56df11ea25e239 (diff)
parent7f0d9f430dc99303558adc30a75eef10c43f7bec (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c141
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c243
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c180
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c383
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h113
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c380
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h447
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c1058
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c2169
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c998
10 files changed, 6112 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
new file mode 100644
index 00000000000..81b83f484f0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -0,0 +1,141 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-csr.h"
31#include "iwl-agn-hw.h"
32#include "cfg.h"
33
34/* Highest firmware API version supported */
35#define IWL1000_UCODE_API_MAX 5
36#define IWL100_UCODE_API_MAX 5
37
38/* Oldest version we won't warn about */
39#define IWL1000_UCODE_API_OK 5
40#define IWL100_UCODE_API_OK 5
41
42/* Lowest firmware API version supported */
43#define IWL1000_UCODE_API_MIN 1
44#define IWL100_UCODE_API_MIN 5
45
46/* EEPROM version */
47#define EEPROM_1000_TX_POWER_VERSION (4)
48#define EEPROM_1000_EEPROM_VERSION (0x15C)
49
50#define IWL1000_FW_PRE "iwlwifi-1000-"
51#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
52
53#define IWL100_FW_PRE "iwlwifi-100-"
54#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
55
56
57static const struct iwl_base_params iwl1000_base_params = {
58 .num_of_queues = IWLAGN_NUM_QUEUES,
59 .eeprom_size = OTP_LOW_IMAGE_SIZE,
60 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
61 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
62 .shadow_ram_support = false,
63 .led_compensation = 51,
64 .support_ct_kill_exit = true,
65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
66 .chain_noise_scale = 1000,
67 .wd_timeout = IWL_WATCHDOG_DISABLED,
68 .max_event_log_size = 128,
69};
70
71static const struct iwl_ht_params iwl1000_ht_params = {
72 .ht_greenfield_support = true,
73 .use_rts_for_aggregation = true, /* use rts/cts protection */
74 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
75};
76
77static const struct iwl_eeprom_params iwl1000_eeprom_params = {
78 .regulatory_bands = {
79 EEPROM_REG_BAND_1_CHANNELS,
80 EEPROM_REG_BAND_2_CHANNELS,
81 EEPROM_REG_BAND_3_CHANNELS,
82 EEPROM_REG_BAND_4_CHANNELS,
83 EEPROM_REG_BAND_5_CHANNELS,
84 EEPROM_REG_BAND_24_HT40_CHANNELS,
85 EEPROM_REGULATORY_BAND_NO_HT40,
86 }
87};
88
89#define IWL_DEVICE_1000 \
90 .fw_name_pre = IWL1000_FW_PRE, \
91 .ucode_api_max = IWL1000_UCODE_API_MAX, \
92 .ucode_api_ok = IWL1000_UCODE_API_OK, \
93 .ucode_api_min = IWL1000_UCODE_API_MIN, \
94 .device_family = IWL_DEVICE_FAMILY_1000, \
95 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
96 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
97 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
98 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
99 .base_params = &iwl1000_base_params, \
100 .eeprom_params = &iwl1000_eeprom_params, \
101 .led_mode = IWL_LED_BLINK
102
103const struct iwl_cfg iwl1000_bgn_cfg = {
104 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
105 IWL_DEVICE_1000,
106 .ht_params = &iwl1000_ht_params,
107};
108
109const struct iwl_cfg iwl1000_bg_cfg = {
110 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
111 IWL_DEVICE_1000,
112};
113
114#define IWL_DEVICE_100 \
115 .fw_name_pre = IWL100_FW_PRE, \
116 .ucode_api_max = IWL100_UCODE_API_MAX, \
117 .ucode_api_ok = IWL100_UCODE_API_OK, \
118 .ucode_api_min = IWL100_UCODE_API_MIN, \
119 .device_family = IWL_DEVICE_FAMILY_100, \
120 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
121 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
122 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
123 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
124 .base_params = &iwl1000_base_params, \
125 .eeprom_params = &iwl1000_eeprom_params, \
126 .led_mode = IWL_LED_RF_STATE, \
127 .rx_with_siso_diversity = true
128
129const struct iwl_cfg iwl100_bgn_cfg = {
130 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
131 IWL_DEVICE_100,
132 .ht_params = &iwl1000_ht_params,
133};
134
135const struct iwl_cfg iwl100_bg_cfg = {
136 .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
137 IWL_DEVICE_100,
138};
139
140MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
141MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
new file mode 100644
index 00000000000..fd4e78f56fa
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -0,0 +1,243 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33
34/* Highest firmware API version supported */
35#define IWL2030_UCODE_API_MAX 6
36#define IWL2000_UCODE_API_MAX 6
37#define IWL105_UCODE_API_MAX 6
38#define IWL135_UCODE_API_MAX 6
39
40/* Oldest version we won't warn about */
41#define IWL2030_UCODE_API_OK 6
42#define IWL2000_UCODE_API_OK 6
43#define IWL105_UCODE_API_OK 6
44#define IWL135_UCODE_API_OK 6
45
46/* Lowest firmware API version supported */
47#define IWL2030_UCODE_API_MIN 5
48#define IWL2000_UCODE_API_MIN 5
49#define IWL105_UCODE_API_MIN 5
50#define IWL135_UCODE_API_MIN 5
51
52/* EEPROM version */
53#define EEPROM_2000_TX_POWER_VERSION (6)
54#define EEPROM_2000_EEPROM_VERSION (0x805)
55
56
57#define IWL2030_FW_PRE "iwlwifi-2030-"
58#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
59
60#define IWL2000_FW_PRE "iwlwifi-2000-"
61#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
62
63#define IWL105_FW_PRE "iwlwifi-105-"
64#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
65
66#define IWL135_FW_PRE "iwlwifi-135-"
67#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
68
69static const struct iwl_base_params iwl2000_base_params = {
70 .eeprom_size = OTP_LOW_IMAGE_SIZE,
71 .num_of_queues = IWLAGN_NUM_QUEUES,
72 .pll_cfg_val = 0,
73 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
74 .shadow_ram_support = true,
75 .led_compensation = 51,
76 .adv_thermal_throttle = true,
77 .support_ct_kill_exit = true,
78 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
79 .chain_noise_scale = 1000,
80 .wd_timeout = IWL_DEF_WD_TIMEOUT,
81 .max_event_log_size = 512,
82 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
83 .hd_v2 = true,
84};
85
86
87static const struct iwl_base_params iwl2030_base_params = {
88 .eeprom_size = OTP_LOW_IMAGE_SIZE,
89 .num_of_queues = IWLAGN_NUM_QUEUES,
90 .pll_cfg_val = 0,
91 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
92 .shadow_ram_support = true,
93 .led_compensation = 57,
94 .adv_thermal_throttle = true,
95 .support_ct_kill_exit = true,
96 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
97 .chain_noise_scale = 1000,
98 .wd_timeout = IWL_LONG_WD_TIMEOUT,
99 .max_event_log_size = 512,
100 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
101 .hd_v2 = true,
102};
103
104static const struct iwl_ht_params iwl2000_ht_params = {
105 .ht_greenfield_support = true,
106 .use_rts_for_aggregation = true, /* use rts/cts protection */
107 .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
108};
109
110static const struct iwl_bt_params iwl2030_bt_params = {
111 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
112 .advanced_bt_coexist = true,
113 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
114 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
115 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
116 .bt_sco_disable = true,
117 .bt_session_2 = true,
118};
119
120static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
121 .regulatory_bands = {
122 EEPROM_REG_BAND_1_CHANNELS,
123 EEPROM_REG_BAND_2_CHANNELS,
124 EEPROM_REG_BAND_3_CHANNELS,
125 EEPROM_REG_BAND_4_CHANNELS,
126 EEPROM_REG_BAND_5_CHANNELS,
127 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
128 EEPROM_REGULATORY_BAND_NO_HT40,
129 },
130 .enhanced_txpower = true,
131};
132
133#define IWL_DEVICE_2000 \
134 .fw_name_pre = IWL2000_FW_PRE, \
135 .ucode_api_max = IWL2000_UCODE_API_MAX, \
136 .ucode_api_ok = IWL2000_UCODE_API_OK, \
137 .ucode_api_min = IWL2000_UCODE_API_MIN, \
138 .device_family = IWL_DEVICE_FAMILY_2000, \
139 .max_inst_size = IWL60_RTC_INST_SIZE, \
140 .max_data_size = IWL60_RTC_DATA_SIZE, \
141 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
142 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
143 .base_params = &iwl2000_base_params, \
144 .eeprom_params = &iwl20x0_eeprom_params, \
145 .need_temp_offset_calib = true, \
146 .temp_offset_v2 = true, \
147 .led_mode = IWL_LED_RF_STATE
148
149const struct iwl_cfg iwl2000_2bgn_cfg = {
150 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
151 IWL_DEVICE_2000,
152 .ht_params = &iwl2000_ht_params,
153};
154
155const struct iwl_cfg iwl2000_2bgn_d_cfg = {
156 .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
157 IWL_DEVICE_2000,
158 .ht_params = &iwl2000_ht_params,
159};
160
161#define IWL_DEVICE_2030 \
162 .fw_name_pre = IWL2030_FW_PRE, \
163 .ucode_api_max = IWL2030_UCODE_API_MAX, \
164 .ucode_api_ok = IWL2030_UCODE_API_OK, \
165 .ucode_api_min = IWL2030_UCODE_API_MIN, \
166 .device_family = IWL_DEVICE_FAMILY_2030, \
167 .max_inst_size = IWL60_RTC_INST_SIZE, \
168 .max_data_size = IWL60_RTC_DATA_SIZE, \
169 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
170 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
171 .base_params = &iwl2030_base_params, \
172 .bt_params = &iwl2030_bt_params, \
173 .eeprom_params = &iwl20x0_eeprom_params, \
174 .need_temp_offset_calib = true, \
175 .temp_offset_v2 = true, \
176 .led_mode = IWL_LED_RF_STATE, \
177 .adv_pm = true
178
179const struct iwl_cfg iwl2030_2bgn_cfg = {
180 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
181 IWL_DEVICE_2030,
182 .ht_params = &iwl2000_ht_params,
183};
184
185#define IWL_DEVICE_105 \
186 .fw_name_pre = IWL105_FW_PRE, \
187 .ucode_api_max = IWL105_UCODE_API_MAX, \
188 .ucode_api_ok = IWL105_UCODE_API_OK, \
189 .ucode_api_min = IWL105_UCODE_API_MIN, \
190 .device_family = IWL_DEVICE_FAMILY_105, \
191 .max_inst_size = IWL60_RTC_INST_SIZE, \
192 .max_data_size = IWL60_RTC_DATA_SIZE, \
193 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
194 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
195 .base_params = &iwl2000_base_params, \
196 .eeprom_params = &iwl20x0_eeprom_params, \
197 .need_temp_offset_calib = true, \
198 .temp_offset_v2 = true, \
199 .led_mode = IWL_LED_RF_STATE, \
200 .adv_pm = true, \
201 .rx_with_siso_diversity = true
202
203const struct iwl_cfg iwl105_bgn_cfg = {
204 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
205 IWL_DEVICE_105,
206 .ht_params = &iwl2000_ht_params,
207};
208
209const struct iwl_cfg iwl105_bgn_d_cfg = {
210 .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
211 IWL_DEVICE_105,
212 .ht_params = &iwl2000_ht_params,
213};
214
215#define IWL_DEVICE_135 \
216 .fw_name_pre = IWL135_FW_PRE, \
217 .ucode_api_max = IWL135_UCODE_API_MAX, \
218 .ucode_api_ok = IWL135_UCODE_API_OK, \
219 .ucode_api_min = IWL135_UCODE_API_MIN, \
220 .device_family = IWL_DEVICE_FAMILY_135, \
221 .max_inst_size = IWL60_RTC_INST_SIZE, \
222 .max_data_size = IWL60_RTC_DATA_SIZE, \
223 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
224 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
225 .base_params = &iwl2030_base_params, \
226 .bt_params = &iwl2030_bt_params, \
227 .eeprom_params = &iwl20x0_eeprom_params, \
228 .need_temp_offset_calib = true, \
229 .temp_offset_v2 = true, \
230 .led_mode = IWL_LED_RF_STATE, \
231 .adv_pm = true, \
232 .rx_with_siso_diversity = true
233
234const struct iwl_cfg iwl135_bgn_cfg = {
235 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
236 IWL_DEVICE_135,
237 .ht_params = &iwl2000_ht_params,
238};
239
240MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
241MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
242MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
243MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
new file mode 100644
index 00000000000..d1665fa6d15
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -0,0 +1,180 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "iwl-csr.h"
32#include "cfg.h"
33
34/* Highest firmware API version supported */
35#define IWL5000_UCODE_API_MAX 5
36#define IWL5150_UCODE_API_MAX 2
37
38/* Oldest version we won't warn about */
39#define IWL5000_UCODE_API_OK 5
40#define IWL5150_UCODE_API_OK 2
41
42/* Lowest firmware API version supported */
43#define IWL5000_UCODE_API_MIN 1
44#define IWL5150_UCODE_API_MIN 1
45
46/* EEPROM versions */
47#define EEPROM_5000_TX_POWER_VERSION (4)
48#define EEPROM_5000_EEPROM_VERSION (0x11A)
49#define EEPROM_5050_TX_POWER_VERSION (4)
50#define EEPROM_5050_EEPROM_VERSION (0x21E)
51
52#define IWL5000_FW_PRE "iwlwifi-5000-"
53#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
54
55#define IWL5150_FW_PRE "iwlwifi-5150-"
56#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
57
58static const struct iwl_base_params iwl5000_base_params = {
59 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
60 .num_of_queues = IWLAGN_NUM_QUEUES,
61 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
62 .led_compensation = 51,
63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
64 .chain_noise_scale = 1000,
65 .wd_timeout = IWL_WATCHDOG_DISABLED,
66 .max_event_log_size = 512,
67 .no_idle_support = true,
68};
69
70static const struct iwl_ht_params iwl5000_ht_params = {
71 .ht_greenfield_support = true,
72 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
73};
74
75static const struct iwl_eeprom_params iwl5000_eeprom_params = {
76 .regulatory_bands = {
77 EEPROM_REG_BAND_1_CHANNELS,
78 EEPROM_REG_BAND_2_CHANNELS,
79 EEPROM_REG_BAND_3_CHANNELS,
80 EEPROM_REG_BAND_4_CHANNELS,
81 EEPROM_REG_BAND_5_CHANNELS,
82 EEPROM_REG_BAND_24_HT40_CHANNELS,
83 EEPROM_REG_BAND_52_HT40_CHANNELS
84 },
85};
86
87#define IWL_DEVICE_5000 \
88 .fw_name_pre = IWL5000_FW_PRE, \
89 .ucode_api_max = IWL5000_UCODE_API_MAX, \
90 .ucode_api_ok = IWL5000_UCODE_API_OK, \
91 .ucode_api_min = IWL5000_UCODE_API_MIN, \
92 .device_family = IWL_DEVICE_FAMILY_5000, \
93 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
94 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
95 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
96 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
97 .base_params = &iwl5000_base_params, \
98 .eeprom_params = &iwl5000_eeprom_params, \
99 .led_mode = IWL_LED_BLINK
100
101const struct iwl_cfg iwl5300_agn_cfg = {
102 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
103 IWL_DEVICE_5000,
104 /* at least EEPROM 0x11A has wrong info */
105 .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
106 .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
107 .ht_params = &iwl5000_ht_params,
108};
109
110const struct iwl_cfg iwl5100_bgn_cfg = {
111 .name = "Intel(R) WiFi Link 5100 BGN",
112 IWL_DEVICE_5000,
113 .valid_tx_ant = ANT_B, /* .cfg overwrite */
114 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
115 .ht_params = &iwl5000_ht_params,
116};
117
118const struct iwl_cfg iwl5100_abg_cfg = {
119 .name = "Intel(R) WiFi Link 5100 ABG",
120 IWL_DEVICE_5000,
121 .valid_tx_ant = ANT_B, /* .cfg overwrite */
122 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
123};
124
125const struct iwl_cfg iwl5100_agn_cfg = {
126 .name = "Intel(R) WiFi Link 5100 AGN",
127 IWL_DEVICE_5000,
128 .valid_tx_ant = ANT_B, /* .cfg overwrite */
129 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
130 .ht_params = &iwl5000_ht_params,
131};
132
133const struct iwl_cfg iwl5350_agn_cfg = {
134 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
135 .fw_name_pre = IWL5000_FW_PRE,
136 .ucode_api_max = IWL5000_UCODE_API_MAX,
137 .ucode_api_ok = IWL5000_UCODE_API_OK,
138 .ucode_api_min = IWL5000_UCODE_API_MIN,
139 .device_family = IWL_DEVICE_FAMILY_5000,
140 .max_inst_size = IWLAGN_RTC_INST_SIZE,
141 .max_data_size = IWLAGN_RTC_DATA_SIZE,
142 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
143 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
144 .base_params = &iwl5000_base_params,
145 .eeprom_params = &iwl5000_eeprom_params,
146 .ht_params = &iwl5000_ht_params,
147 .led_mode = IWL_LED_BLINK,
148 .internal_wimax_coex = true,
149};
150
151#define IWL_DEVICE_5150 \
152 .fw_name_pre = IWL5150_FW_PRE, \
153 .ucode_api_max = IWL5150_UCODE_API_MAX, \
154 .ucode_api_ok = IWL5150_UCODE_API_OK, \
155 .ucode_api_min = IWL5150_UCODE_API_MIN, \
156 .device_family = IWL_DEVICE_FAMILY_5150, \
157 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
158 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
159 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
160 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
161 .base_params = &iwl5000_base_params, \
162 .eeprom_params = &iwl5000_eeprom_params, \
163 .no_xtal_calib = true, \
164 .led_mode = IWL_LED_BLINK, \
165 .internal_wimax_coex = true
166
167const struct iwl_cfg iwl5150_agn_cfg = {
168 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
169 IWL_DEVICE_5150,
170 .ht_params = &iwl5000_ht_params,
171
172};
173
174const struct iwl_cfg iwl5150_abg_cfg = {
175 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
176 IWL_DEVICE_5150,
177};
178
179MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
180MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
new file mode 100644
index 00000000000..8dd8a6fe61e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -0,0 +1,383 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28#include <linux/stringify.h>
29#include "iwl-config.h"
30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */
33
34/* Highest firmware API version supported */
35#define IWL6000_UCODE_API_MAX 6
36#define IWL6050_UCODE_API_MAX 5
37#define IWL6000G2_UCODE_API_MAX 6
38
39/* Oldest version we won't warn about */
40#define IWL6000_UCODE_API_OK 4
41#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6
44
45/* Lowest firmware API version supported */
46#define IWL6000_UCODE_API_MIN 4
47#define IWL6050_UCODE_API_MIN 4
48#define IWL6000G2_UCODE_API_MIN 4
49
50/* EEPROM versions */
51#define EEPROM_6000_TX_POWER_VERSION (4)
52#define EEPROM_6000_EEPROM_VERSION (0x423)
53#define EEPROM_6050_TX_POWER_VERSION (4)
54#define EEPROM_6050_EEPROM_VERSION (0x532)
55#define EEPROM_6150_TX_POWER_VERSION (6)
56#define EEPROM_6150_EEPROM_VERSION (0x553)
57#define EEPROM_6005_TX_POWER_VERSION (6)
58#define EEPROM_6005_EEPROM_VERSION (0x709)
59#define EEPROM_6030_TX_POWER_VERSION (6)
60#define EEPROM_6030_EEPROM_VERSION (0x709)
61#define EEPROM_6035_TX_POWER_VERSION (6)
62#define EEPROM_6035_EEPROM_VERSION (0x753)
63
64#define IWL6000_FW_PRE "iwlwifi-6000-"
65#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
66
67#define IWL6050_FW_PRE "iwlwifi-6050-"
68#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
69
70#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
71#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
72
73#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
74#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
75
76static const struct iwl_base_params iwl6000_base_params = {
77 .eeprom_size = OTP_LOW_IMAGE_SIZE,
78 .num_of_queues = IWLAGN_NUM_QUEUES,
79 .pll_cfg_val = 0,
80 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
81 .shadow_ram_support = true,
82 .led_compensation = 51,
83 .adv_thermal_throttle = true,
84 .support_ct_kill_exit = true,
85 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
86 .chain_noise_scale = 1000,
87 .wd_timeout = IWL_DEF_WD_TIMEOUT,
88 .max_event_log_size = 512,
89 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
90};
91
92static const struct iwl_base_params iwl6050_base_params = {
93 .eeprom_size = OTP_LOW_IMAGE_SIZE,
94 .num_of_queues = IWLAGN_NUM_QUEUES,
95 .pll_cfg_val = 0,
96 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
97 .shadow_ram_support = true,
98 .led_compensation = 51,
99 .adv_thermal_throttle = true,
100 .support_ct_kill_exit = true,
101 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
102 .chain_noise_scale = 1500,
103 .wd_timeout = IWL_DEF_WD_TIMEOUT,
104 .max_event_log_size = 1024,
105 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
106};
107
108static const struct iwl_base_params iwl6000_g2_base_params = {
109 .eeprom_size = OTP_LOW_IMAGE_SIZE,
110 .num_of_queues = IWLAGN_NUM_QUEUES,
111 .pll_cfg_val = 0,
112 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
113 .shadow_ram_support = true,
114 .led_compensation = 57,
115 .adv_thermal_throttle = true,
116 .support_ct_kill_exit = true,
117 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
118 .chain_noise_scale = 1000,
119 .wd_timeout = IWL_LONG_WD_TIMEOUT,
120 .max_event_log_size = 512,
121 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
122};
123
124static const struct iwl_ht_params iwl6000_ht_params = {
125 .ht_greenfield_support = true,
126 .use_rts_for_aggregation = true, /* use rts/cts protection */
127 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
128};
129
130static const struct iwl_bt_params iwl6000_bt_params = {
131 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
132 .advanced_bt_coexist = true,
133 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
134 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
135 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
136 .bt_sco_disable = true,
137};
138
139static const struct iwl_eeprom_params iwl6000_eeprom_params = {
140 .regulatory_bands = {
141 EEPROM_REG_BAND_1_CHANNELS,
142 EEPROM_REG_BAND_2_CHANNELS,
143 EEPROM_REG_BAND_3_CHANNELS,
144 EEPROM_REG_BAND_4_CHANNELS,
145 EEPROM_REG_BAND_5_CHANNELS,
146 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
147 EEPROM_REG_BAND_52_HT40_CHANNELS
148 },
149 .enhanced_txpower = true,
150};
151
152#define IWL_DEVICE_6005 \
153 .fw_name_pre = IWL6005_FW_PRE, \
154 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
155 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
156 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
157 .device_family = IWL_DEVICE_FAMILY_6005, \
158 .max_inst_size = IWL60_RTC_INST_SIZE, \
159 .max_data_size = IWL60_RTC_DATA_SIZE, \
160 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
161 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
162 .base_params = &iwl6000_g2_base_params, \
163 .eeprom_params = &iwl6000_eeprom_params, \
164 .need_temp_offset_calib = true, \
165 .led_mode = IWL_LED_RF_STATE
166
167const struct iwl_cfg iwl6005_2agn_cfg = {
168 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
169 IWL_DEVICE_6005,
170 .ht_params = &iwl6000_ht_params,
171};
172
173const struct iwl_cfg iwl6005_2abg_cfg = {
174 .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
175 IWL_DEVICE_6005,
176};
177
178const struct iwl_cfg iwl6005_2bg_cfg = {
179 .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
180 IWL_DEVICE_6005,
181};
182
183const struct iwl_cfg iwl6005_2agn_sff_cfg = {
184 .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
185 IWL_DEVICE_6005,
186 .ht_params = &iwl6000_ht_params,
187};
188
189const struct iwl_cfg iwl6005_2agn_d_cfg = {
190 .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
191 IWL_DEVICE_6005,
192 .ht_params = &iwl6000_ht_params,
193};
194
195const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
196 .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
197 IWL_DEVICE_6005,
198 .ht_params = &iwl6000_ht_params,
199};
200
201const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
202 .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
203 IWL_DEVICE_6005,
204 .ht_params = &iwl6000_ht_params,
205};
206
207#define IWL_DEVICE_6030 \
208 .fw_name_pre = IWL6030_FW_PRE, \
209 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
210 .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
211 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
212 .device_family = IWL_DEVICE_FAMILY_6030, \
213 .max_inst_size = IWL60_RTC_INST_SIZE, \
214 .max_data_size = IWL60_RTC_DATA_SIZE, \
215 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
216 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
217 .base_params = &iwl6000_g2_base_params, \
218 .bt_params = &iwl6000_bt_params, \
219 .eeprom_params = &iwl6000_eeprom_params, \
220 .need_temp_offset_calib = true, \
221 .led_mode = IWL_LED_RF_STATE, \
222 .adv_pm = true \
223
224const struct iwl_cfg iwl6030_2agn_cfg = {
225 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
226 IWL_DEVICE_6030,
227 .ht_params = &iwl6000_ht_params,
228};
229
230const struct iwl_cfg iwl6030_2abg_cfg = {
231 .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
232 IWL_DEVICE_6030,
233};
234
235const struct iwl_cfg iwl6030_2bgn_cfg = {
236 .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
237 IWL_DEVICE_6030,
238 .ht_params = &iwl6000_ht_params,
239};
240
241const struct iwl_cfg iwl6030_2bg_cfg = {
242 .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
243 IWL_DEVICE_6030,
244};
245
246const struct iwl_cfg iwl6035_2agn_cfg = {
247 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
248 IWL_DEVICE_6030,
249 .ht_params = &iwl6000_ht_params,
250};
251
252const struct iwl_cfg iwl1030_bgn_cfg = {
253 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
254 IWL_DEVICE_6030,
255 .ht_params = &iwl6000_ht_params,
256};
257
258const struct iwl_cfg iwl1030_bg_cfg = {
259 .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
260 IWL_DEVICE_6030,
261};
262
263const struct iwl_cfg iwl130_bgn_cfg = {
264 .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
265 IWL_DEVICE_6030,
266 .ht_params = &iwl6000_ht_params,
267 .rx_with_siso_diversity = true,
268};
269
270const struct iwl_cfg iwl130_bg_cfg = {
271 .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
272 IWL_DEVICE_6030,
273 .rx_with_siso_diversity = true,
274};
275
276/*
277 * "i": Internal configuration, use internal Power Amplifier
278 */
279#define IWL_DEVICE_6000i \
280 .fw_name_pre = IWL6000_FW_PRE, \
281 .ucode_api_max = IWL6000_UCODE_API_MAX, \
282 .ucode_api_ok = IWL6000_UCODE_API_OK, \
283 .ucode_api_min = IWL6000_UCODE_API_MIN, \
284 .device_family = IWL_DEVICE_FAMILY_6000i, \
285 .max_inst_size = IWL60_RTC_INST_SIZE, \
286 .max_data_size = IWL60_RTC_DATA_SIZE, \
287 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
288 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
289 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
290 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
291 .base_params = &iwl6000_base_params, \
292 .eeprom_params = &iwl6000_eeprom_params, \
293 .led_mode = IWL_LED_BLINK
294
295const struct iwl_cfg iwl6000i_2agn_cfg = {
296 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
297 IWL_DEVICE_6000i,
298 .ht_params = &iwl6000_ht_params,
299};
300
301const struct iwl_cfg iwl6000i_2abg_cfg = {
302 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
303 IWL_DEVICE_6000i,
304};
305
306const struct iwl_cfg iwl6000i_2bg_cfg = {
307 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
308 IWL_DEVICE_6000i,
309};
310
311#define IWL_DEVICE_6050 \
312 .fw_name_pre = IWL6050_FW_PRE, \
313 .ucode_api_max = IWL6050_UCODE_API_MAX, \
314 .ucode_api_min = IWL6050_UCODE_API_MIN, \
315 .device_family = IWL_DEVICE_FAMILY_6050, \
316 .max_inst_size = IWL60_RTC_INST_SIZE, \
317 .max_data_size = IWL60_RTC_DATA_SIZE, \
318 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
319 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
320 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
321 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
322 .base_params = &iwl6050_base_params, \
323 .eeprom_params = &iwl6000_eeprom_params, \
324 .led_mode = IWL_LED_BLINK, \
325 .internal_wimax_coex = true
326
327const struct iwl_cfg iwl6050_2agn_cfg = {
328 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
329 IWL_DEVICE_6050,
330 .ht_params = &iwl6000_ht_params,
331};
332
333const struct iwl_cfg iwl6050_2abg_cfg = {
334 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
335 IWL_DEVICE_6050,
336};
337
338#define IWL_DEVICE_6150 \
339 .fw_name_pre = IWL6050_FW_PRE, \
340 .ucode_api_max = IWL6050_UCODE_API_MAX, \
341 .ucode_api_min = IWL6050_UCODE_API_MIN, \
342 .device_family = IWL_DEVICE_FAMILY_6150, \
343 .max_inst_size = IWL60_RTC_INST_SIZE, \
344 .max_data_size = IWL60_RTC_DATA_SIZE, \
345 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
346 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
347 .base_params = &iwl6050_base_params, \
348 .eeprom_params = &iwl6000_eeprom_params, \
349 .led_mode = IWL_LED_BLINK, \
350 .internal_wimax_coex = true
351
352const struct iwl_cfg iwl6150_bgn_cfg = {
353 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
354 IWL_DEVICE_6150,
355 .ht_params = &iwl6000_ht_params,
356};
357
358const struct iwl_cfg iwl6150_bg_cfg = {
359 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
360 IWL_DEVICE_6150,
361};
362
363const struct iwl_cfg iwl6000_3agn_cfg = {
364 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
365 .fw_name_pre = IWL6000_FW_PRE,
366 .ucode_api_max = IWL6000_UCODE_API_MAX,
367 .ucode_api_ok = IWL6000_UCODE_API_OK,
368 .ucode_api_min = IWL6000_UCODE_API_MIN,
369 .device_family = IWL_DEVICE_FAMILY_6000,
370 .max_inst_size = IWL60_RTC_INST_SIZE,
371 .max_data_size = IWL60_RTC_DATA_SIZE,
372 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
373 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
374 .base_params = &iwl6000_base_params,
375 .eeprom_params = &iwl6000_eeprom_params,
376 .ht_params = &iwl6000_ht_params,
377 .led_mode = IWL_LED_BLINK,
378};
379
380MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
381MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
382MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
383MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
new file mode 100644
index 00000000000..82152311d73
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
@@ -0,0 +1,113 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_pci_h__
64#define __iwl_pci_h__
65
66
67/*
68 * This file declares the config structures for all devices.
69 */
70
71extern const struct iwl_cfg iwl5300_agn_cfg;
72extern const struct iwl_cfg iwl5100_agn_cfg;
73extern const struct iwl_cfg iwl5350_agn_cfg;
74extern const struct iwl_cfg iwl5100_bgn_cfg;
75extern const struct iwl_cfg iwl5100_abg_cfg;
76extern const struct iwl_cfg iwl5150_agn_cfg;
77extern const struct iwl_cfg iwl5150_abg_cfg;
78extern const struct iwl_cfg iwl6005_2agn_cfg;
79extern const struct iwl_cfg iwl6005_2abg_cfg;
80extern const struct iwl_cfg iwl6005_2bg_cfg;
81extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
82extern const struct iwl_cfg iwl6005_2agn_d_cfg;
83extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
84extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
85extern const struct iwl_cfg iwl1030_bgn_cfg;
86extern const struct iwl_cfg iwl1030_bg_cfg;
87extern const struct iwl_cfg iwl6030_2agn_cfg;
88extern const struct iwl_cfg iwl6030_2abg_cfg;
89extern const struct iwl_cfg iwl6030_2bgn_cfg;
90extern const struct iwl_cfg iwl6030_2bg_cfg;
91extern const struct iwl_cfg iwl6000i_2agn_cfg;
92extern const struct iwl_cfg iwl6000i_2abg_cfg;
93extern const struct iwl_cfg iwl6000i_2bg_cfg;
94extern const struct iwl_cfg iwl6000_3agn_cfg;
95extern const struct iwl_cfg iwl6050_2agn_cfg;
96extern const struct iwl_cfg iwl6050_2abg_cfg;
97extern const struct iwl_cfg iwl6150_bgn_cfg;
98extern const struct iwl_cfg iwl6150_bg_cfg;
99extern const struct iwl_cfg iwl1000_bgn_cfg;
100extern const struct iwl_cfg iwl1000_bg_cfg;
101extern const struct iwl_cfg iwl100_bgn_cfg;
102extern const struct iwl_cfg iwl100_bg_cfg;
103extern const struct iwl_cfg iwl130_bgn_cfg;
104extern const struct iwl_cfg iwl130_bg_cfg;
105extern const struct iwl_cfg iwl2000_2bgn_cfg;
106extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
107extern const struct iwl_cfg iwl2030_2bgn_cfg;
108extern const struct iwl_cfg iwl6035_2agn_cfg;
109extern const struct iwl_cfg iwl105_bgn_cfg;
110extern const struct iwl_cfg iwl105_bgn_d_cfg;
111extern const struct iwl_cfg iwl135_bgn_cfg;
112
113#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
new file mode 100644
index 00000000000..f4c3500b68c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -0,0 +1,380 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/pci-aspm.h>
69
70#include "iwl-trans.h"
71#include "iwl-drv.h"
72#include "iwl-trans.h"
73
74#include "cfg.h"
75#include "internal.h"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
82/* Hardware specific file defines the PCI IDs table for that hardware module */
83static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
84 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
85 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
86 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
87 {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
88 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
89 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
90 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
91 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
92 {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
93 {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
94 {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
95 {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
96 {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
97 {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
98 {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
99 {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
100 {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
101 {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
102 {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
103 {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
104 {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
105 {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
106 {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
107 {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
108
109/* 5300 Series WiFi */
110 {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
111 {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
112 {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
113 {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
114 {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
115 {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
116 {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
117 {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
118 {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
119 {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
120 {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
121 {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
122
123/* 5350 Series WiFi/WiMax */
124 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
125 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
126 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
127
128/* 5150 Series Wifi/WiMax */
129 {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
130 {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
131 {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
132 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
133 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
134 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
135
136 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
137 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
138 {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
139 {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
140
141/* 6x00 Series */
142 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
143 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
144 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
145 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
146 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
147 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
148 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
149 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
150 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
151 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
152
153/* 6x05 Series */
154 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
155 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
156 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
157 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
158 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
159 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
160 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
161 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
162 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
163 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
164 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
165 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
166
167/* 6x30 Series */
168 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
169 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
170 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
171 {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
172 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
173 {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
174 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
175 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
176 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
177 {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
178 {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
179 {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
180 {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
181 {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
182 {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
183 {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
184
185/* 6x50 WiFi/WiMax Series */
186 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
187 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
188 {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
189 {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
190 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
191 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
192
193/* 6150 WiFi/WiMax Series */
194 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
195 {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
196 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
197 {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
198 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
199 {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
200
201/* 1000 Series WiFi */
202 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
203 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
204 {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
205 {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
206 {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
207 {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
208 {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
209 {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
210 {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
211 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
212 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
213 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
214
215/* 100 Series WiFi */
216 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
217 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
218 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
219 {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
220 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
221 {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
222
223/* 130 Series WiFi */
224 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
225 {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
226 {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
227 {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
228 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
229 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
230
231/* 2x00 Series */
232 {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
233 {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
234 {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
235 {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
236
237/* 2x30 Series */
238 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
239 {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
240 {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
241
242/* 6x35 Series */
243 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
244 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
245 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
246 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
247
248/* 105 Series */
249 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
250 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
251 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
252 {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
253
254/* 135 Series */
255 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
256 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
257 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
258
259 {0}
260};
261MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
262
263/* PCI registers */
264#define PCI_CFG_RETRY_TIMEOUT 0x041
265
266#ifndef CONFIG_IWLWIFI_IDI
267
268static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
269{
270 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
271 struct iwl_trans *iwl_trans;
272 struct iwl_trans_pcie *trans_pcie;
273
274 iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
275 if (iwl_trans == NULL)
276 return -ENOMEM;
277
278 pci_set_drvdata(pdev, iwl_trans);
279
280 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
281 trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
282 if (!trans_pcie->drv)
283 goto out_free_trans;
284
285 return 0;
286
287out_free_trans:
288 iwl_trans_pcie_free(iwl_trans);
289 pci_set_drvdata(pdev, NULL);
290 return -EFAULT;
291}
292
293static void __devexit iwl_pci_remove(struct pci_dev *pdev)
294{
295 struct iwl_trans *trans = pci_get_drvdata(pdev);
296 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
297
298 iwl_drv_stop(trans_pcie->drv);
299 iwl_trans_pcie_free(trans);
300
301 pci_set_drvdata(pdev, NULL);
302}
303
304#endif /* CONFIG_IWLWIFI_IDI */
305
306#ifdef CONFIG_PM_SLEEP
307
308static int iwl_pci_suspend(struct device *device)
309{
310 struct pci_dev *pdev = to_pci_dev(device);
311 struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
312
313 /* Before you put code here, think about WoWLAN. You cannot check here
314 * whether WoWLAN is enabled or not, and your code will run even if
315 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
316 */
317
318 return iwl_trans_suspend(iwl_trans);
319}
320
321static int iwl_pci_resume(struct device *device)
322{
323 struct pci_dev *pdev = to_pci_dev(device);
324 struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
325
326 /* Before you put code here, think about WoWLAN. You cannot check here
327 * whether WoWLAN is enabled or not, and your code will run even if
328 * WoWLAN is enabled - the NIC may be alive.
329 */
330
331 /*
332 * We disable the RETRY_TIMEOUT register (0x41) to keep
333 * PCI Tx retries from interfering with C3 CPU state.
334 */
335 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
336
337 return iwl_trans_resume(iwl_trans);
338}
339
340static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
341
342#define IWL_PM_OPS (&iwl_dev_pm_ops)
343
344#else
345
346#define IWL_PM_OPS NULL
347
348#endif
349
350#ifdef CONFIG_IWLWIFI_IDI
351/*
352 * Defined externally in iwl-idi.c
353 */
354int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
355void __devexit iwl_pci_remove(struct pci_dev *pdev);
356
357#endif /* CONFIG_IWLWIFI_IDI */
358
359static struct pci_driver iwl_pci_driver = {
360 .name = DRV_NAME,
361 .id_table = iwl_hw_card_ids,
362 .probe = iwl_pci_probe,
363 .remove = __devexit_p(iwl_pci_remove),
364 .driver.pm = IWL_PM_OPS,
365};
366
367int __must_check iwl_pci_register_driver(void)
368{
369 int ret;
370 ret = pci_register_driver(&iwl_pci_driver);
371 if (ret)
372 pr_err("Unable to initialize PCI module\n");
373
374 return ret;
375}
376
377void iwl_pci_unregister_driver(void)
378{
379 pci_unregister_driver(&iwl_pci_driver);
380}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
new file mode 100644
index 00000000000..94201c4d622
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -0,0 +1,447 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_trans_int_pcie_h__
30#define __iwl_trans_int_pcie_h__
31
32#include <linux/spinlock.h>
33#include <linux/interrupt.h>
34#include <linux/skbuff.h>
35#include <linux/wait.h>
36#include <linux/pci.h>
37#include <linux/timer.h>
38
39#include "iwl-fh.h"
40#include "iwl-csr.h"
41#include "iwl-trans.h"
42#include "iwl-debug.h"
43#include "iwl-io.h"
44#include "iwl-op-mode.h"
45
46struct iwl_host_cmd;
47
48/*This file includes the declaration that are internal to the
49 * trans_pcie layer */
50
51struct iwl_rx_mem_buffer {
52 dma_addr_t page_dma;
53 struct page *page;
54 struct list_head list;
55};
56
57/**
58 * struct isr_statistics - interrupt statistics
59 *
60 */
61struct isr_statistics {
62 u32 hw;
63 u32 sw;
64 u32 err_code;
65 u32 sch;
66 u32 alive;
67 u32 rfkill;
68 u32 ctkill;
69 u32 wakeup;
70 u32 rx;
71 u32 tx;
72 u32 unhandled;
73};
74
75/**
76 * struct iwl_rx_queue - Rx queue
77 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
78 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
79 * @pool:
80 * @queue:
81 * @read: Shared index to newest available Rx buffer
82 * @write: Shared index to oldest written Rx packet
83 * @free_count: Number of pre-allocated buffers in rx_free
84 * @write_actual:
85 * @rx_free: list of free SKBs for use
86 * @rx_used: List of Rx buffers with no SKB
87 * @need_update: flag to indicate we need to update read/write index
88 * @rb_stts: driver's pointer to receive buffer status
89 * @rb_stts_dma: bus address of receive buffer status
90 * @lock:
91 *
92 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
93 */
94struct iwl_rx_queue {
95 __le32 *bd;
96 dma_addr_t bd_dma;
97 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
98 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
99 u32 read;
100 u32 write;
101 u32 free_count;
102 u32 write_actual;
103 struct list_head rx_free;
104 struct list_head rx_used;
105 int need_update;
106 struct iwl_rb_status *rb_stts;
107 dma_addr_t rb_stts_dma;
108 spinlock_t lock;
109};
110
111struct iwl_dma_ptr {
112 dma_addr_t dma;
113 void *addr;
114 size_t size;
115};
116
117/**
118 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
119 * @index -- current index
120 * @n_bd -- total number of entries in queue (must be power of 2)
121 */
122static inline int iwl_queue_inc_wrap(int index, int n_bd)
123{
124 return ++index & (n_bd - 1);
125}
126
127/**
128 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
129 * @index -- current index
130 * @n_bd -- total number of entries in queue (must be power of 2)
131 */
132static inline int iwl_queue_dec_wrap(int index, int n_bd)
133{
134 return --index & (n_bd - 1);
135}
136
137struct iwl_cmd_meta {
138 /* only for SYNC commands, iff the reply skb is wanted */
139 struct iwl_host_cmd *source;
140
141 DEFINE_DMA_UNMAP_ADDR(mapping);
142 DEFINE_DMA_UNMAP_LEN(len);
143
144 u32 flags;
145};
146
147/*
148 * Generic queue structure
149 *
150 * Contains common data for Rx and Tx queues.
151 *
152 * Note the difference between n_bd and n_window: the hardware
153 * always assumes 256 descriptors, so n_bd is always 256 (unless
154 * there might be HW changes in the future). For the normal TX
155 * queues, n_window, which is the size of the software queue data
156 * is also 256; however, for the command queue, n_window is only
157 * 32 since we don't need so many commands pending. Since the HW
158 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
159 * the software buffers (in the variables @meta, @txb in struct
160 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
161 * in the same struct) have 256.
162 * This means that we end up with the following:
163 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
164 * SW entries: | 0 | ... | 31 |
165 * where N is a number between 0 and 7. This means that the SW
166 * data is a window overlayed over the HW queue.
167 */
168struct iwl_queue {
169 int n_bd; /* number of BDs in this queue */
170 int write_ptr; /* 1-st empty entry (index) host_w*/
171 int read_ptr; /* last used entry (index) host_r*/
172 /* use for monitoring and recovering the stuck queue */
173 dma_addr_t dma_addr; /* physical addr for BD's */
174 int n_window; /* safe queue window */
175 u32 id;
176 int low_mark; /* low watermark, resume queue if free
177 * space more than this */
178 int high_mark; /* high watermark, stop queue if free
179 * space less than this */
180};
181
182#define TFD_TX_CMD_SLOTS 256
183#define TFD_CMD_SLOTS 32
184
185struct iwl_pcie_tx_queue_entry {
186 struct iwl_device_cmd *cmd;
187 struct sk_buff *skb;
188 struct iwl_cmd_meta meta;
189};
190
191/**
192 * struct iwl_tx_queue - Tx Queue for DMA
193 * @q: generic Rx/Tx queue descriptor
194 * @tfds: transmit frame descriptors (DMA memory)
195 * @entries: transmit entries (driver state)
196 * @lock: queue lock
197 * @stuck_timer: timer that fires if queue gets stuck
198 * @trans_pcie: pointer back to transport (for timer)
199 * @need_update: indicates need to update read/write index
200 * @active: stores if queue is active
201 *
202 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
203 * descriptors) and required locking structures.
204 */
205struct iwl_tx_queue {
206 struct iwl_queue q;
207 struct iwl_tfd *tfds;
208 struct iwl_pcie_tx_queue_entry *entries;
209 spinlock_t lock;
210 struct timer_list stuck_timer;
211 struct iwl_trans_pcie *trans_pcie;
212 u8 need_update;
213 u8 active;
214};
215
216/**
217 * struct iwl_trans_pcie - PCIe transport specific data
218 * @rxq: all the RX queue data
219 * @rx_replenish: work that will be called when buffers need to be allocated
220 * @drv - pointer to iwl_drv
221 * @trans: pointer to the generic transport area
222 * @irq - the irq number for the device
223 * @irq_requested: true when the irq has been requested
224 * @scd_base_addr: scheduler sram base address in SRAM
225 * @scd_bc_tbls: pointer to the byte count table of the scheduler
226 * @kw: keep warm address
227 * @pci_dev: basic pci-network driver stuff
228 * @hw_base: pci hardware address support
229 * @ucode_write_complete: indicates that the ucode has been copied.
230 * @ucode_write_waitq: wait queue for uCode load
231 * @status - transport specific status flags
232 * @cmd_queue - command queue number
233 * @rx_buf_size_8k: 8 kB RX buffer size
234 * @rx_page_order: page order for receive buffer size
235 * @wd_timeout: queue watchdog timeout (jiffies)
236 */
237struct iwl_trans_pcie {
238 struct iwl_rx_queue rxq;
239 struct work_struct rx_replenish;
240 struct iwl_trans *trans;
241 struct iwl_drv *drv;
242
243 /* INT ICT Table */
244 __le32 *ict_tbl;
245 dma_addr_t ict_tbl_dma;
246 int ict_index;
247 u32 inta;
248 bool use_ict;
249 bool irq_requested;
250 struct tasklet_struct irq_tasklet;
251 struct isr_statistics isr_stats;
252
253 unsigned int irq;
254 spinlock_t irq_lock;
255 u32 inta_mask;
256 u32 scd_base_addr;
257 struct iwl_dma_ptr scd_bc_tbls;
258 struct iwl_dma_ptr kw;
259
260 struct iwl_tx_queue *txq;
261 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
262 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
263
264 /* PCI bus related data */
265 struct pci_dev *pci_dev;
266 void __iomem *hw_base;
267
268 bool ucode_write_complete;
269 wait_queue_head_t ucode_write_waitq;
270 unsigned long status;
271 u8 cmd_queue;
272 u8 n_no_reclaim_cmds;
273 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
274 u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
275 u8 n_q_to_fifo;
276
277 bool rx_buf_size_8k;
278 u32 rx_page_order;
279
280 const char **command_names;
281
282 /* queue watchdog */
283 unsigned long wd_timeout;
284};
285
286/*****************************************************
287* DRIVER STATUS FUNCTIONS
288******************************************************/
289#define STATUS_HCMD_ACTIVE 0
290#define STATUS_DEVICE_ENABLED 1
291#define STATUS_TPOWER_PMI 2
292#define STATUS_INT_ENABLED 3
293
294#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
295 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
296
297static inline struct iwl_trans *
298iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
299{
300 return container_of((void *)trans_pcie, struct iwl_trans,
301 trans_specific);
302}
303
304struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
305 const struct pci_device_id *ent,
306 const struct iwl_cfg *cfg);
307void iwl_trans_pcie_free(struct iwl_trans *trans);
308
309/*****************************************************
310* RX
311******************************************************/
312void iwl_bg_rx_replenish(struct work_struct *data);
313void iwl_irq_tasklet(struct iwl_trans *trans);
314void iwlagn_rx_replenish(struct iwl_trans *trans);
315void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
316 struct iwl_rx_queue *q);
317
318/*****************************************************
319* ICT
320******************************************************/
321void iwl_reset_ict(struct iwl_trans *trans);
322void iwl_disable_ict(struct iwl_trans *trans);
323int iwl_alloc_isr_ict(struct iwl_trans *trans);
324void iwl_free_isr_ict(struct iwl_trans *trans);
325irqreturn_t iwl_isr_ict(int irq, void *data);
326
327/*****************************************************
328* TX / HCMD
329******************************************************/
330void iwl_txq_update_write_ptr(struct iwl_trans *trans,
331 struct iwl_tx_queue *txq);
332int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
333 struct iwl_tx_queue *txq,
334 dma_addr_t addr, u16 len, u8 reset);
335int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
336int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
337void iwl_tx_cmd_complete(struct iwl_trans *trans,
338 struct iwl_rx_cmd_buffer *rxb, int handler_status);
339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
340 struct iwl_tx_queue *txq,
341 u16 byte_cnt);
342void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
343void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
344void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
345 struct iwl_tx_queue *txq,
346 int tx_fifo_id, bool active);
347void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
348 int fifo, int sta_id, int tid,
349 int frame_limit, u16 ssn);
350void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
351 int sta_id, int tid, int frame_limit, u16 ssn);
352void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
353 enum dma_data_direction dma_dir);
354int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
355 struct sk_buff_head *skbs);
356int iwl_queue_space(const struct iwl_queue *q);
357
358/*****************************************************
359* Error handling
360******************************************************/
361int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
362void iwl_dump_csr(struct iwl_trans *trans);
363
364/*****************************************************
365* Helpers
366******************************************************/
/*
 * iwl_disable_interrupts - mask and acknowledge all device interrupts
 *
 * STATUS_INT_ENABLED is cleared first so the tasklet's exit path knows
 * not to re-enable interrupts (see the tail of iwl_irq_tasklet), then
 * everything is masked and any still-pending causes are ACKed in both
 * CSR_INT and the flow-handler status register.
 */
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
381
/*
 * iwl_enable_interrupts - unmask the driver's active interrupt set
 *
 * STATUS_INT_ENABLED is set *before* the unmask write, so any code
 * that observes an interrupt immediately afterwards already sees a
 * consistent "interrupts are on" software state.
 */
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	/* inta_mask holds the set of causes the driver wants to see */
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
390
/*
 * iwl_enable_rfkill_int - unmask ONLY the RF-kill interrupt
 *
 * Used when all other interrupts must remain masked but the driver
 * still needs to notice RF-kill switch toggles (see the exit path of
 * iwl_irq_tasklet).  Note: STATUS_INT_ENABLED is deliberately not set.
 */
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
396
397static inline void iwl_wake_queue(struct iwl_trans *trans,
398 struct iwl_tx_queue *txq)
399{
400 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
401
402 if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
403 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
404 iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
405 }
406}
407
408static inline void iwl_stop_queue(struct iwl_trans *trans,
409 struct iwl_tx_queue *txq)
410{
411 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
412
413 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
414 iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
415 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
416 } else
417 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
418 txq->q.id);
419}
420
421static inline int iwl_queue_used(const struct iwl_queue *q, int i)
422{
423 return q->write_ptr >= q->read_ptr ?
424 (i >= q->read_ptr && i < q->write_ptr) :
425 !(i < q->read_ptr && i >= q->write_ptr);
426}
427
428static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
429{
430 return index & (q->n_window - 1);
431}
432
433static inline const char *
434trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
435{
436 if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
437 return "UNKNOWN";
438 return trans_pcie->command_names[cmd];
439}
440
441static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
442{
443 return !(iwl_read32(trans, CSR_GP_CNTRL) &
444 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
445}
446
447#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
new file mode 100644
index 00000000000..d6860c070c1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -0,0 +1,1058 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/sched.h>
30#include <linux/wait.h>
31#include <linux/gfp.h>
32
33#include "iwl-prph.h"
34#include "iwl-io.h"
35#include "internal.h"
36#include "iwl-op-mode.h"
37
38#ifdef CONFIG_IWLWIFI_IDI
39#include "iwl-amfh.h"
40#endif
41
42/******************************************************************************
43 *
44 * RX path functions
45 *
46 ******************************************************************************/
47
48/*
49 * Rx theory of operation
50 *
51 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
53 * used not only for Rx frames, but for any command response or notification
54 * from the NIC. The driver and NIC manage the Rx buffers by means
55 * of indexes into the circular buffer.
56 *
57 * Rx Queue Indexes
58 * The host/firmware share two index registers for managing the Rx buffers.
59 *
60 * The READ index maps to the first position that the firmware may be writing
61 * to -- the driver can read up to (but not including) this position and get
62 * good data.
63 * The READ index is managed by the firmware once the card is enabled.
64 *
65 * The WRITE index maps to the last position the driver has read from -- the
66 * position preceding WRITE is the last slot the firmware can place a packet.
67 *
68 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
69 * WRITE = READ.
70 *
71 * During initialization, the host sets up the READ queue position to the first
72 * INDEX position, and WRITE to the last (READ - 1 wrapped)
73 *
74 * When the firmware places a packet in a buffer, it will advance the READ index
75 * and fire the RX interrupt. The driver can then query the READ index and
76 * process as many packets as possible, moving the WRITE index forward as it
77 * resets the Rx queue buffers with new memory.
78 *
79 * The management in the driver is as follows:
80 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
81 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
82 * to replenish the iwl->rxq->rx_free.
83 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
84 * iwl->rxq is replenished and the READ INDEX is updated (updating the
85 * 'processed' and 'read' driver indexes as well)
86 * + A received packet is processed and handed to the kernel network stack,
87 * detached from the iwl->rxq. The driver 'processed' index is updated.
88 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
89 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
90 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
91 * were enough free buffers and RX_STALLED is set it is cleared.
92 *
93 *
94 * Driver sequence:
95 *
96 * iwl_rx_queue_alloc() Allocates rx_free
97 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
98 * iwl_rx_queue_restock
99 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
100 * queue, updates firmware pointers, and updates
101 * the WRITE index. If insufficient rx_free buffers
102 * are available, schedules iwl_rx_replenish
103 *
104 * -- enable interrupts --
105 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
106 * READ INDEX, detaching the SKB from the pool.
107 * Moves the packet buffer from queue to rx_used.
108 * Calls iwl_rx_queue_restock to refill any empty
109 * slots.
110 * ...
111 *
112 */
113
114/**
115 * iwl_rx_queue_space - Return number of free slots available in queue.
116 */
117static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
118{
119 int s = q->read - q->write;
120 if (s <= 0)
121 s += RX_QUEUE_SIZE;
122 /* keep some buffer to not confuse full and empty queue */
123 s -= 2;
124 if (s < 0)
125 s = 0;
126 return s;
127}
128
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Tells the device how far the driver has progressed by writing the
 * (8-aligned) write index into FH_RSCSR_CHNL0_WPTR.  When shadow
 * registers are not available and the device may be in power save,
 * a wakeup handshake via CSR_GP_CNTRL is required first.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				/* need_update stays set so the write is
				 * retried once the device has woken up */
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
182
183/**
184 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
185 */
186static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
187{
188 return cpu_to_le32((u32)(dma_addr >> 8));
189}
190
/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one -- buffers that
		 * are handed to the device never own a mapped page here */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it (free_count is read lock-free here; at worst this
	 * schedules one redundant replenish work item) */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(trans, rxq);
	}
}
243
244/**
245 * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
246 *
247 * When moving to rx_free an SKB is allocated for the slot.
248 *
249 * Also restock the Rx queue via iwl_rx_queue_restock.
250 * This is called as a scheduled work item (except for during initialization)
251 */
252static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
253{
254 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
255 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
256 struct list_head *element;
257 struct iwl_rx_mem_buffer *rxb;
258 struct page *page;
259 unsigned long flags;
260 gfp_t gfp_mask = priority;
261
262 while (1) {
263 spin_lock_irqsave(&rxq->lock, flags);
264 if (list_empty(&rxq->rx_used)) {
265 spin_unlock_irqrestore(&rxq->lock, flags);
266 return;
267 }
268 spin_unlock_irqrestore(&rxq->lock, flags);
269
270 if (rxq->free_count > RX_LOW_WATERMARK)
271 gfp_mask |= __GFP_NOWARN;
272
273 if (trans_pcie->rx_page_order > 0)
274 gfp_mask |= __GFP_COMP;
275
276 /* Alloc a new receive buffer */
277 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
278 if (!page) {
279 if (net_ratelimit())
280 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
281 "order: %d\n",
282 trans_pcie->rx_page_order);
283
284 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
285 net_ratelimit())
286 IWL_CRIT(trans, "Failed to alloc_pages with %s."
287 "Only %u free buffers remaining.\n",
288 priority == GFP_ATOMIC ?
289 "GFP_ATOMIC" : "GFP_KERNEL",
290 rxq->free_count);
291 /* We don't reschedule replenish work here -- we will
292 * call the restock method and if it still needs
293 * more buffers it will schedule replenish */
294 return;
295 }
296
297 spin_lock_irqsave(&rxq->lock, flags);
298
299 if (list_empty(&rxq->rx_used)) {
300 spin_unlock_irqrestore(&rxq->lock, flags);
301 __free_pages(page, trans_pcie->rx_page_order);
302 return;
303 }
304 element = rxq->rx_used.next;
305 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
306 list_del(element);
307
308 spin_unlock_irqrestore(&rxq->lock, flags);
309
310 BUG_ON(rxb->page);
311 rxb->page = page;
312 /* Get physical address of the RB */
313 rxb->page_dma =
314 dma_map_page(trans->dev, page, 0,
315 PAGE_SIZE << trans_pcie->rx_page_order,
316 DMA_FROM_DEVICE);
317 /* dma address must be no more than 36 bits */
318 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
319 /* and also 256 byte aligned! */
320 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
321
322 spin_lock_irqsave(&rxq->lock, flags);
323
324 list_add_tail(&rxb->list, &rxq->rx_free);
325 rxq->free_count++;
326
327 spin_unlock_irqrestore(&rxq->lock, flags);
328 }
329}
330
331void iwlagn_rx_replenish(struct iwl_trans *trans)
332{
333 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
334 unsigned long flags;
335
336 iwlagn_rx_allocate(trans, GFP_KERNEL);
337
338 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
339 iwlagn_rx_queue_restock(trans);
340 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
341}
342
/* Atomic-context counterpart of iwlagn_rx_replenish: allocates with
 * GFP_ATOMIC and restocks without taking irq_lock (called from
 * iwl_rx_handle on the interrupt tasklet path). */
static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
{
	iwlagn_rx_allocate(trans, GFP_ATOMIC);

	iwlagn_rx_queue_restock(trans);
}
349
350void iwl_bg_rx_replenish(struct work_struct *data)
351{
352 struct iwl_trans_pcie *trans_pcie =
353 container_of(data, struct iwl_trans_pcie, rx_replenish);
354
355 iwlagn_rx_replenish(trans_pcie->trans);
356}
357
358static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
359 struct iwl_rx_mem_buffer *rxb)
360{
361 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
362 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
363 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
364 unsigned long flags;
365 bool page_stolen = false;
366 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
367 u32 offset = 0;
368
369 if (WARN_ON(!rxb))
370 return;
371
372 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
373
374 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
375 struct iwl_rx_packet *pkt;
376 struct iwl_device_cmd *cmd;
377 u16 sequence;
378 bool reclaim;
379 int index, cmd_index, err, len;
380 struct iwl_rx_cmd_buffer rxcb = {
381 ._offset = offset,
382 ._page = rxb->page,
383 ._page_stolen = false,
384 .truesize = max_len,
385 };
386
387 pkt = rxb_addr(&rxcb);
388
389 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
390 break;
391
392 IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
393 rxcb._offset,
394 trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
395 pkt->hdr.cmd);
396
397 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
398 len += sizeof(u32); /* account for status word */
399 trace_iwlwifi_dev_rx(trans->dev, pkt, len);
400
401 /* Reclaim a command buffer only if this packet is a response
402 * to a (driver-originated) command.
403 * If the packet (e.g. Rx frame) originated from uCode,
404 * there is no command buffer to reclaim.
405 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
406 * but apparently a few don't get set; catch them here. */
407 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
408 if (reclaim) {
409 int i;
410
411 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
412 if (trans_pcie->no_reclaim_cmds[i] ==
413 pkt->hdr.cmd) {
414 reclaim = false;
415 break;
416 }
417 }
418 }
419
420 sequence = le16_to_cpu(pkt->hdr.sequence);
421 index = SEQ_TO_INDEX(sequence);
422 cmd_index = get_cmd_index(&txq->q, index);
423
424 if (reclaim)
425 cmd = txq->entries[cmd_index].cmd;
426 else
427 cmd = NULL;
428
429 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
430
431 /*
432 * After here, we should always check rxcb._page_stolen,
433 * if it is true then one of the handlers took the page.
434 */
435
436 if (reclaim) {
437 /* Invoke any callbacks, transfer the buffer to caller,
438 * and fire off the (possibly) blocking
439 * iwl_trans_send_cmd()
440 * as we reclaim the driver command queue */
441 if (!rxcb._page_stolen)
442 iwl_tx_cmd_complete(trans, &rxcb, err);
443 else
444 IWL_WARN(trans, "Claim null rxb?\n");
445 }
446
447 page_stolen |= rxcb._page_stolen;
448 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
449 }
450
451 /* page was stolen from us -- free our reference */
452 if (page_stolen) {
453 __free_pages(rxb->page, trans_pcie->rx_page_order);
454 rxb->page = NULL;
455 }
456
457 /* Reuse the page if possible. For notification packets and
458 * SKBs that fail to Rx correctly, add them back into the
459 * rx_free list for reuse later. */
460 spin_lock_irqsave(&rxq->lock, flags);
461 if (rxb->page != NULL) {
462 rxb->page_dma =
463 dma_map_page(trans->dev, rxb->page, 0,
464 PAGE_SIZE << trans_pcie->rx_page_order,
465 DMA_FROM_DEVICE);
466 list_add_tail(&rxb->list, &rxq->rx_free);
467 rxq->free_count++;
468 } else
469 list_add_tail(&rxb->list, &rxq->rx_used);
470 spin_unlock_irqrestore(&rxq->lock, flags);
471}
472
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 *
 * Walks the RX ring from the driver's read index up to the closed_rb_num
 * the firmware published in DRAM, replenishing every 8 buffers when more
 * than half the ring is empty.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	/* starts at the batch size (8) so the first processed buffer
	 * already triggers a replenish when fill_rx is set */
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_rx_handle_rxbuf(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(trans);
	else
		iwlagn_rx_queue_restock(trans);
}
536
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 *
 * Normally dumps CSR/FH state and reports a NIC error to the op_mode.
 * On devices with internal WiFi/WiMAX coexistence, an error that occurs
 * while WiMAX owns the RF is handled as a wimax_active event instead.
 */
static void iwl_irq_handle_error(struct iwl_trans *trans)
{
	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			APMG_PS_CTRL_VAL_RESET_REQ))) {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* abort any in-flight host command and wake its waiter */
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans->wait_command_queue);
		return;
	}

	iwl_dump_csr(trans);
	iwl_dump_fh(trans, NULL, false);

	iwl_op_mode_nic_error(trans->op_mode);
}
562
/*
 * iwl_irq_tasklet - interrupt bottom half for iwlagn
 *
 * Services the interrupt causes the ISR latched into trans_pcie->inta:
 * acks them in hardware, dispatches each cause, and finally re-enables
 * interrupts unless they were deliberately left disabled.
 */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		/* interrupts deliberately stay disabled; recovery is
		 * driven by iwl_irq_handle_error() above */
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		/* push out any write pointers deferred while the device
		 * was asleep (see iwl_rx_queue_update_write_ptr) */
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending RX interrupt require many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to RX race, driver could receive RX interrupt
		 * but the shared data changes does not reflect this;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);
#ifdef CONFIG_IWLWIFI_IDI
		iwl_amfh_rx_handler();
#else
		iwl_rx_handle(trans);
#endif
		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
}
764
765/******************************************************************************
766 *
767 * ICT functions
768 *
769 ******************************************************************************/
770
771/* a device (PCI-E) page is 4096 bytes long */
772#define ICT_SHIFT 12
773#define ICT_SIZE (1 << ICT_SHIFT)
774#define ICT_COUNT (ICT_SIZE / sizeof(u32))
775
776/* Free dram table */
777void iwl_free_isr_ict(struct iwl_trans *trans)
778{
779 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
780
781 if (trans_pcie->ict_tbl) {
782 dma_free_coherent(trans->dev, ICT_SIZE,
783 trans_pcie->ict_tbl,
784 trans_pcie->ict_tbl_dma);
785 trans_pcie->ict_tbl = NULL;
786 trans_pcie->ict_tbl_dma = 0;
787 }
788}
789
790
791/*
792 * allocate dram shared table, it is an aligned memory
793 * block of ICT_SIZE.
794 * also reset all data related to ICT table interrupt.
795 */
796int iwl_alloc_isr_ict(struct iwl_trans *trans)
797{
798 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
799
800 trans_pcie->ict_tbl =
801 dma_alloc_coherent(trans->dev, ICT_SIZE,
802 &trans_pcie->ict_tbl_dma,
803 GFP_KERNEL);
804 if (!trans_pcie->ict_tbl)
805 return -ENOMEM;
806
807 /* just an API sanity check ... it is guaranteed to be aligned */
808 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
809 iwl_free_isr_ict(trans);
810 return -EINVAL;
811 }
812
813 IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
814 (unsigned long long)trans_pcie->ict_tbl_dma);
815
816 IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
817
818 /* reset table and index to all 0 */
819 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
820 trans_pcie->ict_index = 0;
821
822 /* add periodic RX interrupt */
823 trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
824 return 0;
825}
826
/* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
void iwl_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	/* program the table with interrupts masked */
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	/* device takes the table's page-aligned DMA address, shifted
	 * down by the 4KB page shift */
	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	/* ack anything pending before re-enabling */
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
858
859/* Device is going down disable ict interrupt usage */
860void iwl_disable_ict(struct iwl_trans *trans)
861{
862 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
863 unsigned long flags;
864
865 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
866 trans_pcie->use_ict = false;
867 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
868}
869
/*
 * Legacy (non-ICT) interrupt handler.
 *
 * Reads the interrupt cause directly from CSR_INT, masks further
 * interrupts, and schedules the irq tasklet to do the real servicing.
 * Interrupts are re-enabled either by the tasklet or, when there was
 * nothing to service, here before returning.
 */
static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif
	if (!trans)
		return IRQ_NONE;

	trace_iwlwifi_dev_irq(trans->dev);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		/* NOTE(review): inta was already checked non-zero above, so
		 * this branch is effectively unreachable; it mirrors the
		 * structure of iwl_isr_ict() */
		iwl_enable_interrupts(trans);

	/* "unplugged" path returns IRQ_HANDLED so a dead card does not
	 * look like an unhandled screaming shared IRQ */
 unplugged:
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq and no scheduled tasklet. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
943
/* Interrupt handler using the ICT table.  With this handler the driver
 * stops reading the (expensive) INTA register for each interrupt: the
 * device writes interrupt causes into the ICT DRAM table and increments
 * an index, then fires the interrupt.  The driver ORs all ICT entries
 * from the current index up to the first zero entry; the result is the
 * interrupt set to service.  The consumed entries are set back to 0 and
 * the index advanced.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;		/* OR of all ICT entries consumed */
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (!trans_pcie->use_ict)
		return iwl_isr(irq, data);

	trace_iwlwifi_dev_irq(trans->dev);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);


	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		/* consume the entry and advance (with wrap-around) */
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	/* expand the compressed ICT value back into CSR_INT bit layout:
	 * low byte stays, high byte moves to the upper 16 bits */
	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Allow interrupt if was disabled by this handler and
		 * no tasklet was scheduled, We should not enable interrupt,
		 * tasklet will enable it.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
new file mode 100644
index 00000000000..969f78f421d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -0,0 +1,2169 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/pci.h>
64#include <linux/pci-aspm.h>
65#include <linux/interrupt.h>
66#include <linux/debugfs.h>
67#include <linux/sched.h>
68#include <linux/bitops.h>
69#include <linux/gfp.h>
70
71#include "iwl-drv.h"
72#include "iwl-trans.h"
73#include "iwl-csr.h"
74#include "iwl-prph.h"
75#include "iwl-agn-hw.h"
76#include "internal.h"
77/* FIXME: need to abstract out TX command (once we know what it looks like) */
78#include "dvm/commands.h"
79
/*
 * Bitmask of all HW TX queues supported by this device except the
 * command queue (commands must never be chained).
 */
#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))
83
/*
 * Allocate the RX queue's DMA resources: the circular buffer of Read
 * Buffer Descriptors (RBDs) and the receive-buffer status area written
 * by the device.  The rxq struct itself is embedded in trans_pcie and
 * is only zeroed here.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if
 * called while resources are already allocated.
 */
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	/* double allocation would leak the existing buffers */
	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/*Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	/* unwind the RBD allocation so the function is side-effect free
	 * on failure */
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
119
120static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
121{
122 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
123 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
124 int i;
125
126 /* Fill the rx_used queue with _all_ of the Rx buffers */
127 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
128 /* In the reset function, these buffers may have been allocated
129 * to an SKB, so we need to unmap and free potential storage */
130 if (rxq->pool[i].page != NULL) {
131 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
132 PAGE_SIZE << trans_pcie->rx_page_order,
133 DMA_FROM_DEVICE);
134 __free_pages(rxq->pool[i].page,
135 trans_pcie->rx_page_order);
136 rxq->pool[i].page = NULL;
137 }
138 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
139 }
140}
141
/*
 * Program the RX DMA hardware: stop the channel, point it at the RBD
 * ring and status area, then re-enable it with the configured buffer
 * size, RB timeout and ring size.  Finally set the default interrupt
 * coalescing timer.
 */
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size|
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
188
/*
 * (Re)initialise the RX queue: allocate DMA resources on first use,
 * return all buffers to the used pool, replenish them with fresh pages
 * and program the RX DMA hardware.
 *
 * Returns 0 on success or a negative errno from the allocation path.
 */
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	/* first call: the descriptor ring does not exist yet */
	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
230
/*
 * Free all RX-side resources: buffer pages, the RBD ring and the
 * receive-buffer status area.  Safe to call when nothing was allocated.
 */
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/*if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
262
/*
 * Stop RX DMA and wait (up to 1000 usec) for channel 0 to report idle.
 * Returns the iwl_poll_direct_bit() result (negative on timeout).
 */
static int iwl_trans_rx_stop(struct iwl_trans *trans)
{

	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
271
272static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
273 struct iwl_dma_ptr *ptr, size_t size)
274{
275 if (WARN_ON(ptr->addr))
276 return -EINVAL;
277
278 ptr->addr = dma_alloc_coherent(trans->dev, size,
279 &ptr->dma, GFP_KERNEL);
280 if (!ptr->addr)
281 return -ENOMEM;
282 ptr->size = size;
283 return 0;
284}
285
286static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
287 struct iwl_dma_ptr *ptr)
288{
289 if (unlikely(!ptr->addr))
290 return;
291
292 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
293 memset(ptr, 0, sizeof(*ptr));
294}
295
/*
 * Watchdog timer callback: fires when a TX queue has made no reclaim
 * progress for wd_timeout.  If the queue is genuinely non-empty, dump
 * the SW and HW read/write pointers and trigger op_mode error recovery.
 */
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_tx_queue *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);


	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);
	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
			& (TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));

	iwl_op_mode_nic_error(trans->op_mode);
}
322
/*
 * Allocate one TX queue's host-side structures: the per-slot entry
 * array (plus pre-allocated command buffers when this is the command
 * queue) and the DMA-coherent TFD ring shared with the device.  Also
 * arms the per-queue stuck-queue watchdog timer.
 *
 * Returns 0 on success, -EINVAL on double allocation, -ENOMEM on
 * allocation failure (partially allocated entries are freed).
 */
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_tx_queue_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	/* only the command queue keeps permanent command buffers;
	 * data queues attach skbs on the fly */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	/* free any command buffers allocated before the failure */
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}
377
/*
 * Initialise an already-allocated TX queue: reset the software queue
 * indices/water marks and tell the device where the queue's TFD ring
 * lives in DRAM.  Returns 0 or the error from iwl_queue_init().
 */
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
406
/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 *
 * Walks the queue from read_ptr to write_ptr, freeing each TFD's DMA
 * mappings.  No-op when the queue's ring was never allocated (!n_bd).
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}
435
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers (only the command queue
	 * pre-allocated them in iwl_trans_txq_alloc()) */

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->entries[i].cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	/* the watchdog timer must be dead before the struct is cleared */
	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
477
478/**
479 * iwl_trans_tx_free - Free TXQ Context
480 *
481 * Destroy all TX DMA queues and structures
482 */
483static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
484{
485 int txq_id;
486 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
487
488 /* Tx queues */
489 if (trans_pcie->txq) {
490 for (txq_id = 0;
491 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
492 iwl_tx_queue_free(trans, txq_id);
493 }
494
495 kfree(trans_pcie->txq);
496 trans_pcie->txq = NULL;
497
498 iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
499
500 iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
501}
502
503/**
504 * iwl_trans_tx_alloc - allocate TX context
505 * Allocate all Tx DMA structures and initialize them
506 *
507 * @param priv
508 * @return error code
509 */
510static int iwl_trans_tx_alloc(struct iwl_trans *trans)
511{
512 int ret;
513 int txq_id, slots_num;
514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
515
516 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
517 sizeof(struct iwlagn_scd_bc_tbl);
518
519 /*It is not allowed to alloc twice, so warn when this happens.
520 * We cannot rely on the previous allocation, so free and fail */
521 if (WARN_ON(trans_pcie->txq)) {
522 ret = -EINVAL;
523 goto error;
524 }
525
526 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
527 scd_bc_tbls_size);
528 if (ret) {
529 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
530 goto error;
531 }
532
533 /* Alloc keep-warm buffer */
534 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
535 if (ret) {
536 IWL_ERR(trans, "Keep Warm allocation failed\n");
537 goto error;
538 }
539
540 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
541 sizeof(struct iwl_tx_queue), GFP_KERNEL);
542 if (!trans_pcie->txq) {
543 IWL_ERR(trans, "Not enough memory for txq\n");
544 ret = ENOMEM;
545 goto error;
546 }
547
548 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
549 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
550 txq_id++) {
551 slots_num = (txq_id == trans_pcie->cmd_queue) ?
552 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
553 ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
554 slots_num, txq_id);
555 if (ret) {
556 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
557 goto error;
558 }
559 }
560
561 return 0;
562
563error:
564 iwl_trans_pcie_tx_free(trans);
565
566 return ret;
567}
/*
 * (Re)initialise the TX path: allocate all queue structures on first
 * use, quiesce the TX DMA FIFOs, program the keep-warm buffer address
 * and initialise every queue.
 *
 * Returns 0 on success or a negative errno; frees the TX context on
 * error only if this call allocated it.
 */
static int iwl_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}
614
/* Select V_MAIN as the device power source (see comment body for the
 * V_AUX alternative, kept for documentation only). */
static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
631
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
/* bits within the PCIe Link Control config-space register */
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
636
637static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
638{
639 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
640 int pos;
641 u16 pci_lnk_ctl;
642
643 struct pci_dev *pci_dev = trans_pcie->pci_dev;
644
645 pos = pci_pcie_cap(pci_dev);
646 pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
647 return pci_lnk_ctl;
648}
649
static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	/* power management is only supported when L0S is off */
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
676
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 *
 * The register sequence below is order-sensitive hardware bring-up;
 * returns 0 on success or a negative value if clock stabilization
 * timed out.
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
759
/*
 * Request the device to stop bus-master DMA and wait (up to 100 usec)
 * for it to acknowledge.  Returns the iwl_poll_bit() result; a timeout
 * is logged but otherwise non-fatal to the caller's shutdown path.
 */
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
777
/*
 * iwl_apm_stop - stop the device and put it in a low power state
 *
 * Mirrors iwl_apm_init(): stops busmaster DMA, soft-resets the whole
 * device, and drops it from D0A back to the uninitialized D0U state.
 * The statement order below follows the hardware's required shutdown
 * sequence; do not reorder.
 */
static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	/* Mark the device disabled first so other paths stop touching it */
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* Give the reset time to take effect before clearing INIT_DONE */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
800
/*
 * iwl_nic_init - bring the NIC from reset to an operational baseline
 *
 * Runs APM init (under the irq lock), sets default interrupt
 * coalescing, configures power and op-mode specifics, and then
 * (re)initializes the Rx and Tx/command queues.
 *
 * Returns 0 on success, -ENOMEM if Tx queue allocation fails.
 */
static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	/* Let the op_mode apply its device-specific configuration */
	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	/* NOTE(review): iwl_rx_init()'s return value is ignored here —
	 * presumably reset-of-existing-queue cannot fail; confirm. */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
836
837#define HW_READY_TIMEOUT (50)
838
839/* Note: returns poll_bit return value, which is >= 0 if success */
840static int iwl_set_hw_ready(struct iwl_trans *trans)
841{
842 int ret;
843
844 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
845 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
846
847 /* See if we got it */
848 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
849 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
850 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
851 HW_READY_TIMEOUT);
852
853 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
854 return ret;
855}
856
857/* Note: returns standard 0/-ERROR code */
858static int iwl_prepare_card_hw(struct iwl_trans *trans)
859{
860 int ret;
861
862 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
863
864 ret = iwl_set_hw_ready(trans);
865 /* If the card is ready, exit 0 */
866 if (ret >= 0)
867 return 0;
868
869 /* If HW is not ready, prepare the conditions to check again */
870 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
871 CSR_HW_IF_CONFIG_REG_PREPARE);
872
873 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
874 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
875 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
876
877 if (ret < 0)
878 return ret;
879
880 /* HW should be ready by now, check again. */
881 ret = iwl_set_hw_ready(trans);
882 if (ret >= 0)
883 return 0;
884 return ret;
885}
886
887/*
888 * ucode
889 */
/*
 * iwl_load_section - DMA one firmware section into device SRAM
 * @section_num: section index (only used in log messages)
 * @section: host-side descriptor: DMA address, length, SRAM offset
 *
 * Programs the FH service channel to copy the section from host DRAM
 * into device SRAM, then sleeps until the ISR sets
 * ucode_write_complete (or 5 seconds elapse).
 *
 * Returns 0 on success, -ETIMEDOUT if the DMA never completed.
 */
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = section->p_addr;
	u32 byte_cnt = section->len;
	u32 dst_addr = section->offset;
	int ret;

	/* Cleared here; set again by the interrupt handler when done */
	trans_pcie->ucode_write_complete = false;

	/* Pause the channel while we reprogram it */
	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM */
	iwl_write_direct32(trans,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		dst_addr);

	/* Source address in host DRAM: low 32 bits... */
	iwl_write_direct32(trans,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ...then the high address bits together with the byte count */
	iwl_write_direct32(trans,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* One buffer, one TFD, mark the TFD valid */
	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the transfer, interrupting the host at end-of-TFD */
	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
			section_num);
		return -ETIMEDOUT;
	}

	return 0;
}
942
943static int iwl_load_given_ucode(struct iwl_trans *trans,
944 const struct fw_img *image)
945{
946 int ret = 0;
947 int i;
948
949 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
950 if (!image->sec[i].p_addr)
951 break;
952
953 ret = iwl_load_section(trans, i, &image->sec[i]);
954 if (ret)
955 return ret;
956 }
957
958 /* Remove all resets to allow NIC to operate */
959 iwl_write32(trans, CSR_RESET, 0);
960
961 return 0;
962}
963
/*
 * iwl_trans_pcie_start_fw - take ownership of the NIC, init it, load @fw
 *
 * Returns 0 on success; -EIO if the HW never became ready (e.g. AMT
 * holds the device), -ERFKILL if the RF-kill switch is asserted, or a
 * NIC-init / firmware-load error code.
 */
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	/* Ack all pending interrupts before touching the device */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}
1008
1009/*
1010 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
1011 * must be called under the irq lock and with MAC access
1012 */
1013static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1014{
1015 struct iwl_trans_pcie __maybe_unused *trans_pcie =
1016 IWL_TRANS_GET_PCIE_TRANS(trans);
1017
1018 lockdep_assert_held(&trans_pcie->irq_lock);
1019
1020 iwl_write_prph(trans, SCD_TXFACT, mask);
1021}
1022
/*
 * iwl_tx_start - bring up the Tx scheduler and DMA channels
 *
 * Zeroes the scheduler's context, status and translation-table SRAM,
 * points it at the byte-count tables, maps the configured queues to
 * FIFOs, and enables all Tx DMA channels.  Runs under the irq lock.
 */
static void iwl_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset the queue translation table, one entry per HW queue */
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	       a += 4)
		iwl_write_targ_mem(trans, a, 0);

	/* Byte-count table base; the register takes the address >> 10 */
	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Map each configured queue to its FIFO */
	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
		int fifo = trans_pcie->setup_q_to_fifo[i];

		__iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
					    IWL_TID_NON_QOS,
					    SCD_FRAME_LIMIT, 0);
	}

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
1085
/* Firmware signalled ALIVE: re-arm the ICT interrupt table and bring
 * up the Tx scheduler/DMA machinery. */
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}
1091
/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 *
 * (The old kernel-doc named this iwlagn_txq_ctx_stop; the function is
 * iwl_trans_tx_stop.)  Disables the Tx scheduler, idles every Tx DMA
 * channel, then unmaps/frees the skbs on all queues.  Always returns 0.
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		/* timeout is logged but not fatal; keep stopping the rest */
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}
1133
/*
 * iwl_trans_pcie_stop_device - full device shutdown
 *
 * Disables interrupts and the ICT table, stops Tx/Rx DMA (only if the
 * device was actually enabled), powers down the APM, resets the
 * on-board processor, and clears all driver status bits.  Safe to call
 * more than once (see the STATUS_DEVICE_ENABLED guard below).
 */
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* keep RF-kill notifications working even while stopped */
	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
1196
/*
 * WoWLAN suspend: hand the device over to the uCode.  Tells the
 * firmware D3 config is complete, masks host interrupts, and releases
 * the host's MAC-access (stay-awake) request.
 */
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
1207
/*
 * iwl_trans_pcie_tx - enqueue one frame on a Tx queue
 * @skb: frame (802.11 header + payload) to transmit
 * @dev_cmd: pre-built Tx command whose payload is the iwl_tx_cmd
 * @txq_id: hardware queue to place the frame on
 *
 * Builds a two-entry TFD: the first buffer holds the Tx command
 * concatenated with the MAC header (padded to a dword boundary), the
 * second points at the frame payload inside the skb.  Returns 0 on
 * success, -EINVAL for an unused queue, -1 on DMA-mapping failure.
 */
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	/* Refuse frames for queues that were never enabled */
	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	/* For fragmented frames, defer the write-pointer update so the
	 * fragments go out back-to-back */
	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			/* undo the first mapping before bailing out */
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	/* hand the buffer back to the device after the CPU-side edits */
	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
 out_err:
	spin_unlock(&txq->lock);
	return -1;
}
1356
1357static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1358{
1359 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1360 int err;
1361 bool hw_rfkill;
1362
1363 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1364
1365 if (!trans_pcie->irq_requested) {
1366 tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
1367 iwl_irq_tasklet, (unsigned long)trans);
1368
1369 iwl_alloc_isr_ict(trans);
1370
1371 err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
1372 DRV_NAME, trans);
1373 if (err) {
1374 IWL_ERR(trans, "Error allocating IRQ %d\n",
1375 trans_pcie->irq);
1376 goto error;
1377 }
1378
1379 INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
1380 trans_pcie->irq_requested = true;
1381 }
1382
1383 err = iwl_prepare_card_hw(trans);
1384 if (err) {
1385 IWL_ERR(trans, "Error while preparing HW: %d", err);
1386 goto err_free_irq;
1387 }
1388
1389 iwl_apm_init(trans);
1390
1391 /* From now on, the op_mode will be kept updated about RF kill state */
1392 iwl_enable_rfkill_int(trans);
1393
1394 hw_rfkill = iwl_is_rfkill_set(trans);
1395 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1396
1397 return err;
1398
1399err_free_irq:
1400 free_irq(trans_pcie->irq, trans);
1401error:
1402 iwl_free_isr_ict(trans);
1403 tasklet_kill(&trans_pcie->irq_tasklet);
1404 return err;
1405}
1406
/*
 * iwl_trans_pcie_stop_hw - power the HW down (APM stop)
 * @op_mode_leaving: true when the op_mode is going away entirely; in
 *	that case we don't re-arm the RF-kill interrupt.
 */
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	iwl_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* ack anything that fired while we were disabling */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}
1439
1440static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1441 struct sk_buff_head *skbs)
1442{
1443 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1444 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1445 /* n_bd is usually 256 => n_bd - 1 = 0xff */
1446 int tfd_num = ssn & (txq->q.n_bd - 1);
1447 int freed = 0;
1448
1449 spin_lock(&txq->lock);
1450
1451 if (txq->q.read_ptr != tfd_num) {
1452 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1453 txq_id, txq->q.read_ptr, tfd_num, ssn);
1454 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1455 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1456 iwl_wake_queue(trans, txq);
1457 }
1458
1459 spin_unlock(&txq->lock);
1460}
1461
1462static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1463{
1464 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1465}
1466
1467static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1468{
1469 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1470}
1471
1472static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1473{
1474 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1475}
1476
/*
 * iwl_trans_pcie_configure - copy op-mode configuration into the
 * transport's private state (command queue id, no-reclaim command
 * list, queue-to-FIFO map, Rx buffer size, watchdog timeout, command
 * name table).  Out-of-range values are clamped with a WARN.
 */
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	/* drop the no-reclaim list entirely if it is oversized */
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;

	/* clamp the queue map to what the HW supports */
	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;

	/* at least the command queue must be mapped */
	WARN_ON(!trans_pcie->n_q_to_fifo);

	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
	       trans_pcie->n_q_to_fifo * sizeof(u8));

	/* Rx buffers are either 8 KiB or 4 KiB pages */
	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}
1513
1514void iwl_trans_pcie_free(struct iwl_trans *trans)
1515{
1516 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1517
1518 iwl_trans_pcie_tx_free(trans);
1519#ifndef CONFIG_IWLWIFI_IDI
1520 iwl_trans_pcie_rx_free(trans);
1521#endif
1522 if (trans_pcie->irq_requested == true) {
1523 free_irq(trans_pcie->irq, trans);
1524 iwl_free_isr_ict(trans);
1525 }
1526
1527 pci_disable_msi(trans_pcie->pci_dev);
1528 iounmap(trans_pcie->hw_base);
1529 pci_release_regions(trans_pcie->pci_dev);
1530 pci_disable_device(trans_pcie->pci_dev);
1531 kmem_cache_destroy(trans->dev_cmd_pool);
1532
1533 kfree(trans);
1534}
1535
1536static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1537{
1538 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1539
1540 if (state)
1541 set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1542 else
1543 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1544}
1545
1546#ifdef CONFIG_PM_SLEEP
/* Nothing to do on suspend; everything is restored in resume. */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}
1551
1552static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1553{
1554 bool hw_rfkill;
1555
1556 iwl_enable_rfkill_int(trans);
1557
1558 hw_rfkill = iwl_is_rfkill_set(trans);
1559 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1560
1561 if (!hw_rfkill)
1562 iwl_enable_interrupts(trans);
1563
1564 return 0;
1565}
1566#endif /* CONFIG_PM_SLEEP */
1567
1568#define IWL_FLUSH_WAIT_MS 2000
1569
1570static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1571{
1572 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1573 struct iwl_tx_queue *txq;
1574 struct iwl_queue *q;
1575 int cnt;
1576 unsigned long now = jiffies;
1577 int ret = 0;
1578
1579 /* waiting for all the tx frames complete might take a while */
1580 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1581 if (cnt == trans_pcie->cmd_queue)
1582 continue;
1583 txq = &trans_pcie->txq[cnt];
1584 q = &txq->q;
1585 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1586 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1587 msleep(1);
1588
1589 if (q->read_ptr != q->write_ptr) {
1590 IWL_ERR(trans, "fail to flush all tx fifo queues\n");
1591 ret = -ETIMEDOUT;
1592 break;
1593 }
1594 }
1595 return ret;
1596}
1597
1598static const char *get_fh_string(int cmd)
1599{
1600#define IWL_CMD(x) case x: return #x
1601 switch (cmd) {
1602 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1603 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1604 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1605 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1606 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1607 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1608 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1609 IWL_CMD(FH_TSSR_TX_STATUS_REG);
1610 IWL_CMD(FH_TSSR_TX_ERROR_REG);
1611 default:
1612 return "UNKNOWN";
1613 }
1614#undef IWL_CMD
1615}
1616
/*
 * iwl_dump_fh - dump the FH (flush handler) registers
 * @buf: (debug builds, @display only) receives a kmalloc'd text buffer
 *	that the CALLER must kfree
 * @display: if true (and CONFIG_IWLWIFI_DEBUG), format into *buf and
 *	return the number of bytes written; otherwise log via IWL_ERR
 *
 * Returns bytes written into *buf, 0 when logging, or -ENOMEM.
 */
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* registers to dump, in display order */
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		/* 48 chars per register line plus a header */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}
1660
1661static const char *get_csr_string(int cmd)
1662{
1663#define IWL_CMD(x) case x: return #x
1664 switch (cmd) {
1665 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1666 IWL_CMD(CSR_INT_COALESCING);
1667 IWL_CMD(CSR_INT);
1668 IWL_CMD(CSR_INT_MASK);
1669 IWL_CMD(CSR_FH_INT_STATUS);
1670 IWL_CMD(CSR_GPIO_IN);
1671 IWL_CMD(CSR_RESET);
1672 IWL_CMD(CSR_GP_CNTRL);
1673 IWL_CMD(CSR_HW_REV);
1674 IWL_CMD(CSR_EEPROM_REG);
1675 IWL_CMD(CSR_EEPROM_GP);
1676 IWL_CMD(CSR_OTP_GP_REG);
1677 IWL_CMD(CSR_GIO_REG);
1678 IWL_CMD(CSR_GP_UCODE_REG);
1679 IWL_CMD(CSR_GP_DRIVER_REG);
1680 IWL_CMD(CSR_UCODE_DRV_GP1);
1681 IWL_CMD(CSR_UCODE_DRV_GP2);
1682 IWL_CMD(CSR_LED_REG);
1683 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1684 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1685 IWL_CMD(CSR_ANA_PLL_CFG);
1686 IWL_CMD(CSR_HW_REV_WA_REG);
1687 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1688 default:
1689 return "UNKNOWN";
1690 }
1691#undef IWL_CMD
1692}
1693
1694void iwl_dump_csr(struct iwl_trans *trans)
1695{
1696 int i;
1697 static const u32 csr_tbl[] = {
1698 CSR_HW_IF_CONFIG_REG,
1699 CSR_INT_COALESCING,
1700 CSR_INT,
1701 CSR_INT_MASK,
1702 CSR_FH_INT_STATUS,
1703 CSR_GPIO_IN,
1704 CSR_RESET,
1705 CSR_GP_CNTRL,
1706 CSR_HW_REV,
1707 CSR_EEPROM_REG,
1708 CSR_EEPROM_GP,
1709 CSR_OTP_GP_REG,
1710 CSR_GIO_REG,
1711 CSR_GP_UCODE_REG,
1712 CSR_GP_DRIVER_REG,
1713 CSR_UCODE_DRV_GP1,
1714 CSR_UCODE_DRV_GP2,
1715 CSR_LED_REG,
1716 CSR_DRAM_INT_TBL_REG,
1717 CSR_GIO_CHICKEN_BITS,
1718 CSR_ANA_PLL_CFG,
1719 CSR_HW_REV_WA_REG,
1720 CSR_DBG_HPET_MEM_REG
1721 };
1722 IWL_ERR(trans, "CSR values:\n");
1723 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1724 "CSR_INT_PERIODIC_REG)\n");
1725 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1726 IWL_ERR(trans, " %25s: 0X%08x\n",
1727 get_csr_string(csr_tbl[i]),
1728 iwl_read32(trans, csr_tbl[i]));
1729 }
1730}
1731
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create a debugfs file named #name backed by iwl_dbgfs_<name>_ops;
 * returns -ENOMEM from the enclosing function on failure, so it may
 * only be used inside a function that returns int. */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* Forward declarations for the read/write handlers of file <name> */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
					char __user *user_buf,          \
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
					const char __user *user_buf,    \
					size_t count, loff_t *ppos);


/* Declare the handler(s) and define the matching file_operations:
 * read-only, write-only, or read-write variants. */
#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
1777
1778static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1779 char __user *user_buf,
1780 size_t count, loff_t *ppos)
1781{
1782 struct iwl_trans *trans = file->private_data;
1783 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1784 struct iwl_tx_queue *txq;
1785 struct iwl_queue *q;
1786 char *buf;
1787 int pos = 0;
1788 int cnt;
1789 int ret;
1790 size_t bufsz;
1791
1792 bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
1793
1794 if (!trans_pcie->txq)
1795 return -EAGAIN;
1796
1797 buf = kzalloc(bufsz, GFP_KERNEL);
1798 if (!buf)
1799 return -ENOMEM;
1800
1801 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1802 txq = &trans_pcie->txq[cnt];
1803 q = &txq->q;
1804 pos += scnprintf(buf + pos, bufsz - pos,
1805 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
1806 cnt, q->read_ptr, q->write_ptr,
1807 !!test_bit(cnt, trans_pcie->queue_used),
1808 !!test_bit(cnt, trans_pcie->queue_stopped));
1809 }
1810 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1811 kfree(buf);
1812 return ret;
1813}
1814
1815static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1816 char __user *user_buf,
1817 size_t count, loff_t *ppos)
1818{
1819 struct iwl_trans *trans = file->private_data;
1820 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1821 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1822 char buf[256];
1823 int pos = 0;
1824 const size_t bufsz = sizeof(buf);
1825
1826 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1827 rxq->read);
1828 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1829 rxq->write);
1830 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1831 rxq->free_count);
1832 if (rxq->rb_stts) {
1833 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1834 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1835 } else {
1836 pos += scnprintf(buf + pos, bufsz - pos,
1837 "closed_rb_num: Not Allocated\n");
1838 }
1839 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1840}
1841
1842static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1843 char __user *user_buf,
1844 size_t count, loff_t *ppos)
1845{
1846 struct iwl_trans *trans = file->private_data;
1847 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1848 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1849
1850 int pos = 0;
1851 char *buf;
1852 int bufsz = 24 * 64; /* 24 items * 64 char per item */
1853 ssize_t ret;
1854
1855 buf = kzalloc(bufsz, GFP_KERNEL);
1856 if (!buf)
1857 return -ENOMEM;
1858
1859 pos += scnprintf(buf + pos, bufsz - pos,
1860 "Interrupt Statistics Report:\n");
1861
1862 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
1863 isr_stats->hw);
1864 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
1865 isr_stats->sw);
1866 if (isr_stats->sw || isr_stats->hw) {
1867 pos += scnprintf(buf + pos, bufsz - pos,
1868 "\tLast Restarting Code: 0x%X\n",
1869 isr_stats->err_code);
1870 }
1871#ifdef CONFIG_IWLWIFI_DEBUG
1872 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
1873 isr_stats->sch);
1874 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
1875 isr_stats->alive);
1876#endif
1877 pos += scnprintf(buf + pos, bufsz - pos,
1878 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
1879
1880 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
1881 isr_stats->ctkill);
1882
1883 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
1884 isr_stats->wakeup);
1885
1886 pos += scnprintf(buf + pos, bufsz - pos,
1887 "Rx command responses:\t\t %u\n", isr_stats->rx);
1888
1889 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
1890 isr_stats->tx);
1891
1892 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
1893 isr_stats->unhandled);
1894
1895 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1896 kfree(buf);
1897 return ret;
1898}
1899
/*
 * Write handler for the "interrupt" debugfs entry.
 *
 * The input is parsed as a hex value; writing 0 clears the accumulated
 * interrupt statistics, any other value is accepted and ignored.  The
 * whole write is consumed on success.
 */
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	/* leave room for the terminating NUL */
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	/* NOTE(review): -EFAULT for a parse failure is unusual (-EINVAL is
	 * conventional) -- kept as-is since userspace may depend on it */
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
1923
/*
 * Write handler for the "csr" debugfs entry: any successfully parsed
 * write triggers a CSR register dump to the log.
 *
 * NOTE(review): the parsed 'csr' value is currently unused --
 * iwl_dump_csr() dumps all registers regardless of the value written.
 */
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	/* leave room for the terminating NUL */
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}
1944
1945static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1946 char __user *user_buf,
1947 size_t count, loff_t *ppos)
1948{
1949 struct iwl_trans *trans = file->private_data;
1950 char *buf;
1951 int pos = 0;
1952 ssize_t ret = -EFAULT;
1953
1954 ret = pos = iwl_dump_fh(trans, &buf, true);
1955 if (buf) {
1956 ret = simple_read_from_buffer(user_buf,
1957 count, ppos, buf, pos);
1958 kfree(buf);
1959 }
1960
1961 return ret;
1962}
1963
/*
 * Write handler for the "fw_restart" debugfs entry: any write injects a
 * NIC error into the op_mode, which triggers the firmware restart path.
 * Returns -EAGAIN if no op_mode is attached yet.
 */
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	iwl_op_mode_nic_error(trans->op_mode);

	return count;
}
1977
/* Instantiate the file_operations for each debugfs entry defined above */
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);
1984
/*
 * Create the debugfs files and directories
 *
 * @dir is the per-device debugfs directory supplied by the caller.
 * NOTE(review): DEBUGFS_ADD_FILE's failure behavior is not visible in
 * this file -- confirm whether creation errors need to be propagated.
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;
}
2000#else
/* Stub used when CONFIG_IWLWIFI_DEBUGFS is disabled: register nothing. */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
2006#endif /*CONFIG_IWLWIFI_DEBUGFS */
2007
/*
 * PCIe implementation of the transport ops vtable; installed into the
 * iwl_trans allocated by iwl_trans_pcie_alloc() below.
 */
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};
2039
/*
 * iwl_trans_pcie_alloc - allocate and bring up the PCIe transport
 * @pdev: the PCI device
 * @ent: matched PCI id table entry
 * @cfg: device configuration
 *
 * Enables the PCI device, maps BAR0, sets the DMA mask (36-bit with a
 * 32-bit fallback), enables MSI and creates the host-command slab pool.
 * Returns the new transport, or NULL on failure; partially acquired
 * resources are released through the unwind labels at the end.
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	char cmd_pool_name[100];
	u16 pci_cmd;
	int err;

	/* iwl_trans_pcie lives in the private tail of the iwl_trans */
	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	/* Try the 36-bit DMA mask the HW supports, then fall back to 32 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	/* MSI failure is not fatal; the device can fall back to INTx */
	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	/* Per-device slab pool for host-command buffers */
	snprintf(cmd_pool_name, sizeof(cmd_pool_name), "iwl_cmd_pool:%s",
		 dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	return trans;

out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
new file mode 100644
index 00000000000..35e82161ca4
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -0,0 +1,998 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/slab.h>
31#include <linux/sched.h>
32
33#include "iwl-debug.h"
34#include "iwl-csr.h"
35#include "iwl-prph.h"
36#include "iwl-io.h"
37#include "iwl-op-mode.h"
38#include "internal.h"
39/* FIXME: need to abstract out TX command (once we know what it looks like) */
40#include "dvm/commands.h"
41
/* Per-frame overhead included in the scheduler byte counts, in bytes */
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
44
45/**
46 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
47 */
48void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
49 struct iwl_tx_queue *txq,
50 u16 byte_cnt)
51{
52 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
53 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
54 int write_ptr = txq->q.write_ptr;
55 int txq_id = txq->q.id;
56 u8 sec_ctl = 0;
57 u8 sta_id = 0;
58 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
59 __le16 bc_ent;
60 struct iwl_tx_cmd *tx_cmd =
61 (void *) txq->entries[txq->q.write_ptr].cmd->payload;
62
63 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
64
65 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
66
67 sta_id = tx_cmd->sta_id;
68 sec_ctl = tx_cmd->sec_ctl;
69
70 switch (sec_ctl & TX_CMD_SEC_MSK) {
71 case TX_CMD_SEC_CCM:
72 len += CCMP_MIC_LEN;
73 break;
74 case TX_CMD_SEC_TKIP:
75 len += TKIP_ICV_LEN;
76 break;
77 case TX_CMD_SEC_WEP:
78 len += WEP_IV_LEN + WEP_ICV_LEN;
79 break;
80 }
81
82 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
83
84 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
85
86 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
87 scd_bc_tbl[txq_id].
88 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
89}
90
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 *
 * No-op unless txq->need_update is set.  With shadow registers the write
 * pointer can be updated directly; otherwise, when the device may be in
 * power-save (STATUS_TPOWER_PMI), the MAC must be awake before touching
 * HBUS_TARG_WRPTR -- if it is asleep we request wakeup and return, and
 * the resulting interrupt retries the update later.
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					       "Tx queue %d requesting wakeup,"
					       " GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				/* update deferred until the wakeup interrupt */
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

			/*
			 * else not in power-save mode,
			 * uCode will never sleep when we're
			 * trying to tx (during RFKILL, we're not trying to tx).
			 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
139
140static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
141{
142 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
143
144 dma_addr_t addr = get_unaligned_le32(&tb->lo);
145 if (sizeof(dma_addr_t) > sizeof(u32))
146 addr |=
147 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
148
149 return addr;
150}
151
152static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
153{
154 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
155
156 return le16_to_cpu(tb->hi_n_len) >> 4;
157}
158
159static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
160 dma_addr_t addr, u16 len)
161{
162 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
163 u16 hi_n_len = len << 4;
164
165 put_unaligned_le32(addr, &tb->lo);
166 if (sizeof(dma_addr_t) > sizeof(u32))
167 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
168
169 tb->hi_n_len = cpu_to_le16(hi_n_len);
170
171 tfd->num_tbs = idx + 1;
172}
173
174static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
175{
176 return tfd->num_tbs & 0x1f;
177}
178
/*
 * iwl_unmap_tfd - DMA-unmap every buffer a TFD points to and clear it
 *
 * TB 0 holds the (bidirectionally mapped) command/header and is unmapped
 * using the address/length stashed in @meta; the remaining TBs are
 * unmapped with the addresses read back from the TFD itself, using the
 * caller-supplied @dma_dir.
 */
static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);

	/* mark the TFD empty so it is never unmapped twice */
	tfd->num_tbs = 0;
}
208
/**
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 *
 * Must be called with txq->lock held.  Also frees the skb attached to
 * the entry, if any, handing it back to the op_mode.
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
		      dma_dir);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
249
250int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
251 struct iwl_tx_queue *txq,
252 dma_addr_t addr, u16 len,
253 u8 reset)
254{
255 struct iwl_queue *q;
256 struct iwl_tfd *tfd, *tfd_tmp;
257 u32 num_tbs;
258
259 q = &txq->q;
260 tfd_tmp = txq->tfds;
261 tfd = &tfd_tmp[q->write_ptr];
262
263 if (reset)
264 memset(tfd, 0, sizeof(*tfd));
265
266 num_tbs = iwl_tfd_get_num_tbs(tfd);
267
268 /* Each TFD can point to a maximum 20 Tx buffers */
269 if (num_tbs >= IWL_NUM_OF_TBS) {
270 IWL_ERR(trans, "Error can not send more than %d chunks\n",
271 IWL_NUM_OF_TBS);
272 return -EINVAL;
273 }
274
275 if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
276 return -EINVAL;
277
278 if (unlikely(addr & ~IWL_TX_DMA_MASK))
279 IWL_ERR(trans, "Unaligned address = %llx\n",
280 (unsigned long long)addr);
281
282 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
283
284 return 0;
285}
286
287/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
288 * DMA services
289 *
290 * Theory of operation
291 *
292 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
293 * of buffer descriptors, each of which points to one or more data buffers for
294 * the device to read from or fill. Driver and device exchange status of each
295 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
296 * entries in each circular buffer, to protect against confusing empty and full
297 * queue states.
298 *
299 * The device reads or writes the data in the queues via the device's several
300 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
301 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
 * becomes > high mark, the Tx queue is resumed.
306 *
307 ***************************************************/
308
/*
 * iwl_queue_space - return the number of free slots in @q
 *
 * Keeps a reserve of 2 entries so that a full queue can never be
 * confused with an empty one.
 * NOTE(review): the wrap correction subtracts q->n_bd while the refill
 * below adds q->n_window -- for queues where n_window < n_bd this
 * undercounts free space; confirm the conservatism is intentional.
 */
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
324
325/**
326 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
327 */
328int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
329{
330 q->n_bd = count;
331 q->n_window = slots_num;
332 q->id = id;
333
334 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
335 * and iwl_queue_dec_wrap are broken. */
336 if (WARN_ON(!is_power_of_2(count)))
337 return -EINVAL;
338
339 /* slots_num must be power-of-two size, otherwise
340 * get_cmd_index is broken. */
341 if (WARN_ON(!is_power_of_2(slots_num)))
342 return -EINVAL;
343
344 q->low_mark = q->n_window / 4;
345 if (q->low_mark < 4)
346 q->low_mark = 4;
347
348 q->high_mark = q->n_window / 8;
349 if (q->high_mark < 2)
350 q->high_mark = 2;
351
352 q->write_ptr = q->read_ptr = 0;
353
354 return 0;
355}
356
/*
 * iwlagn_txq_inval_byte_cnt_tbl - invalidate the byte-count entry at the
 * queue's read pointer after the frame has been reclaimed.
 *
 * Writes a minimal length of 1 (keeping the station id for non-command
 * queues) into both the entry and its wrap-around duplicate.
 */
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	/* the command queue carries no station id */
	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	/* keep the wrap-around duplicate entry in sync */
	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
382
383static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
384 u16 txq_id)
385{
386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
387 u32 tbl_dw_addr;
388 u32 tbl_dw;
389 u16 scd_q2ratid;
390
391 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
392
393 tbl_dw_addr = trans_pcie->scd_base_addr +
394 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
395
396 tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
397
398 if (txq_id & 0x1)
399 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
400 else
401 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
402
403 iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
404
405 return 0;
406}
407
/* Stop the HW scheduler for @txq_id without touching the rest of its
 * configuration. */
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		       SCD_QUEUE_STATUS_BITS(txq_id),
		       (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		       (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
417
/*
 * iwl_trans_set_wr_ptrs - point both HW write pointer and scheduler read
 * pointer of @txq_id at @index, so the queue (re)starts at a well-defined
 * position.
 */
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
425
426void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
427 struct iwl_tx_queue *txq,
428 int tx_fifo_id, bool active)
429{
430 int txq_id = txq->q.id;
431
432 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
433 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
434 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
435 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
436 SCD_QUEUE_STTS_REG_MSK);
437
438 if (active)
439 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
440 txq_id, tx_fifo_id);
441 else
442 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
443}
444
/*
 * __iwl_trans_pcie_txq_enable - configure and activate Tx queue @txq_id
 *
 * Stops the queue's scheduler, optionally maps it to a station/TID pair
 * (for aggregation queues), seeds the read/write pointers from @ssn,
 * programs the Tx window/frame limit in SRAM and finally activates the
 * queue on @fifo.  Must be called with trans_pcie->irq_lock held.
 */
void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
				 int fifo, int sta_id, int tid,
				 int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			   ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			   SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			   ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			   SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      fifo, true);
}
494
495void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
496 int sta_id, int tid, int frame_limit, u16 ssn)
497{
498 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
499 unsigned long flags;
500
501 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
502
503 __iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id,
504 tid, frame_limit, ssn);
505
506 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
507}
508
/*
 * iwl_trans_pcie_txq_disable - deactivate Tx queue @txq_id
 *
 * Stops the queue's scheduler, turns off aggregation for it, resets its
 * read/write pointers and marks it inactive.  Warns (and bails) if the
 * queue was not marked used.
 */
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      0, false);
}
529
530/*************** HOST COMMAND QUEUE FUNCTIONS *****/
531
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: the transport
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation is
 * failed. On success, it returns the index (> 0) of command in the
 * command queue.
 *
 * Copied fragments are packed behind the command header in one TFD
 * buffer; NOCOPY fragments are DMA-mapped in place and must come last.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	/* compute sizes; copy_size counts only the copied fragments */
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	/* async commands need an extra slot free (for the response) */
	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		/* NOCOPY fragments are always last; stop copying here */
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* map the header + copied data as the first TB */
	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	/* map each NOCOPY fragment as its own TB */
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			/* unwind every mapping made so far */
			iwl_unmap_tfd(trans, out_meta,
				      &txq->tfds[q->write_ptr],
				      DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}
699
700static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
701 struct iwl_tx_queue *txq)
702{
703 if (!trans_pcie->wd_timeout)
704 return;
705
706 /*
707 * if empty delete timer, otherwise move timer forward
708 * since we're making progress on this queue
709 */
710 if (txq->q.read_ptr == txq->q.write_ptr)
711 del_timer(&txq->stuck_timer);
712 else
713 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
714}
715
716/**
717 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
718 *
719 * When FW advances 'R' index, all entries between old and new 'R' index
720 * need to be reclaimed. As result, some free space forms. If there is
721 * enough free space (> low mark), wake the stack that feeds us.
722 */
723static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
724 int idx)
725{
726 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
727 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
728 struct iwl_queue *q = &txq->q;
729 int nfreed = 0;
730
731 lockdep_assert_held(&txq->lock);
732
733 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
734 IWL_ERR(trans,
735 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
736 __func__, txq_id, idx, q->n_bd,
737 q->write_ptr, q->read_ptr);
738 return;
739 }
740
741 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
742 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
743
744 if (nfreed++ > 0) {
745 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
746 idx, q->write_ptr, q->read_ptr);
747 iwl_op_mode_nic_error(trans->op_mode);
748 }
749
750 }
751
752 iwl_queue_progress(trans_pcie, txq);
753}
754
755/**
756 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
757 * @rxb: Rx buffer to reclaim
758 * @handler_status: return value of the handler of the command
759 * (put in setup_rx_handlers)
760 *
761 * If an Rx buffer has an async callback associated with it the callback
762 * will be executed. The attached skb (if present) will only be freed
763 * if the callback returns 1
764 */
765void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
766 int handler_status)
767{
768 struct iwl_rx_packet *pkt = rxb_addr(rxb);
769 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
770 int txq_id = SEQ_TO_QUEUE(sequence);
771 int index = SEQ_TO_INDEX(sequence);
772 int cmd_index;
773 struct iwl_device_cmd *cmd;
774 struct iwl_cmd_meta *meta;
775 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
776 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
777
778 /* If a Tx command is being handled and it isn't in the actual
779 * command queue then there a command routing bug has been introduced
780 * in the queue management code. */
781 if (WARN(txq_id != trans_pcie->cmd_queue,
782 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
783 txq_id, trans_pcie->cmd_queue, sequence,
784 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
785 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
786 iwl_print_hex_error(trans, pkt, 32);
787 return;
788 }
789
790 spin_lock(&txq->lock);
791
792 cmd_index = get_cmd_index(&txq->q, index);
793 cmd = txq->entries[cmd_index].cmd;
794 meta = &txq->entries[cmd_index].meta;
795
796 iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
797
798 /* Input error checking is done when commands are added to queue. */
799 if (meta->flags & CMD_WANT_SKB) {
800 struct page *p = rxb_steal_page(rxb);
801
802 meta->source->resp_pkt = pkt;
803 meta->source->_rx_page_addr = (unsigned long)page_address(p);
804 meta->source->_rx_page_order = trans_pcie->rx_page_order;
805 meta->source->handler_status = handler_status;
806 }
807
808 iwl_hcmd_queue_reclaim(trans, txq_id, index);
809
810 if (!(meta->flags & CMD_ASYNC)) {
811 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
812 IWL_WARN(trans,
813 "HCMD_ACTIVE already clear for command %s\n",
814 trans_pcie_get_cmd_string(trans_pcie,
815 cmd->hdr.cmd));
816 }
817 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
818 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
819 trans_pcie_get_cmd_string(trans_pcie,
820 cmd->hdr.cmd));
821 wake_up(&trans->wait_command_queue);
822 }
823
824 meta->flags = 0;
825
826 spin_unlock(&txq->lock);
827}
828
829#define HOST_COMPLETE_TIMEOUT (2 * HZ)
830
831static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
832{
833 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
834 int ret;
835
836 /* An asynchronous command can not expect an SKB to be set. */
837 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
838 return -EINVAL;
839
840
841 ret = iwl_enqueue_hcmd(trans, cmd);
842 if (ret < 0) {
843 IWL_ERR(trans,
844 "Error sending %s: enqueue_hcmd failed: %d\n",
845 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
846 return ret;
847 }
848 return 0;
849}
850
/*
 * iwl_send_cmd_sync - send a host command and block until it completes
 *
 * Marks STATUS_HCMD_ACTIVE, enqueues the command, then sleeps until
 * iwl_tx_cmd_complete() clears the bit, or HOST_COMPLETE_TIMEOUT expires.
 * Returns 0 on success, -EIO if a command is already in flight or an
 * expected response is missing, -ETIMEDOUT on timeout, or the enqueue error.
 */
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	/* only one synchronous command may be in flight at a time */
	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		/* enqueue failed: undo the busy flag before bailing out */
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	/* iwl_tx_cmd_complete() clears STATUS_HCMD_ACTIVE and wakes us */
	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		/* re-check the bit: the command may have completed between
		 * the timeout and this point */
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       trans_pcie_get_cmd_string(trans_pcie,
								 cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	/* a completed CMD_WANT_SKB command must have delivered a response */
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		/* drop any response page we already received */
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
937
938int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
939{
940 if (cmd->flags & CMD_ASYNC)
941 return iwl_send_cmd_async(trans, cmd);
942
943 return iwl_send_cmd_sync(trans, cmd);
944}
945
/* Frees buffers until index _not_ inclusive */
/*
 * iwl_tx_queue_reclaim - reclaim transmitted frames from a data TX queue
 *
 * Advances q->read_ptr up to (but not including) @index, moving each
 * reclaimed skb onto @skbs for the caller to consume, and frees the
 * associated TFDs.  Returns the number of frames reclaimed, or 0 on
 * error (cmd queue, bad index, or non-empty @skbs).
 */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	/* caller must pass an empty list to collect the reclaimed skbs */
	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		/* every used data-queue entry should carry an skb */
		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		/* hand the skb to the caller and detach it from the ring */
		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}

	/* re-arm or stop the stuck-queue watchdog now that we made progress */
	iwl_queue_progress(trans_pcie, txq);

	return freed;
}