author		Kalle Valo <kvalo@qca.qualcomm.com>	2012-06-14 07:20:18 -0400
committer	Kalle Valo <kvalo@qca.qualcomm.com>	2012-06-14 07:44:49 -0400
commit		d987dd137bac8dca9b0015763d3106f48bb8a596
tree		ef887505b3c904c548d58ec9bb6f4970a0877042 /drivers/net/wireless/iwlwifi/pcie
parent		c85251f8562095cd6fd63ae786354283c5318303
parent		211c17aaee644bb808fbdeef547ac99db92c01ed

Merge remote branch 'wireless-next/master' into ath6kl-next

Conflicts:
	drivers/net/wireless/ath/ath6kl/cfg80211.c

Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/1000.c	|  141
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/2000.c	|  243
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/5000.c	|  180
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/6000.c	|  403
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/cfg.h		|  113
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/drv.c		|  380
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/internal.h	|  440
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/rx.c		| 1058
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/trans.c	| 2216
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/tx.c		|  969
10 files changed, 6143 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
new file mode 100644
index 000000000000..81b83f484f08
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -0,0 +1,141 @@
/******************************************************************************
 *
 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-csr.h"
#include "iwl-agn-hw.h"
#include "cfg.h"

/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 5
#define IWL100_UCODE_API_MAX 5

/* Oldest version we won't warn about */
#define IWL1000_UCODE_API_OK 5
#define IWL100_UCODE_API_OK 5

/* Lowest firmware API version supported */
#define IWL1000_UCODE_API_MIN 1
#define IWL100_UCODE_API_MIN 5

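/*
 * How these bounds are used (roughly): the driver requests firmware
 * starting at *_UCODE_API_MAX and steps down on failure; a version at
 * or above *_UCODE_API_OK loads silently, an older one loads with a
 * warning, and anything below *_UCODE_API_MIN is rejected (see
 * iwl_request_firmware() in iwl-drv.c).
 */
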
/* EEPROM version */
#define EEPROM_1000_TX_POWER_VERSION	(4)
#define EEPROM_1000_EEPROM_VERSION	(0x15C)

#define IWL1000_FW_PRE "iwlwifi-1000-"
#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"

#define IWL100_FW_PRE "iwlwifi-100-"
#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
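
/*
 * For example, IWL1000_MODULE_FIRMWARE(5) pastes the prefix, the
 * stringified API number and the ".ucode" suffix into the single string
 * literal "iwlwifi-1000-5.ucode", the file name requested from userspace.
 */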

static const struct iwl_base_params iwl1000_base_params = {
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
	.shadow_ram_support = false,
	.led_compensation = 51,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.wd_timeout = IWL_WATCHDOG_DISABLED,
	.max_event_log_size = 128,
};

static const struct iwl_ht_params iwl1000_ht_params = {
	.ht_greenfield_support = true,
	.use_rts_for_aggregation = true, /* use rts/cts protection */
	.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};

static const struct iwl_eeprom_params iwl1000_eeprom_params = {
	.regulatory_bands = {
		EEPROM_REG_BAND_1_CHANNELS,
		EEPROM_REG_BAND_2_CHANNELS,
		EEPROM_REG_BAND_3_CHANNELS,
		EEPROM_REG_BAND_4_CHANNELS,
		EEPROM_REG_BAND_5_CHANNELS,
		EEPROM_REG_BAND_24_HT40_CHANNELS,
		EEPROM_REGULATORY_BAND_NO_HT40,
	}
};

#define IWL_DEVICE_1000 \
	.fw_name_pre = IWL1000_FW_PRE, \
	.ucode_api_max = IWL1000_UCODE_API_MAX, \
	.ucode_api_ok = IWL1000_UCODE_API_OK, \
	.ucode_api_min = IWL1000_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_1000, \
	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
	.max_data_size = IWLAGN_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
	.base_params = &iwl1000_base_params, \
	.eeprom_params = &iwl1000_eeprom_params, \
	.led_mode = IWL_LED_BLINK

const struct iwl_cfg iwl1000_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
	IWL_DEVICE_1000,
	.ht_params = &iwl1000_ht_params,
};

const struct iwl_cfg iwl1000_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
	IWL_DEVICE_1000,
};

#define IWL_DEVICE_100 \
	.fw_name_pre = IWL100_FW_PRE, \
	.ucode_api_max = IWL100_UCODE_API_MAX, \
	.ucode_api_ok = IWL100_UCODE_API_OK, \
	.ucode_api_min = IWL100_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_100, \
	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
	.max_data_size = IWLAGN_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
	.base_params = &iwl1000_base_params, \
	.eeprom_params = &iwl1000_eeprom_params, \
	.led_mode = IWL_LED_RF_STATE, \
	.rx_with_siso_diversity = true

const struct iwl_cfg iwl100_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
	IWL_DEVICE_100,
	.ht_params = &iwl1000_ht_params,
};

const struct iwl_cfg iwl100_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
	IWL_DEVICE_100,
};

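/*
 * MODULE_FIRMWARE() records the firmware file names in the module's
 * .modinfo section, so tools such as modinfo and initramfs generators
 * can see which .ucode images this driver may request at runtime.
 */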
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
new file mode 100644
index 000000000000..fd4e78f56fa6
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -0,0 +1,243 @@
/******************************************************************************
 *
 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
#include "cfg.h"
#include "dvm/commands.h" /* needed for BT for now */

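/*
 * The BT-coexistence defaults used below (BT_AGG_THRESHOLD_DEF,
 * IWLAGN_BT_PRIO_BOOST_DEFAULT, IWL_BT_COEX_TRAFFIC_LOAD_NONE) still
 * live with the DVM command definitions, hence the include above.
 */
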
/* Highest firmware API version supported */
#define IWL2030_UCODE_API_MAX 6
#define IWL2000_UCODE_API_MAX 6
#define IWL105_UCODE_API_MAX 6
#define IWL135_UCODE_API_MAX 6

/* Oldest version we won't warn about */
#define IWL2030_UCODE_API_OK 6
#define IWL2000_UCODE_API_OK 6
#define IWL105_UCODE_API_OK 6
#define IWL135_UCODE_API_OK 6

/* Lowest firmware API version supported */
#define IWL2030_UCODE_API_MIN 5
#define IWL2000_UCODE_API_MIN 5
#define IWL105_UCODE_API_MIN 5
#define IWL135_UCODE_API_MIN 5

/* EEPROM version */
#define EEPROM_2000_TX_POWER_VERSION	(6)
#define EEPROM_2000_EEPROM_VERSION	(0x805)

#define IWL2030_FW_PRE "iwlwifi-2030-"
#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"

#define IWL2000_FW_PRE "iwlwifi-2000-"
#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"

#define IWL105_FW_PRE "iwlwifi-105-"
#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"

#define IWL135_FW_PRE "iwlwifi-135-"
#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"

static const struct iwl_base_params iwl2000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
	.shadow_ram_support = true,
	.led_compensation = 51,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
	.max_event_log_size = 512,
	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
	.hd_v2 = true,
};

static const struct iwl_base_params iwl2030_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
	.shadow_ram_support = true,
	.led_compensation = 57,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.wd_timeout = IWL_LONG_WD_TIMEOUT,
	.max_event_log_size = 512,
	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
	.hd_v2 = true,
};

static const struct iwl_ht_params iwl2000_ht_params = {
	.ht_greenfield_support = true,
	.use_rts_for_aggregation = true, /* use rts/cts protection */
	.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};

static const struct iwl_bt_params iwl2030_bt_params = {
	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
	.advanced_bt_coexist = true,
	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
	.bt_sco_disable = true,
	.bt_session_2 = true,
};

static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
	.regulatory_bands = {
		EEPROM_REG_BAND_1_CHANNELS,
		EEPROM_REG_BAND_2_CHANNELS,
		EEPROM_REG_BAND_3_CHANNELS,
		EEPROM_REG_BAND_4_CHANNELS,
		EEPROM_REG_BAND_5_CHANNELS,
		EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
		EEPROM_REGULATORY_BAND_NO_HT40,
	},
	.enhanced_txpower = true,
};

#define IWL_DEVICE_2000 \
	.fw_name_pre = IWL2000_FW_PRE, \
	.ucode_api_max = IWL2000_UCODE_API_MAX, \
	.ucode_api_ok = IWL2000_UCODE_API_OK, \
	.ucode_api_min = IWL2000_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_2000, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
	.base_params = &iwl2000_base_params, \
	.eeprom_params = &iwl20x0_eeprom_params, \
	.need_temp_offset_calib = true, \
	.temp_offset_v2 = true, \
	.led_mode = IWL_LED_RF_STATE

const struct iwl_cfg iwl2000_2bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
	IWL_DEVICE_2000,
	.ht_params = &iwl2000_ht_params,
};

const struct iwl_cfg iwl2000_2bgn_d_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
	IWL_DEVICE_2000,
	.ht_params = &iwl2000_ht_params,
};

#define IWL_DEVICE_2030 \
	.fw_name_pre = IWL2030_FW_PRE, \
	.ucode_api_max = IWL2030_UCODE_API_MAX, \
	.ucode_api_ok = IWL2030_UCODE_API_OK, \
	.ucode_api_min = IWL2030_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_2030, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
	.base_params = &iwl2030_base_params, \
	.bt_params = &iwl2030_bt_params, \
	.eeprom_params = &iwl20x0_eeprom_params, \
	.need_temp_offset_calib = true, \
	.temp_offset_v2 = true, \
	.led_mode = IWL_LED_RF_STATE, \
	.adv_pm = true

const struct iwl_cfg iwl2030_2bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
	IWL_DEVICE_2030,
	.ht_params = &iwl2000_ht_params,
};

#define IWL_DEVICE_105 \
	.fw_name_pre = IWL105_FW_PRE, \
	.ucode_api_max = IWL105_UCODE_API_MAX, \
	.ucode_api_ok = IWL105_UCODE_API_OK, \
	.ucode_api_min = IWL105_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_105, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
	.base_params = &iwl2000_base_params, \
	.eeprom_params = &iwl20x0_eeprom_params, \
	.need_temp_offset_calib = true, \
	.temp_offset_v2 = true, \
	.led_mode = IWL_LED_RF_STATE, \
	.adv_pm = true, \
	.rx_with_siso_diversity = true

const struct iwl_cfg iwl105_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
	IWL_DEVICE_105,
	.ht_params = &iwl2000_ht_params,
};

const struct iwl_cfg iwl105_bgn_d_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
	IWL_DEVICE_105,
	.ht_params = &iwl2000_ht_params,
};

#define IWL_DEVICE_135 \
	.fw_name_pre = IWL135_FW_PRE, \
	.ucode_api_max = IWL135_UCODE_API_MAX, \
	.ucode_api_ok = IWL135_UCODE_API_OK, \
	.ucode_api_min = IWL135_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_135, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
	.base_params = &iwl2030_base_params, \
	.bt_params = &iwl2030_bt_params, \
	.eeprom_params = &iwl20x0_eeprom_params, \
	.need_temp_offset_calib = true, \
	.temp_offset_v2 = true, \
	.led_mode = IWL_LED_RF_STATE, \
	.adv_pm = true, \
	.rx_with_siso_diversity = true

const struct iwl_cfg iwl135_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
	IWL_DEVICE_135,
	.ht_params = &iwl2000_ht_params,
};

MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
new file mode 100644
index 000000000000..d1665fa6d15a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -0,0 +1,180 @@
/******************************************************************************
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
#include "iwl-csr.h"
#include "cfg.h"

/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2

/* Oldest version we won't warn about */
#define IWL5000_UCODE_API_OK 5
#define IWL5150_UCODE_API_OK 2

/* Lowest firmware API version supported */
#define IWL5000_UCODE_API_MIN 1
#define IWL5150_UCODE_API_MIN 1

/* EEPROM versions */
#define EEPROM_5000_TX_POWER_VERSION	(4)
#define EEPROM_5000_EEPROM_VERSION	(0x11A)
#define EEPROM_5050_TX_POWER_VERSION	(4)
#define EEPROM_5050_EEPROM_VERSION	(0x21E)

#define IWL5000_FW_PRE "iwlwifi-5000-"
#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"

#define IWL5150_FW_PRE "iwlwifi-5150-"
#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"

static const struct iwl_base_params iwl5000_base_params = {
	.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.led_compensation = 51,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.wd_timeout = IWL_WATCHDOG_DISABLED,
	.max_event_log_size = 512,
	.no_idle_support = true,
};

static const struct iwl_ht_params iwl5000_ht_params = {
	.ht_greenfield_support = true,
	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};

static const struct iwl_eeprom_params iwl5000_eeprom_params = {
	.regulatory_bands = {
		EEPROM_REG_BAND_1_CHANNELS,
		EEPROM_REG_BAND_2_CHANNELS,
		EEPROM_REG_BAND_3_CHANNELS,
		EEPROM_REG_BAND_4_CHANNELS,
		EEPROM_REG_BAND_5_CHANNELS,
		EEPROM_REG_BAND_24_HT40_CHANNELS,
		EEPROM_REG_BAND_52_HT40_CHANNELS
	},
};

#define IWL_DEVICE_5000 \
	.fw_name_pre = IWL5000_FW_PRE, \
	.ucode_api_max = IWL5000_UCODE_API_MAX, \
	.ucode_api_ok = IWL5000_UCODE_API_OK, \
	.ucode_api_min = IWL5000_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_5000, \
	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
	.max_data_size = IWLAGN_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
	.base_params = &iwl5000_base_params, \
	.eeprom_params = &iwl5000_eeprom_params, \
	.led_mode = IWL_LED_BLINK

const struct iwl_cfg iwl5300_agn_cfg = {
	.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
	IWL_DEVICE_5000,
	/* at least EEPROM 0x11A has wrong info */
	.valid_tx_ant = ANT_ABC,	/* .cfg overwrite */
	.valid_rx_ant = ANT_ABC,	/* .cfg overwrite */
	.ht_params = &iwl5000_ht_params,
};

const struct iwl_cfg iwl5100_bgn_cfg = {
	.name = "Intel(R) WiFi Link 5100 BGN",
	IWL_DEVICE_5000,
	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
	.ht_params = &iwl5000_ht_params,
};

const struct iwl_cfg iwl5100_abg_cfg = {
	.name = "Intel(R) WiFi Link 5100 ABG",
	IWL_DEVICE_5000,
	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
};

const struct iwl_cfg iwl5100_agn_cfg = {
	.name = "Intel(R) WiFi Link 5100 AGN",
	IWL_DEVICE_5000,
	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
	.ht_params = &iwl5000_ht_params,
};

const struct iwl_cfg iwl5350_agn_cfg = {
	.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
	.fw_name_pre = IWL5000_FW_PRE,
	.ucode_api_max = IWL5000_UCODE_API_MAX,
	.ucode_api_ok = IWL5000_UCODE_API_OK,
	.ucode_api_min = IWL5000_UCODE_API_MIN,
	.device_family = IWL_DEVICE_FAMILY_5000,
	.max_inst_size = IWLAGN_RTC_INST_SIZE,
	.max_data_size = IWLAGN_RTC_DATA_SIZE,
	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
	.base_params = &iwl5000_base_params,
	.eeprom_params = &iwl5000_eeprom_params,
	.ht_params = &iwl5000_ht_params,
	.led_mode = IWL_LED_BLINK,
	.internal_wimax_coex = true,
};

#define IWL_DEVICE_5150 \
	.fw_name_pre = IWL5150_FW_PRE, \
	.ucode_api_max = IWL5150_UCODE_API_MAX, \
	.ucode_api_ok = IWL5150_UCODE_API_OK, \
	.ucode_api_min = IWL5150_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_5150, \
	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
	.max_data_size = IWLAGN_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
	.base_params = &iwl5000_base_params, \
	.eeprom_params = &iwl5000_eeprom_params, \
	.no_xtal_calib = true, \
	.led_mode = IWL_LED_BLINK, \
	.internal_wimax_coex = true

const struct iwl_cfg iwl5150_agn_cfg = {
	.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
	IWL_DEVICE_5150,
	.ht_params = &iwl5000_ht_params,
};

const struct iwl_cfg iwl5150_abg_cfg = {
	.name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
	IWL_DEVICE_5150,
};

MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
new file mode 100644
index 000000000000..4a57624afc40
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -0,0 +1,403 @@
/******************************************************************************
 *
 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
#include "cfg.h"
#include "dvm/commands.h" /* needed for BT for now */

/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
#define IWL6035_UCODE_API_MAX 6

/* Oldest version we won't warn about */
#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
#define IWL6050_UCODE_API_OK 5
#define IWL6000G2B_UCODE_API_OK 6
#define IWL6035_UCODE_API_OK 6

/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
#define IWL6000G2_UCODE_API_MIN 5
#define IWL6035_UCODE_API_MIN 6

/* EEPROM versions */
#define EEPROM_6000_TX_POWER_VERSION	(4)
#define EEPROM_6000_EEPROM_VERSION	(0x423)
#define EEPROM_6050_TX_POWER_VERSION	(4)
#define EEPROM_6050_EEPROM_VERSION	(0x532)
#define EEPROM_6150_TX_POWER_VERSION	(6)
#define EEPROM_6150_EEPROM_VERSION	(0x553)
#define EEPROM_6005_TX_POWER_VERSION	(6)
#define EEPROM_6005_EEPROM_VERSION	(0x709)
#define EEPROM_6030_TX_POWER_VERSION	(6)
#define EEPROM_6030_EEPROM_VERSION	(0x709)
#define EEPROM_6035_TX_POWER_VERSION	(6)
#define EEPROM_6035_EEPROM_VERSION	(0x753)

#define IWL6000_FW_PRE "iwlwifi-6000-"
#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"

#define IWL6050_FW_PRE "iwlwifi-6050-"
#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"

#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"

#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"

static const struct iwl_base_params iwl6000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
	.shadow_ram_support = true,
	.led_compensation = 51,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
	.max_event_log_size = 512,
	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};

static const struct iwl_base_params iwl6050_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
	.shadow_ram_support = true,
	.led_compensation = 51,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1500,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
	.max_event_log_size = 1024,
	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};

static const struct iwl_base_params iwl6000_g2_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.pll_cfg_val = 0,
	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
	.shadow_ram_support = true,
	.led_compensation = 57,
	.adv_thermal_throttle = true,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.wd_timeout = IWL_LONG_WD_TIMEOUT,
	.max_event_log_size = 512,
	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};

static const struct iwl_ht_params iwl6000_ht_params = {
	.ht_greenfield_support = true,
	.use_rts_for_aggregation = true, /* use rts/cts protection */
	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};

static const struct iwl_bt_params iwl6000_bt_params = {
	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
	.advanced_bt_coexist = true,
	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
	.bt_sco_disable = true,
};

static const struct iwl_eeprom_params iwl6000_eeprom_params = {
	.regulatory_bands = {
		EEPROM_REG_BAND_1_CHANNELS,
		EEPROM_REG_BAND_2_CHANNELS,
		EEPROM_REG_BAND_3_CHANNELS,
		EEPROM_REG_BAND_4_CHANNELS,
		EEPROM_REG_BAND_5_CHANNELS,
		EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
		EEPROM_REG_BAND_52_HT40_CHANNELS
	},
	.enhanced_txpower = true,
};

#define IWL_DEVICE_6005 \
	.fw_name_pre = IWL6005_FW_PRE, \
	.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
	.ucode_api_ok = IWL6000G2_UCODE_API_OK, \
	.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6005, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
	.base_params = &iwl6000_g2_base_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.need_temp_offset_calib = true, \
	.led_mode = IWL_LED_RF_STATE

const struct iwl_cfg iwl6005_2agn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
	IWL_DEVICE_6005,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6005_2abg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
	IWL_DEVICE_6005,
};

const struct iwl_cfg iwl6005_2bg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
	IWL_DEVICE_6005,
};

const struct iwl_cfg iwl6005_2agn_sff_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
	IWL_DEVICE_6005,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6005_2agn_d_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
	IWL_DEVICE_6005,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
	IWL_DEVICE_6005,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
	IWL_DEVICE_6005,
	.ht_params = &iwl6000_ht_params,
};

#define IWL_DEVICE_6030 \
	.fw_name_pre = IWL6030_FW_PRE, \
	.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
	.ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
	.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6030, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
	.base_params = &iwl6000_g2_base_params, \
	.bt_params = &iwl6000_bt_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.need_temp_offset_calib = true, \
	.led_mode = IWL_LED_RF_STATE, \
	.adv_pm = true

const struct iwl_cfg iwl6030_2agn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
	IWL_DEVICE_6030,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6030_2abg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
	IWL_DEVICE_6030,
};

const struct iwl_cfg iwl6030_2bgn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
	IWL_DEVICE_6030,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6030_2bg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
	IWL_DEVICE_6030,
};

#define IWL_DEVICE_6035 \
	.fw_name_pre = IWL6030_FW_PRE, \
	.ucode_api_max = IWL6035_UCODE_API_MAX, \
	.ucode_api_ok = IWL6035_UCODE_API_OK, \
	.ucode_api_min = IWL6035_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6030, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
	.base_params = &iwl6000_g2_base_params, \
	.bt_params = &iwl6000_bt_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.need_temp_offset_calib = true, \
	.led_mode = IWL_LED_RF_STATE, \
	.adv_pm = true

const struct iwl_cfg iwl6035_2agn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
	IWL_DEVICE_6035,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl1030_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
	IWL_DEVICE_6030,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl1030_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
	IWL_DEVICE_6030,
};

const struct iwl_cfg iwl130_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
	IWL_DEVICE_6030,
	.ht_params = &iwl6000_ht_params,
	.rx_with_siso_diversity = true,
};

const struct iwl_cfg iwl130_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 130 BG",
	IWL_DEVICE_6030,
	.rx_with_siso_diversity = true,
};

/*
 * "i": Internal configuration, use internal Power Amplifier
 */
#define IWL_DEVICE_6000i \
	.fw_name_pre = IWL6000_FW_PRE, \
	.ucode_api_max = IWL6000_UCODE_API_MAX, \
	.ucode_api_ok = IWL6000_UCODE_API_OK, \
	.ucode_api_min = IWL6000_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6000i, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.valid_tx_ant = ANT_BC,		/* .cfg overwrite */ \
	.valid_rx_ant = ANT_BC,		/* .cfg overwrite */ \
	.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
	.base_params = &iwl6000_base_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.led_mode = IWL_LED_BLINK

const struct iwl_cfg iwl6000i_2agn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
	IWL_DEVICE_6000i,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6000i_2abg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
	IWL_DEVICE_6000i,
};

const struct iwl_cfg iwl6000i_2bg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
	IWL_DEVICE_6000i,
};

#define IWL_DEVICE_6050 \
	.fw_name_pre = IWL6050_FW_PRE, \
	.ucode_api_max = IWL6050_UCODE_API_MAX, \
	.ucode_api_min = IWL6050_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6050, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.valid_tx_ant = ANT_AB,		/* .cfg overwrite */ \
	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */ \
	.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
	.base_params = &iwl6050_base_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.led_mode = IWL_LED_BLINK, \
	.internal_wimax_coex = true

const struct iwl_cfg iwl6050_2agn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
	IWL_DEVICE_6050,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6050_2abg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
	IWL_DEVICE_6050,
};

#define IWL_DEVICE_6150 \
	.fw_name_pre = IWL6050_FW_PRE, \
	.ucode_api_max = IWL6050_UCODE_API_MAX, \
	.ucode_api_min = IWL6050_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6150, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
	.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
	.base_params = &iwl6050_base_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.led_mode = IWL_LED_BLINK, \
	.internal_wimax_coex = true

const struct iwl_cfg iwl6150_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
	IWL_DEVICE_6150,
	.ht_params = &iwl6000_ht_params,
};

const struct iwl_cfg iwl6150_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
	IWL_DEVICE_6150,
};

const struct iwl_cfg iwl6000_3agn_cfg = {
	.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
	.fw_name_pre = IWL6000_FW_PRE,
	.ucode_api_max = IWL6000_UCODE_API_MAX,
	.ucode_api_ok = IWL6000_UCODE_API_OK,
	.ucode_api_min = IWL6000_UCODE_API_MIN,
	.device_family = IWL_DEVICE_FAMILY_6000,
	.max_inst_size = IWL60_RTC_INST_SIZE,
	.max_data_size = IWL60_RTC_DATA_SIZE,
	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
	.base_params = &iwl6000_base_params,
	.eeprom_params = &iwl6000_eeprom_params,
	.ht_params = &iwl6000_ht_params,
	.led_mode = IWL_LED_BLINK,
};

MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
new file mode 100644
index 000000000000..82152311d73b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
@@ -0,0 +1,113 @@
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_pci_h__
#define __iwl_pci_h__


/*
 * This file declares the config structures for all devices.
 */

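/*
 * Each iwl_cfg below is defined in the matching per-family file in this
 * directory (1000.c, 2000.c, 5000.c, 6000.c) and bound to its PCI IDs by
 * the device table in drv.c.
 */
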
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
extern const struct iwl_cfg iwl5350_agn_cfg;
extern const struct iwl_cfg iwl5100_bgn_cfg;
extern const struct iwl_cfg iwl5100_abg_cfg;
extern const struct iwl_cfg iwl5150_agn_cfg;
extern const struct iwl_cfg iwl5150_abg_cfg;
extern const struct iwl_cfg iwl6005_2agn_cfg;
extern const struct iwl_cfg iwl6005_2abg_cfg;
extern const struct iwl_cfg iwl6005_2bg_cfg;
extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
extern const struct iwl_cfg iwl6005_2agn_d_cfg;
extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
extern const struct iwl_cfg iwl1030_bgn_cfg;
extern const struct iwl_cfg iwl1030_bg_cfg;
extern const struct iwl_cfg iwl6030_2agn_cfg;
extern const struct iwl_cfg iwl6030_2abg_cfg;
extern const struct iwl_cfg iwl6030_2bgn_cfg;
extern const struct iwl_cfg iwl6030_2bg_cfg;
extern const struct iwl_cfg iwl6000i_2agn_cfg;
extern const struct iwl_cfg iwl6000i_2abg_cfg;
extern const struct iwl_cfg iwl6000i_2bg_cfg;
extern const struct iwl_cfg iwl6000_3agn_cfg;
extern const struct iwl_cfg iwl6050_2agn_cfg;
extern const struct iwl_cfg iwl6050_2abg_cfg;
extern const struct iwl_cfg iwl6150_bgn_cfg;
extern const struct iwl_cfg iwl6150_bg_cfg;
extern const struct iwl_cfg iwl1000_bgn_cfg;
extern const struct iwl_cfg iwl1000_bg_cfg;
extern const struct iwl_cfg iwl100_bgn_cfg;
extern const struct iwl_cfg iwl100_bg_cfg;
extern const struct iwl_cfg iwl130_bgn_cfg;
extern const struct iwl_cfg iwl130_bg_cfg;
extern const struct iwl_cfg iwl2000_2bgn_cfg;
extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
extern const struct iwl_cfg iwl2030_2bgn_cfg;
extern const struct iwl_cfg iwl6035_2agn_cfg;
extern const struct iwl_cfg iwl105_bgn_cfg;
extern const struct iwl_cfg iwl105_bgn_d_cfg;
extern const struct iwl_cfg iwl135_bgn_cfg;

#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
new file mode 100644
index 000000000000..f4c3500b68c6
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -0,0 +1,380 @@
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>

#include "iwl-trans.h"
#include "iwl-drv.h"

#include "cfg.h"
#include "internal.h"

#define IWL_PCI_DEVICE(dev, subdev, cfg) \
	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
	.driver_data = (kernel_ulong_t)&(cfg)

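/*
 * For example, IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg) expands
 * (modulo parentheses) to
 *	.vendor = PCI_VENDOR_ID_INTEL, .device = 0x4232,
 *	.subvendor = PCI_ANY_ID, .subdevice = 0x1201,
 *	.driver_data = (kernel_ulong_t)&iwl5100_agn_cfg
 * so each table entry maps one (device, subdevice) pair to its iwl_cfg.
 */
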
82 | /* Hardware specific file defines the PCI IDs table for that hardware module */ | ||
83 | static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | ||
84 | {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ | ||
85 | {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ | ||
86 | {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ | ||
87 | {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ | ||
88 | {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ | ||
89 | {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ | ||
90 | {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ | ||
91 | {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ | ||
92 | {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ | ||
93 | {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ | ||
94 | {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ | ||
95 | {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ | ||
96 | {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ | ||
97 | {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ | ||
98 | {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ | ||
99 | {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ | ||
100 | {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ | ||
101 | {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ | ||
102 | {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ | ||
103 | {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ | ||
104 | {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ | ||
105 | {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ | ||
106 | {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ | ||
107 | {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ | ||
108 | |||
109 | /* 5300 Series WiFi */ | ||
110 | {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ | ||
111 | {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ | ||
112 | {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ | ||
113 | {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ | ||
114 | {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ | ||
115 | {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ | ||
116 | {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ | ||
117 | {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ | ||
118 | {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ | ||
119 | {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ | ||
120 | {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ | ||
121 | {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ | ||
122 | |||
123 | /* 5350 Series WiFi/WiMax */ | ||
124 | {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ | ||
125 | {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ | ||
126 | {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ | ||
127 | |||
128 | /* 5150 Series WiFi/WiMax */ | ||
129 | {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ | ||
130 | {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ | ||
131 | {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ | ||
132 | {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ | ||
133 | {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ | ||
134 | {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ | ||
135 | |||
136 | {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ | ||
137 | {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ | ||
138 | {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ | ||
139 | {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ | ||
140 | |||
141 | /* 6x00 Series */ | ||
142 | {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, | ||
143 | {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, | ||
144 | {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, | ||
145 | {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, | ||
146 | {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, | ||
147 | {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, | ||
148 | {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, | ||
149 | {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, | ||
150 | {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, | ||
151 | {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, | ||
152 | |||
153 | /* 6x05 Series */ | ||
154 | {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, | ||
155 | {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, | ||
156 | {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, | ||
157 | {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, | ||
158 | {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, | ||
159 | {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, | ||
160 | {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, | ||
161 | {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, | ||
162 | {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, | ||
163 | {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, | ||
164 | {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)}, /* low 5GHz active */ | ||
165 | {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)}, /* high 5GHz active */ | ||
166 | |||
167 | /* 6x30 Series */ | ||
168 | {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, | ||
169 | {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, | ||
170 | {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, | ||
171 | {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, | ||
172 | {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, | ||
173 | {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, | ||
174 | {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, | ||
175 | {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, | ||
176 | {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, | ||
177 | {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, | ||
178 | {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, | ||
179 | {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, | ||
180 | {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, | ||
181 | {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, | ||
182 | {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, | ||
183 | {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, | ||
184 | |||
185 | /* 6x50 WiFi/WiMax Series */ | ||
186 | {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, | ||
187 | {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, | ||
188 | {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, | ||
189 | {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, | ||
190 | {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, | ||
191 | {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, | ||
192 | |||
193 | /* 6150 WiFi/WiMax Series */ | ||
194 | {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, | ||
195 | {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, | ||
196 | {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, | ||
197 | {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, | ||
198 | {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, | ||
199 | {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, | ||
200 | |||
201 | /* 1000 Series WiFi */ | ||
202 | {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, | ||
203 | {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, | ||
204 | {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, | ||
205 | {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, | ||
206 | {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, | ||
207 | {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, | ||
208 | {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, | ||
209 | {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, | ||
210 | {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, | ||
211 | {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, | ||
212 | {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, | ||
213 | {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, | ||
214 | |||
215 | /* 100 Series WiFi */ | ||
216 | {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, | ||
217 | {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, | ||
218 | {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, | ||
219 | {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, | ||
220 | {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, | ||
221 | {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, | ||
222 | |||
223 | /* 130 Series WiFi */ | ||
224 | {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, | ||
225 | {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, | ||
226 | {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, | ||
227 | {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, | ||
228 | {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, | ||
229 | {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, | ||
230 | |||
231 | /* 2x00 Series */ | ||
232 | {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, | ||
233 | {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, | ||
234 | {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, | ||
235 | {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, | ||
236 | |||
237 | /* 2x30 Series */ | ||
238 | {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, | ||
239 | {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, | ||
240 | {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, | ||
241 | |||
242 | /* 6x35 Series */ | ||
243 | {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, | ||
244 | {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, | ||
245 | {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, | ||
246 | {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, | ||
247 | |||
248 | /* 105 Series */ | ||
249 | {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, | ||
250 | {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, | ||
251 | {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, | ||
252 | {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, | ||
253 | |||
254 | /* 135 Series */ | ||
255 | {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, | ||
256 | {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, | ||
257 | {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, | ||
258 | |||
259 | {0} | ||
260 | }; | ||
261 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); | ||
262 | |||
263 | /* PCI registers */ | ||
264 | #define PCI_CFG_RETRY_TIMEOUT 0x041 | ||
265 | |||
266 | #ifndef CONFIG_IWLWIFI_IDI | ||
267 | |||
268 | static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
269 | { | ||
270 | const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); | ||
271 | struct iwl_trans *iwl_trans; | ||
272 | struct iwl_trans_pcie *trans_pcie; | ||
273 | |||
274 | iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); | ||
275 | if (iwl_trans == NULL) | ||
276 | return -ENOMEM; | ||
277 | |||
278 | pci_set_drvdata(pdev, iwl_trans); | ||
279 | |||
280 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); | ||
281 | trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); | ||
282 | if (!trans_pcie->drv) | ||
283 | goto out_free_trans; | ||
284 | |||
285 | return 0; | ||
286 | |||
287 | out_free_trans: | ||
288 | iwl_trans_pcie_free(iwl_trans); | ||
289 | pci_set_drvdata(pdev, NULL); | ||
290 | return -EFAULT; | ||
291 | } | ||
292 | |||
293 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) | ||
294 | { | ||
295 | struct iwl_trans *trans = pci_get_drvdata(pdev); | ||
296 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
297 | |||
298 | iwl_drv_stop(trans_pcie->drv); | ||
299 | iwl_trans_pcie_free(trans); | ||
300 | |||
301 | pci_set_drvdata(pdev, NULL); | ||
302 | } | ||
303 | |||
304 | #endif /* CONFIG_IWLWIFI_IDI */ | ||
305 | |||
306 | #ifdef CONFIG_PM_SLEEP | ||
307 | |||
308 | static int iwl_pci_suspend(struct device *device) | ||
309 | { | ||
310 | struct pci_dev *pdev = to_pci_dev(device); | ||
311 | struct iwl_trans *iwl_trans = pci_get_drvdata(pdev); | ||
312 | |||
313 | /* Before you put code here, think about WoWLAN. You cannot check here | ||
314 | * whether WoWLAN is enabled or not, and your code will run even if | ||
315 | * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. | ||
316 | */ | ||
317 | |||
318 | return iwl_trans_suspend(iwl_trans); | ||
319 | } | ||
320 | |||
321 | static int iwl_pci_resume(struct device *device) | ||
322 | { | ||
323 | struct pci_dev *pdev = to_pci_dev(device); | ||
324 | struct iwl_trans *iwl_trans = pci_get_drvdata(pdev); | ||
325 | |||
326 | /* Before you put code here, think about WoWLAN. You cannot check here | ||
327 | * whether WoWLAN is enabled or not, and your code will run even if | ||
328 | * WoWLAN is enabled - the NIC may be alive. | ||
329 | */ | ||
330 | |||
331 | /* | ||
332 | * We disable the RETRY_TIMEOUT register (0x41) to keep | ||
333 | * PCI Tx retries from interfering with C3 CPU state. | ||
334 | */ | ||
335 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | ||
336 | |||
337 | return iwl_trans_resume(iwl_trans); | ||
338 | } | ||
339 | |||
340 | static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); | ||
341 | |||
342 | #define IWL_PM_OPS (&iwl_dev_pm_ops) | ||
343 | |||
344 | #else | ||
345 | |||
346 | #define IWL_PM_OPS NULL | ||
347 | |||
348 | #endif | ||
349 | |||
350 | #ifdef CONFIG_IWLWIFI_IDI | ||
351 | /* | ||
352 | * Defined externally in iwl-idi.c | ||
353 | */ | ||
354 | int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); | ||
355 | void __devexit iwl_pci_remove(struct pci_dev *pdev); | ||
356 | |||
357 | #endif /* CONFIG_IWLWIFI_IDI */ | ||
358 | |||
359 | static struct pci_driver iwl_pci_driver = { | ||
360 | .name = DRV_NAME, | ||
361 | .id_table = iwl_hw_card_ids, | ||
362 | .probe = iwl_pci_probe, | ||
363 | .remove = __devexit_p(iwl_pci_remove), | ||
364 | .driver.pm = IWL_PM_OPS, | ||
365 | }; | ||
366 | |||
367 | int __must_check iwl_pci_register_driver(void) | ||
368 | { | ||
369 | int ret; | ||
370 | ret = pci_register_driver(&iwl_pci_driver); | ||
371 | if (ret) | ||
372 | pr_err("Unable to initialize PCI module\n"); | ||
373 | |||
374 | return ret; | ||
375 | } | ||
376 | |||
377 | void iwl_pci_unregister_driver(void) | ||
378 | { | ||
379 | pci_unregister_driver(&iwl_pci_driver); | ||
380 | } | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h new file mode 100644 index 000000000000..5024fb662bf6 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
@@ -0,0 +1,440 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | #ifndef __iwl_trans_int_pcie_h__ | ||
30 | #define __iwl_trans_int_pcie_h__ | ||
31 | |||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/wait.h> | ||
36 | #include <linux/pci.h> | ||
37 | #include <linux/timer.h> | ||
38 | |||
39 | #include "iwl-fh.h" | ||
40 | #include "iwl-csr.h" | ||
41 | #include "iwl-trans.h" | ||
42 | #include "iwl-debug.h" | ||
43 | #include "iwl-io.h" | ||
44 | #include "iwl-op-mode.h" | ||
45 | |||
46 | struct iwl_host_cmd; | ||
47 | |||
48 | /* This file includes the declarations that are internal to the | ||
49 | * trans_pcie layer */ | ||
50 | |||
51 | struct iwl_rx_mem_buffer { | ||
52 | dma_addr_t page_dma; | ||
53 | struct page *page; | ||
54 | struct list_head list; | ||
55 | }; | ||
56 | |||
57 | /** | ||
58 | * struct isr_statistics - interrupt statistics | ||
59 | * | ||
60 | */ | ||
61 | struct isr_statistics { | ||
62 | u32 hw; | ||
63 | u32 sw; | ||
64 | u32 err_code; | ||
65 | u32 sch; | ||
66 | u32 alive; | ||
67 | u32 rfkill; | ||
68 | u32 ctkill; | ||
69 | u32 wakeup; | ||
70 | u32 rx; | ||
71 | u32 tx; | ||
72 | u32 unhandled; | ||
73 | }; | ||
74 | |||
75 | /** | ||
76 | * struct iwl_rx_queue - Rx queue | ||
77 | * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) | ||
78 | * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) | ||
79 | * @pool: backing store of RX_QUEUE_SIZE + RX_FREE_BUFFERS rx buffers | ||
80 | * @queue: pointers into @pool for the buffers currently owned by the device | ||
81 | * @read: Shared index to newest available Rx buffer | ||
82 | * @write: Shared index to oldest written Rx packet | ||
83 | * @free_count: Number of pre-allocated buffers in rx_free | ||
84 | * @write_actual: last write index handed to the device (multiple of 8) | ||
85 | * @rx_free: list of free SKBs for use | ||
86 | * @rx_used: List of Rx buffers with no SKB | ||
87 | * @need_update: flag to indicate we need to update read/write index | ||
88 | * @rb_stts: driver's pointer to receive buffer status | ||
89 | * @rb_stts_dma: bus address of receive buffer status | ||
90 | * @lock: protects the queue indexes and the rx_free/rx_used lists | ||
91 | * | ||
92 | * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers | ||
93 | */ | ||
94 | struct iwl_rx_queue { | ||
95 | __le32 *bd; | ||
96 | dma_addr_t bd_dma; | ||
97 | struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; | ||
98 | struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; | ||
99 | u32 read; | ||
100 | u32 write; | ||
101 | u32 free_count; | ||
102 | u32 write_actual; | ||
103 | struct list_head rx_free; | ||
104 | struct list_head rx_used; | ||
105 | int need_update; | ||
106 | struct iwl_rb_status *rb_stts; | ||
107 | dma_addr_t rb_stts_dma; | ||
108 | spinlock_t lock; | ||
109 | }; | ||
110 | |||
111 | struct iwl_dma_ptr { | ||
112 | dma_addr_t dma; | ||
113 | void *addr; | ||
114 | size_t size; | ||
115 | }; | ||
116 | |||
117 | /** | ||
118 | * iwl_queue_inc_wrap - increment queue index, wrap back to beginning | ||
119 | * @index: current index | ||
120 | * @n_bd: total number of entries in queue (must be power of 2) | ||
121 | */ | ||
122 | static inline int iwl_queue_inc_wrap(int index, int n_bd) | ||
123 | { | ||
124 | return ++index & (n_bd - 1); | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * iwl_queue_dec_wrap - decrement queue index, wrap back to end | ||
129 | * @index: current index | ||
130 | * @n_bd: total number of entries in queue (must be power of 2) | ||
131 | */ | ||
132 | static inline int iwl_queue_dec_wrap(int index, int n_bd) | ||
133 | { | ||
134 | return --index & (n_bd - 1); | ||
135 | } | ||
136 | |||
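A quick illustration of the power-of-2 wrap used by both helpers, with the hardware's fixed n_bd of 256 (the values are assumed, for illustration only):

    /* the mask is 0xff, so the index wraps without a branch */
    iwl_queue_inc_wrap(255, 256);   /* (255 + 1) & 0xff == 0,  wraps to start */
    iwl_queue_dec_wrap(0, 256);     /* (0 - 1)   & 0xff == 255, wraps to end  */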
137 | struct iwl_cmd_meta { | ||
138 | /* only for SYNC commands, iff the reply skb is wanted */ | ||
139 | struct iwl_host_cmd *source; | ||
140 | |||
141 | DEFINE_DMA_UNMAP_ADDR(mapping); | ||
142 | DEFINE_DMA_UNMAP_LEN(len); | ||
143 | |||
144 | u32 flags; | ||
145 | }; | ||
146 | |||
147 | /* | ||
148 | * Generic queue structure | ||
149 | * | ||
150 | * Contains common data for Rx and Tx queues. | ||
151 | * | ||
152 | * Note the difference between n_bd and n_window: the hardware | ||
153 | * always assumes 256 descriptors, so n_bd is always 256 (barring | ||
154 | * HW changes in the future). For the normal TX | ||
155 | * queues, n_window, which is the size of the software queue data | ||
156 | * is also 256; however, for the command queue, n_window is only | ||
157 | * 32 since we don't need so many commands pending. Since the HW | ||
158 | * still uses 256 BDs for DMA though, n_bd stays 256. As a result, | ||
159 | * the software buffers (in the variables @meta, @txb in struct | ||
160 | * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds | ||
161 | * in the same struct) have 256. | ||
162 | * This means that we end up with the following: | ||
163 | * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | | ||
164 | * SW entries: | 0 | ... | 31 | | ||
165 | * where N is a number between 0 and 7. This means that the SW | ||
166 | * data is a window overlaid on the HW queue. | ||
167 | */ | ||
168 | struct iwl_queue { | ||
169 | int n_bd; /* number of BDs in this queue */ | ||
170 | int write_ptr; /* first empty entry (index), host_w */ | ||
171 | int read_ptr; /* last used entry (index), host_r */ | ||
172 | /* used for monitoring and recovering a stuck queue */ | ||
173 | dma_addr_t dma_addr; /* physical addr for BD's */ | ||
174 | int n_window; /* safe queue window */ | ||
175 | u32 id; | ||
176 | int low_mark; /* low watermark, resume queue if free | ||
177 | * space more than this */ | ||
178 | int high_mark; /* high watermark, stop queue if free | ||
179 | * space less than this */ | ||
180 | }; | ||
181 | |||
182 | #define TFD_TX_CMD_SLOTS 256 | ||
183 | #define TFD_CMD_SLOTS 32 | ||
184 | |||
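To make the window overlay described above concrete: for the command queue, n_window is TFD_CMD_SLOTS (32), so a hardware index anywhere in 0..255 folds onto the 32-entry software window by masking, exactly as get_cmd_index() does further below. A worked example with assumed values:

    int hw_index = 100;                             /* 3 * 32 + 4 */
    int sw_slot = hw_index & (TFD_CMD_SLOTS - 1);   /* 100 & 31 == 4 */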
185 | struct iwl_pcie_tx_queue_entry { | ||
186 | struct iwl_device_cmd *cmd; | ||
187 | struct sk_buff *skb; | ||
188 | struct iwl_cmd_meta meta; | ||
189 | }; | ||
190 | |||
191 | /** | ||
192 | * struct iwl_tx_queue - Tx Queue for DMA | ||
193 | * @q: generic Rx/Tx queue descriptor | ||
194 | * @tfds: transmit frame descriptors (DMA memory) | ||
195 | * @entries: transmit entries (driver state) | ||
196 | * @lock: queue lock | ||
197 | * @stuck_timer: timer that fires if queue gets stuck | ||
198 | * @trans_pcie: pointer back to transport (for timer) | ||
199 | * @need_update: indicates need to update read/write index | ||
200 | * @active: stores if queue is active | ||
201 | * | ||
202 | * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame | ||
203 | * descriptors) and required locking structures. | ||
204 | */ | ||
205 | struct iwl_tx_queue { | ||
206 | struct iwl_queue q; | ||
207 | struct iwl_tfd *tfds; | ||
208 | struct iwl_pcie_tx_queue_entry *entries; | ||
209 | spinlock_t lock; | ||
210 | struct timer_list stuck_timer; | ||
211 | struct iwl_trans_pcie *trans_pcie; | ||
212 | u8 need_update; | ||
213 | u8 active; | ||
214 | }; | ||
215 | |||
216 | /** | ||
217 | * struct iwl_trans_pcie - PCIe transport specific data | ||
218 | * @rxq: all the RX queue data | ||
219 | * @rx_replenish: work that will be called when buffers need to be allocated | ||
220 | * @drv: pointer to iwl_drv | ||
221 | * @trans: pointer to the generic transport area | ||
222 | * @irq: the irq number for the device | ||
223 | * @irq_requested: true when the irq has been requested | ||
224 | * @scd_base_addr: scheduler sram base address in SRAM | ||
225 | * @scd_bc_tbls: pointer to the byte count table of the scheduler | ||
226 | * @kw: keep warm address | ||
227 | * @pci_dev: basic pci-network driver stuff | ||
228 | * @hw_base: pci hardware address support | ||
229 | * @ucode_write_complete: indicates that the ucode has been copied. | ||
230 | * @ucode_write_waitq: wait queue for uCode load | ||
231 | * @status: transport specific status flags | ||
232 | * @cmd_queue: command queue number | ||
233 | * @rx_buf_size_8k: 8 kB RX buffer size | ||
234 | * @rx_page_order: page order for receive buffer size | ||
235 | * @wd_timeout: queue watchdog timeout (jiffies) | ||
236 | */ | ||
237 | struct iwl_trans_pcie { | ||
238 | struct iwl_rx_queue rxq; | ||
239 | struct work_struct rx_replenish; | ||
240 | struct iwl_trans *trans; | ||
241 | struct iwl_drv *drv; | ||
242 | |||
243 | /* INT ICT Table */ | ||
244 | __le32 *ict_tbl; | ||
245 | dma_addr_t ict_tbl_dma; | ||
246 | int ict_index; | ||
247 | u32 inta; | ||
248 | bool use_ict; | ||
249 | bool irq_requested; | ||
250 | struct tasklet_struct irq_tasklet; | ||
251 | struct isr_statistics isr_stats; | ||
252 | |||
253 | unsigned int irq; | ||
254 | spinlock_t irq_lock; | ||
255 | u32 inta_mask; | ||
256 | u32 scd_base_addr; | ||
257 | struct iwl_dma_ptr scd_bc_tbls; | ||
258 | struct iwl_dma_ptr kw; | ||
259 | |||
260 | struct iwl_tx_queue *txq; | ||
261 | unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; | ||
262 | unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; | ||
263 | |||
264 | /* PCI bus related data */ | ||
265 | struct pci_dev *pci_dev; | ||
266 | void __iomem *hw_base; | ||
267 | |||
268 | bool ucode_write_complete; | ||
269 | wait_queue_head_t ucode_write_waitq; | ||
270 | unsigned long status; | ||
271 | u8 cmd_queue; | ||
272 | u8 n_no_reclaim_cmds; | ||
273 | u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; | ||
274 | u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES]; | ||
275 | u8 n_q_to_fifo; | ||
276 | |||
277 | bool rx_buf_size_8k; | ||
278 | u32 rx_page_order; | ||
279 | |||
280 | const char **command_names; | ||
281 | |||
282 | /* queue watchdog */ | ||
283 | unsigned long wd_timeout; | ||
284 | }; | ||
285 | |||
286 | /***************************************************** | ||
287 | * DRIVER STATUS FUNCTIONS | ||
288 | ******************************************************/ | ||
289 | #define STATUS_HCMD_ACTIVE 0 | ||
290 | #define STATUS_DEVICE_ENABLED 1 | ||
291 | #define STATUS_TPOWER_PMI 2 | ||
292 | #define STATUS_INT_ENABLED 3 | ||
293 | |||
294 | #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ | ||
295 | ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) | ||
296 | |||
297 | static inline struct iwl_trans * | ||
298 | iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) | ||
299 | { | ||
300 | return container_of((void *)trans_pcie, struct iwl_trans, | ||
301 | trans_specific); | ||
302 | } | ||
303 | |||
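The macro and helper above are inverses: the PCIe-private area is embedded inside the generic iwl_trans allocation, so container_of() can recover the enclosing struct. A sketch of the round trip, assuming a valid trans pointer:

    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
    struct iwl_trans *same = iwl_trans_pcie_get_trans(trans_pcie);
    /* same == trans: trans_specific points into the iwl_trans allocation */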
304 | struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | ||
305 | const struct pci_device_id *ent, | ||
306 | const struct iwl_cfg *cfg); | ||
307 | void iwl_trans_pcie_free(struct iwl_trans *trans); | ||
308 | |||
309 | /***************************************************** | ||
310 | * RX | ||
311 | ******************************************************/ | ||
312 | void iwl_bg_rx_replenish(struct work_struct *data); | ||
313 | void iwl_irq_tasklet(struct iwl_trans *trans); | ||
314 | void iwlagn_rx_replenish(struct iwl_trans *trans); | ||
315 | void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, | ||
316 | struct iwl_rx_queue *q); | ||
317 | |||
318 | /***************************************************** | ||
319 | * ICT | ||
320 | ******************************************************/ | ||
321 | void iwl_reset_ict(struct iwl_trans *trans); | ||
322 | void iwl_disable_ict(struct iwl_trans *trans); | ||
323 | int iwl_alloc_isr_ict(struct iwl_trans *trans); | ||
324 | void iwl_free_isr_ict(struct iwl_trans *trans); | ||
325 | irqreturn_t iwl_isr_ict(int irq, void *data); | ||
326 | |||
327 | /***************************************************** | ||
328 | * TX / HCMD | ||
329 | ******************************************************/ | ||
330 | void iwl_txq_update_write_ptr(struct iwl_trans *trans, | ||
331 | struct iwl_tx_queue *txq); | ||
332 | int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | ||
333 | struct iwl_tx_queue *txq, | ||
334 | dma_addr_t addr, u16 len, u8 reset); | ||
335 | int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id); | ||
336 | int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); | ||
337 | void iwl_tx_cmd_complete(struct iwl_trans *trans, | ||
338 | struct iwl_rx_cmd_buffer *rxb, int handler_status); | ||
339 | void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | ||
340 | struct iwl_tx_queue *txq, | ||
341 | u16 byte_cnt); | ||
342 | void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | ||
343 | int sta_id, int tid, int frame_limit, u16 ssn); | ||
344 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); | ||
345 | void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | ||
346 | enum dma_data_direction dma_dir); | ||
347 | int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, | ||
348 | struct sk_buff_head *skbs); | ||
349 | int iwl_queue_space(const struct iwl_queue *q); | ||
350 | |||
351 | /***************************************************** | ||
352 | * Error handling | ||
353 | ******************************************************/ | ||
354 | int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); | ||
355 | void iwl_dump_csr(struct iwl_trans *trans); | ||
356 | |||
357 | /***************************************************** | ||
358 | * Helpers | ||
359 | ******************************************************/ | ||
360 | static inline void iwl_disable_interrupts(struct iwl_trans *trans) | ||
361 | { | ||
362 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
363 | clear_bit(STATUS_INT_ENABLED, &trans_pcie->status); | ||
364 | |||
365 | /* disable interrupts from uCode/NIC to host */ | ||
366 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | ||
367 | |||
368 | /* acknowledge/clear/reset any interrupts still pending | ||
369 | * from uCode or flow handler (Rx/Tx DMA) */ | ||
370 | iwl_write32(trans, CSR_INT, 0xffffffff); | ||
371 | iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); | ||
372 | IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); | ||
373 | } | ||
374 | |||
375 | static inline void iwl_enable_interrupts(struct iwl_trans *trans) | ||
376 | { | ||
377 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
378 | |||
379 | IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); | ||
380 | set_bit(STATUS_INT_ENABLED, &trans_pcie->status); | ||
381 | iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); | ||
382 | } | ||
383 | |||
384 | static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) | ||
385 | { | ||
386 | IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); | ||
387 | iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); | ||
388 | } | ||
389 | |||
390 | static inline void iwl_wake_queue(struct iwl_trans *trans, | ||
391 | struct iwl_tx_queue *txq) | ||
392 | { | ||
393 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
394 | |||
395 | if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { | ||
396 | IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); | ||
397 | iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); | ||
398 | } | ||
399 | } | ||
400 | |||
401 | static inline void iwl_stop_queue(struct iwl_trans *trans, | ||
402 | struct iwl_tx_queue *txq) | ||
403 | { | ||
404 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
405 | |||
406 | if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { | ||
407 | iwl_op_mode_queue_full(trans->op_mode, txq->q.id); | ||
408 | IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); | ||
409 | } else | ||
410 | IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", | ||
411 | txq->q.id); | ||
412 | } | ||
413 | |||
414 | static inline int iwl_queue_used(const struct iwl_queue *q, int i) | ||
415 | { | ||
416 | return q->write_ptr >= q->read_ptr ? | ||
417 | (i >= q->read_ptr && i < q->write_ptr) : | ||
418 | !(i < q->read_ptr && i >= q->write_ptr); | ||
419 | } | ||
420 | |||
421 | static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) | ||
422 | { | ||
423 | return index & (q->n_window - 1); | ||
424 | } | ||
425 | |||
426 | static inline const char * | ||
427 | trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd) | ||
428 | { | ||
429 | if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) | ||
430 | return "UNKNOWN"; | ||
431 | return trans_pcie->command_names[cmd]; | ||
432 | } | ||
433 | |||
434 | static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) | ||
435 | { | ||
436 | return !(iwl_read32(trans, CSR_GP_CNTRL) & | ||
437 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); | ||
438 | } | ||
439 | |||
440 | #endif /* __iwl_trans_int_pcie_h__ */ | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c new file mode 100644 index 000000000000..d6860c070c16 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -0,0 +1,1058 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/wait.h> | ||
31 | #include <linux/gfp.h> | ||
32 | |||
33 | #include "iwl-prph.h" | ||
34 | #include "iwl-io.h" | ||
35 | #include "internal.h" | ||
36 | #include "iwl-op-mode.h" | ||
37 | |||
38 | #ifdef CONFIG_IWLWIFI_IDI | ||
39 | #include "iwl-amfh.h" | ||
40 | #endif | ||
41 | |||
42 | /****************************************************************************** | ||
43 | * | ||
44 | * RX path functions | ||
45 | * | ||
46 | ******************************************************************************/ | ||
47 | |||
48 | /* | ||
49 | * Rx theory of operation | ||
50 | * | ||
51 | * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), | ||
52 | * each of which point to Receive Buffers to be filled by the NIC. These get | ||
53 | * used not only for Rx frames, but for any command response or notification | ||
54 | * from the NIC. The driver and NIC manage the Rx buffers by means | ||
55 | * of indexes into the circular buffer. | ||
56 | * | ||
57 | * Rx Queue Indexes | ||
58 | * The host/firmware share two index registers for managing the Rx buffers. | ||
59 | * | ||
60 | * The READ index maps to the first position that the firmware may be writing | ||
61 | * to -- the driver can read up to (but not including) this position and get | ||
62 | * good data. | ||
63 | * The READ index is managed by the firmware once the card is enabled. | ||
64 | * | ||
65 | * The WRITE index maps to the last position the driver has read from -- the | ||
66 | * position preceding WRITE is the last slot the firmware can place a packet. | ||
67 | * | ||
68 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | ||
69 | * WRITE = READ. | ||
70 | * | ||
71 | * During initialization, the host sets up the READ queue position to the first | ||
72 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | ||
73 | * | ||
74 | * When the firmware places a packet in a buffer, it will advance the READ index | ||
75 | * and fire the RX interrupt. The driver can then query the READ index and | ||
76 | * process as many packets as possible, moving the WRITE index forward as it | ||
77 | * resets the Rx queue buffers with new memory. | ||
78 | * | ||
79 | * The management in the driver is as follows: | ||
80 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | ||
81 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | ||
82 | * to replenish the iwl->rxq->rx_free. | ||
83 | * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the | ||
84 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | ||
85 | * 'processed' and 'read' driver indexes as well) | ||
86 | * + A received packet is processed and handed to the kernel network stack, | ||
87 | * detached from the iwl->rxq. The driver 'processed' index is updated. | ||
88 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | ||
89 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | ||
90 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | ||
91 | * were enough free buffers and RX_STALLED is set it is cleared. | ||
92 | * | ||
93 | * | ||
94 | * Driver sequence: | ||
95 | * | ||
96 | * iwl_rx_queue_alloc() Allocates rx_free | ||
97 | * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls | ||
98 | * iwl_rx_queue_restock | ||
99 | * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx | ||
100 | * queue, updates firmware pointers, and updates | ||
101 | * the WRITE index. If insufficient rx_free buffers | ||
102 | * are available, schedules iwl_rx_replenish | ||
103 | * | ||
104 | * -- enable interrupts -- | ||
105 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the | ||
106 | * READ INDEX, detaching the SKB from the pool. | ||
107 | * Moves the packet buffer from queue to rx_used. | ||
108 | * Calls iwl_rx_queue_restock to refill any empty | ||
109 | * slots. | ||
110 | * ... | ||
111 | * | ||
112 | */ | ||
113 | |||
114 | /** | ||
115 | * iwl_rx_queue_space - Return number of free slots available in queue. | ||
116 | */ | ||
117 | static int iwl_rx_queue_space(const struct iwl_rx_queue *q) | ||
118 | { | ||
119 | int s = q->read - q->write; | ||
120 | if (s <= 0) | ||
121 | s += RX_QUEUE_SIZE; | ||
122 | /* keep some buffer to not confuse full and empty queue */ | ||
123 | s -= 2; | ||
124 | if (s < 0) | ||
125 | s = 0; | ||
126 | return s; | ||
127 | } | ||
128 | |||
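A worked example of the arithmetic above, with assumed values: for read = 10 and write = 200, s starts at -190, becomes 66 after adding RX_QUEUE_SIZE (256), and 64 after subtracting the two guard slots that keep a full queue distinguishable from an empty one, so up to 64 RBDs may be restocked.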
129 | /** | ||
130 | * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue | ||
131 | */ | ||
132 | void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, | ||
133 | struct iwl_rx_queue *q) | ||
134 | { | ||
135 | unsigned long flags; | ||
136 | u32 reg; | ||
137 | |||
138 | spin_lock_irqsave(&q->lock, flags); | ||
139 | |||
140 | if (q->need_update == 0) | ||
141 | goto exit_unlock; | ||
142 | |||
143 | if (trans->cfg->base_params->shadow_reg_enable) { | ||
144 | /* shadow register enabled */ | ||
145 | /* Device expects a multiple of 8 */ | ||
146 | q->write_actual = (q->write & ~0x7); | ||
147 | iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual); | ||
148 | } else { | ||
149 | struct iwl_trans_pcie *trans_pcie = | ||
150 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
151 | |||
152 | /* If power-saving is in use, make sure device is awake */ | ||
153 | if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) { | ||
154 | reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); | ||
155 | |||
156 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
157 | IWL_DEBUG_INFO(trans, | ||
158 | "Rx queue requesting wakeup," | ||
159 | " GP1 = 0x%x\n", reg); | ||
160 | iwl_set_bit(trans, CSR_GP_CNTRL, | ||
161 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
162 | goto exit_unlock; | ||
163 | } | ||
164 | |||
165 | q->write_actual = (q->write & ~0x7); | ||
166 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR, | ||
167 | q->write_actual); | ||
168 | |||
169 | /* Else device is assumed to be awake */ | ||
170 | } else { | ||
171 | /* Device expects a multiple of 8 */ | ||
172 | q->write_actual = (q->write & ~0x7); | ||
173 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR, | ||
174 | q->write_actual); | ||
175 | } | ||
176 | } | ||
177 | q->need_update = 0; | ||
178 | |||
179 | exit_unlock: | ||
180 | spin_unlock_irqrestore(&q->lock, flags); | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
185 | */ | ||
186 | static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr) | ||
187 | { | ||
188 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
189 | } | ||
190 | |||
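Why the shift is lossless: receive buffers must be 256-byte aligned and their DMA addresses must fit in 36 bits (both enforced by BUG_ON checks in iwlagn_rx_allocate() below), so the low 8 bits are always zero and the shifted value always fits the 32-bit RBD. A sketch with an assumed address:

    dma_addr_t addr = 0x812345600ULL;   /* 36-bit, 256-byte aligned (assumed) */
    u32 rbd = (u32)(addr >> 8);         /* 0x08123456 -- fits the __le32 RBD */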
191 | /** | ||
192 | * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool | ||
193 | * | ||
194 | * If there are slots in the RX queue that need to be restocked, | ||
195 | * and we have free pre-allocated buffers, fill the ranks as much | ||
196 | * as we can, pulling from rx_free. | ||
197 | * | ||
198 | * This moves the 'write' index forward to catch up with 'processed', and | ||
199 | * also updates the memory address in the firmware to reference the new | ||
200 | * target buffer. | ||
201 | */ | ||
202 | static void iwlagn_rx_queue_restock(struct iwl_trans *trans) | ||
203 | { | ||
204 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
205 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
206 | struct list_head *element; | ||
207 | struct iwl_rx_mem_buffer *rxb; | ||
208 | unsigned long flags; | ||
209 | |||
210 | spin_lock_irqsave(&rxq->lock, flags); | ||
211 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
212 | /* The overwritten rxb must be a used one */ | ||
213 | rxb = rxq->queue[rxq->write]; | ||
214 | BUG_ON(rxb && rxb->page); | ||
215 | |||
216 | /* Get next free Rx buffer, remove from free list */ | ||
217 | element = rxq->rx_free.next; | ||
218 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
219 | list_del(element); | ||
220 | |||
221 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
222 | rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma); | ||
223 | rxq->queue[rxq->write] = rxb; | ||
224 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
225 | rxq->free_count--; | ||
226 | } | ||
227 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
228 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
229 | * refill it */ | ||
230 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
231 | schedule_work(&trans_pcie->rx_replenish); | ||
232 | |||
233 | |||
234 | /* If we've added more space for the firmware to place data, tell it. | ||
235 | * Increment device's write pointer in multiples of 8. */ | ||
236 | if (rxq->write_actual != (rxq->write & ~0x7)) { | ||
237 | spin_lock_irqsave(&rxq->lock, flags); | ||
238 | rxq->need_update = 1; | ||
239 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
240 | iwl_rx_queue_update_write_ptr(trans, rxq); | ||
241 | } | ||
242 | } | ||
243 | |||
244 | /** | ||
245 |  * iwlagn_rx_allocate - allocate a page for each buffer on rx_used | ||
246 |  * | ||
247 |  * Each buffer moved to rx_free gets a freshly allocated, DMA-mapped page. | ||
248 |  * | ||
249 |  * Called from iwlagn_rx_replenish() and iwlagn_rx_replenish_now(), which | ||
250 |  * then restock the Rx queue via iwlagn_rx_queue_restock(). | ||
251 | */ | ||
252 | static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) | ||
253 | { | ||
254 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
255 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
256 | struct list_head *element; | ||
257 | struct iwl_rx_mem_buffer *rxb; | ||
258 | struct page *page; | ||
259 | unsigned long flags; | ||
260 | gfp_t gfp_mask = priority; | ||
261 | |||
262 | while (1) { | ||
263 | spin_lock_irqsave(&rxq->lock, flags); | ||
264 | if (list_empty(&rxq->rx_used)) { | ||
265 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
266 | return; | ||
267 | } | ||
268 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
269 | |||
270 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
271 | gfp_mask |= __GFP_NOWARN; | ||
272 | |||
273 | if (trans_pcie->rx_page_order > 0) | ||
274 | gfp_mask |= __GFP_COMP; | ||
275 | |||
276 | /* Alloc a new receive buffer */ | ||
277 | page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); | ||
278 | if (!page) { | ||
279 | if (net_ratelimit()) | ||
280 | IWL_DEBUG_INFO(trans, "alloc_pages failed, " | ||
281 | "order: %d\n", | ||
282 | trans_pcie->rx_page_order); | ||
283 | |||
284 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | ||
285 | net_ratelimit()) | ||
286 | IWL_CRIT(trans, "Failed to alloc_pages with %s. " | ||
287 | "Only %u free buffers remaining.\n", | ||
288 | priority == GFP_ATOMIC ? | ||
289 | "GFP_ATOMIC" : "GFP_KERNEL", | ||
290 | rxq->free_count); | ||
291 | /* We don't reschedule replenish work here -- we will | ||
292 | * call the restock method and if it still needs | ||
293 | * more buffers it will schedule replenish */ | ||
294 | return; | ||
295 | } | ||
296 | |||
297 | spin_lock_irqsave(&rxq->lock, flags); | ||
298 | |||
299 | if (list_empty(&rxq->rx_used)) { | ||
300 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
301 | __free_pages(page, trans_pcie->rx_page_order); | ||
302 | return; | ||
303 | } | ||
304 | element = rxq->rx_used.next; | ||
305 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
306 | list_del(element); | ||
307 | |||
308 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
309 | |||
310 | BUG_ON(rxb->page); | ||
311 | rxb->page = page; | ||
312 | /* Get physical address of the RB */ | ||
313 | rxb->page_dma = | ||
314 | dma_map_page(trans->dev, page, 0, | ||
315 | PAGE_SIZE << trans_pcie->rx_page_order, | ||
316 | DMA_FROM_DEVICE); | ||
317 | /* dma address must be no more than 36 bits */ | ||
318 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | ||
319 | /* and also 256 byte aligned! */ | ||
320 | BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); | ||
321 | |||
322 | spin_lock_irqsave(&rxq->lock, flags); | ||
323 | |||
324 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
325 | rxq->free_count++; | ||
326 | |||
327 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | void iwlagn_rx_replenish(struct iwl_trans *trans) | ||
332 | { | ||
333 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
334 | unsigned long flags; | ||
335 | |||
336 | iwlagn_rx_allocate(trans, GFP_KERNEL); | ||
337 | |||
338 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
339 | iwlagn_rx_queue_restock(trans); | ||
340 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
341 | } | ||
342 | |||
343 | static void iwlagn_rx_replenish_now(struct iwl_trans *trans) | ||
344 | { | ||
345 | iwlagn_rx_allocate(trans, GFP_ATOMIC); | ||
346 | |||
347 | iwlagn_rx_queue_restock(trans); | ||
348 | } | ||
349 | |||
350 | void iwl_bg_rx_replenish(struct work_struct *data) | ||
351 | { | ||
352 | struct iwl_trans_pcie *trans_pcie = | ||
353 | container_of(data, struct iwl_trans_pcie, rx_replenish); | ||
354 | |||
355 | iwlagn_rx_replenish(trans_pcie->trans); | ||
356 | } | ||
357 | |||
358 | static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, | ||
359 | struct iwl_rx_mem_buffer *rxb) | ||
360 | { | ||
361 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
362 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
363 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | ||
364 | unsigned long flags; | ||
365 | bool page_stolen = false; | ||
366 | int max_len = PAGE_SIZE << trans_pcie->rx_page_order; | ||
367 | u32 offset = 0; | ||
368 | |||
369 | if (WARN_ON(!rxb)) | ||
370 | return; | ||
371 | |||
372 | dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); | ||
373 | |||
374 | while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { | ||
375 | struct iwl_rx_packet *pkt; | ||
376 | struct iwl_device_cmd *cmd; | ||
377 | u16 sequence; | ||
378 | bool reclaim; | ||
379 | int index, cmd_index, err, len; | ||
380 | struct iwl_rx_cmd_buffer rxcb = { | ||
381 | ._offset = offset, | ||
382 | ._page = rxb->page, | ||
383 | ._page_stolen = false, | ||
384 | .truesize = max_len, | ||
385 | }; | ||
386 | |||
387 | pkt = rxb_addr(&rxcb); | ||
388 | |||
389 | if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) | ||
390 | break; | ||
391 | |||
392 | IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n", | ||
393 | rxcb._offset, | ||
394 | trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd), | ||
395 | pkt->hdr.cmd); | ||
396 | |||
397 | len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; | ||
398 | len += sizeof(u32); /* account for status word */ | ||
399 | trace_iwlwifi_dev_rx(trans->dev, pkt, len); | ||
400 | |||
401 | /* Reclaim a command buffer only if this packet is a response | ||
402 | * to a (driver-originated) command. | ||
403 | * If the packet (e.g. Rx frame) originated from uCode, | ||
404 | * there is no command buffer to reclaim. | ||
405 | * Ucode should set SEQ_RX_FRAME bit if ucode-originated, | ||
406 | * but apparently a few don't get set; catch them here. */ | ||
407 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); | ||
408 | if (reclaim) { | ||
409 | int i; | ||
410 | |||
411 | for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { | ||
412 | if (trans_pcie->no_reclaim_cmds[i] == | ||
413 | pkt->hdr.cmd) { | ||
414 | reclaim = false; | ||
415 | break; | ||
416 | } | ||
417 | } | ||
418 | } | ||
419 | |||
420 | sequence = le16_to_cpu(pkt->hdr.sequence); | ||
421 | index = SEQ_TO_INDEX(sequence); | ||
422 | cmd_index = get_cmd_index(&txq->q, index); | ||
423 | |||
424 | if (reclaim) | ||
425 | cmd = txq->entries[cmd_index].cmd; | ||
426 | else | ||
427 | cmd = NULL; | ||
428 | |||
429 | err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); | ||
430 | |||
431 | /* | ||
432 | * After here, we should always check rxcb._page_stolen, | ||
433 | * if it is true then one of the handlers took the page. | ||
434 | */ | ||
435 | |||
436 | if (reclaim) { | ||
437 | /* Invoke any callbacks, transfer the buffer to caller, | ||
438 | * and fire off the (possibly) blocking | ||
439 | * iwl_trans_send_cmd() | ||
440 | * as we reclaim the driver command queue */ | ||
441 | if (!rxcb._page_stolen) | ||
442 | iwl_tx_cmd_complete(trans, &rxcb, err); | ||
443 | else | ||
444 | IWL_WARN(trans, "Claim null rxb?\n"); | ||
445 | } | ||
446 | |||
447 | page_stolen |= rxcb._page_stolen; | ||
448 | offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); | ||
449 | } | ||
450 | |||
451 | /* page was stolen from us -- free our reference */ | ||
452 | if (page_stolen) { | ||
453 | __free_pages(rxb->page, trans_pcie->rx_page_order); | ||
454 | rxb->page = NULL; | ||
455 | } | ||
456 | |||
457 | /* Reuse the page if possible. For notification packets and | ||
458 | * SKBs that fail to Rx correctly, add them back into the | ||
459 | * rx_free list for reuse later. */ | ||
460 | spin_lock_irqsave(&rxq->lock, flags); | ||
461 | if (rxb->page != NULL) { | ||
462 | rxb->page_dma = | ||
463 | dma_map_page(trans->dev, rxb->page, 0, | ||
464 | PAGE_SIZE << trans_pcie->rx_page_order, | ||
465 | DMA_FROM_DEVICE); | ||
466 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
467 | rxq->free_count++; | ||
468 | } else | ||
469 | list_add_tail(&rxb->list, &rxq->rx_used); | ||
470 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
471 | } | ||
472 | |||
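Note that a single RX page can carry several packets back to back: the walk above advances offset by each packet's length rounded up to FH_RSCSR_FRAME_ALIGN (0x40, i.e. 64 bytes), so, for example, a 120-byte response would be followed by the next iwl_cmd_header 128 bytes into the page.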
473 | /** | ||
474 | * iwl_rx_handle - Main entry function for receiving responses from uCode | ||
475 | * | ||
476 |  * Hands each buffer filled by the uCode to iwl_rx_handle_rxbuf(), which | ||
477 |  * dispatches its packets to the op mode: command responses, | ||
478 |  * frame-received notifications, and other notifications. | ||
479 | */ | ||
480 | static void iwl_rx_handle(struct iwl_trans *trans) | ||
481 | { | ||
482 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
483 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
484 | u32 r, i; | ||
485 | u8 fill_rx = 0; | ||
486 | u32 count = 8; | ||
487 | int total_empty; | ||
488 | |||
489 | /* uCode's read index (stored in shared DRAM) indicates the last Rx | ||
490 | * buffer that the driver may process (last buffer filled by ucode). */ | ||
491 | r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; | ||
492 | i = rxq->read; | ||
493 | |||
494 | /* Rx interrupt, but nothing sent from uCode */ | ||
495 | if (i == r) | ||
496 | IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); | ||
497 | |||
498 | /* calculate how many slots need to be restocked after handling RX */ | ||
499 | total_empty = r - rxq->write_actual; | ||
500 | if (total_empty < 0) | ||
501 | total_empty += RX_QUEUE_SIZE; | ||
502 | |||
503 | if (total_empty > (RX_QUEUE_SIZE / 2)) | ||
504 | fill_rx = 1; | ||
505 | |||
506 | while (i != r) { | ||
507 | struct iwl_rx_mem_buffer *rxb; | ||
508 | |||
509 | rxb = rxq->queue[i]; | ||
510 | rxq->queue[i] = NULL; | ||
511 | |||
512 | IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", | ||
513 | r, i, rxb); | ||
514 | iwl_rx_handle_rxbuf(trans, rxb); | ||
515 | |||
516 | i = (i + 1) & RX_QUEUE_MASK; | ||
517 | /* If there are a lot of unused frames, | ||
518 |  * restock the Rx queue so the ucode won't assert. */ | ||
519 | if (fill_rx) { | ||
520 | count++; | ||
521 | if (count >= 8) { | ||
522 | rxq->read = i; | ||
523 | iwlagn_rx_replenish_now(trans); | ||
524 | count = 0; | ||
525 | } | ||
526 | } | ||
527 | } | ||
528 | |||
529 | /* Backtrack one entry */ | ||
530 | rxq->read = i; | ||
531 | if (fill_rx) | ||
532 | iwlagn_rx_replenish_now(trans); | ||
533 | else | ||
534 | iwlagn_rx_queue_restock(trans); | ||
535 | } | ||
536 | |||
537 | /** | ||
538 | * iwl_irq_handle_error - called for HW or SW error interrupt from card | ||
539 | */ | ||
540 | static void iwl_irq_handle_error(struct iwl_trans *trans) | ||
541 | { | ||
542 | /* W/A for the WiFi/WiMAX coex case where WiMAX owns the RF */ | ||
543 | if (trans->cfg->internal_wimax_coex && | ||
544 | (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & | ||
545 | APMS_CLK_VAL_MRB_FUNC_MODE) || | ||
546 | (iwl_read_prph(trans, APMG_PS_CTRL_REG) & | ||
547 | APMG_PS_CTRL_VAL_RESET_REQ))) { | ||
548 | struct iwl_trans_pcie *trans_pcie = | ||
549 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
550 | |||
551 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
552 | iwl_op_mode_wimax_active(trans->op_mode); | ||
553 | wake_up(&trans->wait_command_queue); | ||
554 | return; | ||
555 | } | ||
556 | |||
557 | iwl_dump_csr(trans); | ||
558 | iwl_dump_fh(trans, NULL, false); | ||
559 | |||
560 | iwl_op_mode_nic_error(trans->op_mode); | ||
561 | } | ||
562 | |||
563 | /* tasklet for iwlagn interrupt */ | ||
564 | void iwl_irq_tasklet(struct iwl_trans *trans) | ||
565 | { | ||
566 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
567 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | ||
568 | u32 inta = 0; | ||
569 | u32 handled = 0; | ||
570 | unsigned long flags; | ||
571 | u32 i; | ||
572 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
573 | u32 inta_mask; | ||
574 | #endif | ||
575 | |||
576 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
577 | |||
578 | /* Ack/clear/reset pending uCode interrupts. | ||
579 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | ||
580 | */ | ||
581 | /* There is a hardware bug in the interrupt mask function that some | ||
582 | * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if | ||
583 | * they are disabled in the CSR_INT_MASK register. Furthermore the | ||
584 | * ICT interrupt handling mechanism has another bug that might cause | ||
585 |  * these unmasked interrupts to go undetected. We work around the | ||
586 | * hardware bugs here by ACKing all the possible interrupts so that | ||
587 | * interrupt coalescing can still be achieved. | ||
588 | */ | ||
589 | iwl_write32(trans, CSR_INT, | ||
590 | trans_pcie->inta | ~trans_pcie->inta_mask); | ||
591 | |||
592 | inta = trans_pcie->inta; | ||
593 | |||
594 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
595 | if (iwl_have_debug_level(IWL_DL_ISR)) { | ||
596 | /* just for debug */ | ||
597 | inta_mask = iwl_read32(trans, CSR_INT_MASK); | ||
598 | IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", | ||
599 | inta, inta_mask); | ||
600 | } | ||
601 | #endif | ||
602 | |||
603 | /* the interrupt is saved in inta; now we can reset trans_pcie->inta */ | ||
604 | trans_pcie->inta = 0; | ||
605 | |||
606 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
607 | |||
608 | /* Now service all interrupt bits discovered above. */ | ||
609 | if (inta & CSR_INT_BIT_HW_ERR) { | ||
610 | IWL_ERR(trans, "Hardware error detected. Restarting.\n"); | ||
611 | |||
612 | /* Tell the device to stop sending interrupts */ | ||
613 | iwl_disable_interrupts(trans); | ||
614 | |||
615 | isr_stats->hw++; | ||
616 | iwl_irq_handle_error(trans); | ||
617 | |||
618 | handled |= CSR_INT_BIT_HW_ERR; | ||
619 | |||
620 | return; | ||
621 | } | ||
622 | |||
623 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
624 | if (iwl_have_debug_level(IWL_DL_ISR)) { | ||
625 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | ||
626 | if (inta & CSR_INT_BIT_SCD) { | ||
627 | IWL_DEBUG_ISR(trans, "Scheduler finished to transmit " | ||
628 | "the frame/frames.\n"); | ||
629 | isr_stats->sch++; | ||
630 | } | ||
631 | |||
632 | /* Alive notification via Rx interrupt will do the real work */ | ||
633 | if (inta & CSR_INT_BIT_ALIVE) { | ||
634 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); | ||
635 | isr_stats->alive++; | ||
636 | } | ||
637 | } | ||
638 | #endif | ||
639 | /* Safely ignore these bits for debug checks below */ | ||
640 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | ||
641 | |||
642 | /* HW RF KILL switch toggled */ | ||
643 | if (inta & CSR_INT_BIT_RF_KILL) { | ||
644 | bool hw_rfkill; | ||
645 | |||
646 | hw_rfkill = iwl_is_rfkill_set(trans); | ||
647 | IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", | ||
648 | hw_rfkill ? "disable radio" : "enable radio"); | ||
649 | |||
650 | isr_stats->rfkill++; | ||
651 | |||
652 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | ||
653 | |||
654 | handled |= CSR_INT_BIT_RF_KILL; | ||
655 | } | ||
656 | |||
657 | /* Chip got too hot and stopped itself */ | ||
658 | if (inta & CSR_INT_BIT_CT_KILL) { | ||
659 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); | ||
660 | isr_stats->ctkill++; | ||
661 | handled |= CSR_INT_BIT_CT_KILL; | ||
662 | } | ||
663 | |||
664 | /* Error detected by uCode */ | ||
665 | if (inta & CSR_INT_BIT_SW_ERR) { | ||
666 | IWL_ERR(trans, "Microcode SW error detected. " | ||
667 | " Restarting 0x%X.\n", inta); | ||
668 | isr_stats->sw++; | ||
669 | iwl_irq_handle_error(trans); | ||
670 | handled |= CSR_INT_BIT_SW_ERR; | ||
671 | } | ||
672 | |||
673 | /* uCode wakes up after power-down sleep */ | ||
674 | if (inta & CSR_INT_BIT_WAKEUP) { | ||
675 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); | ||
676 | iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); | ||
677 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) | ||
678 | iwl_txq_update_write_ptr(trans, | ||
679 | &trans_pcie->txq[i]); | ||
680 | |||
681 | isr_stats->wakeup++; | ||
682 | |||
683 | handled |= CSR_INT_BIT_WAKEUP; | ||
684 | } | ||
685 | |||
686 | /* All uCode command responses, including Tx command responses, | ||
687 | * Rx "responses" (frame-received notification), and other | ||
688 | * notifications from uCode come through here */ | ||
689 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | | ||
690 | CSR_INT_BIT_RX_PERIODIC)) { | ||
691 | IWL_DEBUG_ISR(trans, "Rx interrupt\n"); | ||
692 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | ||
693 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | ||
694 | iwl_write32(trans, CSR_FH_INT_STATUS, | ||
695 | CSR_FH_INT_RX_MASK); | ||
696 | } | ||
697 | if (inta & CSR_INT_BIT_RX_PERIODIC) { | ||
698 | handled |= CSR_INT_BIT_RX_PERIODIC; | ||
699 | iwl_write32(trans, | ||
700 | CSR_INT, CSR_INT_BIT_RX_PERIODIC); | ||
701 | } | ||
702 | /* Sending an RX interrupt requires many steps to be done in the | ||
703 | * device: | ||
704 | * 1- write interrupt to current index in ICT table. | ||
705 | * 2- dma RX frame. | ||
706 | * 3- update RX shared data to indicate last write index. | ||
707 | * 4- send interrupt. | ||
708 | * This could lead to an RX race: the driver could receive an RX | ||
709 | * interrupt before the shared data reflects the changes; the | ||
710 | * periodic interrupt will detect any dangling Rx activity. | ||
711 | */ | ||
712 | |||
713 | /* Disable periodic interrupt; we use it as just a one-shot. */ | ||
714 | iwl_write8(trans, CSR_INT_PERIODIC_REG, | ||
715 | CSR_INT_PERIODIC_DIS); | ||
716 | #ifdef CONFIG_IWLWIFI_IDI | ||
717 | iwl_amfh_rx_handler(); | ||
718 | #else | ||
719 | iwl_rx_handle(trans); | ||
720 | #endif | ||
721 | /* | ||
722 | * Enable periodic interrupt in 8 msec only if we received | ||
723 | * real RX interrupt (instead of just periodic int), to catch | ||
724 | * any dangling Rx interrupt. If it was just the periodic | ||
725 | * interrupt, there was no dangling Rx activity, and no need | ||
726 | * to extend the periodic interrupt; one-shot is enough. | ||
727 | */ | ||
728 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) | ||
729 | iwl_write8(trans, CSR_INT_PERIODIC_REG, | ||
730 | CSR_INT_PERIODIC_ENA); | ||
731 | |||
732 | isr_stats->rx++; | ||
733 | } | ||
734 | |||
735 | /* This "Tx" DMA channel is used only for loading uCode */ | ||
736 | if (inta & CSR_INT_BIT_FH_TX) { | ||
737 | iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); | ||
738 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); | ||
739 | isr_stats->tx++; | ||
740 | handled |= CSR_INT_BIT_FH_TX; | ||
741 | /* Wake up uCode load routine, now that load is complete */ | ||
742 | trans_pcie->ucode_write_complete = true; | ||
743 | wake_up(&trans_pcie->ucode_write_waitq); | ||
744 | } | ||
745 | |||
746 | if (inta & ~handled) { | ||
747 | IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); | ||
748 | isr_stats->unhandled++; | ||
749 | } | ||
750 | |||
751 | if (inta & ~(trans_pcie->inta_mask)) { | ||
752 | IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", | ||
753 | inta & ~trans_pcie->inta_mask); | ||
754 | } | ||
755 | |||
756 | /* Re-enable all interrupts */ | ||
757 | /* only re-enable if disabled by irq */ | ||
758 | if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status)) | ||
759 | iwl_enable_interrupts(trans); | ||
760 | /* Re-enable RF_KILL if it occurred */ | ||
761 | else if (handled & CSR_INT_BIT_RF_KILL) | ||
762 | iwl_enable_rfkill_int(trans); | ||
763 | } | ||
764 | |||
765 | /****************************************************************************** | ||
766 | * | ||
767 | * ICT functions | ||
768 | * | ||
769 | ******************************************************************************/ | ||
770 | |||
771 | /* a device (PCI-E) page is 4096 bytes long */ | ||
772 | #define ICT_SHIFT 12 | ||
773 | #define ICT_SIZE (1 << ICT_SHIFT) | ||
774 | #define ICT_COUNT (ICT_SIZE / sizeof(u32)) | ||
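/* Worked out: a 4096-byte table of 4-byte entries gives ICT_COUNT = 1024. */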
775 | |||
776 | /* Free dram table */ | ||
777 | void iwl_free_isr_ict(struct iwl_trans *trans) | ||
778 | { | ||
779 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
780 | |||
781 | if (trans_pcie->ict_tbl) { | ||
782 | dma_free_coherent(trans->dev, ICT_SIZE, | ||
783 | trans_pcie->ict_tbl, | ||
784 | trans_pcie->ict_tbl_dma); | ||
785 | trans_pcie->ict_tbl = NULL; | ||
786 | trans_pcie->ict_tbl_dma = 0; | ||
787 | } | ||
788 | } | ||
789 | |||
790 | |||
791 | /* | ||
792 | * Allocate the DRAM shared table; it is an aligned memory | ||
793 | * block of ICT_SIZE bytes. | ||
794 | * Also reset all data related to the ICT table interrupt. | ||
795 | */ | ||
796 | int iwl_alloc_isr_ict(struct iwl_trans *trans) | ||
797 | { | ||
798 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
799 | |||
800 | trans_pcie->ict_tbl = | ||
801 | dma_alloc_coherent(trans->dev, ICT_SIZE, | ||
802 | &trans_pcie->ict_tbl_dma, | ||
803 | GFP_KERNEL); | ||
804 | if (!trans_pcie->ict_tbl) | ||
805 | return -ENOMEM; | ||
806 | |||
807 | /* just an API sanity check ... it is guaranteed to be aligned */ | ||
808 | if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { | ||
809 | iwl_free_isr_ict(trans); | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | |||
813 | IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n", | ||
814 | (unsigned long long)trans_pcie->ict_tbl_dma); | ||
815 | |||
816 | IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl); | ||
817 | |||
818 | /* reset table and index to all 0 */ | ||
819 | memset(trans_pcie->ict_tbl, 0, ICT_SIZE); | ||
820 | trans_pcie->ict_index = 0; | ||
821 | |||
822 | /* add periodic RX interrupt */ | ||
823 | trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC; | ||
824 | return 0; | ||
825 | } | ||
826 | |||
827 | /* Device is going up: inform it that it will use the ICT interrupt | ||
828 | * table; the driver also starts using the ICT interrupt here. | ||
829 | */ | ||
830 | void iwl_reset_ict(struct iwl_trans *trans) | ||
831 | { | ||
832 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
833 | u32 val; | ||
834 | unsigned long flags; | ||
835 | |||
836 | if (!trans_pcie->ict_tbl) | ||
837 | return; | ||
838 | |||
839 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
840 | iwl_disable_interrupts(trans); | ||
841 | |||
842 | memset(trans_pcie->ict_tbl, 0, ICT_SIZE); | ||
843 | |||
844 | val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; | ||
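/* Only the 4 KiB page index fits in the register, which is why
 * iwl_alloc_isr_ict() insists that the table be ICT_SIZE-aligned. */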
845 | |||
846 | val |= CSR_DRAM_INT_TBL_ENABLE; | ||
847 | val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; | ||
848 | |||
849 | IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); | ||
850 | |||
851 | iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); | ||
852 | trans_pcie->use_ict = true; | ||
853 | trans_pcie->ict_index = 0; | ||
854 | iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); | ||
855 | iwl_enable_interrupts(trans); | ||
856 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
857 | } | ||
858 | |||
859 | /* Device is going down; disable ICT interrupt usage */ | ||
860 | void iwl_disable_ict(struct iwl_trans *trans) | ||
861 | { | ||
862 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
863 | unsigned long flags; | ||
864 | |||
865 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
866 | trans_pcie->use_ict = false; | ||
867 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
868 | } | ||
869 | |||
870 | static irqreturn_t iwl_isr(int irq, void *data) | ||
871 | { | ||
872 | struct iwl_trans *trans = data; | ||
873 | struct iwl_trans_pcie *trans_pcie; | ||
874 | u32 inta, inta_mask; | ||
875 | unsigned long flags; | ||
876 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
877 | u32 inta_fh; | ||
878 | #endif | ||
879 | if (!trans) | ||
880 | return IRQ_NONE; | ||
881 | |||
882 | trace_iwlwifi_dev_irq(trans->dev); | ||
883 | |||
884 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
885 | |||
886 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
887 | |||
888 | /* Disable (but don't clear!) interrupts here to avoid | ||
889 | * back-to-back ISRs and sporadic interrupts from our NIC. | ||
890 | * If we have something to service, the tasklet will re-enable ints. | ||
891 | * If we *don't* have something, we'll re-enable before leaving here. */ | ||
892 | inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */ | ||
893 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | ||
894 | |||
895 | /* Discover which interrupts are active/pending */ | ||
896 | inta = iwl_read32(trans, CSR_INT); | ||
897 | |||
898 | /* Ignore interrupt if there's nothing in NIC to service. | ||
899 | * This may be due to IRQ shared with another device, | ||
900 | * or due to sporadic interrupts thrown from our NIC. */ | ||
901 | if (!inta) { | ||
902 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); | ||
903 | goto none; | ||
904 | } | ||
905 | |||
906 | if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { | ||
907 | /* Hardware disappeared. It might have already raised | ||
908 | * an interrupt */ | ||
909 | IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); | ||
910 | goto unplugged; | ||
911 | } | ||
912 | |||
913 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
914 | if (iwl_have_debug_level(IWL_DL_ISR)) { | ||
915 | inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS); | ||
916 | IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, " | ||
917 | "fh 0x%08x\n", inta, inta_mask, inta_fh); | ||
918 | } | ||
919 | #endif | ||
920 | |||
921 | trans_pcie->inta |= inta; | ||
922 | /* iwl_irq_tasklet() will service interrupts and re-enable them */ | ||
923 | if (likely(inta)) | ||
924 | tasklet_schedule(&trans_pcie->irq_tasklet); | ||
925 | else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && | ||
926 | !trans_pcie->inta) | ||
927 | iwl_enable_interrupts(trans); | ||
928 | |||
929 | unplugged: | ||
930 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
931 | return IRQ_HANDLED; | ||
932 | |||
933 | none: | ||
934 | /* re-enable interrupts here since we don't have anything to service. */ | ||
935 | /* only re-enable if disabled by irq and no tasklet was scheduled. */ | ||
936 | if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && | ||
937 | !trans_pcie->inta) | ||
938 | iwl_enable_interrupts(trans); | ||
939 | |||
940 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
941 | return IRQ_NONE; | ||
942 | } | ||
943 | |||
944 | /* Interrupt handler using the ICT table. With this handler the driver | ||
945 | * stops reading the INTA register to discover the device's interrupts; | ||
946 | * reading that register is expensive. Instead, the device writes its | ||
947 | * interrupts into the ICT DRAM table, increments its index and fires | ||
948 | * an interrupt to the driver. The driver ORs all ICT table entries | ||
949 | * from the current index up to the first entry with a 0 value; the | ||
950 | * result is the set of interrupts to service. The consumed entries are | ||
951 | * then set back to 0 and the index is updated. */ | ||
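The drain loop described above is compact enough to model on its own. Below is a minimal userspace sketch (an illustration, not driver code); the wrap helper is assumed to be the usual power-of-two mask, as the real iwl_queue_inc_wrap() is defined elsewhere in the driver:

#include <stdio.h>
#include <stdint.h>

#define ICT_COUNT 1024	/* a 4096-byte table of 4-byte entries */

/* Assumed wrap idiom; valid only because ICT_COUNT is a power of two. */
static unsigned int ict_inc_wrap(unsigned int i)
{
	return (i + 1) & (ICT_COUNT - 1);
}

int main(void)
{
	/* Pretend the device wrote two entries and then left a 0. */
	uint32_t ict_tbl[ICT_COUNT] = { 0x2, 0x8000 };
	unsigned int index = 0;
	uint32_t val = 0, read;

	/* OR together every non-zero entry, zeroing each slot as we
	 * go, until the first 0 entry -- mirroring the do/while in
	 * iwl_isr_ict() below. */
	while ((read = ict_tbl[index]) != 0) {
		val |= read;
		ict_tbl[index] = 0;
		index = ict_inc_wrap(index);
	}

	printf("accumulated 0x%08x, next index %u\n",
	       (unsigned int)val, index);
	return 0;
}

With the sample entries 0x2 and 0x8000 this prints "accumulated 0x00008002, next index 2".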
952 | irqreturn_t iwl_isr_ict(int irq, void *data) | ||
953 | { | ||
954 | struct iwl_trans *trans = data; | ||
955 | struct iwl_trans_pcie *trans_pcie; | ||
956 | u32 inta, inta_mask; | ||
957 | u32 val = 0; | ||
958 | u32 read; | ||
959 | unsigned long flags; | ||
960 | |||
961 | if (!trans) | ||
962 | return IRQ_NONE; | ||
963 | |||
964 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
965 | |||
966 | /* DRAM interrupt table not set yet; | ||
967 | * fall back to the legacy interrupt handler. | ||
968 | */ | ||
969 | if (!trans_pcie->use_ict) | ||
970 | return iwl_isr(irq, data); | ||
971 | |||
972 | trace_iwlwifi_dev_irq(trans->dev); | ||
973 | |||
974 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
975 | |||
976 | /* Disable (but don't clear!) interrupts here to avoid | ||
977 | * back-to-back ISRs and sporadic interrupts from our NIC. | ||
978 | * If we have something to service, the tasklet will re-enable ints. | ||
979 | * If we *don't* have something, we'll re-enable before leaving here. | ||
980 | */ | ||
981 | inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */ | ||
982 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | ||
983 | |||
984 | |||
985 | /* Ignore interrupt if there's nothing in NIC to service. | ||
986 | * This may be due to IRQ shared with another device, | ||
987 | * or due to sporadic interrupts thrown from our NIC. */ | ||
988 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | ||
989 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); | ||
990 | if (!read) { | ||
991 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); | ||
992 | goto none; | ||
993 | } | ||
994 | |||
995 | /* | ||
996 | * Collect all entries up to the first 0, starting from ict_index; | ||
997 | * note we already read at ict_index. | ||
998 | */ | ||
999 | do { | ||
1000 | val |= read; | ||
1001 | IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", | ||
1002 | trans_pcie->ict_index, read); | ||
1003 | trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; | ||
1004 | trans_pcie->ict_index = | ||
1005 | iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT); | ||
1006 | |||
1007 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | ||
1008 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, | ||
1009 | read); | ||
1010 | } while (read); | ||
1011 | |||
1012 | /* We should not get this value, just ignore it. */ | ||
1013 | if (val == 0xffffffff) | ||
1014 | val = 0; | ||
1015 | |||
1016 | /* | ||
1017 | * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit | ||
1018 | * (bit 15 before shifting it to 31) to clear when using interrupt | ||
1019 | * coalescing. fortunately, bits 18 and 19 stay set when this happens | ||
1020 | * so we use them to decide on the real state of the Rx bit. | ||
1021 | * In other words, bit 15 is set if bit 18 or bit 19 is set. | ||
1022 | */ | ||
1023 | if (val & 0xC0000) | ||
1024 | val |= 0x8000; | ||
1025 | |||
1026 | inta = (0xff & val) | ((0xff00 & val) << 16); | ||
1027 | IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", | ||
1028 | inta, inta_mask, val); | ||
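/* Worked example: val = 0x000c0002 (bits 1, 18, 19) gains bit 15 from
 * the w/a above (becoming 0x000c8002) and expands to
 * inta = 0x02 | (0x8000 << 16) = 0x80000002. */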
1029 | |||
1030 | inta &= trans_pcie->inta_mask; | ||
1031 | trans_pcie->inta |= inta; | ||
1032 | |||
1033 | /* iwl_irq_tasklet() will service interrupts and re-enable them */ | ||
1034 | if (likely(inta)) | ||
1035 | tasklet_schedule(&trans_pcie->irq_tasklet); | ||
1036 | else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && | ||
1037 | !trans_pcie->inta) { | ||
1038 | /* Re-enable interrupts only because they were disabled by this | ||
1039 | * handler and no tasklet was scheduled; if a tasklet had been | ||
1040 | * scheduled, it would re-enable them itself. | ||
1041 | */ | ||
1042 | iwl_enable_interrupts(trans); | ||
1043 | } | ||
1044 | |||
1045 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1046 | return IRQ_HANDLED; | ||
1047 | |||
1048 | none: | ||
1049 | /* re-enable interrupts here since we don't have anything to service. | ||
1050 | * only re-enable if they were disabled by this handler. | ||
1051 | */ | ||
1052 | if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && | ||
1053 | !trans_pcie->inta) | ||
1054 | iwl_enable_interrupts(trans); | ||
1055 | |||
1056 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1057 | return IRQ_NONE; | ||
1058 | } | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c new file mode 100644 index 000000000000..cc935168ae52 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -0,0 +1,2216 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
4 | * redistributing this file, you may do so under either license. | ||
5 | * | ||
6 | * GPL LICENSE SUMMARY | ||
7 | * | ||
8 | * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
22 | * USA | ||
23 | * | ||
24 | * The full GNU General Public License is included in this distribution | ||
25 | * in the file called LICENSE.GPL. | ||
26 | * | ||
27 | * Contact Information: | ||
28 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
29 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
30 | * | ||
31 | * BSD LICENSE | ||
32 | * | ||
33 | * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. | ||
34 | * All rights reserved. | ||
35 | * | ||
36 | * Redistribution and use in source and binary forms, with or without | ||
37 | * modification, are permitted provided that the following conditions | ||
38 | * are met: | ||
39 | * | ||
40 | * * Redistributions of source code must retain the above copyright | ||
41 | * notice, this list of conditions and the following disclaimer. | ||
42 | * * Redistributions in binary form must reproduce the above copyright | ||
43 | * notice, this list of conditions and the following disclaimer in | ||
44 | * the documentation and/or other materials provided with the | ||
45 | * distribution. | ||
46 | * * Neither the name Intel Corporation nor the names of its | ||
47 | * contributors may be used to endorse or promote products derived | ||
48 | * from this software without specific prior written permission. | ||
49 | * | ||
50 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
51 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
52 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
53 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
54 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
55 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
56 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
60 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
61 | * | ||
62 | *****************************************************************************/ | ||
63 | #include <linux/pci.h> | ||
64 | #include <linux/pci-aspm.h> | ||
65 | #include <linux/interrupt.h> | ||
66 | #include <linux/debugfs.h> | ||
67 | #include <linux/sched.h> | ||
68 | #include <linux/bitops.h> | ||
69 | #include <linux/gfp.h> | ||
70 | |||
71 | #include "iwl-drv.h" | ||
72 | #include "iwl-trans.h" | ||
73 | #include "iwl-csr.h" | ||
74 | #include "iwl-prph.h" | ||
75 | #include "iwl-agn-hw.h" | ||
76 | #include "internal.h" | ||
77 | /* FIXME: need to abstract out TX command (once we know what it looks like) */ | ||
78 | #include "dvm/commands.h" | ||
79 | |||
80 | #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ | ||
81 | (((1<<trans->cfg->base_params->num_of_queues) - 1) &\ | ||
82 | (~(1<<(trans_pcie)->cmd_queue))) | ||
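A quick expansion of the macro above, with assumed values (num_of_queues = 20 and cmd_queue = 4 are illustrative, not taken from any particular config):

#include <stdio.h>

int main(void)
{
	int num_of_queues = 20;		/* assumed device configuration */
	int cmd_queue = 4;		/* assumed command-queue index */

	/* every queue selected for chaining, except the command queue */
	unsigned int sel = ((1u << num_of_queues) - 1) & ~(1u << cmd_queue);

	printf("SCD_QUEUECHAIN_SEL_ALL = 0x%05x\n", sel);	/* 0xfffef */
	return 0;
}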
83 | |||
84 | static int iwl_trans_rx_alloc(struct iwl_trans *trans) | ||
85 | { | ||
86 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
87 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
88 | struct device *dev = trans->dev; | ||
89 | |||
90 | memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); | ||
91 | |||
92 | spin_lock_init(&rxq->lock); | ||
93 | |||
94 | if (WARN_ON(rxq->bd || rxq->rb_stts)) | ||
95 | return -EINVAL; | ||
96 | |||
97 | /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ | ||
98 | rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
99 | &rxq->bd_dma, GFP_KERNEL); | ||
100 | if (!rxq->bd) | ||
101 | goto err_bd; | ||
102 | |||
103 | /* Allocate the driver's pointer to the receive buffer status */ | ||
104 | rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), | ||
105 | &rxq->rb_stts_dma, GFP_KERNEL); | ||
106 | if (!rxq->rb_stts) | ||
107 | goto err_rb_stts; | ||
108 | |||
109 | return 0; | ||
110 | |||
111 | err_rb_stts: | ||
112 | dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
113 | rxq->bd, rxq->bd_dma); | ||
114 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
115 | rxq->bd = NULL; | ||
116 | err_bd: | ||
117 | return -ENOMEM; | ||
118 | } | ||
119 | |||
120 | static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans) | ||
121 | { | ||
122 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
123 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
124 | int i; | ||
125 | |||
126 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
127 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
128 | /* In the reset function, these buffers may have been allocated | ||
129 | * to an SKB, so we need to unmap and free potential storage */ | ||
130 | if (rxq->pool[i].page != NULL) { | ||
131 | dma_unmap_page(trans->dev, rxq->pool[i].page_dma, | ||
132 | PAGE_SIZE << trans_pcie->rx_page_order, | ||
133 | DMA_FROM_DEVICE); | ||
134 | __free_pages(rxq->pool[i].page, | ||
135 | trans_pcie->rx_page_order); | ||
136 | rxq->pool[i].page = NULL; | ||
137 | } | ||
138 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | static void iwl_trans_rx_hw_init(struct iwl_trans *trans, | ||
143 | struct iwl_rx_queue *rxq) | ||
144 | { | ||
145 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
146 | u32 rb_size; | ||
147 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
148 | u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */ | ||
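/* i.e. the ring holds 2^RX_QUEUE_SIZE_LOG = 2^8 = 256 descriptors; the
 * register field written below takes the log2, not the count. */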
149 | |||
150 | if (trans_pcie->rx_buf_size_8k) | ||
151 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
152 | else | ||
153 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
154 | |||
155 | /* Stop Rx DMA */ | ||
156 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
157 | |||
158 | /* Reset driver's Rx queue write index */ | ||
159 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
160 | |||
161 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
162 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
163 | (u32)(rxq->bd_dma >> 8)); | ||
164 | |||
165 | /* Tell device where in DRAM to update its Rx status */ | ||
166 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
167 | rxq->rb_stts_dma >> 4); | ||
168 | |||
169 | /* Enable Rx DMA | ||
170 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | ||
171 | * the credit mechanism in 5000 HW RX FIFO | ||
172 | * Direct rx interrupts to hosts | ||
173 | * Rx buffer size 4 or 8k | ||
174 | * RB timeout 0x10 | ||
175 | * 256 RBDs | ||
176 | */ | ||
177 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
178 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
179 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
180 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
181 | rb_size | | ||
182 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | | ||
183 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
184 | |||
185 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
186 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
187 | } | ||
188 | |||
189 | static int iwl_rx_init(struct iwl_trans *trans) | ||
190 | { | ||
191 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
192 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
193 | |||
194 | int i, err; | ||
195 | unsigned long flags; | ||
196 | |||
197 | if (!rxq->bd) { | ||
198 | err = iwl_trans_rx_alloc(trans); | ||
199 | if (err) | ||
200 | return err; | ||
201 | } | ||
202 | |||
203 | spin_lock_irqsave(&rxq->lock, flags); | ||
204 | INIT_LIST_HEAD(&rxq->rx_free); | ||
205 | INIT_LIST_HEAD(&rxq->rx_used); | ||
206 | |||
207 | iwl_trans_rxq_free_rx_bufs(trans); | ||
208 | |||
209 | for (i = 0; i < RX_QUEUE_SIZE; i++) | ||
210 | rxq->queue[i] = NULL; | ||
211 | |||
212 | /* Set us so that we have processed and used all buffers, but have | ||
213 | * not restocked the Rx queue with fresh buffers */ | ||
214 | rxq->read = rxq->write = 0; | ||
215 | rxq->write_actual = 0; | ||
216 | rxq->free_count = 0; | ||
217 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
218 | |||
219 | iwlagn_rx_replenish(trans); | ||
220 | |||
221 | iwl_trans_rx_hw_init(trans, rxq); | ||
222 | |||
223 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
224 | rxq->need_update = 1; | ||
225 | iwl_rx_queue_update_write_ptr(trans, rxq); | ||
226 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
227 | |||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | static void iwl_trans_pcie_rx_free(struct iwl_trans *trans) | ||
232 | { | ||
233 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
234 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
235 | unsigned long flags; | ||
236 | |||
237 | /* if rxq->bd is NULL, nothing has been allocated; | ||
238 | * exit now */ | ||
239 | if (!rxq->bd) { | ||
240 | IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); | ||
241 | return; | ||
242 | } | ||
243 | |||
244 | spin_lock_irqsave(&rxq->lock, flags); | ||
245 | iwl_trans_rxq_free_rx_bufs(trans); | ||
246 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
247 | |||
248 | dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
249 | rxq->bd, rxq->bd_dma); | ||
250 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
251 | rxq->bd = NULL; | ||
252 | |||
253 | if (rxq->rb_stts) | ||
254 | dma_free_coherent(trans->dev, | ||
255 | sizeof(struct iwl_rb_status), | ||
256 | rxq->rb_stts, rxq->rb_stts_dma); | ||
257 | else | ||
258 | IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); | ||
259 | memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); | ||
260 | rxq->rb_stts = NULL; | ||
261 | } | ||
262 | |||
263 | static int iwl_trans_rx_stop(struct iwl_trans *trans) | ||
264 | { | ||
265 | |||
266 | /* stop Rx DMA */ | ||
267 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
268 | return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, | ||
269 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | ||
270 | } | ||
271 | |||
272 | static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans, | ||
273 | struct iwl_dma_ptr *ptr, size_t size) | ||
274 | { | ||
275 | if (WARN_ON(ptr->addr)) | ||
276 | return -EINVAL; | ||
277 | |||
278 | ptr->addr = dma_alloc_coherent(trans->dev, size, | ||
279 | &ptr->dma, GFP_KERNEL); | ||
280 | if (!ptr->addr) | ||
281 | return -ENOMEM; | ||
282 | ptr->size = size; | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static void iwlagn_free_dma_ptr(struct iwl_trans *trans, | ||
287 | struct iwl_dma_ptr *ptr) | ||
288 | { | ||
289 | if (unlikely(!ptr->addr)) | ||
290 | return; | ||
291 | |||
292 | dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); | ||
293 | memset(ptr, 0, sizeof(*ptr)); | ||
294 | } | ||
295 | |||
296 | static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) | ||
297 | { | ||
298 | struct iwl_tx_queue *txq = (void *)data; | ||
299 | struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; | ||
300 | struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); | ||
301 | u32 scd_sram_addr = trans_pcie->scd_base_addr + | ||
302 | SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id); | ||
303 | u8 buf[16]; | ||
304 | int i; | ||
305 | |||
306 | spin_lock(&txq->lock); | ||
307 | /* check if triggered erroneously */ | ||
308 | if (txq->q.read_ptr == txq->q.write_ptr) { | ||
309 | spin_unlock(&txq->lock); | ||
310 | return; | ||
311 | } | ||
312 | spin_unlock(&txq->lock); | ||
313 | |||
314 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, | ||
315 | jiffies_to_msecs(trans_pcie->wd_timeout)); | ||
316 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", | ||
317 | txq->q.read_ptr, txq->q.write_ptr); | ||
318 | |||
319 | iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); | ||
320 | |||
321 | iwl_print_hex_error(trans, buf, sizeof(buf)); | ||
322 | |||
323 | for (i = 0; i < FH_TCSR_CHNL_NUM; i++) | ||
324 | IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, | ||
325 | iwl_read_direct32(trans, FH_TX_TRB_REG(i))); | ||
326 | |||
327 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { | ||
328 | u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); | ||
329 | u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; | ||
330 | bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); | ||
331 | u32 tbl_dw = | ||
332 | iwl_read_targ_mem(trans, | ||
333 | trans_pcie->scd_base_addr + | ||
334 | SCD_TRANS_TBL_OFFSET_QUEUE(i)); | ||
335 | |||
336 | if (i & 0x1) | ||
337 | tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; | ||
338 | else | ||
339 | tbl_dw = tbl_dw & 0x0000FFFF; | ||
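/* The translation table packs two queues per 32-bit word: even-numbered
 * queues in the low halfword, odd-numbered queues in the high halfword,
 * hence the (i & 0x1) selection above. */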
340 | |||
341 | IWL_ERR(trans, | ||
342 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", | ||
343 | i, active ? "" : "in", fifo, tbl_dw, | ||
344 | iwl_read_prph(trans, | ||
345 | SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), | ||
346 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); | ||
347 | } | ||
348 | |||
349 | iwl_op_mode_nic_error(trans->op_mode); | ||
350 | } | ||
351 | |||
352 | static int iwl_trans_txq_alloc(struct iwl_trans *trans, | ||
353 | struct iwl_tx_queue *txq, int slots_num, | ||
354 | u32 txq_id) | ||
355 | { | ||
356 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
357 | size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; | ||
358 | int i; | ||
359 | |||
360 | if (WARN_ON(txq->entries || txq->tfds)) | ||
361 | return -EINVAL; | ||
362 | |||
363 | setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer, | ||
364 | (unsigned long)txq); | ||
365 | txq->trans_pcie = trans_pcie; | ||
366 | |||
367 | txq->q.n_window = slots_num; | ||
368 | |||
369 | txq->entries = kcalloc(slots_num, | ||
370 | sizeof(struct iwl_pcie_tx_queue_entry), | ||
371 | GFP_KERNEL); | ||
372 | |||
373 | if (!txq->entries) | ||
374 | goto error; | ||
375 | |||
376 | if (txq_id == trans_pcie->cmd_queue) | ||
377 | for (i = 0; i < slots_num; i++) { | ||
378 | txq->entries[i].cmd = | ||
379 | kmalloc(sizeof(struct iwl_device_cmd), | ||
380 | GFP_KERNEL); | ||
381 | if (!txq->entries[i].cmd) | ||
382 | goto error; | ||
383 | } | ||
384 | |||
385 | /* Circular buffer of transmit frame descriptors (TFDs), | ||
386 | * shared with device */ | ||
387 | txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, | ||
388 | &txq->q.dma_addr, GFP_KERNEL); | ||
389 | if (!txq->tfds) { | ||
390 | IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); | ||
391 | goto error; | ||
392 | } | ||
393 | txq->q.id = txq_id; | ||
394 | |||
395 | return 0; | ||
396 | error: | ||
397 | if (txq->entries && txq_id == trans_pcie->cmd_queue) | ||
398 | for (i = 0; i < slots_num; i++) | ||
399 | kfree(txq->entries[i].cmd); | ||
400 | kfree(txq->entries); | ||
401 | txq->entries = NULL; | ||
402 | |||
403 | return -ENOMEM; | ||
404 | |||
405 | } | ||
406 | |||
407 | static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, | ||
408 | int slots_num, u32 txq_id) | ||
409 | { | ||
410 | int ret; | ||
411 | |||
412 | txq->need_update = 0; | ||
413 | |||
414 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | ||
415 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | ||
416 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | ||
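/* e.g. with the assumed mask idiom (index + 1) & (n - 1):
 * n = 256: (255 + 1) & 255 = 0 -- wraps correctly;
 * n = 200: (199 + 1) & 199 = 192 -- silently wrong. */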
417 | |||
418 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
419 | ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, | ||
420 | txq_id); | ||
421 | if (ret) | ||
422 | return ret; | ||
423 | |||
424 | spin_lock_init(&txq->lock); | ||
425 | |||
426 | /* | ||
427 | * Tell nic where to find circular buffer of Tx Frame Descriptors for | ||
428 | * given Tx queue, and enable the DMA channel used for that queue. | ||
429 | * Circular buffer (TFD queue in DRAM) physical base address */ | ||
430 | iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), | ||
431 | txq->q.dma_addr >> 8); | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | /** | ||
437 | * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's | ||
438 | */ | ||
439 | static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) | ||
440 | { | ||
441 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
442 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
443 | struct iwl_queue *q = &txq->q; | ||
444 | enum dma_data_direction dma_dir; | ||
445 | |||
446 | if (!q->n_bd) | ||
447 | return; | ||
448 | |||
449 | /* In the command queue, all the TBs are mapped as BIDI | ||
450 | * so unmap them as such. | ||
451 | */ | ||
452 | if (txq_id == trans_pcie->cmd_queue) | ||
453 | dma_dir = DMA_BIDIRECTIONAL; | ||
454 | else | ||
455 | dma_dir = DMA_TO_DEVICE; | ||
456 | |||
457 | spin_lock_bh(&txq->lock); | ||
458 | while (q->write_ptr != q->read_ptr) { | ||
459 | iwl_txq_free_tfd(trans, txq, dma_dir); | ||
460 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | ||
461 | } | ||
462 | spin_unlock_bh(&txq->lock); | ||
463 | } | ||
464 | |||
465 | /** | ||
466 | * iwl_tx_queue_free - Deallocate DMA queue. | ||
467 | * @txq: Transmit queue to deallocate. | ||
468 | * | ||
469 | * Empty queue by removing and destroying all BD's. | ||
470 | * Free all buffers. | ||
471 | * 0-fill, but do not free "txq" descriptor structure. | ||
472 | */ | ||
473 | static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) | ||
474 | { | ||
475 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
476 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
477 | struct device *dev = trans->dev; | ||
478 | int i; | ||
479 | |||
480 | if (WARN_ON(!txq)) | ||
481 | return; | ||
482 | |||
483 | iwl_tx_queue_unmap(trans, txq_id); | ||
484 | |||
485 | /* De-alloc array of command/tx buffers */ | ||
486 | |||
487 | if (txq_id == trans_pcie->cmd_queue) | ||
488 | for (i = 0; i < txq->q.n_window; i++) | ||
489 | kfree(txq->entries[i].cmd); | ||
490 | |||
491 | /* De-alloc circular buffer of TFDs */ | ||
492 | if (txq->q.n_bd) { | ||
493 | dma_free_coherent(dev, sizeof(struct iwl_tfd) * | ||
494 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); | ||
495 | memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); | ||
496 | } | ||
497 | |||
498 | kfree(txq->entries); | ||
499 | txq->entries = NULL; | ||
500 | |||
501 | del_timer_sync(&txq->stuck_timer); | ||
502 | |||
503 | /* 0-fill queue descriptor structure */ | ||
504 | memset(txq, 0, sizeof(*txq)); | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * iwl_trans_pcie_tx_free - Free TXQ Context | ||
509 | * | ||
510 | * Destroy all TX DMA queues and structures | ||
511 | */ | ||
512 | static void iwl_trans_pcie_tx_free(struct iwl_trans *trans) | ||
513 | { | ||
514 | int txq_id; | ||
515 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
516 | |||
517 | /* Tx queues */ | ||
518 | if (trans_pcie->txq) { | ||
519 | for (txq_id = 0; | ||
520 | txq_id < trans->cfg->base_params->num_of_queues; txq_id++) | ||
521 | iwl_tx_queue_free(trans, txq_id); | ||
522 | } | ||
523 | |||
524 | kfree(trans_pcie->txq); | ||
525 | trans_pcie->txq = NULL; | ||
526 | |||
527 | iwlagn_free_dma_ptr(trans, &trans_pcie->kw); | ||
528 | |||
529 | iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); | ||
530 | } | ||
531 | |||
532 | /** | ||
533 | * iwl_trans_tx_alloc - allocate TX context | ||
534 | * Allocate all Tx DMA structures and initialize them | ||
535 | * | ||
536 | * @trans: the transport | ||
537 | * Return: 0 on success, error code otherwise | ||
538 | */ | ||
539 | static int iwl_trans_tx_alloc(struct iwl_trans *trans) | ||
540 | { | ||
541 | int ret; | ||
542 | int txq_id, slots_num; | ||
543 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
544 | |||
545 | u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * | ||
546 | sizeof(struct iwlagn_scd_bc_tbl); | ||
547 | |||
548 | /* It is not allowed to alloc twice, so warn when this happens. | ||
549 | * We cannot rely on the previous allocation, so free and fail */ | ||
550 | if (WARN_ON(trans_pcie->txq)) { | ||
551 | ret = -EINVAL; | ||
552 | goto error; | ||
553 | } | ||
554 | |||
555 | ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, | ||
556 | scd_bc_tbls_size); | ||
557 | if (ret) { | ||
558 | IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); | ||
559 | goto error; | ||
560 | } | ||
561 | |||
562 | /* Alloc keep-warm buffer */ | ||
563 | ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); | ||
564 | if (ret) { | ||
565 | IWL_ERR(trans, "Keep Warm allocation failed\n"); | ||
566 | goto error; | ||
567 | } | ||
568 | |||
569 | trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, | ||
570 | sizeof(struct iwl_tx_queue), GFP_KERNEL); | ||
571 | if (!trans_pcie->txq) { | ||
572 | IWL_ERR(trans, "Not enough memory for txq\n"); | ||
573 | ret = -ENOMEM; | ||
574 | goto error; | ||
575 | } | ||
576 | |||
577 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
578 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
579 | txq_id++) { | ||
580 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
581 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
582 | ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id], | ||
583 | slots_num, txq_id); | ||
584 | if (ret) { | ||
585 | IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); | ||
586 | goto error; | ||
587 | } | ||
588 | } | ||
589 | |||
590 | return 0; | ||
591 | |||
592 | error: | ||
593 | iwl_trans_pcie_tx_free(trans); | ||
594 | |||
595 | return ret; | ||
596 | } | ||
597 | static int iwl_tx_init(struct iwl_trans *trans) | ||
598 | { | ||
599 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
600 | int ret; | ||
601 | int txq_id, slots_num; | ||
602 | unsigned long flags; | ||
603 | bool alloc = false; | ||
604 | |||
605 | if (!trans_pcie->txq) { | ||
606 | ret = iwl_trans_tx_alloc(trans); | ||
607 | if (ret) | ||
608 | goto error; | ||
609 | alloc = true; | ||
610 | } | ||
611 | |||
612 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
613 | |||
614 | /* Turn off all Tx DMA fifos */ | ||
615 | iwl_write_prph(trans, SCD_TXFACT, 0); | ||
616 | |||
617 | /* Tell NIC where to find the "keep warm" buffer */ | ||
618 | iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, | ||
619 | trans_pcie->kw.dma >> 4); | ||
620 | |||
621 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
622 | |||
623 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
624 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
625 | txq_id++) { | ||
626 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
627 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
628 | ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id], | ||
629 | slots_num, txq_id); | ||
630 | if (ret) { | ||
631 | IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); | ||
632 | goto error; | ||
633 | } | ||
634 | } | ||
635 | |||
636 | return 0; | ||
637 | error: | ||
638 | /* Upon error, free only if we allocated something */ | ||
639 | if (alloc) | ||
640 | iwl_trans_pcie_tx_free(trans); | ||
641 | return ret; | ||
642 | } | ||
643 | |||
644 | static void iwl_set_pwr_vmain(struct iwl_trans *trans) | ||
645 | { | ||
646 | /* | ||
647 | * (for documentation purposes) | ||
648 | * to set power to V_AUX, do: | ||
649 | |||
650 | if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) | ||
651 | iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, | ||
652 | APMG_PS_CTRL_VAL_PWR_SRC_VAUX, | ||
653 | ~APMG_PS_CTRL_MSK_PWR_SRC); | ||
654 | */ | ||
655 | |||
656 | iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, | ||
657 | APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, | ||
658 | ~APMG_PS_CTRL_MSK_PWR_SRC); | ||
659 | } | ||
660 | |||
661 | /* PCI registers */ | ||
662 | #define PCI_CFG_RETRY_TIMEOUT 0x041 | ||
663 | #define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 | ||
664 | #define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 | ||
665 | |||
666 | static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) | ||
667 | { | ||
668 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
669 | int pos; | ||
670 | u16 pci_lnk_ctl; | ||
671 | |||
672 | struct pci_dev *pci_dev = trans_pcie->pci_dev; | ||
673 | |||
674 | pos = pci_pcie_cap(pci_dev); | ||
675 | pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); | ||
676 | return pci_lnk_ctl; | ||
677 | } | ||
678 | |||
679 | static void iwl_apm_config(struct iwl_trans *trans) | ||
680 | { | ||
681 | /* | ||
682 | * HW bug W/A for instability in PCIe bus L0S->L1 transition. | ||
683 | * Check if BIOS (or OS) enabled L1-ASPM on this device. | ||
684 | * If so (likely), disable L0S, so device moves directly L0->L1; | ||
685 | * this forgoes only a negligible amount of power savings. | ||
686 | * If not (unlikely), enable L0S, so there is at least some | ||
687 | * power savings, even without L1. | ||
688 | */ | ||
689 | u16 lctl = iwl_pciexp_link_ctrl(trans); | ||
690 | |||
691 | if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == | ||
692 | PCI_CFG_LINK_CTRL_VAL_L1_EN) { | ||
693 | /* L1-ASPM enabled; disable(!) L0S */ | ||
694 | iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); | ||
695 | dev_printk(KERN_INFO, trans->dev, | ||
696 | "L1 Enabled; Disabling L0S\n"); | ||
697 | } else { | ||
698 | /* L1-ASPM disabled; enable(!) L0S */ | ||
699 | iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); | ||
700 | dev_printk(KERN_INFO, trans->dev, | ||
701 | "L1 Disabled; Enabling L0S\n"); | ||
702 | } | ||
703 | trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); | ||
704 | } | ||
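The L0S/L1 decision in iwl_apm_config() boils down to a two-bit check. Here is a standalone sketch using the PCI_CFG_LINK_CTRL_VAL_* values defined just above (the sample lctl word is an assumption):

#include <stdio.h>
#include <stdint.h>

#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN  0x02

int main(void)
{
	/* Sample link-control word: BIOS enabled L1-ASPM only. */
	uint16_t lctl = PCI_CFG_LINK_CTRL_VAL_L1_EN;

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
	    PCI_CFG_LINK_CTRL_VAL_L1_EN)
		printf("L1 Enabled; Disabling L0S\n");
	else
		printf("L1 Disabled; Enabling L0S\n");

	/* pm_support mirrors the absence of the L0S enable bit */
	printf("pm_support = %d\n", !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN));
	return 0;
}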
705 | |||
706 | /* | ||
707 | * Start up NIC's basic functionality after it has been reset | ||
708 | * (e.g. after platform boot, or shutdown via iwl_apm_stop()) | ||
709 | * NOTE: This does not load uCode nor start the embedded processor | ||
710 | */ | ||
711 | static int iwl_apm_init(struct iwl_trans *trans) | ||
712 | { | ||
713 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
714 | int ret = 0; | ||
715 | IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); | ||
716 | |||
717 | /* | ||
718 | * Use "set_bit" below rather than "write", to preserve any hardware | ||
719 | * bits already set by default after reset. | ||
720 | */ | ||
721 | |||
722 | /* Disable L0S exit timer (platform NMI Work/Around) */ | ||
723 | iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, | ||
724 | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); | ||
725 | |||
726 | /* | ||
727 | * Disable L0s without affecting L1; | ||
728 | * don't wait for ICH L0s (ICH bug W/A) | ||
729 | */ | ||
730 | iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, | ||
731 | CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); | ||
732 | |||
733 | /* Set FH wait threshold to maximum (HW error during stress W/A) */ | ||
734 | iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); | ||
735 | |||
736 | /* | ||
737 | * Enable HAP INTA (interrupt from management bus) to | ||
738 | * wake device's PCI Express link L1a -> L0s | ||
739 | */ | ||
740 | iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, | ||
741 | CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); | ||
742 | |||
743 | iwl_apm_config(trans); | ||
744 | |||
745 | /* Configure analog phase-lock-loop before activating to D0A */ | ||
746 | if (trans->cfg->base_params->pll_cfg_val) | ||
747 | iwl_set_bit(trans, CSR_ANA_PLL_CFG, | ||
748 | trans->cfg->base_params->pll_cfg_val); | ||
749 | |||
750 | /* | ||
751 | * Set "initialization complete" bit to move adapter from | ||
752 | * D0U* --> D0A* (powered-up active) state. | ||
753 | */ | ||
754 | iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); | ||
755 | |||
756 | /* | ||
757 | * Wait for clock stabilization; once stabilized, access to | ||
758 | * device-internal resources is supported, e.g. iwl_write_prph() | ||
759 | * and accesses to uCode SRAM. | ||
760 | */ | ||
761 | ret = iwl_poll_bit(trans, CSR_GP_CNTRL, | ||
762 | CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, | ||
763 | CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); | ||
764 | if (ret < 0) { | ||
765 | IWL_DEBUG_INFO(trans, "Failed to init the card\n"); | ||
766 | goto out; | ||
767 | } | ||
768 | |||
769 | /* | ||
770 | * Enable DMA clock and wait for it to stabilize. | ||
771 | * | ||
772 | * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits | ||
773 | * do not disable clocks. This preserves any hardware bits already | ||
774 | * set by default in "CLK_CTRL_REG" after reset. | ||
775 | */ | ||
776 | iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); | ||
777 | udelay(20); | ||
778 | |||
779 | /* Disable L1-Active */ | ||
780 | iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, | ||
781 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | ||
782 | |||
783 | set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); | ||
784 | |||
785 | out: | ||
786 | return ret; | ||
787 | } | ||
788 | |||
789 | static int iwl_apm_stop_master(struct iwl_trans *trans) | ||
790 | { | ||
791 | int ret = 0; | ||
792 | |||
793 | /* stop device's busmaster DMA activity */ | ||
794 | iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); | ||
795 | |||
796 | ret = iwl_poll_bit(trans, CSR_RESET, | ||
797 | CSR_RESET_REG_FLAG_MASTER_DISABLED, | ||
798 | CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); | ||
799 | if (ret < 0) | ||
800 | IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); | ||
801 | |||
802 | IWL_DEBUG_INFO(trans, "stop master\n"); | ||
803 | |||
804 | return ret; | ||
805 | } | ||
806 | |||
807 | static void iwl_apm_stop(struct iwl_trans *trans) | ||
808 | { | ||
809 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
810 | IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); | ||
811 | |||
812 | clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); | ||
813 | |||
814 | /* Stop device's DMA activity */ | ||
815 | iwl_apm_stop_master(trans); | ||
816 | |||
817 | /* Reset the entire device */ | ||
818 | iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); | ||
819 | |||
820 | udelay(10); | ||
821 | |||
822 | /* | ||
823 | * Clear "initialization complete" bit to move adapter from | ||
824 | * D0A* (powered-up Active) --> D0U* (Uninitialized) state. | ||
825 | */ | ||
826 | iwl_clear_bit(trans, CSR_GP_CNTRL, | ||
827 | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); | ||
828 | } | ||
829 | |||
830 | static int iwl_nic_init(struct iwl_trans *trans) | ||
831 | { | ||
832 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
833 | unsigned long flags; | ||
834 | |||
835 | /* nic_init */ | ||
836 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
837 | iwl_apm_init(trans); | ||
838 | |||
839 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | ||
840 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | ||
841 | |||
842 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
843 | |||
844 | iwl_set_pwr_vmain(trans); | ||
845 | |||
846 | iwl_op_mode_nic_config(trans->op_mode); | ||
847 | |||
848 | #ifndef CONFIG_IWLWIFI_IDI | ||
849 | /* Allocate the RX queue, or reset if it is already allocated */ | ||
850 | iwl_rx_init(trans); | ||
851 | #endif | ||
852 | |||
853 | /* Allocate or reset and init all Tx and Command queues */ | ||
854 | if (iwl_tx_init(trans)) | ||
855 | return -ENOMEM; | ||
856 | |||
857 | if (trans->cfg->base_params->shadow_reg_enable) { | ||
858 | /* enable shadow regs in HW */ | ||
859 | iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); | ||
860 | IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); | ||
861 | } | ||
862 | |||
863 | return 0; | ||
864 | } | ||
865 | |||
866 | #define HW_READY_TIMEOUT (50) | ||
867 | |||
868 | /* Note: returns poll_bit return value, which is >= 0 if success */ | ||
869 | static int iwl_set_hw_ready(struct iwl_trans *trans) | ||
870 | { | ||
871 | int ret; | ||
872 | |||
873 | iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, | ||
874 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); | ||
875 | |||
876 | /* See if we got it */ | ||
877 | ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, | ||
878 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | ||
879 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | ||
880 | HW_READY_TIMEOUT); | ||
881 | |||
882 | IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); | ||
883 | return ret; | ||
884 | } | ||
885 | |||
886 | /* Note: returns standard 0/-ERROR code */ | ||
887 | static int iwl_prepare_card_hw(struct iwl_trans *trans) | ||
888 | { | ||
889 | int ret; | ||
890 | |||
892 | IWL_DEBUG_INFO(trans, "iwl_prepare_card_hw enter\n"); | ||
892 | |||
893 | ret = iwl_set_hw_ready(trans); | ||
894 | /* If the card is ready, exit 0 */ | ||
895 | if (ret >= 0) | ||
896 | return 0; | ||
897 | |||
898 | /* If HW is not ready, prepare the conditions to check again */ | ||
899 | iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, | ||
900 | CSR_HW_IF_CONFIG_REG_PREPARE); | ||
901 | |||
902 | ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, | ||
903 | ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, | ||
904 | CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); | ||
905 | |||
906 | if (ret < 0) | ||
907 | return ret; | ||
908 | |||
909 | /* HW should be ready by now, check again. */ | ||
910 | ret = iwl_set_hw_ready(trans); | ||
911 | if (ret >= 0) | ||
912 | return 0; | ||
913 | return ret; | ||
914 | } | ||
915 | |||
916 | /* | ||
917 | * ucode | ||
918 | */ | ||
919 | static int iwl_load_section(struct iwl_trans *trans, u8 section_num, | ||
920 | const struct fw_desc *section) | ||
921 | { | ||
922 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
923 | dma_addr_t phy_addr = section->p_addr; | ||
924 | u32 byte_cnt = section->len; | ||
925 | u32 dst_addr = section->offset; | ||
926 | int ret; | ||
927 | |||
928 | trans_pcie->ucode_write_complete = false; | ||
929 | |||
930 | iwl_write_direct32(trans, | ||
931 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
932 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); | ||
933 | |||
934 | iwl_write_direct32(trans, | ||
935 | FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), | ||
936 | dst_addr); | ||
937 | |||
938 | iwl_write_direct32(trans, | ||
939 | FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), | ||
940 | phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); | ||
941 | |||
942 | iwl_write_direct32(trans, | ||
943 | FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), | ||
944 | (iwl_get_dma_hi_addr(phy_addr) | ||
945 | << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); | ||
946 | |||
947 | iwl_write_direct32(trans, | ||
948 | FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), | ||
949 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | | ||
950 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | | ||
951 | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); | ||
952 | |||
953 | iwl_write_direct32(trans, | ||
954 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
955 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
956 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | | ||
957 | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); | ||
958 | |||
959 | IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", | ||
960 | section_num); | ||
961 | ret = wait_event_timeout(trans_pcie->ucode_write_waitq, | ||
962 | trans_pcie->ucode_write_complete, 5 * HZ); | ||
963 | if (!ret) { | ||
964 | IWL_ERR(trans, "Could not load the [%d] uCode section\n", | ||
965 | section_num); | ||
966 | return -ETIMEDOUT; | ||
967 | } | ||
968 | |||
969 | return 0; | ||
970 | } | ||
971 | |||
972 | static int iwl_load_given_ucode(struct iwl_trans *trans, | ||
973 | const struct fw_img *image) | ||
974 | { | ||
975 | int ret = 0; | ||
976 | int i; | ||
977 | |||
978 | for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) { | ||
979 | if (!image->sec[i].p_addr) | ||
980 | break; | ||
981 | |||
982 | ret = iwl_load_section(trans, i, &image->sec[i]); | ||
983 | if (ret) | ||
984 | return ret; | ||
985 | } | ||
986 | |||
987 | /* Remove all resets to allow NIC to operate */ | ||
988 | iwl_write32(trans, CSR_RESET, 0); | ||
989 | |||
990 | return 0; | ||
991 | } | ||
992 | |||
993 | static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, | ||
994 | const struct fw_img *fw) | ||
995 | { | ||
996 | int ret; | ||
997 | bool hw_rfkill; | ||
998 | |||
999 | /* This may fail if AMT took ownership of the device */ | ||
1000 | if (iwl_prepare_card_hw(trans)) { | ||
1001 | IWL_WARN(trans, "Exit HW not ready\n"); | ||
1002 | return -EIO; | ||
1003 | } | ||
1004 | |||
1005 | iwl_enable_rfkill_int(trans); | ||
1006 | |||
1007 | /* If platform's RF_KILL switch is NOT set to KILL */ | ||
1008 | hw_rfkill = iwl_is_rfkill_set(trans); | ||
1009 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | ||
1010 | if (hw_rfkill) | ||
1011 | return -ERFKILL; | ||
1012 | |||
1013 | iwl_write32(trans, CSR_INT, 0xFFFFFFFF); | ||
1014 | |||
1015 | ret = iwl_nic_init(trans); | ||
1016 | if (ret) { | ||
1017 | IWL_ERR(trans, "Unable to init nic\n"); | ||
1018 | return ret; | ||
1019 | } | ||
1020 | |||
1021 | /* make sure rfkill handshake bits are cleared */ | ||
1022 | iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
1023 | iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, | ||
1024 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
1025 | |||
1026 | /* clear (again), then enable host interrupts */ | ||
1027 | iwl_write32(trans, CSR_INT, 0xFFFFFFFF); | ||
1028 | iwl_enable_interrupts(trans); | ||
1029 | |||
1030 | /* really make sure rfkill handshake bits are cleared */ | ||
1031 | iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
1032 | iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
1033 | |||
1034 | /* Load the given image to the HW */ | ||
1035 | return iwl_load_given_ucode(trans, fw); | ||
1036 | } | ||
1037 | |||
1038 | /* | ||
1039 | * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask | ||
1040 | * must be called under the irq lock and with MAC access | ||
1041 | */ | ||
1042 | static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask) | ||
1043 | { | ||
1044 | struct iwl_trans_pcie __maybe_unused *trans_pcie = | ||
1045 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1046 | |||
1047 | lockdep_assert_held(&trans_pcie->irq_lock); | ||
1048 | |||
1049 | iwl_write_prph(trans, SCD_TXFACT, mask); | ||
1050 | } | ||
1051 | |||
1052 | static void iwl_tx_start(struct iwl_trans *trans) | ||
1053 | { | ||
1054 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1055 | u32 a; | ||
1056 | unsigned long flags; | ||
1057 | int i, chan; | ||
1058 | u32 reg_val; | ||
1059 | |||
1060 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
1061 | |||
1063 | /* make sure all queues are not stopped/used */ | ||
1063 | memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); | ||
1064 | memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); | ||
1065 | |||
1066 | trans_pcie->scd_base_addr = | ||
1067 | iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); | ||
1068 | a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; | ||
1069 | /* reset context data memory */ | ||
1070 | for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; | ||
1071 | a += 4) | ||
1072 | iwl_write_targ_mem(trans, a, 0); | ||
1073 | /* reset tx status memory */ | ||
1074 | for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; | ||
1075 | a += 4) | ||
1076 | iwl_write_targ_mem(trans, a, 0); | ||
1077 | for (; a < trans_pcie->scd_base_addr + | ||
1078 | SCD_TRANS_TBL_OFFSET_QUEUE( | ||
1079 | trans->cfg->base_params->num_of_queues); | ||
1080 | a += 4) | ||
1081 | iwl_write_targ_mem(trans, a, 0); | ||
1082 | |||
1083 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, | ||
1084 | trans_pcie->scd_bc_tbls.dma >> 10); | ||
1085 | |||
1086 | /* The chain extension of the SCD doesn't work well. This feature is | ||
1087 | * enabled by default by the HW, so we need to disable it manually. | ||
1088 | */ | ||
1089 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | ||
1090 | |||
1091 | for (i = 0; i < trans_pcie->n_q_to_fifo; i++) { | ||
1092 | int fifo = trans_pcie->setup_q_to_fifo[i]; | ||
1093 | |||
1094 | iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION, | ||
1095 | IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0); | ||
1096 | } | ||
1097 | |||
1098 | /* Activate all Tx DMA/FIFO channels */ | ||
1099 | iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); | ||
1100 | |||
1101 | /* Enable DMA channel */ | ||
1102 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) | ||
1103 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | ||
1104 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
1105 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | ||
1106 | |||
1107 | /* Update FH chicken bits */ | ||
1108 | reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); | ||
1109 | iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, | ||
1110 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); | ||
1111 | |||
1112 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1113 | |||
1114 | /* Enable L1-Active */ | ||
1115 | iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, | ||
1116 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | ||
1117 | } | ||
1118 | |||
1119 | static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans) | ||
1120 | { | ||
1121 | iwl_reset_ict(trans); | ||
1122 | iwl_tx_start(trans); | ||
1123 | } | ||
1124 | |||
1125 | /** | ||
1126 | * iwlagn_txq_ctx_stop - Stop all Tx DMA channels | ||
1127 | */ | ||
1128 | static int iwl_trans_tx_stop(struct iwl_trans *trans) | ||
1129 | { | ||
1130 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1131 | int ch, txq_id, ret; | ||
1132 | unsigned long flags; | ||
1133 | |||
1134 | /* Turn off all Tx DMA fifos */ | ||
1135 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
1136 | |||
1137 | iwl_trans_txq_set_sched(trans, 0); | ||
1138 | |||
1139 | /* Stop each Tx DMA channel, and wait for it to be idle */ | ||
1140 | for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { | ||
1141 | iwl_write_direct32(trans, | ||
1142 | FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
1143 | ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, | ||
1144 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000); | ||
1145 | if (ret < 0) | ||
1146 | IWL_ERR(trans, | ||
1147 | "Failing on timeout while stopping DMA channel %d [0x%08x]", | ||
1148 | ch, | ||
1149 | iwl_read_direct32(trans, | ||
1150 | FH_TSSR_TX_STATUS_REG)); | ||
1151 | } | ||
1152 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1153 | |||
1154 | if (!trans_pcie->txq) { | ||
1155 | IWL_WARN(trans, "Stopping tx queues that aren't allocated..."); | ||
1156 | return 0; | ||
1157 | } | ||
1158 | |||
1159 | /* Unmap DMA from host system and free skb's */ | ||
1160 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
1161 | txq_id++) | ||
1162 | iwl_tx_queue_unmap(trans, txq_id); | ||
1163 | |||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1167 | static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | ||
1168 | { | ||
1169 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1170 | unsigned long flags; | ||
1171 | |||
1172 | /* tell the device to stop sending interrupts */ | ||
1173 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
1174 | iwl_disable_interrupts(trans); | ||
1175 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1176 | |||
1177 | /* device going down, stop using ICT table */ | ||
1178 | iwl_disable_ict(trans); | ||
1179 | |||
1180 | /* | ||
1181 | * If a HW restart happens during firmware loading, | ||
1182 | * then the firmware loading might call this function | ||
1183 | * and later it might be called again due to the | ||
1184 | * restart. So don't process again if the device is | ||
1185 | * already dead. | ||
1186 | */ | ||
1187 | if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { | ||
1188 | iwl_trans_tx_stop(trans); | ||
1189 | #ifndef CONFIG_IWLWIFI_IDI | ||
1190 | iwl_trans_rx_stop(trans); | ||
1191 | #endif | ||
1192 | /* Power-down device's busmaster DMA clocks */ | ||
1193 | iwl_write_prph(trans, APMG_CLK_DIS_REG, | ||
1194 | APMG_CLK_VAL_DMA_CLK_RQT); | ||
1195 | udelay(5); | ||
1196 | } | ||
1197 | |||
1198 | /* Make sure (redundant) we've released our request to stay awake */ | ||
1199 | iwl_clear_bit(trans, CSR_GP_CNTRL, | ||
1200 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
1201 | |||
1202 | /* Stop the device, and put it in low power state */ | ||
1203 | iwl_apm_stop(trans); | ||
1204 | |||
1205 | /* Upon stop, the APM issues an interrupt if HW RF kill is set. | ||
1206 | * Clear the interrupt again here | ||
1207 | */ | ||
1208 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
1209 | iwl_disable_interrupts(trans); | ||
1210 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1211 | |||
1212 | iwl_enable_rfkill_int(trans); | ||
1213 | |||
1214 | /* wait to make sure we flush the pending tasklet */ | ||
1215 | synchronize_irq(trans_pcie->irq); | ||
1216 | tasklet_kill(&trans_pcie->irq_tasklet); | ||
1217 | |||
1218 | cancel_work_sync(&trans_pcie->rx_replenish); | ||
1219 | |||
1220 | /* stop and reset the on-board processor */ | ||
1221 | iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
1222 | |||
1223 | /* clear all status bits */ | ||
1224 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
1225 | clear_bit(STATUS_INT_ENABLED, &trans_pcie->status); | ||
1226 | clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); | ||
1227 | clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); | ||
1228 | } | ||
1229 | |||
1230 | static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) | ||
1231 | { | ||
1232 | /* let the ucode operate on its own */ | ||
1233 | iwl_write32(trans, CSR_UCODE_DRV_GP1_SET, | ||
1234 | CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); | ||
1235 | |||
1236 | iwl_disable_interrupts(trans); | ||
1237 | iwl_clear_bit(trans, CSR_GP_CNTRL, | ||
1238 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
1239 | } | ||
1240 | |||
1241 | static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | ||
1242 | struct iwl_device_cmd *dev_cmd, int txq_id) | ||
1243 | { | ||
1244 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1245 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1246 | struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; | ||
1247 | struct iwl_cmd_meta *out_meta; | ||
1248 | struct iwl_tx_queue *txq; | ||
1249 | struct iwl_queue *q; | ||
1250 | dma_addr_t phys_addr = 0; | ||
1251 | dma_addr_t txcmd_phys; | ||
1252 | dma_addr_t scratch_phys; | ||
1253 | u16 len, firstlen, secondlen; | ||
1254 | u8 wait_write_ptr = 0; | ||
1255 | __le16 fc = hdr->frame_control; | ||
1256 | u8 hdr_len = ieee80211_hdrlen(fc); | ||
1257 | u16 __maybe_unused wifi_seq; | ||
1258 | |||
1259 | txq = &trans_pcie->txq[txq_id]; | ||
1260 | q = &txq->q; | ||
1261 | |||
1262 | if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { | ||
1263 | WARN_ON_ONCE(1); | ||
1264 | return -EINVAL; | ||
1265 | } | ||
1266 | |||
1267 | spin_lock(&txq->lock); | ||
1268 | |||
1269 | /* In AGG mode, the index in the ring must correspond to the WiFi | ||
1270 | * sequence number. This is a HW requirement to help the SCD parse | ||
1271 | * the BA. | ||
1272 | * Check here that the packets are in the right place on the ring. | ||
1273 | */ | ||
1274 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1275 | wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); | ||
1276 | WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && | ||
1277 | ((wifi_seq & 0xff) != q->write_ptr), | ||
1278 | "Q: %d WiFi Seq %d tfdNum %d", | ||
1279 | txq_id, wifi_seq, q->write_ptr); | ||
1280 | #endif | ||
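/*
 * Illustrative numbers (not from the source): on a 256-entry AGG ring, a
 * frame with WiFi sequence number 0x1a5 must land at ring index
 * 0x1a5 & 0xff == 0xa5, i.e. write_ptr == 0xa5, so the SCD can map the
 * block-ack bitmap back to TFDs.
 */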
1281 | |||
1282 | /* Set up driver data for this TFD */ | ||
1283 | txq->entries[q->write_ptr].skb = skb; | ||
1284 | txq->entries[q->write_ptr].cmd = dev_cmd; | ||
1285 | |||
1286 | dev_cmd->hdr.cmd = REPLY_TX; | ||
1287 | dev_cmd->hdr.sequence = | ||
1288 | cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
1289 | INDEX_TO_SEQ(q->write_ptr))); | ||
1290 | |||
1291 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
1292 | out_meta = &txq->entries[q->write_ptr].meta; | ||
1293 | |||
1294 | /* | ||
1295 | * Use the first empty entry in this queue's command buffer array | ||
1296 | * to contain the Tx command and MAC header concatenated together | ||
1297 | * (payload data will be in another buffer). | ||
1298 | * Size of this varies, due to varying MAC header length. | ||
1299 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
1300 | * of the MAC header (device reads on dword boundaries). | ||
1301 | * We'll tell device about this padding later. | ||
1302 | */ | ||
1303 | len = sizeof(struct iwl_tx_cmd) + | ||
1304 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
1305 | firstlen = (len + 3) & ~3; | ||
1306 | |||
1307 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
1308 | if (firstlen != len) | ||
1309 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
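/*
 * Worked example with a hypothetical length: if the Tx command, command
 * header and MAC header add up to len == 58, then
 * firstlen == (58 + 3) & ~3 == 60; two pad bytes follow the MAC header
 * and TX_CMD_FLG_MH_PAD_MSK tells the device to skip them.
 */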
1310 | |||
1311 | /* Physical address of this Tx command's header (not MAC header!), | ||
1312 | * within command buffer array. */ | ||
1313 | txcmd_phys = dma_map_single(trans->dev, | ||
1314 | &dev_cmd->hdr, firstlen, | ||
1315 | DMA_BIDIRECTIONAL); | ||
1316 | if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) | ||
1317 | goto out_err; | ||
1318 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
1319 | dma_unmap_len_set(out_meta, len, firstlen); | ||
1320 | |||
1321 | if (!ieee80211_has_morefrags(fc)) { | ||
1322 | txq->need_update = 1; | ||
1323 | } else { | ||
1324 | wait_write_ptr = 1; | ||
1325 | txq->need_update = 0; | ||
1326 | } | ||
1327 | |||
1328 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
1329 | * if any (802.11 null frames have no payload). */ | ||
1330 | secondlen = skb->len - hdr_len; | ||
1331 | if (secondlen > 0) { | ||
1332 | phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, | ||
1333 | secondlen, DMA_TO_DEVICE); | ||
1334 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | ||
1335 | dma_unmap_single(trans->dev, | ||
1336 | dma_unmap_addr(out_meta, mapping), | ||
1337 | dma_unmap_len(out_meta, len), | ||
1338 | DMA_BIDIRECTIONAL); | ||
1339 | goto out_err; | ||
1340 | } | ||
1341 | } | ||
1342 | |||
1343 | /* Attach buffers to TFD */ | ||
1344 | iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1); | ||
1345 | if (secondlen > 0) | ||
1346 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, | ||
1347 | secondlen, 0); | ||
1348 | |||
1349 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | ||
1350 | offsetof(struct iwl_tx_cmd, scratch); | ||
1351 | |||
1352 | /* take back ownership of DMA buffer to enable update */ | ||
1353 | dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, | ||
1354 | DMA_BIDIRECTIONAL); | ||
1355 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1356 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
1357 | |||
1358 | IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n", | ||
1359 | le16_to_cpu(dev_cmd->hdr.sequence)); | ||
1360 | IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
1361 | |||
1362 | /* Set up entry for this TFD in Tx byte-count array */ | ||
1363 | iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); | ||
1364 | |||
1365 | dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, | ||
1366 | DMA_BIDIRECTIONAL); | ||
1367 | |||
1368 | trace_iwlwifi_dev_tx(trans->dev, | ||
1369 | &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], | ||
1370 | sizeof(struct iwl_tfd), | ||
1371 | &dev_cmd->hdr, firstlen, | ||
1372 | skb->data + hdr_len, secondlen); | ||
1373 | |||
1374 | /* start timer if queue currently empty */ | ||
1375 | if (txq->need_update && q->read_ptr == q->write_ptr && | ||
1376 | trans_pcie->wd_timeout) | ||
1377 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
1378 | |||
1379 | /* Tell device the write index *just past* this latest filled TFD */ | ||
1380 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
1381 | iwl_txq_update_write_ptr(trans, txq); | ||
1382 | |||
1383 | /* | ||
1384 | * At this point the frame is "transmitted" successfully | ||
1385 | * and we will get a TX status notification eventually, | ||
1386 | * regardless of what happens below; the checks that follow only | ||
1387 | * decide whether to update the write pointer now or stop the queue. | ||
1388 | */ | ||
1389 | if (iwl_queue_space(q) < q->high_mark) { | ||
1390 | if (wait_write_ptr) { | ||
1391 | txq->need_update = 1; | ||
1392 | iwl_txq_update_write_ptr(trans, txq); | ||
1393 | } else { | ||
1394 | iwl_stop_queue(trans, txq); | ||
1395 | } | ||
1396 | } | ||
1397 | spin_unlock(&txq->lock); | ||
1398 | return 0; | ||
1399 | out_err: | ||
1400 | spin_unlock(&txq->lock); | ||
1401 | return -1; | ||
1402 | } | ||
1403 | |||
1404 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | ||
1405 | { | ||
1406 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1407 | int err; | ||
1408 | bool hw_rfkill; | ||
1409 | |||
1410 | trans_pcie->inta_mask = CSR_INI_SET_MASK; | ||
1411 | |||
1412 | if (!trans_pcie->irq_requested) { | ||
1413 | tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long)) | ||
1414 | iwl_irq_tasklet, (unsigned long)trans); | ||
1415 | |||
1416 | iwl_alloc_isr_ict(trans); | ||
1417 | |||
1418 | err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED, | ||
1419 | DRV_NAME, trans); | ||
1420 | if (err) { | ||
1421 | IWL_ERR(trans, "Error allocating IRQ %d\n", | ||
1422 | trans_pcie->irq); | ||
1423 | goto error; | ||
1424 | } | ||
1425 | |||
1426 | INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish); | ||
1427 | trans_pcie->irq_requested = true; | ||
1428 | } | ||
1429 | |||
1430 | err = iwl_prepare_card_hw(trans); | ||
1431 | if (err) { | ||
1432 | IWL_ERR(trans, "Error while preparing HW: %d", err); | ||
1433 | goto err_free_irq; | ||
1434 | } | ||
1435 | |||
1436 | iwl_apm_init(trans); | ||
1437 | |||
1438 | /* From now on, the op_mode will be kept updated about RF kill state */ | ||
1439 | iwl_enable_rfkill_int(trans); | ||
1440 | |||
1441 | hw_rfkill = iwl_is_rfkill_set(trans); | ||
1442 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | ||
1443 | |||
1444 | return err; | ||
1445 | |||
1446 | err_free_irq: | ||
1447 | free_irq(trans_pcie->irq, trans); | ||
1448 | error: | ||
1449 | iwl_free_isr_ict(trans); | ||
1450 | tasklet_kill(&trans_pcie->irq_tasklet); | ||
1451 | return err; | ||
1452 | } | ||
1453 | |||
1454 | static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, | ||
1455 | bool op_mode_leaving) | ||
1456 | { | ||
1457 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1458 | bool hw_rfkill; | ||
1459 | unsigned long flags; | ||
1460 | |||
1461 | iwl_apm_stop(trans); | ||
1462 | |||
1463 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
1464 | iwl_disable_interrupts(trans); | ||
1465 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1466 | |||
1467 | iwl_write32(trans, CSR_INT, 0xFFFFFFFF); | ||
1468 | |||
1469 | if (!op_mode_leaving) { | ||
1470 | /* | ||
1471 | * Even if we stop the HW, we still want the RF kill | ||
1472 | * interrupt | ||
1473 | */ | ||
1474 | iwl_enable_rfkill_int(trans); | ||
1475 | |||
1476 | /* | ||
1477 | * Check again since the RF kill state may have changed while | ||
1478 | * all the interrupts were disabled, in this case we couldn't | ||
1479 | * receive the RF kill interrupt and update the state in the | ||
1480 | * op_mode. | ||
1481 | */ | ||
1482 | hw_rfkill = iwl_is_rfkill_set(trans); | ||
1483 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | ||
1484 | } | ||
1485 | } | ||
1486 | |||
1487 | static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | ||
1488 | struct sk_buff_head *skbs) | ||
1489 | { | ||
1490 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1491 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
1492 | /* n_bd is usually 256 => n_bd - 1 = 0xff */ | ||
1493 | int tfd_num = ssn & (txq->q.n_bd - 1); | ||
1494 | int freed = 0; | ||
1495 | |||
1496 | spin_lock(&txq->lock); | ||
1497 | |||
1498 | if (txq->q.read_ptr != tfd_num) { | ||
1499 | IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", | ||
1500 | txq_id, txq->q.read_ptr, tfd_num, ssn); | ||
1501 | freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); | ||
1502 | if (iwl_queue_space(&txq->q) > txq->q.low_mark) | ||
1503 | iwl_wake_queue(trans, txq); | ||
1504 | } | ||
1505 | |||
1506 | spin_unlock(&txq->lock); | ||
1507 | } | ||
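/*
 * Illustrative values: with n_bd == 256 and ssn == 0x123, tfd_num == 0x23;
 * iwl_tx_queue_reclaim() then frees TFDs from read_ptr up to (but not
 * including) index 0x23, wrapping at n_bd.
 */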
1508 | |||
1509 | static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) | ||
1510 | { | ||
1511 | writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | ||
1512 | } | ||
1513 | |||
1514 | static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) | ||
1515 | { | ||
1516 | writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | ||
1517 | } | ||
1518 | |||
1519 | static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) | ||
1520 | { | ||
1521 | return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | ||
1522 | } | ||
1523 | |||
1524 | static void iwl_trans_pcie_configure(struct iwl_trans *trans, | ||
1525 | const struct iwl_trans_config *trans_cfg) | ||
1526 | { | ||
1527 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1528 | |||
1529 | trans_pcie->cmd_queue = trans_cfg->cmd_queue; | ||
1530 | if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) | ||
1531 | trans_pcie->n_no_reclaim_cmds = 0; | ||
1532 | else | ||
1533 | trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; | ||
1534 | if (trans_pcie->n_no_reclaim_cmds) | ||
1535 | memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, | ||
1536 | trans_pcie->n_no_reclaim_cmds * sizeof(u8)); | ||
1537 | |||
1538 | trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo; | ||
1539 | |||
1540 | if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES)) | ||
1541 | trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES; | ||
1542 | |||
1543 | /* at least the command queue must be mapped */ | ||
1544 | WARN_ON(!trans_pcie->n_q_to_fifo); | ||
1545 | |||
1546 | memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo, | ||
1547 | trans_pcie->n_q_to_fifo * sizeof(u8)); | ||
1548 | |||
1549 | trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k; | ||
1550 | if (trans_pcie->rx_buf_size_8k) | ||
1551 | trans_pcie->rx_page_order = get_order(8 * 1024); | ||
1552 | else | ||
1553 | trans_pcie->rx_page_order = get_order(4 * 1024); | ||
1554 | |||
1555 | trans_pcie->wd_timeout = | ||
1556 | msecs_to_jiffies(trans_cfg->queue_watchdog_timeout); | ||
1557 | |||
1558 | trans_pcie->command_names = trans_cfg->command_names; | ||
1559 | } | ||
1560 | |||
1561 | void iwl_trans_pcie_free(struct iwl_trans *trans) | ||
1562 | { | ||
1563 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1564 | |||
1565 | iwl_trans_pcie_tx_free(trans); | ||
1566 | #ifndef CONFIG_IWLWIFI_IDI | ||
1567 | iwl_trans_pcie_rx_free(trans); | ||
1568 | #endif | ||
1569 | if (trans_pcie->irq_requested == true) { | ||
1570 | free_irq(trans_pcie->irq, trans); | ||
1571 | iwl_free_isr_ict(trans); | ||
1572 | } | ||
1573 | |||
1574 | pci_disable_msi(trans_pcie->pci_dev); | ||
1575 | iounmap(trans_pcie->hw_base); | ||
1576 | pci_release_regions(trans_pcie->pci_dev); | ||
1577 | pci_disable_device(trans_pcie->pci_dev); | ||
1578 | kmem_cache_destroy(trans->dev_cmd_pool); | ||
1579 | |||
1580 | kfree(trans); | ||
1581 | } | ||
1582 | |||
1583 | static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) | ||
1584 | { | ||
1585 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1586 | |||
1587 | if (state) | ||
1588 | set_bit(STATUS_TPOWER_PMI, &trans_pcie->status); | ||
1589 | else | ||
1590 | clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); | ||
1591 | } | ||
1592 | |||
1593 | #ifdef CONFIG_PM_SLEEP | ||
1594 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) | ||
1595 | { | ||
1596 | return 0; | ||
1597 | } | ||
1598 | |||
1599 | static int iwl_trans_pcie_resume(struct iwl_trans *trans) | ||
1600 | { | ||
1601 | bool hw_rfkill; | ||
1602 | |||
1603 | iwl_enable_rfkill_int(trans); | ||
1604 | |||
1605 | hw_rfkill = iwl_is_rfkill_set(trans); | ||
1606 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | ||
1607 | |||
1608 | if (!hw_rfkill) | ||
1609 | iwl_enable_interrupts(trans); | ||
1610 | |||
1611 | return 0; | ||
1612 | } | ||
1613 | #endif /* CONFIG_PM_SLEEP */ | ||
1614 | |||
1615 | #define IWL_FLUSH_WAIT_MS 2000 | ||
1616 | |||
1617 | static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) | ||
1618 | { | ||
1619 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1620 | struct iwl_tx_queue *txq; | ||
1621 | struct iwl_queue *q; | ||
1622 | int cnt; | ||
1623 | unsigned long now = jiffies; | ||
1624 | int ret = 0; | ||
1625 | |||
1626 | /* waiting for all the tx frames to complete might take a while */ | ||
1627 | for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { | ||
1628 | if (cnt == trans_pcie->cmd_queue) | ||
1629 | continue; | ||
1630 | txq = &trans_pcie->txq[cnt]; | ||
1631 | q = &txq->q; | ||
1632 | while (q->read_ptr != q->write_ptr && !time_after(jiffies, | ||
1633 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) | ||
1634 | msleep(1); | ||
1635 | |||
1636 | if (q->read_ptr != q->write_ptr) { | ||
1637 | IWL_ERR(trans, "fail to flush all tx fifo queues\n"); | ||
1638 | ret = -ETIMEDOUT; | ||
1639 | break; | ||
1640 | } | ||
1641 | } | ||
1642 | return ret; | ||
1643 | } | ||
1644 | |||
1645 | static const char *get_fh_string(int cmd) | ||
1646 | { | ||
1647 | #define IWL_CMD(x) case x: return #x | ||
1648 | switch (cmd) { | ||
1649 | IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); | ||
1650 | IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); | ||
1651 | IWL_CMD(FH_RSCSR_CHNL0_WPTR); | ||
1652 | IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); | ||
1653 | IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); | ||
1654 | IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); | ||
1655 | IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); | ||
1656 | IWL_CMD(FH_TSSR_TX_STATUS_REG); | ||
1657 | IWL_CMD(FH_TSSR_TX_ERROR_REG); | ||
1658 | default: | ||
1659 | return "UNKNOWN"; | ||
1660 | } | ||
1661 | #undef IWL_CMD | ||
1662 | } | ||
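/*
 * The IWL_CMD() helper relies on the preprocessor '#' stringify operator,
 * so e.g. IWL_CMD(FH_TSSR_TX_STATUS_REG) expands to
 *
 *	case FH_TSSR_TX_STATUS_REG: return "FH_TSSR_TX_STATUS_REG";
 *
 * keeping the register table and its debug strings in sync by construction.
 */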
1663 | |||
1664 | int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) | ||
1665 | { | ||
1666 | int i; | ||
1667 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1668 | int pos = 0; | ||
1669 | size_t bufsz = 0; | ||
1670 | #endif | ||
1671 | static const u32 fh_tbl[] = { | ||
1672 | FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
1673 | FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
1674 | FH_RSCSR_CHNL0_WPTR, | ||
1675 | FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
1676 | FH_MEM_RSSR_SHARED_CTRL_REG, | ||
1677 | FH_MEM_RSSR_RX_STATUS_REG, | ||
1678 | FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, | ||
1679 | FH_TSSR_TX_STATUS_REG, | ||
1680 | FH_TSSR_TX_ERROR_REG | ||
1681 | }; | ||
1682 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1683 | if (display) { | ||
1684 | bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; | ||
1685 | *buf = kmalloc(bufsz, GFP_KERNEL); | ||
1686 | if (!*buf) | ||
1687 | return -ENOMEM; | ||
1688 | pos += scnprintf(*buf + pos, bufsz - pos, | ||
1689 | "FH register values:\n"); | ||
1690 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | ||
1691 | pos += scnprintf(*buf + pos, bufsz - pos, | ||
1692 | " %34s: 0X%08x\n", | ||
1693 | get_fh_string(fh_tbl[i]), | ||
1694 | iwl_read_direct32(trans, fh_tbl[i])); | ||
1695 | } | ||
1696 | return pos; | ||
1697 | } | ||
1698 | #endif | ||
1699 | IWL_ERR(trans, "FH register values:\n"); | ||
1700 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | ||
1701 | IWL_ERR(trans, " %34s: 0X%08x\n", | ||
1702 | get_fh_string(fh_tbl[i]), | ||
1703 | iwl_read_direct32(trans, fh_tbl[i])); | ||
1704 | } | ||
1705 | return 0; | ||
1706 | } | ||
1707 | |||
1708 | static const char *get_csr_string(int cmd) | ||
1709 | { | ||
1710 | #define IWL_CMD(x) case x: return #x | ||
1711 | switch (cmd) { | ||
1712 | IWL_CMD(CSR_HW_IF_CONFIG_REG); | ||
1713 | IWL_CMD(CSR_INT_COALESCING); | ||
1714 | IWL_CMD(CSR_INT); | ||
1715 | IWL_CMD(CSR_INT_MASK); | ||
1716 | IWL_CMD(CSR_FH_INT_STATUS); | ||
1717 | IWL_CMD(CSR_GPIO_IN); | ||
1718 | IWL_CMD(CSR_RESET); | ||
1719 | IWL_CMD(CSR_GP_CNTRL); | ||
1720 | IWL_CMD(CSR_HW_REV); | ||
1721 | IWL_CMD(CSR_EEPROM_REG); | ||
1722 | IWL_CMD(CSR_EEPROM_GP); | ||
1723 | IWL_CMD(CSR_OTP_GP_REG); | ||
1724 | IWL_CMD(CSR_GIO_REG); | ||
1725 | IWL_CMD(CSR_GP_UCODE_REG); | ||
1726 | IWL_CMD(CSR_GP_DRIVER_REG); | ||
1727 | IWL_CMD(CSR_UCODE_DRV_GP1); | ||
1728 | IWL_CMD(CSR_UCODE_DRV_GP2); | ||
1729 | IWL_CMD(CSR_LED_REG); | ||
1730 | IWL_CMD(CSR_DRAM_INT_TBL_REG); | ||
1731 | IWL_CMD(CSR_GIO_CHICKEN_BITS); | ||
1732 | IWL_CMD(CSR_ANA_PLL_CFG); | ||
1733 | IWL_CMD(CSR_HW_REV_WA_REG); | ||
1734 | IWL_CMD(CSR_DBG_HPET_MEM_REG); | ||
1735 | default: | ||
1736 | return "UNKNOWN"; | ||
1737 | } | ||
1738 | #undef IWL_CMD | ||
1739 | } | ||
1740 | |||
1741 | void iwl_dump_csr(struct iwl_trans *trans) | ||
1742 | { | ||
1743 | int i; | ||
1744 | static const u32 csr_tbl[] = { | ||
1745 | CSR_HW_IF_CONFIG_REG, | ||
1746 | CSR_INT_COALESCING, | ||
1747 | CSR_INT, | ||
1748 | CSR_INT_MASK, | ||
1749 | CSR_FH_INT_STATUS, | ||
1750 | CSR_GPIO_IN, | ||
1751 | CSR_RESET, | ||
1752 | CSR_GP_CNTRL, | ||
1753 | CSR_HW_REV, | ||
1754 | CSR_EEPROM_REG, | ||
1755 | CSR_EEPROM_GP, | ||
1756 | CSR_OTP_GP_REG, | ||
1757 | CSR_GIO_REG, | ||
1758 | CSR_GP_UCODE_REG, | ||
1759 | CSR_GP_DRIVER_REG, | ||
1760 | CSR_UCODE_DRV_GP1, | ||
1761 | CSR_UCODE_DRV_GP2, | ||
1762 | CSR_LED_REG, | ||
1763 | CSR_DRAM_INT_TBL_REG, | ||
1764 | CSR_GIO_CHICKEN_BITS, | ||
1765 | CSR_ANA_PLL_CFG, | ||
1766 | CSR_HW_REV_WA_REG, | ||
1767 | CSR_DBG_HPET_MEM_REG | ||
1768 | }; | ||
1769 | IWL_ERR(trans, "CSR values:\n"); | ||
1770 | IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " | ||
1771 | "CSR_INT_PERIODIC_REG)\n"); | ||
1772 | for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { | ||
1773 | IWL_ERR(trans, " %25s: 0X%08x\n", | ||
1774 | get_csr_string(csr_tbl[i]), | ||
1775 | iwl_read32(trans, csr_tbl[i])); | ||
1776 | } | ||
1777 | } | ||
1778 | |||
1779 | #ifdef CONFIG_IWLWIFI_DEBUGFS | ||
1780 | /* creation and removal of files */ | ||
1781 | #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ | ||
1782 | if (!debugfs_create_file(#name, mode, parent, trans, \ | ||
1783 | &iwl_dbgfs_##name##_ops)) \ | ||
1784 | return -ENOMEM; \ | ||
1785 | } while (0) | ||
1786 | |||
1787 | /* file operations */ | ||
1788 | #define DEBUGFS_READ_FUNC(name) \ | ||
1789 | static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ | ||
1790 | char __user *user_buf, \ | ||
1791 | size_t count, loff_t *ppos); | ||
1792 | |||
1793 | #define DEBUGFS_WRITE_FUNC(name) \ | ||
1794 | static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ | ||
1795 | const char __user *user_buf, \ | ||
1796 | size_t count, loff_t *ppos); | ||
1797 | |||
1798 | |||
1799 | #define DEBUGFS_READ_FILE_OPS(name) \ | ||
1800 | DEBUGFS_READ_FUNC(name); \ | ||
1801 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | ||
1802 | .read = iwl_dbgfs_##name##_read, \ | ||
1803 | .open = simple_open, \ | ||
1804 | .llseek = generic_file_llseek, \ | ||
1805 | }; | ||
1806 | |||
1807 | #define DEBUGFS_WRITE_FILE_OPS(name) \ | ||
1808 | DEBUGFS_WRITE_FUNC(name); \ | ||
1809 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | ||
1810 | .write = iwl_dbgfs_##name##_write, \ | ||
1811 | .open = simple_open, \ | ||
1812 | .llseek = generic_file_llseek, \ | ||
1813 | }; | ||
1814 | |||
1815 | #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ | ||
1816 | DEBUGFS_READ_FUNC(name); \ | ||
1817 | DEBUGFS_WRITE_FUNC(name); \ | ||
1818 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | ||
1819 | .write = iwl_dbgfs_##name##_write, \ | ||
1820 | .read = iwl_dbgfs_##name##_read, \ | ||
1821 | .open = simple_open, \ | ||
1822 | .llseek = generic_file_llseek, \ | ||
1823 | }; | ||
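/*
 * As a concrete expansion, DEBUGFS_READ_FILE_OPS(rx_queue) forward-declares
 * iwl_dbgfs_rx_queue_read() and defines a read-only iwl_dbgfs_rx_queue_ops
 * file_operations; DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR) in
 * iwl_trans_pcie_dbgfs_register() below then creates the "rx_queue" debugfs
 * file bound to those ops, with the iwl_trans as file private data.
 */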
1824 | |||
1825 | static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, | ||
1826 | char __user *user_buf, | ||
1827 | size_t count, loff_t *ppos) | ||
1828 | { | ||
1829 | struct iwl_trans *trans = file->private_data; | ||
1830 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1831 | struct iwl_tx_queue *txq; | ||
1832 | struct iwl_queue *q; | ||
1833 | char *buf; | ||
1834 | int pos = 0; | ||
1835 | int cnt; | ||
1836 | int ret; | ||
1837 | size_t bufsz; | ||
1838 | |||
1839 | bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues; | ||
1840 | |||
1841 | if (!trans_pcie->txq) | ||
1842 | return -EAGAIN; | ||
1843 | |||
1844 | buf = kzalloc(bufsz, GFP_KERNEL); | ||
1845 | if (!buf) | ||
1846 | return -ENOMEM; | ||
1847 | |||
1848 | for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { | ||
1849 | txq = &trans_pcie->txq[cnt]; | ||
1850 | q = &txq->q; | ||
1851 | pos += scnprintf(buf + pos, bufsz - pos, | ||
1852 | "hwq %.2d: read=%u write=%u use=%d stop=%d\n", | ||
1853 | cnt, q->read_ptr, q->write_ptr, | ||
1854 | !!test_bit(cnt, trans_pcie->queue_used), | ||
1855 | !!test_bit(cnt, trans_pcie->queue_stopped)); | ||
1856 | } | ||
1857 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | ||
1858 | kfree(buf); | ||
1859 | return ret; | ||
1860 | } | ||
1861 | |||
1862 | static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, | ||
1863 | char __user *user_buf, | ||
1864 | size_t count, loff_t *ppos) | ||
1865 | { | ||
1866 | struct iwl_trans *trans = file->private_data; | ||
1867 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1868 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
1869 | char buf[256]; | ||
1870 | int pos = 0; | ||
1871 | const size_t bufsz = sizeof(buf); | ||
1872 | |||
1873 | pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", | ||
1874 | rxq->read); | ||
1875 | pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", | ||
1876 | rxq->write); | ||
1877 | pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", | ||
1878 | rxq->free_count); | ||
1879 | if (rxq->rb_stts) { | ||
1880 | pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", | ||
1881 | le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); | ||
1882 | } else { | ||
1883 | pos += scnprintf(buf + pos, bufsz - pos, | ||
1884 | "closed_rb_num: Not Allocated\n"); | ||
1885 | } | ||
1886 | return simple_read_from_buffer(user_buf, count, ppos, buf, pos); | ||
1887 | } | ||
1888 | |||
1889 | static ssize_t iwl_dbgfs_interrupt_read(struct file *file, | ||
1890 | char __user *user_buf, | ||
1891 | size_t count, loff_t *ppos) | ||
1892 | { | ||
1893 | struct iwl_trans *trans = file->private_data; | ||
1894 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1895 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | ||
1896 | |||
1897 | int pos = 0; | ||
1898 | char *buf; | ||
1899 | int bufsz = 24 * 64; /* 24 items * 64 char per item */ | ||
1900 | ssize_t ret; | ||
1901 | |||
1902 | buf = kzalloc(bufsz, GFP_KERNEL); | ||
1903 | if (!buf) | ||
1904 | return -ENOMEM; | ||
1905 | |||
1906 | pos += scnprintf(buf + pos, bufsz - pos, | ||
1907 | "Interrupt Statistics Report:\n"); | ||
1908 | |||
1909 | pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", | ||
1910 | isr_stats->hw); | ||
1911 | pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", | ||
1912 | isr_stats->sw); | ||
1913 | if (isr_stats->sw || isr_stats->hw) { | ||
1914 | pos += scnprintf(buf + pos, bufsz - pos, | ||
1915 | "\tLast Restarting Code: 0x%X\n", | ||
1916 | isr_stats->err_code); | ||
1917 | } | ||
1918 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1919 | pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", | ||
1920 | isr_stats->sch); | ||
1921 | pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", | ||
1922 | isr_stats->alive); | ||
1923 | #endif | ||
1924 | pos += scnprintf(buf + pos, bufsz - pos, | ||
1925 | "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); | ||
1926 | |||
1927 | pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", | ||
1928 | isr_stats->ctkill); | ||
1929 | |||
1930 | pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", | ||
1931 | isr_stats->wakeup); | ||
1932 | |||
1933 | pos += scnprintf(buf + pos, bufsz - pos, | ||
1934 | "Rx command responses:\t\t %u\n", isr_stats->rx); | ||
1935 | |||
1936 | pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", | ||
1937 | isr_stats->tx); | ||
1938 | |||
1939 | pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", | ||
1940 | isr_stats->unhandled); | ||
1941 | |||
1942 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | ||
1943 | kfree(buf); | ||
1944 | return ret; | ||
1945 | } | ||
1946 | |||
1947 | static ssize_t iwl_dbgfs_interrupt_write(struct file *file, | ||
1948 | const char __user *user_buf, | ||
1949 | size_t count, loff_t *ppos) | ||
1950 | { | ||
1951 | struct iwl_trans *trans = file->private_data; | ||
1952 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1953 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | ||
1954 | |||
1955 | char buf[8]; | ||
1956 | int buf_size; | ||
1957 | u32 reset_flag; | ||
1958 | |||
1959 | memset(buf, 0, sizeof(buf)); | ||
1960 | buf_size = min(count, sizeof(buf) - 1); | ||
1961 | if (copy_from_user(buf, user_buf, buf_size)) | ||
1962 | return -EFAULT; | ||
1963 | if (sscanf(buf, "%x", &reset_flag) != 1) | ||
1964 | return -EFAULT; | ||
1965 | if (reset_flag == 0) | ||
1966 | memset(isr_stats, 0, sizeof(*isr_stats)); | ||
1967 | |||
1968 | return count; | ||
1969 | } | ||
1970 | |||
1971 | static ssize_t iwl_dbgfs_csr_write(struct file *file, | ||
1972 | const char __user *user_buf, | ||
1973 | size_t count, loff_t *ppos) | ||
1974 | { | ||
1975 | struct iwl_trans *trans = file->private_data; | ||
1976 | char buf[8]; | ||
1977 | int buf_size; | ||
1978 | int csr; | ||
1979 | |||
1980 | memset(buf, 0, sizeof(buf)); | ||
1981 | buf_size = min(count, sizeof(buf) - 1); | ||
1982 | if (copy_from_user(buf, user_buf, buf_size)) | ||
1983 | return -EFAULT; | ||
1984 | if (sscanf(buf, "%d", &csr) != 1) | ||
1985 | return -EFAULT; | ||
1986 | |||
1987 | iwl_dump_csr(trans); | ||
1988 | |||
1989 | return count; | ||
1990 | } | ||
1991 | |||
1992 | static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, | ||
1993 | char __user *user_buf, | ||
1994 | size_t count, loff_t *ppos) | ||
1995 | { | ||
1996 | struct iwl_trans *trans = file->private_data; | ||
1997 | char *buf = NULL; | ||
1998 | int pos = 0; | ||
1999 | ssize_t ret = -EFAULT; | ||
2000 | |||
2001 | ret = pos = iwl_dump_fh(trans, &buf, true); | ||
2002 | if (buf) { | ||
2003 | ret = simple_read_from_buffer(user_buf, | ||
2004 | count, ppos, buf, pos); | ||
2005 | kfree(buf); | ||
2006 | } | ||
2007 | |||
2008 | return ret; | ||
2009 | } | ||
2010 | |||
2011 | static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, | ||
2012 | const char __user *user_buf, | ||
2013 | size_t count, loff_t *ppos) | ||
2014 | { | ||
2015 | struct iwl_trans *trans = file->private_data; | ||
2016 | |||
2017 | if (!trans->op_mode) | ||
2018 | return -EAGAIN; | ||
2019 | |||
2020 | iwl_op_mode_nic_error(trans->op_mode); | ||
2021 | |||
2022 | return count; | ||
2023 | } | ||
2024 | |||
2025 | DEBUGFS_READ_WRITE_FILE_OPS(interrupt); | ||
2026 | DEBUGFS_READ_FILE_OPS(fh_reg); | ||
2027 | DEBUGFS_READ_FILE_OPS(rx_queue); | ||
2028 | DEBUGFS_READ_FILE_OPS(tx_queue); | ||
2029 | DEBUGFS_WRITE_FILE_OPS(csr); | ||
2030 | DEBUGFS_WRITE_FILE_OPS(fw_restart); | ||
2031 | |||
2032 | /* | ||
2033 | * Create the debugfs files and directories | ||
2034 | * | ||
2035 | */ | ||
2036 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | ||
2037 | struct dentry *dir) | ||
2038 | { | ||
2039 | DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); | ||
2040 | DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); | ||
2041 | DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); | ||
2042 | DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); | ||
2043 | DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); | ||
2044 | DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); | ||
2045 | return 0; | ||
2046 | } | ||
2047 | #else | ||
2048 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | ||
2049 | struct dentry *dir) | ||
2050 | { | ||
2051 | return 0; | ||
2052 | } | ||
2053 | #endif /* CONFIG_IWLWIFI_DEBUGFS */ | ||
2054 | |||
2055 | static const struct iwl_trans_ops trans_ops_pcie = { | ||
2056 | .start_hw = iwl_trans_pcie_start_hw, | ||
2057 | .stop_hw = iwl_trans_pcie_stop_hw, | ||
2058 | .fw_alive = iwl_trans_pcie_fw_alive, | ||
2059 | .start_fw = iwl_trans_pcie_start_fw, | ||
2060 | .stop_device = iwl_trans_pcie_stop_device, | ||
2061 | |||
2062 | .wowlan_suspend = iwl_trans_pcie_wowlan_suspend, | ||
2063 | |||
2064 | .send_cmd = iwl_trans_pcie_send_cmd, | ||
2065 | |||
2066 | .tx = iwl_trans_pcie_tx, | ||
2067 | .reclaim = iwl_trans_pcie_reclaim, | ||
2068 | |||
2069 | .txq_disable = iwl_trans_pcie_txq_disable, | ||
2070 | .txq_enable = iwl_trans_pcie_txq_enable, | ||
2071 | |||
2072 | .dbgfs_register = iwl_trans_pcie_dbgfs_register, | ||
2073 | |||
2074 | .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, | ||
2075 | |||
2076 | #ifdef CONFIG_PM_SLEEP | ||
2077 | .suspend = iwl_trans_pcie_suspend, | ||
2078 | .resume = iwl_trans_pcie_resume, | ||
2079 | #endif | ||
2080 | .write8 = iwl_trans_pcie_write8, | ||
2081 | .write32 = iwl_trans_pcie_write32, | ||
2082 | .read32 = iwl_trans_pcie_read32, | ||
2083 | .configure = iwl_trans_pcie_configure, | ||
2084 | .set_pmi = iwl_trans_pcie_set_pmi, | ||
2085 | }; | ||
2086 | |||
2087 | struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | ||
2088 | const struct pci_device_id *ent, | ||
2089 | const struct iwl_cfg *cfg) | ||
2090 | { | ||
2091 | struct iwl_trans_pcie *trans_pcie; | ||
2092 | struct iwl_trans *trans; | ||
2093 | char cmd_pool_name[100]; | ||
2094 | u16 pci_cmd; | ||
2095 | int err; | ||
2096 | |||
2097 | trans = kzalloc(sizeof(struct iwl_trans) + | ||
2098 | sizeof(struct iwl_trans_pcie), GFP_KERNEL); | ||
2099 | |||
2100 | if (WARN_ON(!trans)) | ||
2101 | return NULL; | ||
2102 | |||
2103 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
2104 | |||
2105 | trans->ops = &trans_ops_pcie; | ||
2106 | trans->cfg = cfg; | ||
2107 | trans_pcie->trans = trans; | ||
2108 | spin_lock_init(&trans_pcie->irq_lock); | ||
2109 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); | ||
2110 | |||
2111 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
2112 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
2113 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
2114 | PCIE_LINK_STATE_CLKPM); | ||
2115 | |||
2116 | if (pci_enable_device(pdev)) { | ||
2117 | err = -ENODEV; | ||
2118 | goto out_no_pci; | ||
2119 | } | ||
2120 | |||
2121 | pci_set_master(pdev); | ||
2122 | |||
2123 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | ||
2124 | if (!err) | ||
2125 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); | ||
2126 | if (err) { | ||
2127 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2128 | if (!err) | ||
2129 | err = pci_set_consistent_dma_mask(pdev, | ||
2130 | DMA_BIT_MASK(32)); | ||
2131 | /* both attempts failed: */ | ||
2132 | if (err) { | ||
2133 | dev_printk(KERN_ERR, &pdev->dev, | ||
2134 | "No suitable DMA available.\n"); | ||
2135 | goto out_pci_disable_device; | ||
2136 | } | ||
2137 | } | ||
2138 | |||
2139 | err = pci_request_regions(pdev, DRV_NAME); | ||
2140 | if (err) { | ||
2141 | dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed"); | ||
2142 | goto out_pci_disable_device; | ||
2143 | } | ||
2144 | |||
2145 | trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); | ||
2146 | if (!trans_pcie->hw_base) { | ||
2147 | dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed"); | ||
2148 | err = -ENODEV; | ||
2149 | goto out_pci_release_regions; | ||
2150 | } | ||
2151 | |||
2152 | dev_printk(KERN_INFO, &pdev->dev, | ||
2153 | "pci_resource_len = 0x%08llx\n", | ||
2154 | (unsigned long long) pci_resource_len(pdev, 0)); | ||
2155 | dev_printk(KERN_INFO, &pdev->dev, | ||
2156 | "pci_resource_base = %p\n", trans_pcie->hw_base); | ||
2157 | |||
2158 | dev_printk(KERN_INFO, &pdev->dev, | ||
2159 | "HW Revision ID = 0x%X\n", pdev->revision); | ||
2160 | |||
2161 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | ||
2162 | * PCI Tx retries from interfering with C3 CPU state */ | ||
2163 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | ||
2164 | |||
2165 | err = pci_enable_msi(pdev); | ||
2166 | if (err) | ||
2167 | dev_printk(KERN_ERR, &pdev->dev, | ||
2168 | "pci_enable_msi failed(0X%x)", err); | ||
2169 | |||
2170 | trans->dev = &pdev->dev; | ||
2171 | trans_pcie->irq = pdev->irq; | ||
2172 | trans_pcie->pci_dev = pdev; | ||
2173 | trans->hw_rev = iwl_read32(trans, CSR_HW_REV); | ||
2174 | trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; | ||
2175 | snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), | ||
2176 | "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); | ||
2177 | |||
2178 | /* TODO: Move this away, not needed if not MSI */ | ||
2179 | /* enable rfkill interrupt: hw bug w/a */ | ||
2180 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); | ||
2181 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { | ||
2182 | pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; | ||
2183 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | ||
2184 | } | ||
2185 | |||
2186 | /* Initialize the wait queue for commands */ | ||
2187 | init_waitqueue_head(&trans->wait_command_queue); | ||
2188 | spin_lock_init(&trans->reg_lock); | ||
2189 | |||
2190 | snprintf(cmd_pool_name, sizeof(cmd_pool_name), "iwl_cmd_pool:%s", | ||
2191 | dev_name(trans->dev)); | ||
2192 | |||
2193 | trans->dev_cmd_headroom = 0; | ||
2194 | trans->dev_cmd_pool = | ||
2195 | kmem_cache_create(cmd_pool_name, | ||
2196 | sizeof(struct iwl_device_cmd) | ||
2197 | + trans->dev_cmd_headroom, | ||
2198 | sizeof(void *), | ||
2199 | SLAB_HWCACHE_ALIGN, | ||
2200 | NULL); | ||
2201 | |||
2202 | if (!trans->dev_cmd_pool) | ||
2203 | goto out_pci_disable_msi; | ||
2204 | |||
2205 | return trans; | ||
2206 | |||
2207 | out_pci_disable_msi: | ||
2208 | pci_disable_msi(pdev); | ||
2209 | out_pci_release_regions: | ||
2210 | pci_release_regions(pdev); | ||
2211 | out_pci_disable_device: | ||
2212 | pci_disable_device(pdev); | ||
2213 | out_no_pci: | ||
2214 | kfree(trans); | ||
2215 | return NULL; | ||
2216 | } | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
new file mode 100644
index 000000000000..6baf8deef519
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -0,0 +1,969 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | #include <linux/etherdevice.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/sched.h> | ||
32 | |||
33 | #include "iwl-debug.h" | ||
34 | #include "iwl-csr.h" | ||
35 | #include "iwl-prph.h" | ||
36 | #include "iwl-io.h" | ||
37 | #include "iwl-op-mode.h" | ||
38 | #include "internal.h" | ||
39 | /* FIXME: need to abstract out TX command (once we know what it looks like) */ | ||
40 | #include "dvm/commands.h" | ||
41 | |||
42 | #define IWL_TX_CRC_SIZE 4 | ||
43 | #define IWL_TX_DELIMITER_SIZE 4 | ||
44 | |||
45 | /** | ||
46 | * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | ||
47 | */ | ||
48 | void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | ||
49 | struct iwl_tx_queue *txq, | ||
50 | u16 byte_cnt) | ||
51 | { | ||
52 | struct iwlagn_scd_bc_tbl *scd_bc_tbl; | ||
53 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
54 | int write_ptr = txq->q.write_ptr; | ||
55 | int txq_id = txq->q.id; | ||
56 | u8 sec_ctl = 0; | ||
57 | u8 sta_id = 0; | ||
58 | u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; | ||
59 | __le16 bc_ent; | ||
60 | struct iwl_tx_cmd *tx_cmd = | ||
61 | (void *) txq->entries[txq->q.write_ptr].cmd->payload; | ||
62 | |||
63 | scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
64 | |||
65 | WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); | ||
66 | |||
67 | sta_id = tx_cmd->sta_id; | ||
68 | sec_ctl = tx_cmd->sec_ctl; | ||
69 | |||
70 | switch (sec_ctl & TX_CMD_SEC_MSK) { | ||
71 | case TX_CMD_SEC_CCM: | ||
72 | len += CCMP_MIC_LEN; | ||
73 | break; | ||
74 | case TX_CMD_SEC_TKIP: | ||
75 | len += TKIP_ICV_LEN; | ||
76 | break; | ||
77 | case TX_CMD_SEC_WEP: | ||
78 | len += WEP_IV_LEN + WEP_ICV_LEN; | ||
79 | break; | ||
80 | } | ||
81 | |||
82 | bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); | ||
83 | |||
84 | scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; | ||
85 | |||
86 | if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
87 | scd_bc_tbl[txq_id]. | ||
88 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; | ||
89 | } | ||
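/*
 * Illustrative values: sta_id == 3 and a final len of 0x064 pack into
 * bc_ent == cpu_to_le16(0x3064). The second store above mirrors entries
 * 0..TFD_QUEUE_SIZE_BC_DUP-1 past TFD_QUEUE_SIZE_MAX, presumably so the
 * scheduler can read past the ring end without wrap-around handling.
 */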
90 | |||
91 | /** | ||
92 | * iwl_txq_update_write_ptr - Send new write index to hardware | ||
93 | */ | ||
94 | void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) | ||
95 | { | ||
96 | u32 reg = 0; | ||
97 | int txq_id = txq->q.id; | ||
98 | |||
99 | if (txq->need_update == 0) | ||
100 | return; | ||
101 | |||
102 | if (trans->cfg->base_params->shadow_reg_enable) { | ||
103 | /* shadow register enabled */ | ||
104 | iwl_write32(trans, HBUS_TARG_WRPTR, | ||
105 | txq->q.write_ptr | (txq_id << 8)); | ||
106 | } else { | ||
107 | struct iwl_trans_pcie *trans_pcie = | ||
108 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
109 | /* if we're trying to save power */ | ||
110 | if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) { | ||
111 | /* wake up nic if it's powered down ... | ||
112 | * uCode will wake up, and interrupt us again, so next | ||
113 | * time we'll skip this part. */ | ||
114 | reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); | ||
115 | |||
116 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
117 | IWL_DEBUG_INFO(trans, | ||
118 | "Tx queue %d requesting wakeup," | ||
119 | " GP1 = 0x%x\n", txq_id, reg); | ||
120 | iwl_set_bit(trans, CSR_GP_CNTRL, | ||
121 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
122 | return; | ||
123 | } | ||
124 | |||
125 | iwl_write_direct32(trans, HBUS_TARG_WRPTR, | ||
126 | txq->q.write_ptr | (txq_id << 8)); | ||
127 | |||
128 | /* | ||
129 | * else not in power-save mode, | ||
130 | * uCode will never sleep when we're | ||
131 | * trying to tx (during RFKILL, we're not trying to tx). | ||
132 | */ | ||
133 | } else | ||
134 | iwl_write32(trans, HBUS_TARG_WRPTR, | ||
135 | txq->q.write_ptr | (txq_id << 8)); | ||
136 | } | ||
137 | txq->need_update = 0; | ||
138 | } | ||
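/*
 * HBUS_TARG_WRPTR packs queue id and ring index into a single dword as
 * write_ptr | (txq_id << 8); e.g. (illustrative values) txq_id == 4 with
 * write_ptr == 0x2a is written as 0x042a.
 */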
139 | |||
140 | static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) | ||
141 | { | ||
142 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | ||
143 | |||
144 | dma_addr_t addr = get_unaligned_le32(&tb->lo); | ||
145 | if (sizeof(dma_addr_t) > sizeof(u32)) | ||
146 | addr |= | ||
147 | ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; | ||
148 | |||
149 | return addr; | ||
150 | } | ||
151 | |||
152 | static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) | ||
153 | { | ||
154 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | ||
155 | |||
156 | return le16_to_cpu(tb->hi_n_len) >> 4; | ||
157 | } | ||
158 | |||
159 | static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, | ||
160 | dma_addr_t addr, u16 len) | ||
161 | { | ||
162 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | ||
163 | u16 hi_n_len = len << 4; | ||
164 | |||
165 | put_unaligned_le32(addr, &tb->lo); | ||
166 | if (sizeof(dma_addr_t) > sizeof(u32)) | ||
167 | hi_n_len |= ((addr >> 16) >> 16) & 0xF; | ||
168 | |||
169 | tb->hi_n_len = cpu_to_le16(hi_n_len); | ||
170 | |||
171 | tfd->num_tbs = idx + 1; | ||
172 | } | ||
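/*
 * Worked example of the 36-bit packing above (made-up values):
 * addr == 0xA12345678 and len == 60 give tb->lo == 0x12345678 and
 * tb->hi_n_len == (60 << 4) | 0xA == 0x3ca; iwl_tfd_tb_get_addr() and
 * iwl_tfd_tb_get_len() invert exactly this encoding.
 */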
173 | |||
174 | static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) | ||
175 | { | ||
176 | return tfd->num_tbs & 0x1f; | ||
177 | } | ||
178 | |||
179 | static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | ||
180 | struct iwl_tfd *tfd, enum dma_data_direction dma_dir) | ||
181 | { | ||
182 | int i; | ||
183 | int num_tbs; | ||
184 | |||
185 | /* Sanity check on number of chunks */ | ||
186 | num_tbs = iwl_tfd_get_num_tbs(tfd); | ||
187 | |||
188 | if (num_tbs >= IWL_NUM_OF_TBS) { | ||
189 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); | ||
190 | /* @todo issue fatal error, it is quite a serious situation */ | ||
191 | return; | ||
192 | } | ||
193 | |||
194 | /* Unmap tx_cmd */ | ||
195 | if (num_tbs) | ||
196 | dma_unmap_single(trans->dev, | ||
197 | dma_unmap_addr(meta, mapping), | ||
198 | dma_unmap_len(meta, len), | ||
199 | DMA_BIDIRECTIONAL); | ||
200 | |||
201 | /* Unmap chunks, if any. */ | ||
202 | for (i = 1; i < num_tbs; i++) | ||
203 | dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i), | ||
204 | iwl_tfd_tb_get_len(tfd, i), dma_dir); | ||
205 | |||
206 | tfd->num_tbs = 0; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] | ||
211 | * @trans - transport private data | ||
212 | * @txq - tx queue | ||
213 | * @dma_dir - the direction of the DMA mapping | ||
214 | * | ||
215 | * Does NOT advance any TFD circular buffer read/write indexes | ||
216 | * Does NOT free the TFD itself (which is within circular buffer) | ||
217 | */ | ||
218 | void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | ||
219 | enum dma_data_direction dma_dir) | ||
220 | { | ||
221 | struct iwl_tfd *tfd_tmp = txq->tfds; | ||
222 | |||
223 | /* rd_ptr is bounded by n_bd and idx is bounded by n_window */ | ||
224 | int rd_ptr = txq->q.read_ptr; | ||
225 | int idx = get_cmd_index(&txq->q, rd_ptr); | ||
226 | |||
227 | lockdep_assert_held(&txq->lock); | ||
228 | |||
229 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ | ||
230 | iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], | ||
231 | dma_dir); | ||
232 | |||
233 | /* free SKB */ | ||
234 | if (txq->entries) { | ||
235 | struct sk_buff *skb; | ||
236 | |||
237 | skb = txq->entries[idx].skb; | ||
238 | |||
239 | /* Can be called from irqs-disabled context | ||
240 | * If skb is not NULL, it means that the whole queue is being | ||
241 | * freed and that the queue is not empty - free the skb | ||
242 | */ | ||
243 | if (skb) { | ||
244 | iwl_op_mode_free_skb(trans->op_mode, skb); | ||
245 | txq->entries[idx].skb = NULL; | ||
246 | } | ||
247 | } | ||
248 | } | ||
249 | |||
250 | int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | ||
251 | struct iwl_tx_queue *txq, | ||
252 | dma_addr_t addr, u16 len, | ||
253 | u8 reset) | ||
254 | { | ||
255 | struct iwl_queue *q; | ||
256 | struct iwl_tfd *tfd, *tfd_tmp; | ||
257 | u32 num_tbs; | ||
258 | |||
259 | q = &txq->q; | ||
260 | tfd_tmp = txq->tfds; | ||
261 | tfd = &tfd_tmp[q->write_ptr]; | ||
262 | |||
263 | if (reset) | ||
264 | memset(tfd, 0, sizeof(*tfd)); | ||
265 | |||
266 | num_tbs = iwl_tfd_get_num_tbs(tfd); | ||
267 | |||
268 | /* Each TFD can point to a maximum of 20 Tx buffers */ | ||
269 | if (num_tbs >= IWL_NUM_OF_TBS) { | ||
270 | IWL_ERR(trans, "Error can not send more than %d chunks\n", | ||
271 | IWL_NUM_OF_TBS); | ||
272 | return -EINVAL; | ||
273 | } | ||
274 | |||
275 | if (WARN_ON(addr & ~DMA_BIT_MASK(36))) | ||
276 | return -EINVAL; | ||
277 | |||
278 | if (unlikely(addr & ~IWL_TX_DMA_MASK)) | ||
279 | IWL_ERR(trans, "Unaligned address = %llx\n", | ||
280 | (unsigned long long)addr); | ||
281 | |||
282 | iwl_tfd_set_tb(tfd, num_tbs, addr, len); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** | ||
288 | * DMA services | ||
289 | * | ||
290 | * Theory of operation | ||
291 | * | ||
292 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer | ||
293 | * of buffer descriptors, each of which points to one or more data buffers for | ||
294 | * the device to read from or fill. Driver and device exchange status of each | ||
295 | * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty | ||
296 | * entries in each circular buffer, to protect against confusing empty and full | ||
297 | * queue states. | ||
298 | * | ||
299 | * The device reads or writes the data in the queues via the device's several | ||
300 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | ||
301 | * | ||
302 | * For Tx queues, there are low mark and high mark limits. If, after queuing | ||
303 | * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. | ||
304 | * When reclaiming packets (on the 'tx done' IRQ), if free space becomes | ||
305 | * > high mark, the Tx queue is resumed. | ||
306 | * | ||
307 | ***************************************************/ | ||
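/*
 * In this file the marks are applied in the Tx path: iwl_trans_pcie_tx()
 * calls iwl_stop_queue() once iwl_queue_space() drops below q->high_mark,
 * and iwl_trans_pcie_reclaim() calls iwl_wake_queue() once space rises
 * above q->low_mark again.
 */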
308 | |||
309 | int iwl_queue_space(const struct iwl_queue *q) | ||
310 | { | ||
311 | int s = q->read_ptr - q->write_ptr; | ||
312 | |||
313 | if (q->read_ptr > q->write_ptr) | ||
314 | s -= q->n_bd; | ||
315 | |||
316 | if (s <= 0) | ||
317 | s += q->n_window; | ||
318 | /* keep some reserve to not confuse empty and full situations */ | ||
319 | s -= 2; | ||
320 | if (s < 0) | ||
321 | s = 0; | ||
322 | return s; | ||
323 | } | ||
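/*
 * Worked example (hypothetical values, data queue with
 * n_bd == n_window == 256): read_ptr == 10, write_ptr == 250 gives
 * s == -240, then s += 256 -> 16; minus the 2-entry reserve leaves
 * 14 free slots.
 */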
324 | |||
325 | /** | ||
326 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | ||
327 | */ | ||
328 | int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) | ||
329 | { | ||
330 | q->n_bd = count; | ||
331 | q->n_window = slots_num; | ||
332 | q->id = id; | ||
333 | |||
334 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | ||
335 | * and iwl_queue_dec_wrap are broken. */ | ||
336 | if (WARN_ON(!is_power_of_2(count))) | ||
337 | return -EINVAL; | ||
338 | |||
339 | /* slots_num must be power-of-two size, otherwise | ||
340 | * get_cmd_index is broken. */ | ||
341 | if (WARN_ON(!is_power_of_2(slots_num))) | ||
342 | return -EINVAL; | ||
343 | |||
344 | q->low_mark = q->n_window / 4; | ||
345 | if (q->low_mark < 4) | ||
346 | q->low_mark = 4; | ||
347 | |||
348 | q->high_mark = q->n_window / 8; | ||
349 | if (q->high_mark < 2) | ||
350 | q->high_mark = 2; | ||
351 | |||
352 | q->write_ptr = q->read_ptr = 0; | ||
353 | |||
354 | return 0; | ||
355 | } | ||
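/*
 * A minimal usage sketch (hypothetical sizes): a 256-entry ring with 256
 * slots,
 *
 *	iwl_queue_init(&txq->q, 256, 256, txq_id);
 *
 * yields low_mark == 64 and high_mark == 32; both counts must be powers
 * of two for the wrap and index helpers to stay correct.
 */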
356 | |||
357 | static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, | ||
358 | struct iwl_tx_queue *txq) | ||
359 | { | ||
360 | struct iwl_trans_pcie *trans_pcie = | ||
361 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
362 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
363 | int txq_id = txq->q.id; | ||
364 | int read_ptr = txq->q.read_ptr; | ||
365 | u8 sta_id = 0; | ||
366 | __le16 bc_ent; | ||
367 | struct iwl_tx_cmd *tx_cmd = | ||
368 | (void *)txq->entries[txq->q.read_ptr].cmd->payload; | ||
369 | |||
370 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | ||
371 | |||
372 | if (txq_id != trans_pcie->cmd_queue) | ||
373 | sta_id = tx_cmd->sta_id; | ||
374 | |||
375 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | ||
376 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
377 | |||
378 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
379 | scd_bc_tbl[txq_id]. | ||
380 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | ||
381 | } | ||
382 | |||
383 | static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | ||
384 | u16 txq_id) | ||
385 | { | ||
386 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
387 | u32 tbl_dw_addr; | ||
388 | u32 tbl_dw; | ||
389 | u16 scd_q2ratid; | ||
390 | |||
391 | scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; | ||
392 | |||
393 | tbl_dw_addr = trans_pcie->scd_base_addr + | ||
394 | SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); | ||
395 | |||
396 | tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr); | ||
397 | |||
398 | if (txq_id & 0x1) | ||
399 | tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); | ||
400 | else | ||
401 | tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); | ||
402 | |||
403 | iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw); | ||
404 | |||
405 | return 0; | ||
406 | } | ||
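/*
 * [Editor's illustration, not in the original diff] The translation
 * table packs two queues per 32-bit word: odd txq_ids land in the high
 * 16 bits, even ones in the low 16 bits. With hypothetical values
 * scd_q2ratid = 0x0123 and txq_id = 11 (odd), the read-modify-write
 * above becomes:
 *
 *     tbl_dw = (0x0123 << 16) | (tbl_dw & 0x0000FFFF);
 *
 * which installs the new mapping while preserving whatever even-numbered
 * queue (here, queue 10) already occupies the low half of the word.
 */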
407 | |||
408 | static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) | ||
409 | { | ||
410 | /* Simply stop the queue, but don't change any configuration; | ||
411 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | ||
412 | iwl_write_prph(trans, | ||
413 | SCD_QUEUE_STATUS_BITS(txq_id), | ||
414 | (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | | ||
415 | (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); | ||
416 | } | ||
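/*
 * [Editor's note, not in the original diff] Because SCD_ACT_EN is the
 * write-enable mask for the ACTIVE bit, the single register write above
 * clears ACTIVE without touching any other configuration in the status
 * register; iwl_trans_pcie_txq_enable() below sets ACTIVE with the same
 * masked-write pattern when it activates the queue.
 */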
417 | |||
418 | void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | ||
419 | int sta_id, int tid, int frame_limit, u16 ssn) | ||
420 | { | ||
421 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
422 | |||
423 | if (test_and_set_bit(txq_id, trans_pcie->queue_used)) | ||
424 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); | ||
425 | |||
426 | /* Stop this Tx queue before configuring it */ | ||
427 | iwl_txq_set_inactive(trans, txq_id); | ||
428 | |||
429 | /* Set this queue as a chain-building queue unless it is CMD queue */ | ||
430 | if (txq_id != trans_pcie->cmd_queue) | ||
431 | iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); | ||
432 | |||
433 | /* If this queue is mapped to a certain station: it is an AGG queue */ | ||
434 | if (sta_id != IWL_INVALID_STATION) { | ||
435 | u16 ra_tid = BUILD_RAxTID(sta_id, tid); | ||
436 | |||
437 | /* Map receiver-address / traffic-ID to this queue */ | ||
438 | iwl_txq_set_ratid_map(trans, ra_tid, txq_id); | ||
439 | |||
440 | /* enable aggregations for the queue */ | ||
441 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); | ||
442 | } else { | ||
443 | /* | ||
444 | * disable aggregations for the queue, this will also make the | ||
445 | * ra_tid mapping configuration irrelevant since it is now a | ||
446 | * non-AGG queue. | ||
447 | */ | ||
448 | iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); | ||
449 | } | ||
450 | |||
451 | /* Place first TFD at index corresponding to start sequence number. | ||
452 |  * Assumes that ssn is valid (!= 0xFFF) */ | ||
453 | trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); | ||
454 | trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); | ||
455 | |||
456 | iwl_write_direct32(trans, HBUS_TARG_WRPTR, | ||
457 | (ssn & 0xff) | (txq_id << 8)); | ||
458 | iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); | ||
459 | |||
460 | /* Set up Tx window size and frame limit for this queue */ | ||
461 | iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + | ||
462 | SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); | ||
463 | iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + | ||
464 | SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), | ||
465 | ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
466 | SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
467 | ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
468 | SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
469 | |||
470 | /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ | ||
471 | iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), | ||
472 | (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | | ||
473 | (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | | ||
474 | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | | ||
475 | SCD_QUEUE_STTS_REG_MSK); | ||
476 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", | ||
477 | txq_id, fifo, ssn & 0xff); | ||
478 | } | ||
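/*
 * [Editor's sketch, not in the original diff] Enabling a queue for an
 * aggregation session, with hypothetical queue/FIFO/station numbers.
 * Passing IWL_INVALID_STATION as sta_id would instead configure the same
 * queue as a plain, non-aggregation queue.
 */
static void example_enable_agg_queue(struct iwl_trans *trans)
{
	int txq_id = 10;	/* hypothetical data queue */
	int fifo = 1;		/* hypothetical TX FIFO */
	int sta_id = 0;		/* station the BA session belongs to */
	int tid = 5;		/* traffic class being aggregated */
	int frame_limit = 64;	/* block-ack window size */
	u16 ssn = 0x120;	/* start sequence number of the session */

	iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id, tid,
				  frame_limit, ssn);
}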
479 | |||
480 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | ||
481 | { | ||
482 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
483 | u16 rd_ptr, wr_ptr; | ||
484 | int n_bd = trans_pcie->txq[txq_id].q.n_bd; | ||
485 | |||
486 | if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { | ||
487 | WARN_ONCE(1, "queue %d not used", txq_id); | ||
488 | return; | ||
489 | } | ||
490 | |||
491 | rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1); | ||
492 | wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)); | ||
493 | |||
494 | WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]", | ||
495 | txq_id, rd_ptr, wr_ptr); | ||
496 | |||
497 | iwl_txq_set_inactive(trans, txq_id); | ||
498 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); | ||
499 | } | ||
500 | |||
501 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | ||
502 | |||
503 | /** | ||
504 | * iwl_enqueue_hcmd - enqueue a uCode command | ||
505 |  * @priv: device private data pointer | ||
506 |  * @cmd: a pointer to the ucode command structure | ||
507 |  * | ||
508 |  * The function returns a value < 0 to indicate that the operation | ||
509 |  * failed. On success, it returns the index (>= 0) of the command in the | ||
510 |  * command queue. | ||
511 | */ | ||
512 | static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
513 | { | ||
514 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
515 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | ||
516 | struct iwl_queue *q = &txq->q; | ||
517 | struct iwl_device_cmd *out_cmd; | ||
518 | struct iwl_cmd_meta *out_meta; | ||
519 | dma_addr_t phys_addr; | ||
520 | u32 idx; | ||
521 | u16 copy_size, cmd_size; | ||
522 | bool had_nocopy = false; | ||
523 | int i; | ||
524 | u8 *cmd_dest; | ||
525 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
526 | const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {}; | ||
527 | int trace_lens[IWL_MAX_CMD_TFDS + 1] = {}; | ||
528 | int trace_idx; | ||
529 | #endif | ||
530 | |||
531 | copy_size = sizeof(out_cmd->hdr); | ||
532 | cmd_size = sizeof(out_cmd->hdr); | ||
533 | |||
534 | /* need one for the header if the first is NOCOPY */ | ||
535 | BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); | ||
536 | |||
537 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | ||
538 | if (!cmd->len[i]) | ||
539 | continue; | ||
540 | if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { | ||
541 | had_nocopy = true; | ||
542 | } else { | ||
543 | /* NOCOPY must not be followed by normal! */ | ||
544 | if (WARN_ON(had_nocopy)) | ||
545 | return -EINVAL; | ||
546 | copy_size += cmd->len[i]; | ||
547 | } | ||
548 | cmd_size += cmd->len[i]; | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * If any of the command structures end up being larger than | ||
553 | * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically | ||
554 | * allocated into separate TFDs, then we will need to | ||
555 | * increase the size of the buffers. | ||
556 | */ | ||
557 | if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE)) | ||
558 | return -EINVAL; | ||
559 | |||
560 | spin_lock_bh(&txq->lock); | ||
561 | |||
562 | if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { | ||
563 | spin_unlock_bh(&txq->lock); | ||
564 | |||
565 | IWL_ERR(trans, "No space in command queue\n"); | ||
566 | iwl_op_mode_cmd_queue_full(trans->op_mode); | ||
567 | return -ENOSPC; | ||
568 | } | ||
569 | |||
570 | idx = get_cmd_index(q, q->write_ptr); | ||
571 | out_cmd = txq->entries[idx].cmd; | ||
572 | out_meta = &txq->entries[idx].meta; | ||
573 | |||
574 | memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ | ||
575 | if (cmd->flags & CMD_WANT_SKB) | ||
576 | out_meta->source = cmd; | ||
577 | |||
578 | /* set up the header */ | ||
579 | |||
580 | out_cmd->hdr.cmd = cmd->id; | ||
581 | out_cmd->hdr.flags = 0; | ||
582 | out_cmd->hdr.sequence = | ||
583 | cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | | ||
584 | INDEX_TO_SEQ(q->write_ptr)); | ||
585 | |||
586 | /* and copy the data that needs to be copied */ | ||
587 | |||
588 | cmd_dest = out_cmd->payload; | ||
589 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | ||
590 | if (!cmd->len[i]) | ||
591 | continue; | ||
592 | if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) | ||
593 | break; | ||
594 | memcpy(cmd_dest, cmd->data[i], cmd->len[i]); | ||
595 | cmd_dest += cmd->len[i]; | ||
596 | } | ||
597 | |||
598 | IWL_DEBUG_HC(trans, | ||
599 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", | ||
600 | trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), | ||
601 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), | ||
602 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); | ||
603 | |||
604 | phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, | ||
605 | DMA_BIDIRECTIONAL); | ||
606 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | ||
607 | idx = -ENOMEM; | ||
608 | goto out; | ||
609 | } | ||
610 | |||
611 | dma_unmap_addr_set(out_meta, mapping, phys_addr); | ||
612 | dma_unmap_len_set(out_meta, len, copy_size); | ||
613 | |||
614 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); | ||
615 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
616 | trace_bufs[0] = &out_cmd->hdr; | ||
617 | trace_lens[0] = copy_size; | ||
618 | trace_idx = 1; | ||
619 | #endif | ||
620 | |||
621 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | ||
622 | if (!cmd->len[i]) | ||
623 | continue; | ||
624 | if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) | ||
625 | continue; | ||
626 | phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i], | ||
627 | cmd->len[i], DMA_BIDIRECTIONAL); | ||
628 | if (dma_mapping_error(trans->dev, phys_addr)) { | ||
629 | iwl_unmap_tfd(trans, out_meta, | ||
630 | &txq->tfds[q->write_ptr], | ||
631 | DMA_BIDIRECTIONAL); | ||
632 | idx = -ENOMEM; | ||
633 | goto out; | ||
634 | } | ||
635 | |||
636 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, | ||
637 | cmd->len[i], 0); | ||
638 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
639 | trace_bufs[trace_idx] = cmd->data[i]; | ||
640 | trace_lens[trace_idx] = cmd->len[i]; | ||
641 | trace_idx++; | ||
642 | #endif | ||
643 | } | ||
644 | |||
645 | out_meta->flags = cmd->flags; | ||
646 | |||
647 | txq->need_update = 1; | ||
648 | |||
649 | /* check that tracing gets all possible blocks */ | ||
650 | BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3); | ||
651 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
652 | trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags, | ||
653 | trace_bufs[0], trace_lens[0], | ||
654 | trace_bufs[1], trace_lens[1], | ||
655 | trace_bufs[2], trace_lens[2]); | ||
656 | #endif | ||
657 | |||
658 | /* start timer if queue currently empty */ | ||
659 | if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) | ||
660 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
661 | |||
662 | /* Increment and update queue's write index */ | ||
663 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
664 | iwl_txq_update_write_ptr(trans, txq); | ||
665 | |||
666 | out: | ||
667 | spin_unlock_bh(&txq->lock); | ||
668 | return idx; | ||
669 | } | ||
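/*
 * [Editor's illustration, not in the original diff] How the copy/NOCOPY
 * split above lays out a command, with hypothetical fragment sizes.
 * Fragments without IWL_HCMD_DFL_NOCOPY are memcpy'd after the header
 * into the buffer behind the first TFD entry; a NOCOPY fragment gets its
 * own DMA mapping and its own TFD entry, and must come last:
 *
 *     len[0] = 16 (copied), len[1] = 4096 (NOCOPY)
 *         copy_size = hdr + 16            -> first TFD buffer
 *         cmd->data[1] mapped separately  -> second TFD buffer
 *         cmd_size  = hdr + 16 + 4096     (total the firmware sees)
 */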
670 | |||
671 | static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie, | ||
672 | struct iwl_tx_queue *txq) | ||
673 | { | ||
674 | if (!trans_pcie->wd_timeout) | ||
675 | return; | ||
676 | |||
677 | /* | ||
678 | * if empty delete timer, otherwise move timer forward | ||
679 | * since we're making progress on this queue | ||
680 | */ | ||
681 | if (txq->q.read_ptr == txq->q.write_ptr) | ||
682 | del_timer(&txq->stuck_timer); | ||
683 | else | ||
684 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
685 | } | ||
686 | |||
687 | /** | ||
688 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | ||
689 | * | ||
690 | * When FW advances 'R' index, all entries between old and new 'R' index | ||
691 |  * need to be reclaimed. As a result, some free space forms. If there is | ||
692 | * enough free space (> low mark), wake the stack that feeds us. | ||
693 | */ | ||
694 | static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, | ||
695 | int idx) | ||
696 | { | ||
697 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
698 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
699 | struct iwl_queue *q = &txq->q; | ||
700 | int nfreed = 0; | ||
701 | |||
702 | lockdep_assert_held(&txq->lock); | ||
703 | |||
704 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { | ||
705 | IWL_ERR(trans, | ||
706 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", | ||
707 | __func__, txq_id, idx, q->n_bd, | ||
708 | q->write_ptr, q->read_ptr); | ||
709 | return; | ||
710 | } | ||
711 | |||
712 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | ||
713 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
714 | |||
715 | if (nfreed++ > 0) { | ||
716 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | ||
717 | idx, q->write_ptr, q->read_ptr); | ||
718 | iwl_op_mode_nic_error(trans->op_mode); | ||
719 | } | ||
720 | |||
721 | } | ||
722 | |||
723 | iwl_queue_progress(trans_pcie, txq); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | ||
728 | * @rxb: Rx buffer to reclaim | ||
729 | * @handler_status: return value of the handler of the command | ||
730 | * (put in setup_rx_handlers) | ||
731 | * | ||
732 |  * If an Rx buffer has an async callback associated with it, the callback | ||
733 |  * will be executed. The attached skb (if present) will only be freed | ||
734 |  * if the callback returns 1. | ||
735 | */ | ||
736 | void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | ||
737 | int handler_status) | ||
738 | { | ||
739 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
740 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
741 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
742 | int index = SEQ_TO_INDEX(sequence); | ||
743 | int cmd_index; | ||
744 | struct iwl_device_cmd *cmd; | ||
745 | struct iwl_cmd_meta *meta; | ||
746 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
747 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | ||
748 | |||
749 | /* If a Tx command is being handled and it isn't in the actual | ||
750 |  * command queue then a command routing bug has been introduced | ||
751 |  * in the queue management code. */ | ||
752 | if (WARN(txq_id != trans_pcie->cmd_queue, | ||
753 | "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", | ||
754 | txq_id, trans_pcie->cmd_queue, sequence, | ||
755 | trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, | ||
756 | trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { | ||
757 | iwl_print_hex_error(trans, pkt, 32); | ||
758 | return; | ||
759 | } | ||
760 | |||
761 | spin_lock(&txq->lock); | ||
762 | |||
763 | cmd_index = get_cmd_index(&txq->q, index); | ||
764 | cmd = txq->entries[cmd_index].cmd; | ||
765 | meta = &txq->entries[cmd_index].meta; | ||
766 | |||
767 | iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); | ||
768 | |||
769 | /* Input error checking is done when commands are added to queue. */ | ||
770 | if (meta->flags & CMD_WANT_SKB) { | ||
771 | struct page *p = rxb_steal_page(rxb); | ||
772 | |||
773 | meta->source->resp_pkt = pkt; | ||
774 | meta->source->_rx_page_addr = (unsigned long)page_address(p); | ||
775 | meta->source->_rx_page_order = trans_pcie->rx_page_order; | ||
776 | meta->source->handler_status = handler_status; | ||
777 | } | ||
778 | |||
779 | iwl_hcmd_queue_reclaim(trans, txq_id, index); | ||
780 | |||
781 | if (!(meta->flags & CMD_ASYNC)) { | ||
782 | if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | ||
783 | IWL_WARN(trans, | ||
784 | "HCMD_ACTIVE already clear for command %s\n", | ||
785 | trans_pcie_get_cmd_string(trans_pcie, | ||
786 | cmd->hdr.cmd)); | ||
787 | } | ||
788 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
789 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", | ||
790 | trans_pcie_get_cmd_string(trans_pcie, | ||
791 | cmd->hdr.cmd)); | ||
792 | wake_up(&trans->wait_command_queue); | ||
793 | } | ||
794 | |||
795 | meta->flags = 0; | ||
796 | |||
797 | spin_unlock(&txq->lock); | ||
798 | } | ||
799 | |||
800 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) | ||
801 | |||
802 | static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
803 | { | ||
804 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
805 | int ret; | ||
806 | |||
807 | /* An asynchronous command cannot expect an SKB to be set. */ | ||
808 | if (WARN_ON(cmd->flags & CMD_WANT_SKB)) | ||
809 | return -EINVAL; | ||
810 | |||
811 | |||
812 | ret = iwl_enqueue_hcmd(trans, cmd); | ||
813 | if (ret < 0) { | ||
814 | IWL_ERR(trans, | ||
815 | "Error sending %s: enqueue_hcmd failed: %d\n", | ||
816 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | ||
817 | return ret; | ||
818 | } | ||
819 | return 0; | ||
820 | } | ||
821 | |||
822 | static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
823 | { | ||
824 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
825 | int cmd_idx; | ||
826 | int ret; | ||
827 | |||
828 | IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", | ||
829 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
830 | |||
831 | if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, | ||
832 | &trans_pcie->status))) { | ||
833 | IWL_ERR(trans, "Command %s: a command is already active!\n", | ||
834 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
835 | return -EIO; | ||
836 | } | ||
837 | |||
838 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", | ||
839 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
840 | |||
841 | cmd_idx = iwl_enqueue_hcmd(trans, cmd); | ||
842 | if (cmd_idx < 0) { | ||
843 | ret = cmd_idx; | ||
844 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
845 | IWL_ERR(trans, | ||
846 | "Error sending %s: enqueue_hcmd failed: %d\n", | ||
847 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | ||
848 | return ret; | ||
849 | } | ||
850 | |||
851 | ret = wait_event_timeout(trans->wait_command_queue, | ||
852 | !test_bit(STATUS_HCMD_ACTIVE, | ||
853 | &trans_pcie->status), | ||
854 | HOST_COMPLETE_TIMEOUT); | ||
855 | if (!ret) { | ||
856 | if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | ||
857 | struct iwl_tx_queue *txq = | ||
858 | &trans_pcie->txq[trans_pcie->cmd_queue]; | ||
859 | struct iwl_queue *q = &txq->q; | ||
860 | |||
861 | IWL_ERR(trans, | ||
862 | "Error sending %s: time out after %dms.\n", | ||
863 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), | ||
864 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | ||
865 | |||
866 | IWL_ERR(trans, | ||
867 | "Current CMD queue read_ptr %d write_ptr %d\n", | ||
868 | q->read_ptr, q->write_ptr); | ||
869 | |||
870 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
871 | IWL_DEBUG_INFO(trans, | ||
872 | "Clearing HCMD_ACTIVE for command %s\n", | ||
873 | trans_pcie_get_cmd_string(trans_pcie, | ||
874 | cmd->id)); | ||
875 | ret = -ETIMEDOUT; | ||
876 | goto cancel; | ||
877 | } | ||
878 | } | ||
879 | |||
880 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { | ||
881 | IWL_ERR(trans, "Error: Response NULL in '%s'\n", | ||
882 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
883 | ret = -EIO; | ||
884 | goto cancel; | ||
885 | } | ||
886 | |||
887 | return 0; | ||
888 | |||
889 | cancel: | ||
890 | if (cmd->flags & CMD_WANT_SKB) { | ||
891 | /* | ||
892 | * Cancel the CMD_WANT_SKB flag for the cmd in the | ||
893 |  * TX cmd queue. Otherwise, if the response comes | ||
894 |  * in later, the completion path could write through | ||
895 |  * an invalid address (cmd->meta.source). | ||
896 | */ | ||
897 | trans_pcie->txq[trans_pcie->cmd_queue]. | ||
898 | entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; | ||
899 | } | ||
900 | |||
901 | if (cmd->resp_pkt) { | ||
902 | iwl_free_resp(cmd); | ||
903 | cmd->resp_pkt = NULL; | ||
904 | } | ||
905 | |||
906 | return ret; | ||
907 | } | ||
908 | |||
909 | int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
910 | { | ||
911 | if (cmd->flags & CMD_ASYNC) | ||
912 | return iwl_send_cmd_async(trans, cmd); | ||
913 | |||
914 | return iwl_send_cmd_sync(trans, cmd); | ||
915 | } | ||
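/*
 * [Editor's sketch, not in the original diff] Sending a synchronous
 * command and consuming its response; the command ID 0x1 is hypothetical.
 * Without CMD_ASYNC the call blocks until the firmware replies (or the
 * 2 * HZ HOST_COMPLETE_TIMEOUT fires), and CMD_WANT_SKB makes the
 * completion path hand back the response packet, which the caller must
 * release with iwl_free_resp().
 */
static int example_send_sync_cmd(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = 0x1,		/* hypothetical command ID */
		.flags = CMD_WANT_SKB,	/* synchronous + response wanted */
	};
	int ret;

	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* ... inspect cmd.resp_pkt here ... */

	iwl_free_resp(&cmd);
	return 0;
}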
916 | |||
917 | /* Frees buffers until index _not_ inclusive */ | ||
918 | int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, | ||
919 | struct sk_buff_head *skbs) | ||
920 | { | ||
921 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
922 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
923 | struct iwl_queue *q = &txq->q; | ||
924 | int last_to_free; | ||
925 | int freed = 0; | ||
926 | |||
927 | /* This function is not meant to release the cmd queue */ | ||
928 | if (WARN_ON(txq_id == trans_pcie->cmd_queue)) | ||
929 | return 0; | ||
930 | |||
931 | lockdep_assert_held(&txq->lock); | ||
932 | |||
933 | /* Since we free until index _not_ inclusive, the entry before index is | ||
934 |  * the last we will free; that entry must be a used one. */ | ||
935 | last_to_free = iwl_queue_dec_wrap(index, q->n_bd); | ||
936 | |||
937 | if ((index >= q->n_bd) || | ||
938 | (iwl_queue_used(q, last_to_free) == 0)) { | ||
939 | IWL_ERR(trans, | ||
940 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", | ||
941 | __func__, txq_id, last_to_free, q->n_bd, | ||
942 | q->write_ptr, q->read_ptr); | ||
943 | return 0; | ||
944 | } | ||
945 | |||
946 | if (WARN_ON(!skb_queue_empty(skbs))) | ||
947 | return 0; | ||
948 | |||
949 | for (; | ||
950 | q->read_ptr != index; | ||
951 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
952 | |||
953 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) | ||
954 | continue; | ||
955 | |||
956 | __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); | ||
957 | |||
958 | txq->entries[txq->q.read_ptr].skb = NULL; | ||
959 | |||
960 | iwlagn_txq_inval_byte_cnt_tbl(trans, txq); | ||
961 | |||
962 | iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE); | ||
963 | freed++; | ||
964 | } | ||
965 | |||
966 | iwl_queue_progress(trans_pcie, txq); | ||
967 | |||
968 | return freed; | ||
969 | } | ||
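/*
 * [Editor's sketch, not in the original diff] A typical TX-done reclaim,
 * as an op mode might drive it; locking is elided here, but per the
 * lockdep assertion above the queue lock must be held around the call.
 * The reclaimed skbs are handed back on the caller's list; reporting
 * their TX status and freeing them is the caller's job.
 */
static void example_reclaim(struct iwl_trans *trans, int txq_id, int ssn)
{
	struct sk_buff_head skbs;
	struct sk_buff *skb;

	__skb_queue_head_init(&skbs);

	/* free every entry up to, but not including, index ssn */
	iwl_tx_queue_reclaim(trans, txq_id, ssn, &skbs);

	while ((skb = __skb_dequeue(&skbs)) != NULL)
		dev_kfree_skb(skb);	/* real callers report status first */
}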