aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-ucode.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c416
1 files changed, 416 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
new file mode 100644
index 000000000000..52ae157968b2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -0,0 +1,416 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41
/*
 * Default mapping of TX queue number -> TX FIFO.
 *
 * Queues 0-3 map one-to-one onto the VO/VI/BE/BK EDCA FIFOs, queue 4
 * carries host commands (IWLAGN_CMD_FIFO_NUM), and the remaining
 * entries are marked unused here.  iwlagn_alive_notify() asserts this
 * table has exactly 10 entries and applies it when queues are mapped
 * to FIFOs.
 */
static const s8 iwlagn_default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWLAGN_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
54
55/*
56 * ucode
57 */
/**
 * iwlagn_load_section - DMA one uCode section into device SRAM
 * @priv: device private data
 * @name: section name ("INST"/"DATA"), used only in log messages
 * @image: host-side descriptor holding the section's DMA address and length
 * @dst_addr: destination address in device SRAM
 *
 * Programs the service DMA channel (FH_SRVC_CHNL) to copy the section
 * from host memory to @dst_addr, then sleeps on wait_command_queue
 * until the ISR sets priv->ucode_write_complete, with a 5 second
 * timeout.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted by a signal, or
 * -ETIMEDOUT if the device never signalled completion.
 */
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
			       struct fw_desc *image, u32 dst_addr)
{
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;

	/* armed before kicking off DMA; set from the write-complete interrupt */
	priv->ucode_write_complete = 0;

	/* pause the channel while it is being (re)programmed */
	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* destination address in device SRAM */
	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	/* low part of the host DMA address */
	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* high DMA address bits plus the transfer byte count */
	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* one valid transfer buffer at index/num 1 */
	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* go: enable the channel and request a host interrupt at end of TFD */
	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		/* signal arrived while sleeping */
		IWL_ERR(priv, "Could not load the %s uCode section due "
			"to interrupt\n", name);
		return ret;
	}
	if (!ret) {
		/* timed out: the device never raised write-complete */
		IWL_ERR(priv, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}

	return 0;
}
111
112static int iwlagn_load_given_ucode(struct iwl_priv *priv,
113 struct fw_desc *inst_image,
114 struct fw_desc *data_image)
115{
116 int ret = 0;
117
118 ret = iwlagn_load_section(priv, "INST", inst_image,
119 IWLAGN_RTC_INST_LOWER_BOUND);
120 if (ret)
121 return ret;
122
123 return iwlagn_load_section(priv, "DATA", data_image,
124 IWLAGN_RTC_DATA_LOWER_BOUND);
125}
126
127int iwlagn_load_ucode(struct iwl_priv *priv)
128{
129 int ret = 0;
130
131 /* check whether init ucode should be loaded, or rather runtime ucode */
132 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
133 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
134 ret = iwlagn_load_given_ucode(priv,
135 &priv->ucode_init, &priv->ucode_init_data);
136 if (!ret) {
137 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
138 priv->ucode_type = UCODE_INIT;
139 }
140 } else {
141 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
142 "Loading runtime ucode...\n");
143 ret = iwlagn_load_given_ucode(priv,
144 &priv->ucode_code, &priv->ucode_data);
145 if (!ret) {
146 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
147 priv->ucode_type = UCODE_RT;
148 }
149 }
150
151 return ret;
152}
153
/*
 * IWL_UCODE_GET - generate an accessor for one field of the uCode
 * file header.  API versions <= 2 use the v1 header layout, later
 * versions the v2 layout; each generated function selects the right
 * member and converts it from little-endian file order to CPU order.
 */
#define IWL_UCODE_GET(item) \
static u32 iwlagn_ucode_get_##item(const struct iwl_ucode_header *ucode,\
				   u32 api_ver) \
{ \
	if (api_ver <= 2) \
		return le32_to_cpu(ucode->u.v1.item); \
	return le32_to_cpu(ucode->u.v2.item); \
}
162
163static u32 iwlagn_ucode_get_header_size(u32 api_ver)
164{
165 if (api_ver <= 2)
166 return UCODE_HEADER_SIZE(1);
167 return UCODE_HEADER_SIZE(2);
168}
169
170static u32 iwlagn_ucode_get_build(const struct iwl_ucode_header *ucode,
171 u32 api_ver)
172{
173 if (api_ver <= 2)
174 return 0;
175 return le32_to_cpu(ucode->u.v2.build);
176}
177
178static u8 *iwlagn_ucode_get_data(const struct iwl_ucode_header *ucode,
179 u32 api_ver)
180{
181 if (api_ver <= 2)
182 return (u8 *) ucode->u.v1.data;
183 return (u8 *) ucode->u.v2.data;
184}
185
/* Accessors for the per-section size fields of the uCode file header. */
IWL_UCODE_GET(inst_size);
IWL_UCODE_GET(data_size);
IWL_UCODE_GET(init_size);
IWL_UCODE_GET(init_data_size);
IWL_UCODE_GET(boot_size);

/*
 * Ops table bundling the version-aware header accessors above, so
 * callers can parse an AGN-format uCode image without knowing which
 * header layout (v1 or v2) it uses.
 */
struct iwl_ucode_ops iwlagn_ucode = {
	.get_header_size = iwlagn_ucode_get_header_size,
	.get_build = iwlagn_ucode_get_build,
	.get_inst_size = iwlagn_ucode_get_inst_size,
	.get_data_size = iwlagn_ucode_get_data_size,
	.get_init_size = iwlagn_ucode_get_init_size,
	.get_init_data_size = iwlagn_ucode_get_init_data_size,
	.get_boot_size = iwlagn_ucode_get_boot_size,
	.get_data = iwlagn_ucode_get_data,
};
202
203/*
204 * Calibration
205 */
/*
 * iwlagn_set_Xtal_calib - stage the crystal frequency calibration command
 *
 * Builds an IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD from the two crystal
 * capacitor trim words stored in EEPROM and caches it in
 * priv->calib_results[IWL_CALIB_XTAL] via iwl_calib_set(); the cached
 * commands are sent later by iwl_send_calib_results().
 *
 * NOTE(review): the pointer returned by iwl_eeprom_query_addr() is
 * dereferenced without a NULL check — confirm it cannot fail for
 * EEPROM_5000_XTAL.
 * NOTE(review): cap_pin1/2 are assigned from le16_to_cpu() of the raw
 * EEPROM words — verify against struct iwl_calib_xtal_freq_cmd that the
 * fields are indeed CPU-order (not __le16) here.
 */
static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
{
	struct iwl_calib_xtal_freq_cmd cmd;
	__le16 *xtal_calib =
		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);

	cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	cmd.hdr.first_group = 0;
	cmd.hdr.groups_num = 1;
	cmd.hdr.data_valid = 1;
	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
			     (u8 *)&cmd, sizeof(cmd));
}
221
222static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
223{
224 struct iwl_calib_cfg_cmd calib_cfg_cmd;
225 struct iwl_host_cmd cmd = {
226 .id = CALIBRATION_CFG_CMD,
227 .len = sizeof(struct iwl_calib_cfg_cmd),
228 .data = &calib_cfg_cmd,
229 };
230
231 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
232 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
233 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
234 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
235 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
236
237 return iwl_send_cmd(priv, &cmd);
238}
239
240void iwlagn_rx_calib_result(struct iwl_priv *priv,
241 struct iwl_rx_mem_buffer *rxb)
242{
243 struct iwl_rx_packet *pkt = rxb_addr(rxb);
244 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
245 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
246 int index;
247
248 /* reduce the size of the length field itself */
249 len -= 4;
250
251 /* Define the order in which the results will be sent to the runtime
252 * uCode. iwl_send_calib_results sends them in a row according to
253 * their index. We sort them here
254 */
255 switch (hdr->op_code) {
256 case IWL_PHY_CALIBRATE_DC_CMD:
257 index = IWL_CALIB_DC;
258 break;
259 case IWL_PHY_CALIBRATE_LO_CMD:
260 index = IWL_CALIB_LO;
261 break;
262 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
263 index = IWL_CALIB_TX_IQ;
264 break;
265 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
266 index = IWL_CALIB_TX_IQ_PERD;
267 break;
268 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
269 index = IWL_CALIB_BASE_BAND;
270 break;
271 default:
272 IWL_ERR(priv, "Unknown calibration notification %d\n",
273 hdr->op_code);
274 return;
275 }
276 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
277}
278
/*
 * Handler for the init uCode's "calibration complete" notification:
 * the initial calibration run has finished, so schedule the restart
 * work item to bring the firmware up again.
 */
void iwlagn_rx_calib_complete(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
	queue_work(priv->workqueue, &priv->restart);
}
285
286void iwlagn_init_alive_start(struct iwl_priv *priv)
287{
288 int ret = 0;
289
290 /* Check alive response for "valid" sign from uCode */
291 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
292 /* We had an error bringing up the hardware, so take it
293 * all the way back down so we can try again */
294 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
295 goto restart;
296 }
297
298 /* initialize uCode was loaded... verify inst image.
299 * This is a paranoid check, because we would not have gotten the
300 * "initialize" alive if code weren't properly loaded. */
301 if (iwl_verify_ucode(priv)) {
302 /* Runtime instruction load was bad;
303 * take it all the way back down so we can try again */
304 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
305 goto restart;
306 }
307
308 ret = priv->cfg->ops->lib->alive_notify(priv);
309 if (ret) {
310 IWL_WARN(priv,
311 "Could not complete ALIVE transition: %d\n", ret);
312 goto restart;
313 }
314
315 iwlagn_send_calib_cfg(priv);
316 return;
317
318restart:
319 /* real restart (first load init_ucode) */
320 queue_work(priv->workqueue, &priv->restart);
321}
322
/*
 * iwlagn_alive_notify - finish device bring-up after a runtime "alive"
 *
 * Under priv->lock: zeroes the TX scheduler (SCD) context data, status
 * bitmap and translation table areas in device SRAM, programs the
 * byte-count table base, enables all TX DMA channels, configures every
 * TX queue's pointers and window/frame limits, and maps the default
 * queues onto their TX FIFOs.  Outside the lock it sends the WiMAX
 * coexistence command and the cached calibration results.
 *
 * Returns 0 unconditionally — none of the register writes are checked.
 */
int iwlagn_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * Clear the scheduler area word by word: context data, then the
	 * TX status bitmap, then the translation table entries for every
	 * configured queue.
	 */
	priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* byte-count table base; >> 10 suggests 1K units — NOTE(review): confirm */
	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits (read-modify-write, preserve other bits) */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
		IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
	iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);

	/* initiate the queues: zero pointers, set window and frame limits */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* map qos queues to fifos one-to-one */
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);

	for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
		int ac = iwlagn_default_queue_to_tx_fifo[i];

		/* every queue is activated, even ones without a FIFO mapping */
		iwl_txq_ctx_activate(priv, i);

		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_send_wimax_coex(priv);

	/* stage the crystal calibration, then replay all cached results */
	iwlagn_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}