aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
diff options
context:
space:
mode:
authorWey-Yi Guy <wey-yi.w.guy@intel.com>2010-03-16 15:37:24 -0400
committerReinette Chatre <reinette.chatre@intel.com>2010-03-25 14:18:20 -0400
commit741a626627e42812afd957f875c34c89be8a103e (patch)
treec45d47757df649ed33f2d583ceff0edc96955080 /drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
parent19e6cda094002e9756a3d181cbb4c31ef2a9b6bb (diff)
iwlwifi: move ucode alive related code to separate file
uCode alive handling for iwlagn based devices shares the same functions. Move those functions from iwl-5000.c to iwl-agn-ucode.c. Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-ucode.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c230
1 file changed, 230 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index f0af3881b74c..52ae157968b2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -35,7 +35,22 @@
35#include "iwl-dev.h" 35#include "iwl-dev.h"
36#include "iwl-core.h" 36#include "iwl-core.h"
37#include "iwl-io.h" 37#include "iwl-io.h"
38#include "iwl-helpers.h"
38#include "iwl-agn-hw.h" 39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41
42static const s8 iwlagn_default_queue_to_tx_fifo[] = {
43 IWL_TX_FIFO_VO,
44 IWL_TX_FIFO_VI,
45 IWL_TX_FIFO_BE,
46 IWL_TX_FIFO_BK,
47 IWLAGN_CMD_FIFO_NUM,
48 IWL_TX_FIFO_UNUSED,
49 IWL_TX_FIFO_UNUSED,
50 IWL_TX_FIFO_UNUSED,
51 IWL_TX_FIFO_UNUSED,
52 IWL_TX_FIFO_UNUSED,
53};
39 54
40/* 55/*
41 * ucode 56 * ucode
@@ -184,3 +199,218 @@ struct iwl_ucode_ops iwlagn_ucode = {
184 .get_boot_size = iwlagn_ucode_get_boot_size, 199 .get_boot_size = iwlagn_ucode_get_boot_size,
185 .get_data = iwlagn_ucode_get_data, 200 .get_data = iwlagn_ucode_get_data,
186}; 201};
202
203/*
204 * Calibration
205 */
206static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
207{
208 struct iwl_calib_xtal_freq_cmd cmd;
209 __le16 *xtal_calib =
210 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
211
212 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
213 cmd.hdr.first_group = 0;
214 cmd.hdr.groups_num = 1;
215 cmd.hdr.data_valid = 1;
216 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
217 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
218 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
219 (u8 *)&cmd, sizeof(cmd));
220}
221
222static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
223{
224 struct iwl_calib_cfg_cmd calib_cfg_cmd;
225 struct iwl_host_cmd cmd = {
226 .id = CALIBRATION_CFG_CMD,
227 .len = sizeof(struct iwl_calib_cfg_cmd),
228 .data = &calib_cfg_cmd,
229 };
230
231 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
232 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
233 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
234 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
235 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
236
237 return iwl_send_cmd(priv, &cmd);
238}
239
240void iwlagn_rx_calib_result(struct iwl_priv *priv,
241 struct iwl_rx_mem_buffer *rxb)
242{
243 struct iwl_rx_packet *pkt = rxb_addr(rxb);
244 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
245 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
246 int index;
247
248 /* reduce the size of the length field itself */
249 len -= 4;
250
251 /* Define the order in which the results will be sent to the runtime
252 * uCode. iwl_send_calib_results sends them in a row according to
253 * their index. We sort them here
254 */
255 switch (hdr->op_code) {
256 case IWL_PHY_CALIBRATE_DC_CMD:
257 index = IWL_CALIB_DC;
258 break;
259 case IWL_PHY_CALIBRATE_LO_CMD:
260 index = IWL_CALIB_LO;
261 break;
262 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
263 index = IWL_CALIB_TX_IQ;
264 break;
265 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
266 index = IWL_CALIB_TX_IQ_PERD;
267 break;
268 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
269 index = IWL_CALIB_BASE_BAND;
270 break;
271 default:
272 IWL_ERR(priv, "Unknown calibration notification %d\n",
273 hdr->op_code);
274 return;
275 }
276 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
277}
278
279void iwlagn_rx_calib_complete(struct iwl_priv *priv,
280 struct iwl_rx_mem_buffer *rxb)
281{
282 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
283 queue_work(priv->workqueue, &priv->restart);
284}
285
286void iwlagn_init_alive_start(struct iwl_priv *priv)
287{
288 int ret = 0;
289
290 /* Check alive response for "valid" sign from uCode */
291 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
292 /* We had an error bringing up the hardware, so take it
293 * all the way back down so we can try again */
294 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
295 goto restart;
296 }
297
298 /* initialize uCode was loaded... verify inst image.
299 * This is a paranoid check, because we would not have gotten the
300 * "initialize" alive if code weren't properly loaded. */
301 if (iwl_verify_ucode(priv)) {
302 /* Runtime instruction load was bad;
303 * take it all the way back down so we can try again */
304 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
305 goto restart;
306 }
307
308 ret = priv->cfg->ops->lib->alive_notify(priv);
309 if (ret) {
310 IWL_WARN(priv,
311 "Could not complete ALIVE transition: %d\n", ret);
312 goto restart;
313 }
314
315 iwlagn_send_calib_cfg(priv);
316 return;
317
318restart:
319 /* real restart (first load init_ucode) */
320 queue_work(priv->workqueue, &priv->restart);
321}
322
323int iwlagn_alive_notify(struct iwl_priv *priv)
324{
325 u32 a;
326 unsigned long flags;
327 int i, chan;
328 u32 reg_val;
329
330 spin_lock_irqsave(&priv->lock, flags);
331
332 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
333 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
334 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
335 a += 4)
336 iwl_write_targ_mem(priv, a, 0);
337 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
338 a += 4)
339 iwl_write_targ_mem(priv, a, 0);
340 for (; a < priv->scd_base_addr +
341 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
342 iwl_write_targ_mem(priv, a, 0);
343
344 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
345 priv->scd_bc_tbls.dma >> 10);
346
347 /* Enable DMA channel */
348 for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
349 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
350 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
351 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
352
353 /* Update FH chicken bits */
354 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
355 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
356 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
357
358 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
359 IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
360 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
361
362 /* initiate the queues */
363 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
364 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
365 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
366 iwl_write_targ_mem(priv, priv->scd_base_addr +
367 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
368 iwl_write_targ_mem(priv, priv->scd_base_addr +
369 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
370 sizeof(u32),
371 ((SCD_WIN_SIZE <<
372 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
373 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
374 ((SCD_FRAME_LIMIT <<
375 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
376 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
377 }
378
379 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
380 IWL_MASK(0, priv->hw_params.max_txq_num));
381
382 /* Activate all Tx DMA/FIFO channels */
383 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
384
385 iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
386
387 /* make sure all queue are not stopped */
388 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
389 for (i = 0; i < 4; i++)
390 atomic_set(&priv->queue_stop_count[i], 0);
391
392 /* reset to 0 to enable all the queue first */
393 priv->txq_ctx_active_msk = 0;
394 /* map qos queues to fifos one-to-one */
395 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
396
397 for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
398 int ac = iwlagn_default_queue_to_tx_fifo[i];
399
400 iwl_txq_ctx_activate(priv, i);
401
402 if (ac == IWL_TX_FIFO_UNUSED)
403 continue;
404
405 iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
406 }
407
408 spin_unlock_irqrestore(&priv->lock, flags);
409
410 iwl_send_wimax_coex(priv);
411
412 iwlagn_set_Xtal_calib(priv);
413 iwl_send_calib_results(priv);
414
415 return 0;
416}