diff options
author | David S. Miller <davem@davemloft.net> | 2008-06-10 04:54:31 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-06-10 04:54:31 -0400 |
commit | 788c0a53164c05c5ccdb1472474372b72ba74644 (patch) | |
tree | 5f274102e3dc4bcca6cb3a695aa2c8228ad5fc4f /drivers/net/wireless/iwlwifi/iwl-5000.c | |
parent | e64bda89b8fe81cce9b4a20885d2c204c2d52532 (diff) | |
parent | 78cf07472f0ede8394bacc4bc02354505080cfe1 (diff) |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/linville/wireless-next-2.6
Conflicts:
drivers/net/ps3_gelic_wireless.c
drivers/net/wireless/libertas/main.c
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-5000.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-5000.c | 897 |
1 files changed, 877 insertions, 20 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index b5e28b811796..7e525ad45135 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -46,6 +46,41 @@ | |||
46 | 46 | ||
47 | #define IWL5000_UCODE_API "-1" | 47 | #define IWL5000_UCODE_API "-1" |
48 | 48 | ||
49 | static const u16 iwl5000_default_queue_to_tx_fifo[] = { | ||
50 | IWL_TX_FIFO_AC3, | ||
51 | IWL_TX_FIFO_AC2, | ||
52 | IWL_TX_FIFO_AC1, | ||
53 | IWL_TX_FIFO_AC0, | ||
54 | IWL50_CMD_FIFO_NUM, | ||
55 | IWL_TX_FIFO_HCCA_1, | ||
56 | IWL_TX_FIFO_HCCA_2 | ||
57 | }; | ||
58 | |||
59 | /* FIXME: same implementation as 4965 */ | ||
60 | static int iwl5000_apm_stop_master(struct iwl_priv *priv) | ||
61 | { | ||
62 | int ret = 0; | ||
63 | unsigned long flags; | ||
64 | |||
65 | spin_lock_irqsave(&priv->lock, flags); | ||
66 | |||
67 | /* set stop master bit */ | ||
68 | iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); | ||
69 | |||
70 | ret = iwl_poll_bit(priv, CSR_RESET, | ||
71 | CSR_RESET_REG_FLAG_MASTER_DISABLED, | ||
72 | CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); | ||
73 | if (ret < 0) | ||
74 | goto out; | ||
75 | |||
76 | out: | ||
77 | spin_unlock_irqrestore(&priv->lock, flags); | ||
78 | IWL_DEBUG_INFO("stop master\n"); | ||
79 | |||
80 | return ret; | ||
81 | } | ||
82 | |||
83 | |||
49 | static int iwl5000_apm_init(struct iwl_priv *priv) | 84 | static int iwl5000_apm_init(struct iwl_priv *priv) |
50 | { | 85 | { |
51 | int ret = 0; | 86 | int ret = 0; |
@@ -53,6 +88,10 @@ static int iwl5000_apm_init(struct iwl_priv *priv) | |||
53 | iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, | 88 | iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, |
54 | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); | 89 | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); |
55 | 90 | ||
91 | /* disable L0s without affecting L1 (don't wait for ICH L0s bug W/A) */ | ||
92 | iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, | ||
93 | CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); | ||
94 | |||
56 | iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); | 95 | iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); |
57 | 96 | ||
58 | /* set "initialization complete" bit to move adapter | 97 | /* set "initialization complete" bit to move adapter |
@@ -73,19 +112,91 @@ static int iwl5000_apm_init(struct iwl_priv *priv) | |||
73 | return ret; | 112 | return ret; |
74 | 113 | ||
75 | /* enable DMA */ | 114 | /* enable DMA */ |
76 | iwl_write_prph(priv, APMG_CLK_EN_REG, | 115 | iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); |
77 | APMG_CLK_VAL_DMA_CLK_RQT); | ||
78 | 116 | ||
79 | udelay(20); | 117 | udelay(20); |
80 | 118 | ||
119 | /* disable L1-Active */ | ||
81 | iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, | 120 | iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, |
82 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | 121 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); |
83 | 122 | ||
84 | iwl_release_nic_access(priv); | 123 | iwl_release_nic_access(priv); |
85 | 124 | ||
86 | return ret; | 125 | return ret; |
87 | } | 126 | } |
88 | 127 | ||
128 | /* FIXME: this is identical to 4965 */ | ||
129 | static void iwl5000_apm_stop(struct iwl_priv *priv) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | |||
133 | iwl5000_apm_stop_master(priv); | ||
134 | |||
135 | spin_lock_irqsave(&priv->lock, flags); | ||
136 | |||
137 | iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); | ||
138 | |||
139 | udelay(10); | ||
140 | |||
141 | iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); | ||
142 | |||
143 | spin_unlock_irqrestore(&priv->lock, flags); | ||
144 | } | ||
145 | |||
146 | |||
147 | static int iwl5000_apm_reset(struct iwl_priv *priv) | ||
148 | { | ||
149 | int ret = 0; | ||
150 | unsigned long flags; | ||
151 | |||
152 | iwl5000_apm_stop_master(priv); | ||
153 | |||
154 | spin_lock_irqsave(&priv->lock, flags); | ||
155 | |||
156 | iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); | ||
157 | |||
158 | udelay(10); | ||
159 | |||
160 | |||
161 | /* FIXME: put here L1A -L0S w/a */ | ||
162 | |||
163 | iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); | ||
164 | |||
165 | /* set "initialization complete" bit to move adapter | ||
166 | * D0U* --> D0A* state */ | ||
167 | iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); | ||
168 | |||
169 | /* wait for clock stabilization */ | ||
170 | ret = iwl_poll_bit(priv, CSR_GP_CNTRL, | ||
171 | CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, | ||
172 | CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); | ||
173 | if (ret < 0) { | ||
174 | IWL_DEBUG_INFO("Failed to init the card\n"); | ||
175 | goto out; | ||
176 | } | ||
177 | |||
178 | ret = iwl_grab_nic_access(priv); | ||
179 | if (ret) | ||
180 | goto out; | ||
181 | |||
182 | /* enable DMA */ | ||
183 | iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); | ||
184 | |||
185 | udelay(20); | ||
186 | |||
187 | /* disable L1-Active */ | ||
188 | iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, | ||
189 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | ||
190 | |||
191 | iwl_release_nic_access(priv); | ||
192 | |||
193 | out: | ||
194 | spin_unlock_irqrestore(&priv->lock, flags); | ||
195 | |||
196 | return ret; | ||
197 | } | ||
198 | |||
199 | |||
89 | static void iwl5000_nic_config(struct iwl_priv *priv) | 200 | static void iwl5000_nic_config(struct iwl_priv *priv) |
90 | { | 201 | { |
91 | unsigned long flags; | 202 | unsigned long flags; |
@@ -96,8 +207,13 @@ static void iwl5000_nic_config(struct iwl_priv *priv) | |||
96 | 207 | ||
97 | pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); | 208 | pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); |
98 | 209 | ||
99 | /* disable L1 entry -- workaround for pre-B1 */ | 210 | /* L1 is enabled by BIOS */ |
100 | pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02); | 211 | if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN) |
212 | /* disable L0S: L0S disabled, L1A enabled */ | ||
213 | iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); | ||
214 | else | ||
215 | /* L0S enabled L1A disabled */ | ||
216 | iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); | ||
101 | 217 | ||
102 | radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); | 218 | radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); |
103 | 219 | ||
@@ -279,6 +395,8 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = { | |||
279 | 395 | ||
280 | #endif /* CONFIG_IWL5000_RUN_TIME_CALIB */ | 396 | #endif /* CONFIG_IWL5000_RUN_TIME_CALIB */ |
281 | 397 | ||
398 | |||
399 | |||
282 | static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, | 400 | static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, |
283 | size_t offset) | 401 | size_t offset) |
284 | { | 402 | { |
@@ -287,6 +405,423 @@ static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, | |||
287 | return &priv->eeprom[address]; | 405 | return &priv->eeprom[address]; |
288 | } | 406 | } |
289 | 407 | ||
408 | /* | ||
409 | * Calibration | ||
410 | */ | ||
411 | static int iwl5000_send_Xtal_calib(struct iwl_priv *priv) | ||
412 | { | ||
413 | u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); | ||
414 | |||
415 | struct iwl5000_calibration cal_cmd = { | ||
416 | .op_code = IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD, | ||
417 | .data = { | ||
418 | (u8)xtal_calib[0], | ||
419 | (u8)xtal_calib[1], | ||
420 | } | ||
421 | }; | ||
422 | |||
423 | return iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, | ||
424 | sizeof(cal_cmd), &cal_cmd); | ||
425 | } | ||
426 | |||
427 | static int iwl5000_send_calib_results(struct iwl_priv *priv) | ||
428 | { | ||
429 | int ret = 0; | ||
430 | |||
431 | struct iwl_host_cmd hcmd = { | ||
432 | .id = REPLY_PHY_CALIBRATION_CMD, | ||
433 | .meta.flags = CMD_SIZE_HUGE, | ||
434 | }; | ||
435 | |||
436 | if (priv->calib_results.lo_res) { | ||
437 | hcmd.len = priv->calib_results.lo_res_len; | ||
438 | hcmd.data = priv->calib_results.lo_res; | ||
439 | ret = iwl_send_cmd_sync(priv, &hcmd); | ||
440 | |||
441 | if (ret) | ||
442 | goto err; | ||
443 | } | ||
444 | |||
445 | if (priv->calib_results.tx_iq_res) { | ||
446 | hcmd.len = priv->calib_results.tx_iq_res_len; | ||
447 | hcmd.data = priv->calib_results.tx_iq_res; | ||
448 | ret = iwl_send_cmd_sync(priv, &hcmd); | ||
449 | |||
450 | if (ret) | ||
451 | goto err; | ||
452 | } | ||
453 | |||
454 | if (priv->calib_results.tx_iq_perd_res) { | ||
455 | hcmd.len = priv->calib_results.tx_iq_perd_res_len; | ||
456 | hcmd.data = priv->calib_results.tx_iq_perd_res; | ||
457 | ret = iwl_send_cmd_sync(priv, &hcmd); | ||
458 | |||
459 | if (ret) | ||
460 | goto err; | ||
461 | } | ||
462 | |||
463 | return 0; | ||
464 | err: | ||
465 | IWL_ERROR("Error %d\n", ret); | ||
466 | return ret; | ||
467 | } | ||
468 | |||
469 | static int iwl5000_send_calib_cfg(struct iwl_priv *priv) | ||
470 | { | ||
471 | struct iwl5000_calib_cfg_cmd calib_cfg_cmd; | ||
472 | struct iwl_host_cmd cmd = { | ||
473 | .id = CALIBRATION_CFG_CMD, | ||
474 | .len = sizeof(struct iwl5000_calib_cfg_cmd), | ||
475 | .data = &calib_cfg_cmd, | ||
476 | }; | ||
477 | |||
478 | memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); | ||
479 | calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; | ||
480 | calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; | ||
481 | calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL; | ||
482 | calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL; | ||
483 | |||
484 | return iwl_send_cmd(priv, &cmd); | ||
485 | } | ||
486 | |||
487 | static void iwl5000_rx_calib_result(struct iwl_priv *priv, | ||
488 | struct iwl_rx_mem_buffer *rxb) | ||
489 | { | ||
490 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
491 | struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw; | ||
492 | int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK; | ||
493 | |||
494 | iwl_free_calib_results(priv); | ||
495 | |||
496 | /* reduce the size of the length field itself */ | ||
497 | len -= 4; | ||
498 | |||
499 | switch (hdr->op_code) { | ||
500 | case IWL5000_PHY_CALIBRATE_LO_CMD: | ||
501 | priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC); | ||
502 | priv->calib_results.lo_res_len = len; | ||
503 | memcpy(priv->calib_results.lo_res, pkt->u.raw, len); | ||
504 | break; | ||
505 | case IWL5000_PHY_CALIBRATE_TX_IQ_CMD: | ||
506 | priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC); | ||
507 | priv->calib_results.tx_iq_res_len = len; | ||
508 | memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len); | ||
509 | break; | ||
510 | case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD: | ||
511 | priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC); | ||
512 | priv->calib_results.tx_iq_perd_res_len = len; | ||
513 | memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len); | ||
514 | break; | ||
515 | default: | ||
516 | IWL_ERROR("Unknown calibration notification %d\n", | ||
517 | hdr->op_code); | ||
518 | return; | ||
519 | } | ||
520 | } | ||
521 | |||
522 | static void iwl5000_rx_calib_complete(struct iwl_priv *priv, | ||
523 | struct iwl_rx_mem_buffer *rxb) | ||
524 | { | ||
525 | IWL_DEBUG_INFO("Init. calibration is completed, restarting fw.\n"); | ||
526 | queue_work(priv->workqueue, &priv->restart); | ||
527 | } | ||
528 | |||
529 | /* | ||
530 | * ucode | ||
531 | */ | ||
532 | static int iwl5000_load_section(struct iwl_priv *priv, | ||
533 | struct fw_desc *image, | ||
534 | u32 dst_addr) | ||
535 | { | ||
536 | int ret = 0; | ||
537 | unsigned long flags; | ||
538 | |||
539 | dma_addr_t phy_addr = image->p_addr; | ||
540 | u32 byte_cnt = image->len; | ||
541 | |||
542 | spin_lock_irqsave(&priv->lock, flags); | ||
543 | ret = iwl_grab_nic_access(priv); | ||
544 | if (ret) { | ||
545 | spin_unlock_irqrestore(&priv->lock, flags); | ||
546 | return ret; | ||
547 | } | ||
548 | |||
549 | iwl_write_direct32(priv, | ||
550 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
551 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); | ||
552 | |||
553 | iwl_write_direct32(priv, | ||
554 | FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); | ||
555 | |||
556 | iwl_write_direct32(priv, | ||
557 | FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), | ||
558 | phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); | ||
559 | |||
560 | /* FIXME: write the MSB of the phy_addr in CTRL1 | ||
561 | * iwl_write_direct32(priv, | ||
562 | IWL_FH_TFDIB_CTRL1_REG(IWL_FH_SRVC_CHNL), | ||
563 | ((phy_addr & MSB_MSK) | ||
564 | << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_count); | ||
565 | */ | ||
566 | iwl_write_direct32(priv, | ||
567 | FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), byte_cnt); | ||
568 | iwl_write_direct32(priv, | ||
569 | FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), | ||
570 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | | ||
571 | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | | ||
572 | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); | ||
573 | |||
574 | iwl_write_direct32(priv, | ||
575 | FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), | ||
576 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
577 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL | | ||
578 | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); | ||
579 | |||
580 | iwl_release_nic_access(priv); | ||
581 | spin_unlock_irqrestore(&priv->lock, flags); | ||
582 | return 0; | ||
583 | } | ||
584 | |||
585 | static int iwl5000_load_given_ucode(struct iwl_priv *priv, | ||
586 | struct fw_desc *inst_image, | ||
587 | struct fw_desc *data_image) | ||
588 | { | ||
589 | int ret = 0; | ||
590 | |||
591 | ret = iwl5000_load_section( | ||
592 | priv, inst_image, RTC_INST_LOWER_BOUND); | ||
593 | if (ret) | ||
594 | return ret; | ||
595 | |||
596 | IWL_DEBUG_INFO("INST uCode section being loaded...\n"); | ||
597 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
598 | priv->ucode_write_complete, 5 * HZ); | ||
599 | if (ret == -ERESTARTSYS) { | ||
600 | IWL_ERROR("Could not load the INST uCode section due " | ||
601 | "to interrupt\n"); | ||
602 | return ret; | ||
603 | } | ||
604 | if (!ret) { | ||
605 | IWL_ERROR("Could not load the INST uCode section\n"); | ||
606 | return -ETIMEDOUT; | ||
607 | } | ||
608 | |||
609 | priv->ucode_write_complete = 0; | ||
610 | |||
611 | ret = iwl5000_load_section( | ||
612 | priv, data_image, RTC_DATA_LOWER_BOUND); | ||
613 | if (ret) | ||
614 | return ret; | ||
615 | |||
616 | IWL_DEBUG_INFO("DATA uCode section being loaded...\n"); | ||
617 | |||
618 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
619 | priv->ucode_write_complete, 5 * HZ); | ||
620 | if (ret == -ERESTARTSYS) { | ||
621 | IWL_ERROR("Could not load the INST uCode section due " | ||
622 | "to interrupt\n"); | ||
623 | return ret; | ||
624 | } else if (!ret) { | ||
625 | IWL_ERROR("Could not load the DATA uCode section\n"); | ||
626 | return -ETIMEDOUT; | ||
627 | } else | ||
628 | ret = 0; | ||
629 | |||
630 | priv->ucode_write_complete = 0; | ||
631 | |||
632 | return ret; | ||
633 | } | ||
634 | |||
635 | static int iwl5000_load_ucode(struct iwl_priv *priv) | ||
636 | { | ||
637 | int ret = 0; | ||
638 | |||
639 | /* check whether init ucode should be loaded, or rather runtime ucode */ | ||
640 | if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) { | ||
641 | IWL_DEBUG_INFO("Init ucode found. Loading init ucode...\n"); | ||
642 | ret = iwl5000_load_given_ucode(priv, | ||
643 | &priv->ucode_init, &priv->ucode_init_data); | ||
644 | if (!ret) { | ||
645 | IWL_DEBUG_INFO("Init ucode load complete.\n"); | ||
646 | priv->ucode_type = UCODE_INIT; | ||
647 | } | ||
648 | } else { | ||
649 | IWL_DEBUG_INFO("Init ucode not found, or already loaded. " | ||
650 | "Loading runtime ucode...\n"); | ||
651 | ret = iwl5000_load_given_ucode(priv, | ||
652 | &priv->ucode_code, &priv->ucode_data); | ||
653 | if (!ret) { | ||
654 | IWL_DEBUG_INFO("Runtime ucode load complete.\n"); | ||
655 | priv->ucode_type = UCODE_RT; | ||
656 | } | ||
657 | } | ||
658 | |||
659 | return ret; | ||
660 | } | ||
661 | |||
662 | static void iwl5000_init_alive_start(struct iwl_priv *priv) | ||
663 | { | ||
664 | int ret = 0; | ||
665 | |||
666 | /* Check alive response for "valid" sign from uCode */ | ||
667 | if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { | ||
668 | /* We had an error bringing up the hardware, so take it | ||
669 | * all the way back down so we can try again */ | ||
670 | IWL_DEBUG_INFO("Initialize Alive failed.\n"); | ||
671 | goto restart; | ||
672 | } | ||
673 | |||
674 | /* initialize uCode was loaded... verify inst image. | ||
675 | * This is a paranoid check, because we would not have gotten the | ||
676 | * "initialize" alive if code weren't properly loaded. */ | ||
677 | if (iwl_verify_ucode(priv)) { | ||
678 | /* Runtime instruction load was bad; | ||
679 | * take it all the way back down so we can try again */ | ||
680 | IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); | ||
681 | goto restart; | ||
682 | } | ||
683 | |||
684 | iwlcore_clear_stations_table(priv); | ||
685 | ret = priv->cfg->ops->lib->alive_notify(priv); | ||
686 | if (ret) { | ||
687 | IWL_WARNING("Could not complete ALIVE transition: %d\n", ret); | ||
688 | goto restart; | ||
689 | } | ||
690 | |||
691 | iwl5000_send_calib_cfg(priv); | ||
692 | return; | ||
693 | |||
694 | restart: | ||
695 | /* real restart (first load init_ucode) */ | ||
696 | queue_work(priv->workqueue, &priv->restart); | ||
697 | } | ||
698 | |||
699 | static void iwl5000_set_wr_ptrs(struct iwl_priv *priv, | ||
700 | int txq_id, u32 index) | ||
701 | { | ||
702 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, | ||
703 | (index & 0xff) | (txq_id << 8)); | ||
704 | iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index); | ||
705 | } | ||
706 | |||
707 | static void iwl5000_tx_queue_set_status(struct iwl_priv *priv, | ||
708 | struct iwl_tx_queue *txq, | ||
709 | int tx_fifo_id, int scd_retry) | ||
710 | { | ||
711 | int txq_id = txq->q.id; | ||
712 | int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; | ||
713 | |||
714 | iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id), | ||
715 | (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) | | ||
716 | (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) | | ||
717 | (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) | | ||
718 | IWL50_SCD_QUEUE_STTS_REG_MSK); | ||
719 | |||
720 | txq->sched_retry = scd_retry; | ||
721 | |||
722 | IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n", | ||
723 | active ? "Activate" : "Deactivate", | ||
724 | scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); | ||
725 | } | ||
726 | |||
727 | static int iwl5000_send_wimax_coex(struct iwl_priv *priv) | ||
728 | { | ||
729 | struct iwl_wimax_coex_cmd coex_cmd; | ||
730 | |||
731 | memset(&coex_cmd, 0, sizeof(coex_cmd)); | ||
732 | |||
733 | return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD, | ||
734 | sizeof(coex_cmd), &coex_cmd); | ||
735 | } | ||
736 | |||
737 | static int iwl5000_alive_notify(struct iwl_priv *priv) | ||
738 | { | ||
739 | u32 a; | ||
740 | int i = 0; | ||
741 | unsigned long flags; | ||
742 | int ret; | ||
743 | |||
744 | spin_lock_irqsave(&priv->lock, flags); | ||
745 | |||
746 | ret = iwl_grab_nic_access(priv); | ||
747 | if (ret) { | ||
748 | spin_unlock_irqrestore(&priv->lock, flags); | ||
749 | return ret; | ||
750 | } | ||
751 | |||
752 | priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR); | ||
753 | a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET; | ||
754 | for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET; | ||
755 | a += 4) | ||
756 | iwl_write_targ_mem(priv, a, 0); | ||
757 | for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET; | ||
758 | a += 4) | ||
759 | iwl_write_targ_mem(priv, a, 0); | ||
760 | for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) | ||
761 | iwl_write_targ_mem(priv, a, 0); | ||
762 | |||
763 | iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, | ||
764 | (priv->shared_phys + | ||
765 | offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10); | ||
766 | iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, | ||
767 | IWL50_SCD_QUEUECHAIN_SEL_ALL( | ||
768 | priv->hw_params.max_txq_num)); | ||
769 | iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0); | ||
770 | |||
771 | /* initiate the queues */ | ||
772 | for (i = 0; i < priv->hw_params.max_txq_num; i++) { | ||
773 | iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0); | ||
774 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); | ||
775 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
776 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0); | ||
777 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
778 | IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) + | ||
779 | sizeof(u32), | ||
780 | ((SCD_WIN_SIZE << | ||
781 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
782 | IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
783 | ((SCD_FRAME_LIMIT << | ||
784 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
785 | IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
786 | } | ||
787 | |||
788 | iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK, | ||
789 | IWL_MASK(0, priv->hw_params.max_txq_num)); | ||
790 | |||
791 | /* Activate all Tx DMA/FIFO channels */ | ||
792 | priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); | ||
793 | |||
794 | iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); | ||
795 | /* map qos queues to fifos one-to-one */ | ||
796 | for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) { | ||
797 | int ac = iwl5000_default_queue_to_tx_fifo[i]; | ||
798 | iwl_txq_ctx_activate(priv, i); | ||
799 | iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0); | ||
800 | } | ||
801 | /* TODO - need to initialize those FIFOs inside the loop above, | ||
802 | * not only mark them as active */ | ||
803 | iwl_txq_ctx_activate(priv, 4); | ||
804 | iwl_txq_ctx_activate(priv, 7); | ||
805 | iwl_txq_ctx_activate(priv, 8); | ||
806 | iwl_txq_ctx_activate(priv, 9); | ||
807 | |||
808 | iwl_release_nic_access(priv); | ||
809 | spin_unlock_irqrestore(&priv->lock, flags); | ||
810 | |||
811 | |||
812 | iwl5000_send_wimax_coex(priv); | ||
813 | |||
814 | iwl5000_send_Xtal_calib(priv); | ||
815 | |||
816 | if (priv->ucode_type == UCODE_RT) { | ||
817 | iwl5000_send_calib_results(priv); | ||
818 | set_bit(STATUS_READY, &priv->status); | ||
819 | priv->is_open = 1; | ||
820 | } | ||
821 | |||
822 | return 0; | ||
823 | } | ||
824 | |||
290 | static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) | 825 | static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) |
291 | { | 826 | { |
292 | if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) || | 827 | if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) || |
@@ -298,7 +833,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) | |||
298 | 833 | ||
299 | priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; | 834 | priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; |
300 | priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto; | 835 | priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto; |
301 | priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd); | ||
302 | priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; | 836 | priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; |
303 | priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; | 837 | priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; |
304 | if (priv->cfg->mod_params->amsdu_size_8K) | 838 | if (priv->cfg->mod_params->amsdu_size_8K) |
@@ -430,6 +964,26 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv, | |||
430 | } | 964 | } |
431 | } | 965 | } |
432 | 966 | ||
967 | static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, | ||
968 | struct iwl_tx_queue *txq) | ||
969 | { | ||
970 | int txq_id = txq->q.id; | ||
971 | struct iwl5000_shared *shared_data = priv->shared_virt; | ||
972 | u8 sta = 0; | ||
973 | |||
974 | if (txq_id != IWL_CMD_QUEUE_NUM) | ||
975 | sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id; | ||
976 | |||
977 | shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr]. | ||
978 | val = cpu_to_le16(1 | (sta << 12)); | ||
979 | |||
980 | if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) { | ||
981 | shared_data->queues_byte_cnt_tbls[txq_id]. | ||
982 | tfd_offset[IWL50_QUEUE_SIZE + txq->q.read_ptr]. | ||
983 | val = cpu_to_le16(1 | (sta << 12)); | ||
984 | } | ||
985 | } | ||
986 | |||
433 | static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) | 987 | static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) |
434 | { | 988 | { |
435 | u16 size = (u16)sizeof(struct iwl_addsta_cmd); | 989 | u16 size = (u16)sizeof(struct iwl_addsta_cmd); |
@@ -438,31 +992,326 @@ static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) | |||
438 | } | 992 | } |
439 | 993 | ||
440 | 994 | ||
441 | static int iwl5000_disable_tx_fifo(struct iwl_priv *priv) | 995 | /* |
996 | * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask | ||
997 | * must be called under priv->lock and mac access | ||
998 | */ | ||
999 | static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask) | ||
442 | { | 1000 | { |
443 | unsigned long flags; | 1001 | iwl_write_prph(priv, IWL50_SCD_TXFACT, mask); |
444 | int ret; | 1002 | } |
445 | 1003 | ||
446 | spin_lock_irqsave(&priv->lock, flags); | ||
447 | 1004 | ||
448 | ret = iwl_grab_nic_access(priv); | 1005 | static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) |
449 | if (unlikely(ret)) { | 1006 | { |
450 | IWL_ERROR("Tx fifo reset failed"); | 1007 | __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status + |
451 | spin_unlock_irqrestore(&priv->lock, flags); | 1008 | tx_resp->frame_count); |
452 | return ret; | 1009 | return le32_to_cpu(*scd_ssn) & MAX_SN; |
453 | } | ||
454 | 1010 | ||
455 | iwl_write_prph(priv, IWL50_SCD_TXFACT, 0); | 1011 | } |
456 | iwl_release_nic_access(priv); | 1012 | |
457 | spin_unlock_irqrestore(&priv->lock, flags); | 1013 | static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv, |
1014 | struct iwl_ht_agg *agg, | ||
1015 | struct iwl5000_tx_resp *tx_resp, | ||
1016 | u16 start_idx) | ||
1017 | { | ||
1018 | u16 status; | ||
1019 | struct agg_tx_status *frame_status = &tx_resp->status; | ||
1020 | struct ieee80211_tx_info *info = NULL; | ||
1021 | struct ieee80211_hdr *hdr = NULL; | ||
1022 | int i, sh; | ||
1023 | int txq_id, idx; | ||
1024 | u16 seq; | ||
1025 | |||
1026 | if (agg->wait_for_ba) | ||
1027 | IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n"); | ||
1028 | |||
1029 | agg->frame_count = tx_resp->frame_count; | ||
1030 | agg->start_idx = start_idx; | ||
1031 | agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | ||
1032 | agg->bitmap = 0; | ||
1033 | |||
1034 | /* # frames attempted by Tx command */ | ||
1035 | if (agg->frame_count == 1) { | ||
1036 | /* Only one frame was attempted; no block-ack will arrive */ | ||
1037 | status = le16_to_cpu(frame_status[0].status); | ||
1038 | seq = le16_to_cpu(frame_status[0].sequence); | ||
1039 | idx = SEQ_TO_INDEX(seq); | ||
1040 | txq_id = SEQ_TO_QUEUE(seq); | ||
1041 | |||
1042 | /* FIXME: code repetition */ | ||
1043 | IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n", | ||
1044 | agg->frame_count, agg->start_idx, idx); | ||
1045 | |||
1046 | info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); | ||
1047 | info->status.retry_count = tx_resp->failure_frame; | ||
1048 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | ||
1049 | info->flags |= iwl_is_tx_success(status)? | ||
1050 | IEEE80211_TX_STAT_ACK : 0; | ||
1051 | iwl4965_hwrate_to_tx_control(priv, | ||
1052 | le32_to_cpu(tx_resp->rate_n_flags), | ||
1053 | info); | ||
1054 | /* FIXME: code repetition end */ | ||
1055 | |||
1056 | IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n", | ||
1057 | status & 0xff, tx_resp->failure_frame); | ||
1058 | IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", | ||
1059 | iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags)); | ||
1060 | |||
1061 | agg->wait_for_ba = 0; | ||
1062 | } else { | ||
1063 | /* Two or more frames were attempted; expect block-ack */ | ||
1064 | u64 bitmap = 0; | ||
1065 | int start = agg->start_idx; | ||
1066 | |||
1067 | /* Construct bit-map of pending frames within Tx window */ | ||
1068 | for (i = 0; i < agg->frame_count; i++) { | ||
1069 | u16 sc; | ||
1070 | status = le16_to_cpu(frame_status[i].status); | ||
1071 | seq = le16_to_cpu(frame_status[i].sequence); | ||
1072 | idx = SEQ_TO_INDEX(seq); | ||
1073 | txq_id = SEQ_TO_QUEUE(seq); | ||
1074 | |||
1075 | if (status & (AGG_TX_STATE_FEW_BYTES_MSK | | ||
1076 | AGG_TX_STATE_ABORT_MSK)) | ||
1077 | continue; | ||
1078 | |||
1079 | IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", | ||
1080 | agg->frame_count, txq_id, idx); | ||
1081 | |||
1082 | hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); | ||
1083 | |||
1084 | sc = le16_to_cpu(hdr->seq_ctrl); | ||
1085 | if (idx != (SEQ_TO_SN(sc) & 0xff)) { | ||
1086 | IWL_ERROR("BUG_ON idx doesn't match seq control" | ||
1087 | " idx=%d, seq_idx=%d, seq=%d\n", | ||
1088 | idx, SEQ_TO_SN(sc), | ||
1089 | hdr->seq_ctrl); | ||
1090 | return -1; | ||
1091 | } | ||
1092 | |||
1093 | IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", | ||
1094 | i, idx, SEQ_TO_SN(sc)); | ||
1095 | |||
1096 | sh = idx - start; | ||
1097 | if (sh > 64) { | ||
1098 | sh = (start - idx) + 0xff; | ||
1099 | bitmap = bitmap << sh; | ||
1100 | sh = 0; | ||
1101 | start = idx; | ||
1102 | } else if (sh < -64) | ||
1103 | sh = 0xff - (start - idx); | ||
1104 | else if (sh < 0) { | ||
1105 | sh = start - idx; | ||
1106 | start = idx; | ||
1107 | bitmap = bitmap << sh; | ||
1108 | sh = 0; | ||
1109 | } | ||
1110 | bitmap |= (1 << sh); | ||
1111 | IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", | ||
1112 | start, (u32)(bitmap & 0xFFFFFFFF)); | ||
1113 | } | ||
1114 | |||
1115 | agg->bitmap = bitmap; | ||
1116 | agg->start_idx = start; | ||
1117 | agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | ||
1118 | IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n", | ||
1119 | agg->frame_count, agg->start_idx, | ||
1120 | (unsigned long long)agg->bitmap); | ||
458 | 1121 | ||
1122 | if (bitmap) | ||
1123 | agg->wait_for_ba = 1; | ||
1124 | } | ||
459 | return 0; | 1125 | return 0; |
460 | } | 1126 | } |
461 | 1127 | ||
1128 | static void iwl5000_rx_reply_tx(struct iwl_priv *priv, | ||
1129 | struct iwl_rx_mem_buffer *rxb) | ||
1130 | { | ||
1131 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | ||
1132 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
1133 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
1134 | int index = SEQ_TO_INDEX(sequence); | ||
1135 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
1136 | struct ieee80211_tx_info *info; | ||
1137 | struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; | ||
1138 | u32 status = le16_to_cpu(tx_resp->status.status); | ||
1139 | #ifdef CONFIG_IWL4965_HT | ||
1140 | int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION; | ||
1141 | u16 fc; | ||
1142 | struct ieee80211_hdr *hdr; | ||
1143 | u8 *qc = NULL; | ||
1144 | #endif | ||
1145 | |||
1146 | if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { | ||
1147 | IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " | ||
1148 | "is out of range [0-%d] %d %d\n", txq_id, | ||
1149 | index, txq->q.n_bd, txq->q.write_ptr, | ||
1150 | txq->q.read_ptr); | ||
1151 | return; | ||
1152 | } | ||
1153 | |||
1154 | info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); | ||
1155 | memset(&info->status, 0, sizeof(info->status)); | ||
1156 | |||
1157 | #ifdef CONFIG_IWL4965_HT | ||
1158 | hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); | ||
1159 | fc = le16_to_cpu(hdr->frame_control); | ||
1160 | if (ieee80211_is_qos_data(fc)) { | ||
1161 | qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc)); | ||
1162 | tid = qc[0] & 0xf; | ||
1163 | } | ||
1164 | |||
1165 | sta_id = iwl_get_ra_sta_id(priv, hdr); | ||
1166 | if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { | ||
1167 | IWL_ERROR("Station not known\n"); | ||
1168 | return; | ||
1169 | } | ||
1170 | |||
1171 | if (txq->sched_retry) { | ||
1172 | const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp); | ||
1173 | struct iwl_ht_agg *agg = NULL; | ||
1174 | |||
1175 | if (!qc) | ||
1176 | return; | ||
1177 | |||
1178 | agg = &priv->stations[sta_id].tid[tid].agg; | ||
1179 | |||
1180 | iwl5000_tx_status_reply_tx(priv, agg, tx_resp, index); | ||
1181 | |||
1182 | if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) { | ||
1183 | /* TODO: send BAR */ | ||
1184 | } | ||
1185 | |||
1186 | if (txq->q.read_ptr != (scd_ssn & 0xff)) { | ||
1187 | int freed, ampdu_q; | ||
1188 | index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); | ||
1189 | IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " | ||
1190 | "%d index %d\n", scd_ssn , index); | ||
1191 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | ||
1192 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | ||
1193 | |||
1194 | if (iwl_queue_space(&txq->q) > txq->q.low_mark && | ||
1195 | txq_id >= 0 && priv->mac80211_registered && | ||
1196 | agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) { | ||
1197 | /* calculate mac80211 ampdu sw queue to wake */ | ||
1198 | ampdu_q = txq_id - IWL_BACK_QUEUE_FIRST_ID + | ||
1199 | priv->hw->queues; | ||
1200 | if (agg->state == IWL_AGG_OFF) | ||
1201 | ieee80211_wake_queue(priv->hw, txq_id); | ||
1202 | else | ||
1203 | ieee80211_wake_queue(priv->hw, ampdu_q); | ||
1204 | } | ||
1205 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | ||
1206 | } | ||
1207 | } else { | ||
1208 | #endif /* CONFIG_IWL4965_HT */ | ||
1209 | |||
1210 | info->status.retry_count = tx_resp->failure_frame; | ||
1211 | info->flags = iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0; | ||
1212 | iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), | ||
1213 | info); | ||
1214 | |||
1215 | IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x " | ||
1216 | "retries %d\n", txq_id, iwl_get_tx_fail_reason(status), | ||
1217 | status, le32_to_cpu(tx_resp->rate_n_flags), | ||
1218 | tx_resp->failure_frame); | ||
1219 | |||
1220 | IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); | ||
1221 | #ifdef CONFIG_IWL4965_HT | ||
1222 | if (index != -1) { | ||
1223 | int freed = iwl_tx_queue_reclaim(priv, txq_id, index); | ||
1224 | if (tid != MAX_TID_COUNT) | ||
1225 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | ||
1226 | if (iwl_queue_space(&txq->q) > txq->q.low_mark && | ||
1227 | (txq_id >= 0) && priv->mac80211_registered) | ||
1228 | ieee80211_wake_queue(priv->hw, txq_id); | ||
1229 | if (tid != MAX_TID_COUNT) | ||
1230 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | ||
1231 | } | ||
1232 | } | ||
1233 | #endif /* CONFIG_IWL4965_HT */ | ||
1234 | |||
1235 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | ||
1236 | IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); | ||
1237 | } | ||
1238 | |||
1239 | /* Currently 5000 is the supperset of everything */ | ||
1240 | static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len) | ||
1241 | { | ||
1242 | return len; | ||
1243 | } | ||
1244 | |||
1245 | static void iwl5000_rx_handler_setup(struct iwl_priv *priv) | ||
1246 | { | ||
1247 | /* init calibration handlers */ | ||
1248 | priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = | ||
1249 | iwl5000_rx_calib_result; | ||
1250 | priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = | ||
1251 | iwl5000_rx_calib_complete; | ||
1252 | priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx; | ||
1253 | } | ||
1254 | |||
1255 | |||
1256 | static int iwl5000_hw_valid_rtc_data_addr(u32 addr) | ||
1257 | { | ||
1258 | return (addr >= RTC_DATA_LOWER_BOUND) && | ||
1259 | (addr < IWL50_RTC_DATA_UPPER_BOUND); | ||
1260 | } | ||
1261 | |||
1262 | static int iwl5000_send_rxon_assoc(struct iwl_priv *priv) | ||
1263 | { | ||
1264 | int ret = 0; | ||
1265 | struct iwl5000_rxon_assoc_cmd rxon_assoc; | ||
1266 | const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; | ||
1267 | const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; | ||
1268 | |||
1269 | if ((rxon1->flags == rxon2->flags) && | ||
1270 | (rxon1->filter_flags == rxon2->filter_flags) && | ||
1271 | (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && | ||
1272 | (rxon1->ofdm_ht_single_stream_basic_rates == | ||
1273 | rxon2->ofdm_ht_single_stream_basic_rates) && | ||
1274 | (rxon1->ofdm_ht_dual_stream_basic_rates == | ||
1275 | rxon2->ofdm_ht_dual_stream_basic_rates) && | ||
1276 | (rxon1->ofdm_ht_triple_stream_basic_rates == | ||
1277 | rxon2->ofdm_ht_triple_stream_basic_rates) && | ||
1278 | (rxon1->acquisition_data == rxon2->acquisition_data) && | ||
1279 | (rxon1->rx_chain == rxon2->rx_chain) && | ||
1280 | (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { | ||
1281 | IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n"); | ||
1282 | return 0; | ||
1283 | } | ||
1284 | |||
1285 | rxon_assoc.flags = priv->staging_rxon.flags; | ||
1286 | rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; | ||
1287 | rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; | ||
1288 | rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; | ||
1289 | rxon_assoc.reserved1 = 0; | ||
1290 | rxon_assoc.reserved2 = 0; | ||
1291 | rxon_assoc.reserved3 = 0; | ||
1292 | rxon_assoc.ofdm_ht_single_stream_basic_rates = | ||
1293 | priv->staging_rxon.ofdm_ht_single_stream_basic_rates; | ||
1294 | rxon_assoc.ofdm_ht_dual_stream_basic_rates = | ||
1295 | priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; | ||
1296 | rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; | ||
1297 | rxon_assoc.ofdm_ht_triple_stream_basic_rates = | ||
1298 | priv->staging_rxon.ofdm_ht_triple_stream_basic_rates; | ||
1299 | rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data; | ||
1300 | |||
1301 | ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, | ||
1302 | sizeof(rxon_assoc), &rxon_assoc, NULL); | ||
1303 | if (ret) | ||
1304 | return ret; | ||
1305 | |||
1306 | return ret; | ||
1307 | } | ||
1308 | |||
462 | static struct iwl_hcmd_ops iwl5000_hcmd = { | 1309 | static struct iwl_hcmd_ops iwl5000_hcmd = { |
1310 | .rxon_assoc = iwl5000_send_rxon_assoc, | ||
463 | }; | 1311 | }; |
464 | 1312 | ||
465 | static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = { | 1313 | static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = { |
1314 | .get_hcmd_size = iwl5000_get_hcmd_size, | ||
466 | .build_addsta_hcmd = iwl5000_build_addsta_hcmd, | 1315 | .build_addsta_hcmd = iwl5000_build_addsta_hcmd, |
467 | #ifdef CONFIG_IWL5000_RUN_TIME_CALIB | 1316 | #ifdef CONFIG_IWL5000_RUN_TIME_CALIB |
468 | .gain_computation = iwl5000_gain_computation, | 1317 | .gain_computation = iwl5000_gain_computation, |
@@ -476,9 +1325,17 @@ static struct iwl_lib_ops iwl5000_lib = { | |||
476 | .free_shared_mem = iwl5000_free_shared_mem, | 1325 | .free_shared_mem = iwl5000_free_shared_mem, |
477 | .shared_mem_rx_idx = iwl5000_shared_mem_rx_idx, | 1326 | .shared_mem_rx_idx = iwl5000_shared_mem_rx_idx, |
478 | .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, | 1327 | .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, |
479 | .disable_tx_fifo = iwl5000_disable_tx_fifo, | 1328 | .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, |
1329 | .txq_set_sched = iwl5000_txq_set_sched, | ||
1330 | .rx_handler_setup = iwl5000_rx_handler_setup, | ||
1331 | .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, | ||
1332 | .load_ucode = iwl5000_load_ucode, | ||
1333 | .init_alive_start = iwl5000_init_alive_start, | ||
1334 | .alive_notify = iwl5000_alive_notify, | ||
480 | .apm_ops = { | 1335 | .apm_ops = { |
481 | .init = iwl5000_apm_init, | 1336 | .init = iwl5000_apm_init, |
1337 | .reset = iwl5000_apm_reset, | ||
1338 | .stop = iwl5000_apm_stop, | ||
482 | .config = iwl5000_nic_config, | 1339 | .config = iwl5000_nic_config, |
483 | .set_pwr_src = iwl4965_set_pwr_src, | 1340 | .set_pwr_src = iwl4965_set_pwr_src, |
484 | }, | 1341 | }, |