Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-pcie.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-pcie.c  327
1 file changed, 157 insertions(+), 170 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 9f8b23909404..b4f796c82e1e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -75,8 +75,12 @@
 #include "iwl-shared.h"
 #include "iwl-eeprom.h"
 #include "iwl-agn-hw.h"
-#include "iwl-core.h"
-#include "iwl-ucode.h"
+
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
+	(((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+	(~(1<<(trans_pcie)->cmd_queue)))
 
 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
 {
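For readers skimming the new macros: IWL_MASK(lo, hi) sets bits lo through hi inclusive, and SCD_QUEUECHAIN_SEL_ALL() selects every scheduler queue bit except the command queue's. A standalone C sketch of the arithmetic (the queue count and command-queue index below are invented for illustration; the real values come from cfg(trans)->base_params and the transport configuration):

#include <stdio.h>

/* Copied from the patch: set bits lo..hi inclusive. */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

int main(void)
{
	int num_of_queues = 20;	/* hypothetical; from cfg(trans)->base_params */
	int cmd_queue = 9;	/* hypothetical command-queue index */

	/* Same arithmetic as SCD_QUEUECHAIN_SEL_ALL(): all queue bits
	 * minus the command queue's bit. */
	unsigned int chain_sel = ((1 << num_of_queues) - 1) &
				 ~(1 << cmd_queue);

	printf("IWL_MASK(0, 7) = 0x%x\n", IWL_MASK(0, 7));	/* 0xff */
	printf("chain_sel      = 0x%x\n", chain_sel);		/* 0xffdff */
	return 0;
}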
@@ -301,6 +305,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
 {
 	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
 	int i;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
 		return -EINVAL;
@@ -313,7 +318,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
 	if (!txq->meta || !txq->cmd)
 		goto error;
 
-	if (txq_id == trans->shrd->cmd_queue)
+	if (txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < slots_num; i++) {
 			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
 					      GFP_KERNEL);
@@ -324,7 +329,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
 	/* Alloc driver data array and TFD circular buffer */
 	/* Driver private data, only for Tx (not command) queues,
 	 * not shared with device. */
-	if (txq_id != trans->shrd->cmd_queue) {
+	if (txq_id != trans_pcie->cmd_queue) {
 		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
 				    GFP_KERNEL);
 		if (!txq->skbs) {
@@ -352,7 +357,7 @@ error:
 	txq->skbs = NULL;
 	/* since txq->cmd has been zeroed,
 	 * all non allocated cmd[i] will be NULL */
-	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
+	if (txq->cmd && txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < slots_num; i++)
 			kfree(txq->cmd[i]);
 	kfree(txq->meta);
@@ -390,6 +395,8 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	if (ret)
 		return ret;
 
+	spin_lock_init(&txq->lock);
+
 	/*
 	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 	 * given Tx queue, and enable the DMA channel used for that queue.
@@ -409,8 +416,6 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	enum dma_data_direction dma_dir;
-	unsigned long flags;
-	spinlock_t *lock;
 
 	if (!q->n_bd)
 		return;
@@ -418,22 +423,19 @@
 	/* In the command queue, all the TBs are mapped as BIDI
 	 * so unmap them as such.
 	 */
-	if (txq_id == trans->shrd->cmd_queue) {
+	if (txq_id == trans_pcie->cmd_queue)
 		dma_dir = DMA_BIDIRECTIONAL;
-		lock = &trans->hcmd_lock;
-	} else {
+	else
 		dma_dir = DMA_TO_DEVICE;
-		lock = &trans->shrd->sta_lock;
-	}
 
-	spin_lock_irqsave(lock, flags);
+	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
 		/* The read_ptr needs to bound by q->n_window */
 		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
 				    dma_dir);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
-	spin_unlock_irqrestore(lock, flags);
+	spin_unlock_bh(&txq->lock);
 }
 
 /**
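The per-queue txq->lock now covers the unmap walk, replacing the shared hcmd/sta locks. The loop itself just advances read_ptr toward write_ptr around the TFD ring; a standalone sketch of that walk (the wrap helper's exact behavior is assumed here, and the ring size and pointer values are invented):

#include <stdio.h>

/* Assumed behavior of iwl_queue_inc_wrap(): advance one slot and wrap
 * at n_bd (the driver helper also copes with non-power-of-two rings). */
static int queue_inc_wrap(int index, int n_bd)
{
	return (index + 1) % n_bd;
}

int main(void)
{
	int n_bd = 256;		/* hypothetical TFD ring size */
	int read_ptr = 254;	/* pretend the ring has wrapped */
	int write_ptr = 2;

	/* Same loop shape as iwl_tx_queue_unmap(): free every TFD
	 * between read_ptr and write_ptr, wrapping past the end. */
	while (read_ptr != write_ptr) {
		printf("free TFD in slot %d\n", read_ptr);
		read_ptr = queue_inc_wrap(read_ptr, n_bd);
	}
	return 0;
}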
@@ -457,7 +459,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 
 	/* De-alloc array of command/tx buffers */
 
-	if (txq_id == trans->shrd->cmd_queue)
+	if (txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < txq->q.n_window; i++)
 			kfree(txq->cmd[i]);
 
@@ -495,7 +497,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
 	/* Tx queues */
 	if (trans_pcie->txq) {
 		for (txq_id = 0;
-		     txq_id < hw_params(trans).max_txq_num; txq_id++)
+		     txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
 			iwl_tx_queue_free(trans, txq_id);
 	}
 
@@ -520,7 +522,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
 	int txq_id, slots_num;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
+	u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
 			sizeof(struct iwlagn_scd_bc_tbl);
 
 	/*It is not allowed to alloc twice, so warn when this happens.
@@ -544,7 +546,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
 		goto error;
 	}
 
-	trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
+	trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
 				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
 	if (!trans_pcie->txq) {
 		IWL_ERR(trans, "Not enough memory for txq\n");
@@ -553,8 +555,9 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
 	}
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
-		slots_num = (txq_id == trans->shrd->cmd_queue) ?
+	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+	     txq_id++) {
+		slots_num = (txq_id == trans_pcie->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
 					  slots_num, txq_id);
@@ -598,8 +601,9 @@ static int iwl_tx_init(struct iwl_trans *trans)
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
-		slots_num = (txq_id == trans->shrd->cmd_queue) ?
+	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+	     txq_id++) {
+		slots_num = (txq_id == trans_pcie->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
 					 slots_num, txq_id);
@@ -687,6 +691,7 @@ static void iwl_apm_config(struct iwl_trans *trans)
  */
 static int iwl_apm_init(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret = 0;
 	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
 
@@ -756,7 +761,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
 	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
 			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-	set_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);
+	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 
 out:
 	return ret;
@@ -782,9 +787,10 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
 
 static void iwl_apm_stop(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
 
-	clear_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);
+	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 
 	/* Stop device's DMA activity */
 	iwl_apm_stop_master(trans);
@@ -819,7 +825,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
 
 	iwl_set_pwr_vmain(trans);
 
-	iwl_nic_config(priv(trans));
+	iwl_op_mode_nic_config(trans->op_mode);
 
 #ifndef CONFIG_IWLWIFI_IDI
 	/* Allocate the RX queue, or reset if it is already allocated */
@@ -830,14 +836,12 @@ static int iwl_nic_init(struct iwl_trans *trans)
 	if (iwl_tx_init(trans))
 		return -ENOMEM;
 
-	if (hw_params(trans).shadow_reg_enable) {
+	if (cfg(trans)->base_params->shadow_reg_enable) {
 		/* enable shadow regs in HW */
 		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
 			    0x800FFFFF);
 	}
 
-	set_bit(STATUS_INIT, &trans->shrd->status);
-
 	return 0;
 }
 
@@ -947,14 +951,16 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
 /*
  * ucode
  */
-static int iwl_load_section(struct iwl_trans *trans, const char *name,
-			    struct fw_desc *image, u32 dst_addr)
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+			    const struct fw_desc *section)
 {
-	dma_addr_t phy_addr = image->p_addr;
-	u32 byte_cnt = image->len;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	dma_addr_t phy_addr = section->p_addr;
+	u32 byte_cnt = section->len;
+	u32 dst_addr = section->offset;
 	int ret;
 
-	trans->ucode_write_complete = 0;
+	trans_pcie->ucode_write_complete = false;
 
 	iwl_write_direct32(trans,
 		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -984,31 +990,33 @@ static int iwl_load_section(struct iwl_trans *trans, const char *name,
 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
 		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
 
-	IWL_DEBUG_FW(trans, "%s uCode section being loaded...\n", name);
-	ret = wait_event_timeout(trans->shrd->wait_command_queue,
-				 trans->ucode_write_complete, 5 * HZ);
+	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+		     section_num);
+	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+				 trans_pcie->ucode_write_complete, 5 * HZ);
 	if (!ret) {
-		IWL_ERR(trans, "Could not load the %s uCode section\n",
-			name);
+		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
+			section_num);
 		return -ETIMEDOUT;
 	}
 
 	return 0;
 }
 
-static int iwl_load_given_ucode(struct iwl_trans *trans, struct fw_img *image)
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+				const struct fw_img *image)
 {
 	int ret = 0;
+	int i;
 
-	ret = iwl_load_section(trans, "INST", &image->code,
-			       IWLAGN_RTC_INST_LOWER_BOUND);
-	if (ret)
-		return ret;
+	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+		if (!image->sec[i].p_addr)
+			break;
 
-	ret = iwl_load_section(trans, "DATA", &image->data,
-			       IWLAGN_RTC_DATA_LOWER_BOUND);
-	if (ret)
-		return ret;
+		ret = iwl_load_section(trans, i, &image->sec[i]);
+		if (ret)
+			return ret;
+	}
 
 	/* Remove all resets to allow NIC to operate */
 	iwl_write32(trans, CSR_RESET, 0);
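The load path no longer hardcodes INST/DATA sections at fixed device bounds; it iterates the image's section array, taking each section's device address from section->offset and stopping at the first slot without a DMA address. A toy illustration of that walk (struct names and values below are invented stand-ins for the driver's fw_img/fw_desc):

#include <stdio.h>

#define IWL_UCODE_SECTION_MAX 4	/* illustrative; the real constant lives in the driver */

/* Toy stand-in for struct fw_desc: p_addr == 0 marks an unused slot. */
struct toy_fw_desc {
	unsigned long long p_addr;	/* DMA address of the section */
	unsigned int len;		/* section length in bytes */
	unsigned int offset;		/* device address to load at */
};

struct toy_fw_img {
	struct toy_fw_desc sec[IWL_UCODE_SECTION_MAX];
};

int main(void)
{
	struct toy_fw_img image = {
		.sec = {
			{ 0x1000, 2048, 0x00000000 },	/* e.g. an INST-type section */
			{ 0x2000, 1024, 0x00800000 },	/* e.g. a DATA-type section */
			/* remaining slots stay zeroed: the loop stops there */
		},
	};
	int i;

	/* Same walk as the new iwl_load_given_ucode(): load sections in
	 * order, stopping at the first slot with no DMA address. */
	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image.sec[i].p_addr)
			break;
		printf("would load section %d: %u bytes -> 0x%08x\n",
		       i, image.sec[i].len, image.sec[i].offset);
	}
	return 0;
}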
@@ -1016,13 +1024,14 @@ static int iwl_load_given_ucode(struct iwl_trans *trans, struct fw_img *image)
 	return 0;
 }
 
-static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, struct fw_img *fw)
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+				   const struct fw_img *fw)
 {
 	int ret;
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool hw_rfkill;
 
-	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
 	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
 	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
 
@@ -1032,22 +1041,19 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, struct fw_img *fw)
 	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
 	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
 
-	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
-	    iwl_prepare_card_hw(trans)) {
+	/* This may fail if AMT took ownership of the device */
+	if (iwl_prepare_card_hw(trans)) {
 		IWL_WARN(trans, "Exit HW not ready\n");
 		return -EIO;
 	}
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
-	if (iwl_read32(trans, CSR_GP_CNTRL) &
-	    CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
-	if (iwl_is_rfkill(trans->shrd)) {
-		iwl_op_mode_hw_rf_kill(trans->op_mode, true);
-		iwl_enable_interrupts(trans);
+	if (hw_rfkill) {
+		iwl_enable_rfkill_int(trans);
 		return -ERFKILL;
 	}
 
@@ -1073,9 +1079,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, struct fw_img *fw)
 	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
 
 	/* Load the given image to the HW */
-	iwl_load_given_ucode(trans, fw);
-
-	return 0;
+	return iwl_load_given_ucode(trans, fw);
 }
 
 /*
@@ -1116,7 +1120,8 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	     a += 4)
 		iwl_write_targ_mem(trans, a, 0);
 	for (; a < trans_pcie->scd_base_addr +
-	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
+	       SCD_TRANS_TBL_OFFSET_QUEUE(
+				cfg(trans)->base_params->num_of_queues);
 	     a += 4)
 		iwl_write_targ_mem(trans, a, 0);
 
@@ -1135,11 +1140,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
 		reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
 	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
-		       SCD_QUEUECHAIN_SEL_ALL(trans));
+		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
 	iwl_write_prph(trans, SCD_AGGR_SEL, 0);
 
 	/* initiate the queues */
-	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
+	for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
 		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
 		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
 		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -1156,7 +1161,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	}
 
 	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
-		       IWL_MASK(0, hw_params(trans).max_txq_num));
+		       IWL_MASK(0, cfg(trans)->base_params->num_of_queues));
 
 	/* Activate all Tx DMA/FIFO channels */
 	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -1167,7 +1172,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	else
 		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
 
-	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
+	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
 
 	/* make sure all queue are not stopped */
 	memset(&trans_pcie->queue_stopped[0], 0,
@@ -1216,7 +1221,7 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
  */
 static int iwl_trans_tx_stop(struct iwl_trans *trans)
 {
-	int ch, txq_id;
+	int ch, txq_id, ret;
 	unsigned long flags;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -1229,9 +1234,10 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
 		iwl_write_direct32(trans,
 				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-		if (iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
-				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
-				1000))
+		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+				1000);
+		if (ret < 0)
 			IWL_ERR(trans, "Failing on timeout while stopping"
 			    " DMA channel %d [0x%08x]", ch,
 			    iwl_read_direct32(trans,
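The poll result is now kept in ret and only ret < 0 is treated as failure; the change suggests iwl_poll_direct_bit() returns a non-negative elapsed time on success, so the old truthiness test could flag a successful (but nonzero-time) poll as a timeout. A toy model of that assumed contract (invented helper and values):

#include <stdio.h>

static unsigned fake_status;

/* Assumed contract of iwl_poll_direct_bit(): non-negative elapsed time
 * on success, negative on timeout. */
static int toy_poll_bit(unsigned mask, int timeout_us)
{
	int t;

	for (t = 0; t <= timeout_us; t += 10) {
		if (fake_status & mask)
			return t;	/* success can be a positive value */
		fake_status = 0x1;	/* pretend the channel went idle */
	}
	return -1;			/* timeout */
}

int main(void)
{
	int ret = toy_poll_bit(0x1, 1000);

	/* The old code logged an error on any nonzero return; a poll that
	 * succeeded after 10us would have produced a bogus message. The
	 * patched check only reacts to ret < 0. */
	if (ret < 0)
		printf("timeout while stopping DMA channel\n");
	else
		printf("channel idle after %d us\n", ret);
	return 0;
}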
@@ -1245,7 +1251,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 	}
 
 	/* Unmap DMA from host system and free skb's */
-	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+	     txq_id++)
 		iwl_tx_queue_unmap(trans, txq_id);
 
 	return 0;
@@ -1271,7 +1278,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	 * restart. So don't process again if the device is
 	 * already dead.
 	 */
-	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
 		iwl_trans_tx_stop(trans);
 #ifndef CONFIG_IWLWIFI_IDI
 		iwl_trans_rx_stop(trans);
@@ -1297,7 +1304,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
 	/* wait to make sure we flush pending tasklet*/
-	synchronize_irq(trans->irq);
+	synchronize_irq(trans_pcie->irq);
 	tasklet_kill(&trans_pcie->irq_tasklet);
 
 	cancel_work_sync(&trans_pcie->rx_replenish);
@@ -1306,6 +1313,17 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
 }
 
+static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+{
+	/* let the ucode operate on its own */
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+	iwl_disable_interrupts(trans);
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
 static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
 		u8 sta_id, u8 tid)
@@ -1358,6 +1376,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
 
+	spin_lock(&txq->lock);
+
 	/* In AGG mode, the index in the ring must correspond to the WiFi
 	 * sequence number. This is a HW requirements to help the SCD to parse
 	 * the BA.
@@ -1404,7 +1424,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 				   &dev_cmd->hdr, firstlen,
 				   DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-		return -1;
+		goto out_err;
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 	dma_unmap_len_set(out_meta, len, firstlen);
 
@@ -1426,7 +1446,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 				 dma_unmap_addr(out_meta, mapping),
 				 dma_unmap_len(out_meta, len),
 				 DMA_BIDIRECTIONAL);
-			return -1;
+			goto out_err;
 		}
 	}
 
@@ -1448,8 +1468,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
 		     le16_to_cpu(dev_cmd->hdr.sequence));
 	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
-	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
@@ -1457,7 +1475,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
 			DMA_BIDIRECTIONAL);
 
-	trace_iwlwifi_dev_tx(priv(trans),
+	trace_iwlwifi_dev_tx(trans->dev,
 			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
 			     sizeof(struct iwl_tfd),
 			     &dev_cmd->hdr, firstlen,
@@ -1478,10 +1496,14 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			txq->need_update = 1;
 			iwl_txq_update_write_ptr(trans, txq);
 		} else {
-			iwl_stop_queue(trans, txq, "Queue is full");
+			iwl_stop_queue(trans, txq);
 		}
 	}
+	spin_unlock(&txq->lock);
 	return 0;
+ out_err:
+	spin_unlock(&txq->lock);
+	return -1;
 }
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
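With txq->lock now held across the TX path, the two DMA-mapping failure exits can no longer simply return -1; they funnel through a single out_err label that drops the lock. A minimal userspace sketch of that single-unlock pattern (a pthread mutex stands in for the kernel spinlock, and the toy failure flags stand in for the mapping errors):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Same shape as the patched iwl_trans_pcie_tx(): the queue lock is taken
 * once up front and released exactly once, either on the success path or
 * via the single out_err exit. */
static int toy_tx(int fail_first_map, int fail_second_map)
{
	pthread_mutex_lock(&txq_lock);

	if (fail_first_map)
		goto out_err;	/* e.g. dma_mapping_error() on the first TB */
	if (fail_second_map)
		goto out_err;	/* e.g. mapping the second buffer failed */

	pthread_mutex_unlock(&txq_lock);
	return 0;
out_err:
	pthread_mutex_unlock(&txq_lock);
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", toy_tx(0, 0), toy_tx(1, 0), toy_tx(0, 1));
	return 0;
}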
@@ -1489,6 +1511,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 	int err;
+	bool hw_rfkill;
 
 	trans_pcie->inta_mask = CSR_INI_SET_MASK;
 
@@ -1498,11 +1521,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
 		iwl_alloc_isr_ict(trans);
 
-		err = request_irq(trans->irq, iwl_isr_ict, IRQF_SHARED,
+		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
 				  DRV_NAME, trans);
 		if (err) {
 			IWL_ERR(trans, "Error allocating IRQ %d\n",
-				trans->irq);
+				trans_pcie->irq);
 			goto error;
 		}
 
@@ -1518,21 +1541,14 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
 	iwl_apm_init(trans);
 
-	/* If platform's RF_KILL switch is NOT set to KILL */
-	if (iwl_read32(trans,
-	    CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
-
-	iwl_op_mode_hw_rf_kill(trans->op_mode,
-			       test_bit(STATUS_RF_KILL_HW,
-					&trans->shrd->status));
+	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
 	return err;
 
 err_free_irq:
-	free_irq(trans->irq, trans);
+	free_irq(trans_pcie->irq, trans);
 error:
 	iwl_free_isr_ict(trans);
 	tasklet_kill(&trans_pcie->irq_tasklet);
@@ -1546,13 +1562,11 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
 	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
 	/* Even if we stop the HW, we still want the RF kill interrupt */
-	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
-	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+	iwl_enable_rfkill_int(trans);
 }
 
 static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
-			int txq_id, int ssn, u32 status,
-			struct sk_buff_head *skbs)
+			int txq_id, int ssn, struct sk_buff_head *skbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1560,6 +1574,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 	int tfd_num = ssn & (txq->q.n_bd - 1);
 	int freed = 0;
 
+	spin_lock(&txq->lock);
+
 	txq->time_stamp = jiffies;
 
 	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
@@ -1574,6 +1590,7 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 		IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
 			"agg_txq[sta_id[tid] %d", txq_id,
 			trans_pcie->agg_txq[sta_id][tid]);
+		spin_unlock(&txq->lock);
 		return 1;
 	}
 
@@ -1582,28 +1599,42 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 		       txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
 		       tfd_num, ssn);
 		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
-		if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
-		    (!txq->sched_retry ||
-		    status != TX_STATUS_FAIL_PASSIVE_NO_RX))
-			iwl_wake_queue(trans, txq, "Packets reclaimed");
+		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+			iwl_wake_queue(trans, txq);
 	}
+
+	spin_unlock(&txq->lock);
 	return 0;
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
 {
-	iowrite8(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
 }
 
 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
 {
-	iowrite32(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
 }
 
 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
 {
-	u32 val = ioread32(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
-	return val;
+	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+				     const struct iwl_trans_config *trans_cfg)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+		trans_pcie->n_no_reclaim_cmds = 0;
+	else
+		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+	if (trans_pcie->n_no_reclaim_cmds)
+		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
 }
 
 static void iwl_trans_pcie_free(struct iwl_trans *trans)
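The new configure hook copies the op-mode-supplied settings into the transport, clamping the no-reclaim command list so it cannot overflow the fixed-size array. A self-contained sketch of the same clamp-and-copy logic (toy types; the MAX_NO_RECLAIM_CMDS value and command IDs are invented):

#include <stdio.h>
#include <string.h>

#define MAX_NO_RECLAIM_CMDS 6	/* illustrative bound; the real constant is in the driver */

typedef unsigned char u8;

struct toy_trans_config {
	u8 cmd_queue;
	const u8 *no_reclaim_cmds;
	int n_no_reclaim_cmds;
};

struct toy_trans_pcie {
	u8 cmd_queue;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	int n_no_reclaim_cmds;
};

/* Same clamping logic as the new iwl_trans_pcie_configure(): refuse an
 * oversized list rather than overflow the fixed-size array (the kernel
 * version warns via WARN_ON before zeroing the count). */
static void toy_configure(struct toy_trans_pcie *pcie,
			  const struct toy_trans_config *cfg)
{
	pcie->cmd_queue = cfg->cmd_queue;
	if (cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)
		pcie->n_no_reclaim_cmds = 0;
	else
		pcie->n_no_reclaim_cmds = cfg->n_no_reclaim_cmds;
	if (pcie->n_no_reclaim_cmds)
		memcpy(pcie->no_reclaim_cmds, cfg->no_reclaim_cmds,
		       pcie->n_no_reclaim_cmds * sizeof(u8));
}

int main(void)
{
	static const u8 cmds[] = { 0x1c, 0xc0 };	/* hypothetical command IDs */
	struct toy_trans_config cfg = { 9, cmds, 2 };
	struct toy_trans_pcie pcie;

	toy_configure(&pcie, &cfg);
	printf("cmd_queue=%d n=%d first=0x%x\n",
	       pcie.cmd_queue, pcie.n_no_reclaim_cmds, pcie.no_reclaim_cmds[0]);
	return 0;
}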
@@ -1611,18 +1642,17 @@ static void iwl_trans_pcie_free(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	iwl_calib_free_results(trans);
 	iwl_trans_pcie_tx_free(trans);
 #ifndef CONFIG_IWLWIFI_IDI
 	iwl_trans_pcie_rx_free(trans);
 #endif
 	if (trans_pcie->irq_requested == true) {
-		free_irq(trans->irq, trans);
+		free_irq(trans_pcie->irq, trans);
 		iwl_free_isr_ict(trans);
 	}
 
 	pci_disable_msi(trans_pcie->pci_dev);
-	pci_iounmap(trans_pcie->pci_dev, trans_pcie->hw_base);
+	iounmap(trans_pcie->hw_base);
 	pci_release_regions(trans_pcie->pci_dev);
 	pci_disable_device(trans_pcie->pci_dev);
 
@@ -1633,42 +1663,20 @@ static void iwl_trans_pcie_free(struct iwl_trans *trans)
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-	/*
-	 * This function is called when system goes into suspend state
-	 * mac80211 will call iwlagn_mac_stop() from the mac80211 suspend
-	 * function first but since iwlagn_mac_stop() has no knowledge of
-	 * who the caller is,
-	 * it will not call apm_ops.stop() to stop the DMA operation.
-	 * Calling apm_ops.stop here to make sure we stop the DMA.
-	 *
-	 * But of course ... if we have configured WoWLAN then we did other
-	 * things already :-)
-	 */
-	if (!trans->shrd->wowlan) {
-		iwl_apm_stop(trans);
-	} else {
-		iwl_disable_interrupts(trans);
-		iwl_clear_bit(trans, CSR_GP_CNTRL,
-			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	}
-
 	return 0;
 }
 
 static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-	bool hw_rfkill = false;
-
-	iwl_enable_interrupts(trans);
+	bool hw_rfkill;
 
-	if (!(iwl_read32(trans, CSR_GP_CNTRL) &
-	      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
-		hw_rfkill = true;
+	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
+				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
 
 	if (hw_rfkill)
-		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+		iwl_enable_rfkill_int(trans);
 	else
-		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
+		iwl_enable_interrupts(trans);
 
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
@@ -1676,32 +1684,6 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
-					  enum iwl_rxon_context_id ctx,
-					  const char *msg)
-{
-	u8 ac, txq_id;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	for (ac = 0; ac < AC_NUM; ac++) {
-		txq_id = trans_pcie->ac_to_queue[ctx][ac];
-		IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
-			ac,
-			(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
-			? "stopped" : "awake");
-		iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
-	}
-}
-
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
-				      const char *msg)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
-}
-
 #define IWL_FLUSH_WAIT_MS 2000
 
 static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -1714,8 +1696,8 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
 	int ret = 0;
 
 	/* waiting for all the tx frames complete might take a while */
-	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
-		if (cnt == trans->shrd->cmd_queue)
+	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+		if (cnt == trans_pcie->cmd_queue)
 			continue;
 		txq = &trans_pcie->txq[cnt];
 		q = &txq->q;
@@ -1960,7 +1942,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
 	int pos = 0;
 	int cnt;
 	int ret;
-	const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
+	size_t bufsz;
+
+	bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;
 
 	if (!trans_pcie->txq) {
 		IWL_ERR(trans, "txq not ready\n");
@@ -1970,7 +1954,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
 	if (!buf)
 		return -ENOMEM;
 
-	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
+	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
 		txq = &trans_pcie->txq[cnt];
 		q = &txq->q;
 		pos += scnprintf(buf + pos, bufsz - pos,
@@ -2219,7 +2203,7 @@ const struct iwl_trans_ops trans_ops_pcie = {
 	.start_fw = iwl_trans_pcie_start_fw,
 	.stop_device = iwl_trans_pcie_stop_device,
 
-	.wake_any_queue = iwl_trans_pcie_wake_any_queue,
+	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
 
 	.send_cmd = iwl_trans_pcie_send_cmd,
 
@@ -2231,7 +2215,6 @@ const struct iwl_trans_ops trans_ops_pcie = {
 	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
 
 	.free = iwl_trans_pcie_free,
-	.stop_queue = iwl_trans_pcie_stop_queue,
 
 	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
@@ -2245,6 +2228,7 @@ const struct iwl_trans_ops trans_ops_pcie = {
 	.write8 = iwl_trans_pcie_write8,
 	.write32 = iwl_trans_pcie_write32,
 	.read32 = iwl_trans_pcie_read32,
+	.configure = iwl_trans_pcie_configure,
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
@@ -2267,8 +2251,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
 	trans->ops = &trans_ops_pcie;
 	trans->shrd = shrd;
 	trans_pcie->trans = trans;
-	spin_lock_init(&trans->hcmd_lock);
 	spin_lock_init(&trans_pcie->irq_lock);
+	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
 	/* W/A - seems to solve weird behavior. We need to remove this if we
 	 * don't want to stay in L1 all the time. This wastes a lot of power */
@@ -2304,9 +2288,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
 		goto out_pci_disable_device;
 	}
 
-	trans_pcie->hw_base = pci_iomap(pdev, 0, 0);
+	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
 	if (!trans_pcie->hw_base) {
-		dev_printk(KERN_ERR, &pdev->dev, "pci_iomap failed");
+		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
 		err = -ENODEV;
 		goto out_pci_release_regions;
 	}
@@ -2330,7 +2314,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
 			       "pci_enable_msi failed(0X%x)", err);
 
 	trans->dev = &pdev->dev;
-	trans->irq = pdev->irq;
+	trans_pcie->irq = pdev->irq;
 	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
@@ -2345,6 +2329,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
 		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
 	}
 
+	/* Initialize the wait queue for commands */
+	init_waitqueue_head(&trans->wait_command_queue);
+
 	return trans;
 
 out_pci_release_regions: