Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-lib.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn-lib.c	504
1 file changed, 61 insertions(+), 443 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index eb2be0d30483..3bee0f119bcd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -53,73 +53,73 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
 
 	switch (status) {
 	case TX_STATUS_POSTPONE_DELAY:
-		priv->_agn.reply_tx_stats.pp_delay++;
+		priv->reply_tx_stats.pp_delay++;
 		break;
 	case TX_STATUS_POSTPONE_FEW_BYTES:
-		priv->_agn.reply_tx_stats.pp_few_bytes++;
+		priv->reply_tx_stats.pp_few_bytes++;
 		break;
 	case TX_STATUS_POSTPONE_BT_PRIO:
-		priv->_agn.reply_tx_stats.pp_bt_prio++;
+		priv->reply_tx_stats.pp_bt_prio++;
 		break;
 	case TX_STATUS_POSTPONE_QUIET_PERIOD:
-		priv->_agn.reply_tx_stats.pp_quiet_period++;
+		priv->reply_tx_stats.pp_quiet_period++;
 		break;
 	case TX_STATUS_POSTPONE_CALC_TTAK:
-		priv->_agn.reply_tx_stats.pp_calc_ttak++;
+		priv->reply_tx_stats.pp_calc_ttak++;
 		break;
 	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
-		priv->_agn.reply_tx_stats.int_crossed_retry++;
+		priv->reply_tx_stats.int_crossed_retry++;
 		break;
 	case TX_STATUS_FAIL_SHORT_LIMIT:
-		priv->_agn.reply_tx_stats.short_limit++;
+		priv->reply_tx_stats.short_limit++;
 		break;
 	case TX_STATUS_FAIL_LONG_LIMIT:
-		priv->_agn.reply_tx_stats.long_limit++;
+		priv->reply_tx_stats.long_limit++;
 		break;
 	case TX_STATUS_FAIL_FIFO_UNDERRUN:
-		priv->_agn.reply_tx_stats.fifo_underrun++;
+		priv->reply_tx_stats.fifo_underrun++;
 		break;
 	case TX_STATUS_FAIL_DRAIN_FLOW:
-		priv->_agn.reply_tx_stats.drain_flow++;
+		priv->reply_tx_stats.drain_flow++;
 		break;
 	case TX_STATUS_FAIL_RFKILL_FLUSH:
-		priv->_agn.reply_tx_stats.rfkill_flush++;
+		priv->reply_tx_stats.rfkill_flush++;
 		break;
 	case TX_STATUS_FAIL_LIFE_EXPIRE:
-		priv->_agn.reply_tx_stats.life_expire++;
+		priv->reply_tx_stats.life_expire++;
 		break;
 	case TX_STATUS_FAIL_DEST_PS:
-		priv->_agn.reply_tx_stats.dest_ps++;
+		priv->reply_tx_stats.dest_ps++;
 		break;
 	case TX_STATUS_FAIL_HOST_ABORTED:
-		priv->_agn.reply_tx_stats.host_abort++;
+		priv->reply_tx_stats.host_abort++;
 		break;
 	case TX_STATUS_FAIL_BT_RETRY:
-		priv->_agn.reply_tx_stats.bt_retry++;
+		priv->reply_tx_stats.bt_retry++;
 		break;
 	case TX_STATUS_FAIL_STA_INVALID:
-		priv->_agn.reply_tx_stats.sta_invalid++;
+		priv->reply_tx_stats.sta_invalid++;
 		break;
 	case TX_STATUS_FAIL_FRAG_DROPPED:
-		priv->_agn.reply_tx_stats.frag_drop++;
+		priv->reply_tx_stats.frag_drop++;
 		break;
 	case TX_STATUS_FAIL_TID_DISABLE:
-		priv->_agn.reply_tx_stats.tid_disable++;
+		priv->reply_tx_stats.tid_disable++;
 		break;
 	case TX_STATUS_FAIL_FIFO_FLUSHED:
-		priv->_agn.reply_tx_stats.fifo_flush++;
+		priv->reply_tx_stats.fifo_flush++;
 		break;
 	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
-		priv->_agn.reply_tx_stats.insuff_cf_poll++;
+		priv->reply_tx_stats.insuff_cf_poll++;
 		break;
 	case TX_STATUS_FAIL_PASSIVE_NO_RX:
-		priv->_agn.reply_tx_stats.fail_hw_drop++;
+		priv->reply_tx_stats.fail_hw_drop++;
 		break;
 	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
-		priv->_agn.reply_tx_stats.sta_color_mismatch++;
+		priv->reply_tx_stats.sta_color_mismatch++;
 		break;
 	default:
-		priv->_agn.reply_tx_stats.unknown++;
+		priv->reply_tx_stats.unknown++;
 		break;
 	}
 }
@@ -130,43 +130,43 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
 
 	switch (status) {
 	case AGG_TX_STATE_UNDERRUN_MSK:
-		priv->_agn.reply_agg_tx_stats.underrun++;
+		priv->reply_agg_tx_stats.underrun++;
 		break;
 	case AGG_TX_STATE_BT_PRIO_MSK:
-		priv->_agn.reply_agg_tx_stats.bt_prio++;
+		priv->reply_agg_tx_stats.bt_prio++;
 		break;
 	case AGG_TX_STATE_FEW_BYTES_MSK:
-		priv->_agn.reply_agg_tx_stats.few_bytes++;
+		priv->reply_agg_tx_stats.few_bytes++;
 		break;
 	case AGG_TX_STATE_ABORT_MSK:
-		priv->_agn.reply_agg_tx_stats.abort++;
+		priv->reply_agg_tx_stats.abort++;
 		break;
 	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
-		priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
+		priv->reply_agg_tx_stats.last_sent_ttl++;
 		break;
 	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
-		priv->_agn.reply_agg_tx_stats.last_sent_try++;
+		priv->reply_agg_tx_stats.last_sent_try++;
 		break;
 	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
-		priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
+		priv->reply_agg_tx_stats.last_sent_bt_kill++;
 		break;
 	case AGG_TX_STATE_SCD_QUERY_MSK:
-		priv->_agn.reply_agg_tx_stats.scd_query++;
+		priv->reply_agg_tx_stats.scd_query++;
 		break;
 	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
-		priv->_agn.reply_agg_tx_stats.bad_crc32++;
+		priv->reply_agg_tx_stats.bad_crc32++;
 		break;
 	case AGG_TX_STATE_RESPONSE_MSK:
-		priv->_agn.reply_agg_tx_stats.response++;
+		priv->reply_agg_tx_stats.response++;
 		break;
 	case AGG_TX_STATE_DUMP_TX_MSK:
-		priv->_agn.reply_agg_tx_stats.dump_tx++;
+		priv->reply_agg_tx_stats.dump_tx++;
 		break;
 	case AGG_TX_STATE_DELAY_TX_MSK:
-		priv->_agn.reply_agg_tx_stats.delay_tx++;
+		priv->reply_agg_tx_stats.delay_tx++;
 		break;
 	default:
-		priv->_agn.reply_agg_tx_stats.unknown++;
+		priv->reply_agg_tx_stats.unknown++;
 		break;
 	}
 }
@@ -391,8 +391,7 @@ void iwl_check_abort_status(struct iwl_priv *priv,
 	}
 }
 
-static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -401,6 +400,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct ieee80211_tx_info *info;
 	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+	struct ieee80211_hdr *hdr;
 	struct iwl_tx_info *txb;
 	u32 status = le16_to_cpu(tx_resp->status.status);
 	int tid;
@@ -427,6 +427,11 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
 			IWLAGN_TX_RES_RA_POS;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
+
+	hdr = (void *)txb->skb->data;
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		priv->last_seq_ctl = tx_resp->seq_ctl;
+
 	if (txq->sched_retry) {
 		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
 		struct iwl_ht_agg *agg;
@@ -479,27 +484,6 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 
-void iwlagn_rx_handler_setup(struct iwl_priv *priv)
-{
-	/* init calibration handlers */
-	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
-					iwlagn_rx_calib_result;
-	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
-
-	/* set up notification wait support */
-	spin_lock_init(&priv->_agn.notif_wait_lock);
-	INIT_LIST_HEAD(&priv->_agn.notif_waits);
-	init_waitqueue_head(&priv->_agn.notif_waitq);
-}
-
-void iwlagn_setup_deferred_work(struct iwl_priv *priv)
-{
-	/*
-	 * nothing need to be done here anymore
-	 * still keep for future use if needed
-	 */
-}
-
 int iwlagn_hw_valid_rtc_data_addr(u32 addr)
 {
 	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
@@ -541,7 +525,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
 	else
 		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
 
-	return trans_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
+	return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
 				  sizeof(tx_power_cmd), &tx_power_cmd);
 }
 
@@ -628,283 +612,6 @@ struct iwl_mod_params iwlagn_mod_params = {
 	/* the rest are 0 by default */
 };
 
-int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-	u32 rb_size;
-	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
-	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
-	rb_timeout = RX_RB_TIMEOUT;
-
-	if (iwlagn_mod_params.amsdu_size_8K)
-		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
-	else
-		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
-	/* Stop Rx DMA */
-	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
-	/* Reset driver's Rx queue write index */
-	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
-	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-			   (u32)(rxq->bd_dma >> 8));
-
-	/* Tell device where in DRAM to update its Rx status */
-	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-			   rxq->rb_stts_dma >> 4);
-
-	/* Enable Rx DMA
-	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
-	 * the credit mechanism in 5000 HW RX FIFO
-	 * Direct rx interrupts to hosts
-	 * Rx buffer size 4 or 8k
-	 * RB timeout 0x10
-	 * 256 RBDs
-	 */
-	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
-			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
-			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
-			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
-			   rb_size|
-			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
-			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
-	/* Set interrupt coalescing timer to default (2048 usecs) */
-	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
-	return 0;
-}
-
-static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
-		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
-			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
-					       ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
-	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
-			       ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwlagn_hw_nic_init(struct iwl_priv *priv)
-{
-	unsigned long flags;
-	struct iwl_rx_queue *rxq = &priv->rxq;
-
-	/* nic_init */
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_apm_init(priv);
-
-	/* Set interrupt coalescing calibration timer to default (512 usecs) */
-	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	iwlagn_set_pwr_vmain(priv);
-
-	priv->cfg->ops->lib->nic_config(priv);
-
-	/* Allocate the RX queue, or reset if it is already allocated */
-	trans_rx_init(priv);
-
-	iwlagn_rx_replenish(priv);
-
-	iwlagn_rx_init(priv, rxq);
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	rxq->need_update = 1;
-	iwl_rx_queue_update_write_ptr(priv, rxq);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Allocate or reset and init all Tx and Command queues */
-	if (trans_tx_init(priv))
-		return -ENOMEM;
-
-	if (priv->cfg->base_params->shadow_reg_enable) {
-		/* enable shadow regs in HW */
-		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
-			    0x800FFFFF);
-	}
-
-	set_bit(STATUS_INIT, &priv->status);
-
-	return 0;
-}
-
-/**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
-					     dma_addr_t dma_addr)
-{
-	return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwlagn_rx_queue_restock(struct iwl_priv *priv)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct list_head *element;
-	struct iwl_rx_mem_buffer *rxb;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rxq->lock, flags);
-	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
-		/* The overwritten rxb must be a used one */
-		rxb = rxq->queue[rxq->write];
-		BUG_ON(rxb && rxb->page);
-
-		/* Get next free Rx buffer, remove from free list */
-		element = rxq->rx_free.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
-		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
-							      rxb->page_dma);
-		rxq->queue[rxq->write] = rxb;
-		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-		rxq->free_count--;
-	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
-	/* If the pre-allocated buffer pool is dropping low, schedule to
-	 * refill it */
-	if (rxq->free_count <= RX_LOW_WATERMARK)
-		queue_work(priv->workqueue, &priv->rx_replenish);
-
-
-	/* If we've added more space for the firmware to place data, tell it.
-	 * Increment device's write pointer in multiples of 8. */
-	if (rxq->write_actual != (rxq->write & ~0x7)) {
-		spin_lock_irqsave(&rxq->lock, flags);
-		rxq->need_update = 1;
-		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_rx_queue_update_write_ptr(priv, rxq);
-	}
-}
-
-/**
- * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
-	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct list_head *element;
-	struct iwl_rx_mem_buffer *rxb;
-	struct page *page;
-	unsigned long flags;
-	gfp_t gfp_mask = priority;
-
-	while (1) {
-		spin_lock_irqsave(&rxq->lock, flags);
-		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
-			return;
-		}
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		if (rxq->free_count > RX_LOW_WATERMARK)
-			gfp_mask |= __GFP_NOWARN;
-
-		if (priv->hw_params.rx_page_order > 0)
-			gfp_mask |= __GFP_COMP;
-
-		/* Alloc a new receive buffer */
-		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
-		if (!page) {
-			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
-					       "order: %d\n",
-					       priv->hw_params.rx_page_order);
-
-			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
-					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
-					 rxq->free_count);
-			/* We don't reschedule replenish work here -- we will
-			 * call the restock method and if it still needs
-			 * more buffers it will schedule replenish */
-			return;
-		}
-
-		spin_lock_irqsave(&rxq->lock, flags);
-
-		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
-			__free_pages(page, priv->hw_params.rx_page_order);
-			return;
-		}
-		element = rxq->rx_used.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
-		BUG_ON(rxb->page);
-		rxb->page = page;
-		/* Get physical address of the RB */
-		rxb->page_dma = dma_map_page(priv->bus.dev, page, 0,
-					     PAGE_SIZE << priv->hw_params.rx_page_order,
-					     DMA_FROM_DEVICE);
-		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-		/* and also 256 byte aligned! */
-		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-		spin_lock_irqsave(&rxq->lock, flags);
-
-		list_add_tail(&rxb->list, &rxq->rx_free);
-		rxq->free_count++;
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
-	}
-}
-
-void iwlagn_rx_replenish(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	iwlagn_rx_allocate(priv, GFP_KERNEL);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	iwlagn_rx_queue_restock(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwlagn_rx_replenish_now(struct iwl_priv *priv)
-{
-	iwlagn_rx_allocate(priv, GFP_ATOMIC);
-
-	iwlagn_rx_queue_restock(priv);
-}
-
 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
 {
 	int idx = 0;
@@ -1048,7 +755,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 
 static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
 {
-	struct sk_buff *skb = priv->_agn.offchan_tx_skb;
+	struct sk_buff *skb = priv->offchan_tx_skb;
 
 	if (skb->len < maxlen)
 		maxlen = skb->len;
@@ -1134,7 +841,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	} else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
 		scan->suspend_time = 0;
 		scan->max_out_time =
-			cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
+			cpu_to_le32(1024 * priv->offchan_tx_timeout);
 	}
 
 	switch (priv->scan_type) {
@@ -1322,9 +1029,9 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		scan_ch = (void *)&scan->data[cmd_len];
 		scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
 		scan_ch->channel =
-			cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
+			cpu_to_le16(priv->offchan_tx_chan->hw_value);
 		scan_ch->active_dwell =
-			cpu_to_le16(priv->_agn.offchan_tx_timeout);
+			cpu_to_le16(priv->offchan_tx_timeout);
 		scan_ch->passive_dwell = 0;
 
 		/* Set txpower levels to defaults */
@@ -1334,7 +1041,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		 * power level:
 		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
 		 */
-		if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
+		if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
 			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
 		else
 			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -1360,7 +1067,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	if (ret)
 		return ret;
 
-	ret = trans_send_cmd(priv, &cmd);
+	ret = trans_send_cmd(&priv->trans, &cmd);
 	if (ret) {
 		clear_bit(STATUS_SCAN_HW, &priv->status);
 		iwlagn_set_pan_params(priv);
@@ -1466,7 +1173,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
 		       flush_cmd.fifo_control);
 	flush_cmd.flush_control = cpu_to_le16(flush_control);
 
-	return trans_send_cmd(priv, &cmd);
+	return trans_send_cmd(&priv->trans, &cmd);
 }
 
 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
@@ -1660,12 +1367,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
 	if (priv->cfg->bt_params->bt_session_2) {
 		memcpy(&bt_cmd_2000.basic, &basic,
 			sizeof(basic));
-		ret = trans_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+		ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
 			CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
 	} else {
 		memcpy(&bt_cmd_6000.basic, &basic,
 			sizeof(basic));
-		ret = trans_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+		ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
 			CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
 	}
 	if (ret)
@@ -1986,15 +1693,12 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
 
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
 {
-	iwlagn_rx_handler_setup(priv);
 	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
 		iwlagn_bt_coex_profile_notif;
 }
 
 void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
 {
-	iwlagn_setup_deferred_work(priv);
-
 	INIT_WORK(&priv->bt_traffic_change_work,
 		  iwlagn_bt_traffic_change_work);
 }
@@ -2306,9 +2010,9 @@ void iwlagn_init_notification_wait(struct iwl_priv *priv,
 	wait_entry->triggered = false;
 	wait_entry->aborted = false;
 
-	spin_lock_bh(&priv->_agn.notif_wait_lock);
-	list_add(&wait_entry->list, &priv->_agn.notif_waits);
-	spin_unlock_bh(&priv->_agn.notif_wait_lock);
+	spin_lock_bh(&priv->notif_wait_lock);
+	list_add(&wait_entry->list, &priv->notif_waits);
+	spin_unlock_bh(&priv->notif_wait_lock);
 }
 
 int iwlagn_wait_notification(struct iwl_priv *priv,
@@ -2317,13 +2021,13 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
 {
 	int ret;
 
-	ret = wait_event_timeout(priv->_agn.notif_waitq,
+	ret = wait_event_timeout(priv->notif_waitq,
 			wait_entry->triggered || wait_entry->aborted,
 			timeout);
 
-	spin_lock_bh(&priv->_agn.notif_wait_lock);
+	spin_lock_bh(&priv->notif_wait_lock);
 	list_del(&wait_entry->list);
-	spin_unlock_bh(&priv->_agn.notif_wait_lock);
+	spin_unlock_bh(&priv->notif_wait_lock);
 
 	if (wait_entry->aborted)
 		return -EIO;
@@ -2337,93 +2041,7 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
 void iwlagn_remove_notification(struct iwl_priv *priv,
 				struct iwl_notification_wait *wait_entry)
 {
-	spin_lock_bh(&priv->_agn.notif_wait_lock);
+	spin_lock_bh(&priv->notif_wait_lock);
 	list_del(&wait_entry->list);
-	spin_unlock_bh(&priv->_agn.notif_wait_lock);
-}
-
-int iwlagn_start_device(struct iwl_priv *priv)
-{
-	int ret;
-
-	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
-	     iwl_prepare_card_hw(priv)) {
-		IWL_WARN(priv, "Exit HW not ready\n");
-		return -EIO;
-	}
-
-	/* If platform's RF_KILL switch is NOT set to KILL */
-	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-	if (iwl_is_rfkill(priv)) {
-		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-		iwl_enable_interrupts(priv);
-		return -ERFKILL;
-	}
-
-	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
-	ret = iwlagn_hw_nic_init(priv);
-	if (ret) {
-		IWL_ERR(priv, "Unable to init nic\n");
-		return ret;
-	}
-
-	/* make sure rfkill handshake bits are cleared */
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-	/* clear (again), then enable host interrupts */
-	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-	iwl_enable_interrupts(priv);
-
-	/* really make sure rfkill handshake bits are cleared */
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
-	return 0;
-}
-
-void iwlagn_stop_device(struct iwl_priv *priv)
-{
-	unsigned long flags;
-
-	/* stop and reset the on-board processor */
-	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-	/* tell the device to stop sending interrupts */
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_disable_interrupts(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	iwl_synchronize_irq(priv);
-
-	/* device going down, Stop using ICT table */
-	iwl_disable_ict(priv);
-
-	/*
-	 * If a HW restart happens during firmware loading,
-	 * then the firmware loading might call this function
-	 * and later it might be called again due to the
-	 * restart. So don't process again if the device is
-	 * already dead.
-	 */
-	if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
-		trans_tx_stop(priv);
-		trans_rx_stop(priv);
-
-		/* Power-down device's busmaster DMA clocks */
-		iwl_write_prph(priv, APMG_CLK_DIS_REG,
-			       APMG_CLK_VAL_DMA_CLK_RQT);
-		udelay(5);
-	}
-
-	/* Make sure (redundant) we've released our request to stay awake */
-	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
-	/* Stop the device, and put it in low power state */
-	iwl_apm_stop(priv);
+	spin_unlock_bh(&priv->notif_wait_lock);
 }