aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-4965.c
diff options
context:
space:
mode:
authorCahill, Ben M <ben.m.cahill@intel.com>2007-11-28 22:09:54 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 18:05:22 -0500
commit8b6eaea8ec79b111a18a1c60333deb16ba27e6b3 (patch)
tree4a6de834ae5c503e4140a763486ced7022dcb64d /drivers/net/wireless/iwlwifi/iwl-4965.c
parent74093ddf4c42da74922b63fb6844989e21164d9e (diff)
iwlwifi: add comments, mostly on Tx queues
Add comments, mostly on Tx queues. Signed-off-by: Cahill, Ben M <ben.m.cahill@intel.com> Signed-off-by: Zhu Yi <yi.zhu@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-4965.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c252
1 file changed, 216 insertions, 36 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d44166a599fc..40c795eaabac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -144,7 +144,7 @@ int iwl4965_hw_rxq_stop(struct iwl4965_priv *priv)
144 return rc; 144 return rc;
145 } 145 }
146 146
147 /* stop HW */ 147 /* stop Rx DMA */
148 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 148 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
149 rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, 149 rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
150 (1 << 24), 1000); 150 (1 << 24), 1000);
@@ -234,17 +234,22 @@ static int iwl4965_rx_init(struct iwl4965_priv *priv, struct iwl4965_rx_queue *r
234 return rc; 234 return rc;
235 } 235 }
236 236
237 /* stop HW */ 237 /* Stop Rx DMA */
238 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 238 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
239 239
240 /* Reset driver's Rx queue write index */
240 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 241 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
242
243 /* Tell device where to find RBD circular buffer in DRAM */
241 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 244 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
242 rxq->dma_addr >> 8); 245 rxq->dma_addr >> 8);
243 246
247 /* Tell device where in DRAM to update its Rx status */
244 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, 248 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
245 (priv->hw_setting.shared_phys + 249 (priv->hw_setting.shared_phys +
246 offsetof(struct iwl4965_shared, val0)) >> 4); 250 offsetof(struct iwl4965_shared, val0)) >> 4);
247 251
252 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
248 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 253 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
249 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 254 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
250 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 255 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
@@ -263,6 +268,7 @@ static int iwl4965_rx_init(struct iwl4965_priv *priv, struct iwl4965_rx_queue *r
263 return 0; 268 return 0;
264} 269}
265 270
271/* Tell 4965 where to find the "keep warm" buffer */
266static int iwl4965_kw_init(struct iwl4965_priv *priv) 272static int iwl4965_kw_init(struct iwl4965_priv *priv)
267{ 273{
268 unsigned long flags; 274 unsigned long flags;
@@ -297,6 +303,11 @@ static int iwl4965_kw_alloc(struct iwl4965_priv *priv)
297#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ 303#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
298 ? # x " " : "") 304 ? # x " " : "")
299 305
306/**
307 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv.
308 *
309 * Does not set up a command, or touch hardware.
310 */
300int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv, int phymode, u16 channel, 311int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv, int phymode, u16 channel,
301 const struct iwl4965_eeprom_channel *eeprom_ch, 312 const struct iwl4965_eeprom_channel *eeprom_ch,
302 u8 fat_extension_channel) 313 u8 fat_extension_channel)
@@ -337,6 +348,9 @@ int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv, int phymode, u16 channe
337 return 0; 348 return 0;
338} 349}
339 350
351/**
352 * iwl4965_kw_free - Free the "keep warm" buffer
353 */
340static void iwl4965_kw_free(struct iwl4965_priv *priv) 354static void iwl4965_kw_free(struct iwl4965_priv *priv)
341{ 355{
342 struct pci_dev *dev = priv->pci_dev; 356 struct pci_dev *dev = priv->pci_dev;
@@ -363,9 +377,10 @@ static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
363 377
364 iwl4965_kw_free(priv); 378 iwl4965_kw_free(priv);
365 379
380 /* Free all tx/cmd queues and keep-warm buffer */
366 iwl4965_hw_txq_ctx_free(priv); 381 iwl4965_hw_txq_ctx_free(priv);
367 382
368 /* Tx CMD queue */ 383 /* Alloc keep-warm buffer */
369 rc = iwl4965_kw_alloc(priv); 384 rc = iwl4965_kw_alloc(priv);
370 if (rc) { 385 if (rc) {
371 IWL_ERROR("Keep Warm allocation failed"); 386 IWL_ERROR("Keep Warm allocation failed");
@@ -381,17 +396,20 @@ static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
381 goto error_reset; 396 goto error_reset;
382 } 397 }
383 398
399 /* Turn off all Tx DMA channels */
384 iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0); 400 iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0);
385 iwl4965_release_nic_access(priv); 401 iwl4965_release_nic_access(priv);
386 spin_unlock_irqrestore(&priv->lock, flags); 402 spin_unlock_irqrestore(&priv->lock, flags);
387 403
404 /* Tell 4965 where to find the keep-warm buffer */
388 rc = iwl4965_kw_init(priv); 405 rc = iwl4965_kw_init(priv);
389 if (rc) { 406 if (rc) {
390 IWL_ERROR("kw_init failed\n"); 407 IWL_ERROR("kw_init failed\n");
391 goto error_reset; 408 goto error_reset;
392 } 409 }
393 410
394 /* Tx queue(s) */ 411 /* Alloc and init all (default 16) Tx queues,
412 * including the command queue (#4) */
395 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { 413 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
396 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 414 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
397 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 415 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -545,6 +563,8 @@ int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
545 iwl4965_rx_queue_update_write_ptr(priv, rxq); 563 iwl4965_rx_queue_update_write_ptr(priv, rxq);
546 564
547 spin_unlock_irqrestore(&priv->lock, flags); 565 spin_unlock_irqrestore(&priv->lock, flags);
566
567 /* Allocate and init all Tx and Command queues */
548 rc = iwl4965_txq_ctx_reset(priv); 568 rc = iwl4965_txq_ctx_reset(priv);
549 if (rc) 569 if (rc)
550 return rc; 570 return rc;
@@ -593,13 +613,16 @@ int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv)
593 return rc; 613 return rc;
594} 614}
595 615
616/**
617 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
618 */
596void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv) 619void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv)
597{ 620{
598 621
599 int txq_id; 622 int txq_id;
600 unsigned long flags; 623 unsigned long flags;
601 624
602 /* reset TFD queues */ 625 /* Stop each Tx DMA channel, and wait for it to be idle */
603 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { 626 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
604 spin_lock_irqsave(&priv->lock, flags); 627 spin_lock_irqsave(&priv->lock, flags);
605 if (iwl4965_grab_nic_access(priv)) { 628 if (iwl4965_grab_nic_access(priv)) {
@@ -617,6 +640,7 @@ void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv)
617 spin_unlock_irqrestore(&priv->lock, flags); 640 spin_unlock_irqrestore(&priv->lock, flags);
618 } 641 }
619 642
643 /* Deallocate memory for all Tx queues */
620 iwl4965_hw_txq_ctx_free(priv); 644 iwl4965_hw_txq_ctx_free(priv);
621} 645}
622 646
@@ -1586,16 +1610,23 @@ static void iwl4965_set_wr_ptrs(struct iwl4965_priv *priv, int txq_id, u32 index
1586 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index); 1610 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index);
1587} 1611}
1588 1612
1589/* 1613/**
1590 * Acquire priv->lock before calling this function ! 1614 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
1615 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
1616 * @scd_retry: (1) Indicates queue will be used in aggregation mode
1617 *
1618 * NOTE: Acquire priv->lock before calling this function !
1591 */ 1619 */
1592static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv, 1620static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv,
1593 struct iwl4965_tx_queue *txq, 1621 struct iwl4965_tx_queue *txq,
1594 int tx_fifo_id, int scd_retry) 1622 int tx_fifo_id, int scd_retry)
1595{ 1623{
1596 int txq_id = txq->q.id; 1624 int txq_id = txq->q.id;
1625
1626 /* Find out whether to activate Tx queue */
1597 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; 1627 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1598 1628
1629 /* Set up and activate */
1599 iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id), 1630 iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id),
1600 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1631 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1601 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | 1632 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
@@ -1606,7 +1637,7 @@ static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv,
1606 txq->sched_retry = scd_retry; 1637 txq->sched_retry = scd_retry;
1607 1638
1608 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n", 1639 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
1609 active ? "Activete" : "Deactivate", 1640 active ? "Activate" : "Deactivate",
1610 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 1641 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1611} 1642}
1612 1643
@@ -1654,6 +1685,7 @@ int iwl4965_alive_notify(struct iwl4965_priv *priv)
1654 return rc; 1685 return rc;
1655 } 1686 }
1656 1687
1688 /* Clear 4965's internal Tx Scheduler data base */
1657 priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR); 1689 priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR);
1658 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; 1690 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1659 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) 1691 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
@@ -1663,20 +1695,29 @@ int iwl4965_alive_notify(struct iwl4965_priv *priv)
1663 for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4) 1695 for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
1664 iwl4965_write_targ_mem(priv, a, 0); 1696 iwl4965_write_targ_mem(priv, a, 0);
1665 1697
1698 /* Tell 4965 where to find Tx byte count tables */
1666 iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR, 1699 iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR,
1667 (priv->hw_setting.shared_phys + 1700 (priv->hw_setting.shared_phys +
1668 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10); 1701 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
1702
1703 /* Disable chain mode for all queues */
1669 iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0); 1704 iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0);
1670 1705
1671 /* initiate the queues */ 1706 /* Initialize each Tx queue (including the command queue) */
1672 for (i = 0; i < priv->hw_setting.max_txq_num; i++) { 1707 for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
1708
1709 /* TFD circular buffer read/write indexes */
1673 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0); 1710 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0);
1674 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); 1711 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1712
1713 /* Max Tx Window size for Scheduler-ACK mode */
1675 iwl4965_write_targ_mem(priv, priv->scd_base_addr + 1714 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1676 SCD_CONTEXT_QUEUE_OFFSET(i), 1715 SCD_CONTEXT_QUEUE_OFFSET(i),
1677 (SCD_WIN_SIZE << 1716 (SCD_WIN_SIZE <<
1678 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 1717 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1679 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 1718 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1719
1720 /* Frame limit */
1680 iwl4965_write_targ_mem(priv, priv->scd_base_addr + 1721 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1681 SCD_CONTEXT_QUEUE_OFFSET(i) + 1722 SCD_CONTEXT_QUEUE_OFFSET(i) +
1682 sizeof(u32), 1723 sizeof(u32),
@@ -1688,11 +1729,13 @@ int iwl4965_alive_notify(struct iwl4965_priv *priv)
1688 iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK, 1729 iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK,
1689 (1 << priv->hw_setting.max_txq_num) - 1); 1730 (1 << priv->hw_setting.max_txq_num) - 1);
1690 1731
1732 /* Activate all Tx DMA/FIFO channels */
1691 iwl4965_write_prph(priv, KDR_SCD_TXFACT, 1733 iwl4965_write_prph(priv, KDR_SCD_TXFACT,
1692 SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); 1734 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1693 1735
1694 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 1736 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1695 /* map qos queues to fifos one-to-one */ 1737
1738 /* Map each Tx/cmd queue to its corresponding fifo */
1696 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 1739 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1697 int ac = default_queue_to_tx_fifo[i]; 1740 int ac = default_queue_to_tx_fifo[i];
1698 iwl4965_txq_ctx_activate(priv, i); 1741 iwl4965_txq_ctx_activate(priv, i);
@@ -1705,8 +1748,14 @@ int iwl4965_alive_notify(struct iwl4965_priv *priv)
1705 return 0; 1748 return 0;
1706} 1749}
1707 1750
1751/**
1752 * iwl4965_hw_set_hw_setting
1753 *
1754 * Called when initializing driver
1755 */
1708int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv) 1756int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv)
1709{ 1757{
1758 /* Allocate area for Tx byte count tables and Rx queue status */
1710 priv->hw_setting.shared_virt = 1759 priv->hw_setting.shared_virt =
1711 pci_alloc_consistent(priv->pci_dev, 1760 pci_alloc_consistent(priv->pci_dev,
1712 sizeof(struct iwl4965_shared), 1761 sizeof(struct iwl4965_shared),
@@ -1741,13 +1790,15 @@ void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv)
1741 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) 1790 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
1742 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]); 1791 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
1743 1792
1793 /* Keep-warm buffer */
1744 iwl4965_kw_free(priv); 1794 iwl4965_kw_free(priv);
1745} 1795}
1746 1796
1747/** 1797/**
1748 * iwl4965_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr] 1798 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1749 * 1799 *
1750 * Does NOT advance any indexes 1800 * Does NOT advance any TFD circular buffer read/write indexes
1801 * Does NOT free the TFD itself (which is within circular buffer)
1751 */ 1802 */
1752int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq) 1803int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
1753{ 1804{
@@ -1758,12 +1809,11 @@ int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *
1758 int counter = 0; 1809 int counter = 0;
1759 int index, is_odd; 1810 int index, is_odd;
1760 1811
1761 /* classify bd */ 1812 /* Host command buffers stay mapped in memory, nothing to clean */
1762 if (txq->q.id == IWL_CMD_QUEUE_NUM) 1813 if (txq->q.id == IWL_CMD_QUEUE_NUM)
1763 /* nothing to cleanup after for host commands */
1764 return 0; 1814 return 0;
1765 1815
1766 /* sanity check */ 1816 /* Sanity check on number of chunks */
1767 counter = IWL_GET_BITS(*bd, num_tbs); 1817 counter = IWL_GET_BITS(*bd, num_tbs);
1768 if (counter > MAX_NUM_OF_TBS) { 1818 if (counter > MAX_NUM_OF_TBS) {
1769 IWL_ERROR("Too many chunks: %i\n", counter); 1819 IWL_ERROR("Too many chunks: %i\n", counter);
@@ -1771,8 +1821,8 @@ int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *
1771 return 0; 1821 return 0;
1772 } 1822 }
1773 1823
1774 /* unmap chunks if any */ 1824 /* Unmap chunks, if any.
1775 1825 * TFD info for odd chunks is different format than for even chunks. */
1776 for (i = 0; i < counter; i++) { 1826 for (i = 0; i < counter; i++) {
1777 index = i / 2; 1827 index = i / 2;
1778 is_odd = i & 0x1; 1828 is_odd = i & 0x1;
@@ -1792,6 +1842,7 @@ int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *
1792 IWL_GET_BITS(bd->pa[index], tb1_len), 1842 IWL_GET_BITS(bd->pa[index], tb1_len),
1793 PCI_DMA_TODEVICE); 1843 PCI_DMA_TODEVICE);
1794 1844
1845 /* Free SKB, if any, for this chunk */
1795 if (txq->txb[txq->q.read_ptr].skb[i]) { 1846 if (txq->txb[txq->q.read_ptr].skb[i]) {
1796 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i]; 1847 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
1797 1848
@@ -1826,6 +1877,17 @@ static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1826 return 1; 1877 return 1;
1827} 1878}
1828 1879
1880/**
1881 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
1882 *
1883 * Determines power supply voltage compensation for txpower calculations.
1884 * Returns number of 1/2-dB steps to subtract from gain table index,
1885 * to compensate for difference between power supply voltage during
1886 * factory measurements, vs. current power supply voltage.
1887 *
1888 * Voltage indication is higher for lower voltage.
1889 * Lower voltage requires more gain (lower gain table index).
1890 */
1829static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, 1891static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1830 s32 current_voltage) 1892 s32 current_voltage)
1831{ 1893{
@@ -1913,6 +1975,14 @@ static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1913 } 1975 }
1914} 1976}
1915 1977
1978/**
1979 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
1980 *
1981 * Interpolates factory measurements from the two sample channels within a
1982 * sub-band, to apply to channel of interest. Interpolation is proportional to
1983 * differences in channel frequencies, which is proportional to differences
1984 * in channel number.
1985 */
1916static int iwl4965_interpolate_chan(struct iwl4965_priv *priv, u32 channel, 1986static int iwl4965_interpolate_chan(struct iwl4965_priv *priv, u32 channel,
1917 struct iwl4965_eeprom_calib_ch_info *chan_info) 1987 struct iwl4965_eeprom_calib_ch_info *chan_info)
1918{ 1988{
@@ -2681,6 +2751,13 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv,
2681 return (sizeof(*tx_beacon_cmd) + frame_size); 2751 return (sizeof(*tx_beacon_cmd) + frame_size);
2682} 2752}
2683 2753
2754/*
2755 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
2756 * given Tx queue, and enable the DMA channel used for that queue.
2757 *
2758 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
2759 * channels supported in hardware.
2760 */
2684int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq) 2761int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
2685{ 2762{
2686 int rc; 2763 int rc;
@@ -2694,8 +2771,11 @@ int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue
2694 return rc; 2771 return rc;
2695 } 2772 }
2696 2773
2774 /* Circular buffer (TFD queue in DRAM) physical base address */
2697 iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), 2775 iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
2698 txq->q.dma_addr >> 8); 2776 txq->q.dma_addr >> 8);
2777
2778 /* Enable DMA channel, using same id as for TFD queue */
2699 iwl4965_write_direct32( 2779 iwl4965_write_direct32(
2700 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 2780 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2701 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 2781 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
@@ -2718,6 +2798,7 @@ int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *ptr,
2718 struct iwl4965_tfd_frame *tfd = ptr; 2798 struct iwl4965_tfd_frame *tfd = ptr;
2719 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); 2799 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2720 2800
2801 /* Each TFD can point to a maximum 20 Tx buffers */
2721 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) { 2802 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2722 IWL_ERROR("Error can not send more than %d chunks\n", 2803 IWL_ERROR("Error can not send more than %d chunks\n",
2723 MAX_NUM_OF_TBS); 2804 MAX_NUM_OF_TBS);
@@ -2759,6 +2840,9 @@ static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv)
2759#define IWL_TX_CRC_SIZE 4 2840#define IWL_TX_CRC_SIZE 4
2760#define IWL_TX_DELIMITER_SIZE 4 2841#define IWL_TX_DELIMITER_SIZE 4
2761 2842
2843/**
2844 * iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array
2845 */
2762int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv, 2846int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2763 struct iwl4965_tx_queue *txq, u16 byte_cnt) 2847 struct iwl4965_tx_queue *txq, u16 byte_cnt)
2764{ 2848{
@@ -2771,9 +2855,11 @@ int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2771 2855
2772 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 2856 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2773 2857
2858 /* Set up byte count within first 256 entries */
2774 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 2859 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2775 tfd_offset[txq->q.write_ptr], byte_cnt, len); 2860 tfd_offset[txq->q.write_ptr], byte_cnt, len);
2776 2861
2862 /* If within first 64 entries, duplicate at end */
2777 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE) 2863 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
2778 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 2864 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2779 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr], 2865 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
@@ -2782,8 +2868,12 @@ int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2782 return 0; 2868 return 0;
2783} 2869}
2784 2870
2785/* Set up Rx receiver/antenna/chain usage in "staging" RXON image. 2871/**
2786 * This should not be used for scan command ... it puts data in wrong place. */ 2872 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
2873 *
2874 * Selects how many and which Rx receivers/antennas/chains to use.
2875 * This should not be used for scan command ... it puts data in wrong place.
2876 */
2787void iwl4965_set_rxon_chain(struct iwl4965_priv *priv) 2877void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
2788{ 2878{
2789 u8 is_single = is_single_stream(priv); 2879 u8 is_single = is_single_stream(priv);
@@ -2931,6 +3021,9 @@ enum HT_STATUS {
2931 BA_STATUS_ACTIVE, 3021 BA_STATUS_ACTIVE,
2932}; 3022};
2933 3023
3024/**
3025 * iwl4964_tl_ba_avail - Find out if an unused aggregation queue is available
3026 */
2934static u8 iwl4964_tl_ba_avail(struct iwl4965_priv *priv) 3027static u8 iwl4964_tl_ba_avail(struct iwl4965_priv *priv)
2935{ 3028{
2936 int i; 3029 int i;
@@ -2939,6 +3032,8 @@ static u8 iwl4964_tl_ba_avail(struct iwl4965_priv *priv)
2939 u16 msk; 3032 u16 msk;
2940 3033
2941 lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr); 3034 lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3035
3036 /* Find out how many agg queues are in use */
2942 for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) { 3037 for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) {
2943 msk = 1 << i; 3038 msk = 1 << i;
2944 if ((lq->agg_ctrl.granted_ba & msk) || 3039 if ((lq->agg_ctrl.granted_ba & msk) ||
@@ -3080,6 +3175,9 @@ void iwl4965_turn_off_agg(struct iwl4965_priv *priv, u8 tid)
3080 } 3175 }
3081} 3176}
3082 3177
3178/**
3179 * iwl4965_ba_status - Update driver's link quality mgr with tid's HT status
3180 */
3083static void iwl4965_ba_status(struct iwl4965_priv *priv, 3181static void iwl4965_ba_status(struct iwl4965_priv *priv,
3084 u8 tid, enum HT_STATUS status) 3182 u8 tid, enum HT_STATUS status)
3085{ 3183{
@@ -3301,11 +3399,12 @@ int iwl4965_get_temperature(const struct iwl4965_priv *priv)
3301 } 3399 }
3302 3400
3303 /* 3401 /*
3304 * Temperature is only 23 bits so sign extend out to 32 3402 * Temperature is only 23 bits, so sign extend out to 32.
3305 * 3403 *
3306 * NOTE If we haven't received a statistics notification yet 3404 * NOTE If we haven't received a statistics notification yet
3307 * with an updated temperature, use R4 provided to us in the 3405 * with an updated temperature, use R4 provided to us in the
3308 * ALIVE response. */ 3406 * "initialize" ALIVE response.
3407 */
3309 if (!test_bit(STATUS_TEMPERATURE, &priv->status)) 3408 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
3310 vt = sign_extend(R4, 23); 3409 vt = sign_extend(R4, 23);
3311 else 3410 else
@@ -4001,6 +4100,11 @@ static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
4001#ifdef CONFIG_IWL4965_HT 4100#ifdef CONFIG_IWL4965_HT
4002#ifdef CONFIG_IWL4965_HT_AGG 4101#ifdef CONFIG_IWL4965_HT_AGG
4003 4102
4103/**
4104 * iwl4965_set_tx_status - Update driver's record of one Tx frame's status
4105 *
4106 * This will get sent to mac80211.
4107 */
4004static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx, 4108static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx,
4005 u32 status, u32 retry_count, u32 rate) 4109 u32 status, u32 retry_count, u32 rate)
4006{ 4110{
@@ -4013,11 +4117,15 @@ static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx
4013} 4117}
4014 4118
4015 4119
4120/**
4121 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4122 */
4016static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv, 4123static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
4017 int sta_id, int tid) 4124 int sta_id, int tid)
4018{ 4125{
4019 unsigned long flags; 4126 unsigned long flags;
4020 4127
4128 /* Remove "disable" flag, to enable Tx for this TID */
4021 spin_lock_irqsave(&priv->sta_lock, flags); 4129 spin_lock_irqsave(&priv->sta_lock, flags);
4022 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; 4130 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4023 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); 4131 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
@@ -4028,6 +4136,12 @@ static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
4028} 4136}
4029 4137
4030 4138
4139/**
4140 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4141 *
4142 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4143 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4144 */
4031static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv, 4145static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4032 struct iwl4965_ht_agg *agg, 4146 struct iwl4965_ht_agg *agg,
4033 struct iwl4965_compressed_ba_resp* 4147 struct iwl4965_compressed_ba_resp*
@@ -4044,13 +4158,17 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4044 IWL_ERROR("Received BA when not expected\n"); 4158 IWL_ERROR("Received BA when not expected\n");
4045 return -EINVAL; 4159 return -EINVAL;
4046 } 4160 }
4161
4162 /* Mark that the expected block-ack response arrived */
4047 agg->wait_for_ba = 0; 4163 agg->wait_for_ba = 0;
4048 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl); 4164 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl);
4165
4166 /* Calculate shift to align block-ack bits with our Tx window bits */
4049 sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl>>4); 4167 sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl>>4);
4050 if (sh < 0) /* tbw something is wrong with indices */ 4168 if (sh < 0) /* tbw something is wrong with indices */
4051 sh += 0x100; 4169 sh += 0x100;
4052 4170
4053 /* don't use 64 bits for now */ 4171 /* don't use 64-bit values for now */
4054 bitmap0 = resp_bitmap0 >> sh; 4172 bitmap0 = resp_bitmap0 >> sh;
4055 bitmap1 = resp_bitmap1 >> sh; 4173 bitmap1 = resp_bitmap1 >> sh;
4056 bitmap0 |= (resp_bitmap1 & ((1<<sh)|((1<<sh)-1))) << (32 - sh); 4174 bitmap0 |= (resp_bitmap1 & ((1<<sh)|((1<<sh)-1))) << (32 - sh);
@@ -4061,10 +4179,12 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4061 } 4179 }
4062 4180
4063 /* check for success or failure according to the 4181 /* check for success or failure according to the
4064 * transmitted bitmap and back bitmap */ 4182 * transmitted bitmap and block-ack bitmap */
4065 bitmap0 &= agg->bitmap0; 4183 bitmap0 &= agg->bitmap0;
4066 bitmap1 &= agg->bitmap1; 4184 bitmap1 &= agg->bitmap1;
4067 4185
4186 /* For each frame attempted in aggregation,
4187 * update driver's record of tx frame's status. */
4068 for (i = 0; i < agg->frame_count ; i++) { 4188 for (i = 0; i < agg->frame_count ; i++) {
4069 int idx = (agg->start_idx + i) & 0xff; 4189 int idx = (agg->start_idx + i) & 0xff;
4070 ack = bitmap0 & (1 << i); 4190 ack = bitmap0 & (1 << i);
@@ -4080,11 +4200,22 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4080 return 0; 4200 return 0;
4081} 4201}
4082 4202
4203/**
4204 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4205 * @index -- current index
4206 * @n_bd -- total number of entries in queue (s/b power of 2)
4207 */
4083static inline int iwl4965_queue_dec_wrap(int index, int n_bd) 4208static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4084{ 4209{
4085 return (index == 0) ? n_bd - 1 : index - 1; 4210 return (index == 0) ? n_bd - 1 : index - 1;
4086} 4211}
4087 4212
4213/**
4214 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4215 *
4216 * Handles block-acknowledge notification from device, which reports success
4217 * of frames sent via aggregation.
4218 */
4088static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv, 4219static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4089 struct iwl4965_rx_mem_buffer *rxb) 4220 struct iwl4965_rx_mem_buffer *rxb)
4090{ 4221{
@@ -4093,7 +4224,12 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4093 int index; 4224 int index;
4094 struct iwl4965_tx_queue *txq = NULL; 4225 struct iwl4965_tx_queue *txq = NULL;
4095 struct iwl4965_ht_agg *agg; 4226 struct iwl4965_ht_agg *agg;
4227
4228 /* "flow" corresponds to Tx queue */
4096 u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow); 4229 u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow);
4230
4231 /* "ssn" is start of block-ack Tx window, corresponds to index
4232 * (in Tx queue's circular buffer) of first TFD/frame in window */
4097 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 4233 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4098 4234
4099 if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) { 4235 if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) {
@@ -4103,6 +4239,8 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4103 4239
4104 txq = &priv->txq[ba_resp_scd_flow]; 4240 txq = &priv->txq[ba_resp_scd_flow];
4105 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; 4241 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4242
4243 /* Find index just before block-ack window */
4106 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 4244 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4107 4245
4108 /* TODO: Need to get this copy more safely - now good for debug */ 4246 /* TODO: Need to get this copy more safely - now good for debug */
@@ -4128,22 +4266,35 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4128 agg->bitmap0); 4266 agg->bitmap0);
4129 } 4267 }
4130*/ 4268*/
4269
4270 /* Update driver's record of ACK vs. not for each frame in window */
4131 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); 4271 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
4132 /* releases all the TFDs until the SSN */ 4272
4273 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4274 * block-ack window (we assume that they've been successfully
4275 * transmitted ... if not, it's too late anyway). */
4133 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) 4276 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff))
4134 iwl4965_tx_queue_reclaim(priv, ba_resp_scd_flow, index); 4277 iwl4965_tx_queue_reclaim(priv, ba_resp_scd_flow, index);
4135 4278
4136} 4279}
4137 4280
4138 4281
4282/**
4283 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4284 */
4139static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv, u16 txq_id) 4285static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv, u16 txq_id)
4140{ 4286{
4287 /* Simply stop the queue, but don't change any configuration;
4288 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4141 iwl4965_write_prph(priv, 4289 iwl4965_write_prph(priv,
4142 KDR_SCD_QUEUE_STATUS_BITS(txq_id), 4290 KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4143 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 4291 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4144 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 4292 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4145} 4293}
4146 4294
4295/**
4296 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4297 */
4147static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid, 4298static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
4148 u16 txq_id) 4299 u16 txq_id)
4149{ 4300{
@@ -4169,7 +4320,10 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
4169} 4320}
4170 4321
4171/** 4322/**
4172 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID 4323 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4324 *
4325 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4326 * i.e. it must be one of the higher queues used for aggregation
4173 */ 4327 */
4174static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id, 4328static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4175 int tx_fifo, int sta_id, int tid, 4329 int tx_fifo, int sta_id, int tid,
@@ -4185,6 +4339,7 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4185 4339
4186 ra_tid = BUILD_RAxTID(sta_id, tid); 4340 ra_tid = BUILD_RAxTID(sta_id, tid);
4187 4341
4342 /* Modify device's station table to Tx this TID */
4188 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid); 4343 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
4189 4344
4190 spin_lock_irqsave(&priv->lock, flags); 4345 spin_lock_irqsave(&priv->lock, flags);
@@ -4194,19 +4349,22 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4194 return rc; 4349 return rc;
4195 } 4350 }
4196 4351
4352 /* Stop this Tx queue before configuring it */
4197 iwl4965_tx_queue_stop_scheduler(priv, txq_id); 4353 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4198 4354
4355 /* Map receiver-address / traffic-ID to this queue */
4199 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); 4356 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4200 4357
4201 4358 /* Set this queue as a chain-building queue */
4202 iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1<<txq_id)); 4359 iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1<<txq_id));
4203 4360
4361 /* Place first TFD at index corresponding to start sequence number.
4362 * Assumes that ssn_idx is valid (!= 0xFFF) */
4204 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 4363 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4205 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 4364 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4206
4207 /* supposes that ssn_idx is valid (!= 0xFFF) */
4208 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); 4365 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4209 4366
4367 /* Set up Tx window size and frame limit for this queue */
4210 iwl4965_write_targ_mem(priv, 4368 iwl4965_write_targ_mem(priv,
4211 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 4369 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4212 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 4370 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
@@ -4219,6 +4377,7 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4219 4377
4220 iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id)); 4378 iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4221 4379
4380 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
4222 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); 4381 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4223 4382
4224 iwl4965_release_nic_access(priv); 4383 iwl4965_release_nic_access(priv);
@@ -4274,14 +4433,16 @@ static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4274/** 4433/**
4275 * iwl4965_add_station - Initialize a station's hardware rate table 4434 * iwl4965_add_station - Initialize a station's hardware rate table
4276 * 4435 *
4277 * The uCode contains a table of fallback rates and retries per rate 4436 * The uCode's station table contains a table of fallback rates
4278 * for automatic fallback during transmission. 4437 * for automatic fallback during transmission.
4279 * 4438 *
4280 * NOTE: This initializes the table for a single retry per data rate 4439 * NOTE: This sets up a default set of values. These will be replaced later
4281 * which is not optimal. Setting up an intelligent retry per rate 4440 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4282 * requires feedback from transmission, which isn't exposed through 4441 * rc80211_simple.
4283 * rc80211_simple which is what this driver is currently using.
4284 * 4442 *
4443 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4444 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4445 * which requires station table entry to exist).
4285 */ 4446 */
4286void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap) 4447void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4287{ 4448{
@@ -4291,8 +4452,8 @@ void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4291 }; 4452 };
4292 u16 rate_flags; 4453 u16 rate_flags;
4293 4454
4294 /* Set up the rate scaling to start at 54M and fallback 4455 /* Set up the rate scaling to start at selected rate, fall back
4295 * all the way to 1M in IEEE order and then spin on IEEE */ 4456 * all the way down to 1M in IEEE order, and then spin on 1M */
4296 if (is_ap) 4457 if (is_ap)
4297 r = IWL_RATE_54M_INDEX; 4458 r = IWL_RATE_54M_INDEX;
4298 else if (priv->phymode == MODE_IEEE80211A) 4459 else if (priv->phymode == MODE_IEEE80211A)
@@ -4305,8 +4466,10 @@ void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4305 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) 4466 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4306 rate_flags |= RATE_MCS_CCK_MSK; 4467 rate_flags |= RATE_MCS_CCK_MSK;
4307 4468
4469 /* Use Tx antenna B only */
4308 rate_flags |= RATE_MCS_ANT_B_MSK; 4470 rate_flags |= RATE_MCS_ANT_B_MSK;
4309 rate_flags &= ~RATE_MCS_ANT_A_MSK; 4471 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4472
4310 link_cmd.rs_table[i].rate_n_flags = 4473 link_cmd.rs_table[i].rate_n_flags =
4311 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags); 4474 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4312 r = iwl4965_get_prev_ieee_rate(r); 4475 r = iwl4965_get_prev_ieee_rate(r);
@@ -4374,6 +4537,7 @@ void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct sta_ht_info *ht_info)
4374 if (!ht_info->is_ht) 4537 if (!ht_info->is_ht)
4375 return; 4538 return;
4376 4539
4540 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
4377 if (iwl4965_is_fat_tx_allowed(priv, ht_info)) 4541 if (iwl4965_is_fat_tx_allowed(priv, ht_info))
4378 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK; 4542 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4379 else 4543 else
@@ -4388,7 +4552,7 @@ void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct sta_ht_info *ht_info)
4388 return; 4552 return;
4389 } 4553 }
4390 4554
4391 /* Note: control channel is oposit to extension channel */ 4555 /* Note: control channel is opposite of extension channel */
4392 switch (ht_info->extension_chan_offset) { 4556 switch (ht_info->extension_chan_offset) {
4393 case IWL_EXT_CHANNEL_OFFSET_ABOVE: 4557 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4394 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 4558 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
@@ -4514,6 +4678,12 @@ static const u16 default_tid_to_tx_fifo[] = {
4514 IWL_TX_FIFO_AC3 4678 IWL_TX_FIFO_AC3
4515}; 4679};
4516 4680
4681/*
4682 * Find first available (lowest unused) Tx Queue, mark it "active".
4683 * Called only when finding queue for aggregation.
4684 * Should never return anything < 7, because they should already
4685 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4686 */
4517static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv) 4687static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv)
4518{ 4688{
4519 int txq_id; 4689 int txq_id;
@@ -4537,6 +4707,7 @@ int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid,
4537 struct iwl4965_tid_data *tid_data; 4707 struct iwl4965_tid_data *tid_data;
4538 DECLARE_MAC_BUF(mac); 4708 DECLARE_MAC_BUF(mac);
4539 4709
4710 /* Determine Tx DMA/FIFO channel for this Traffic ID */
4540 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 4711 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4541 tx_fifo = default_tid_to_tx_fifo[tid]; 4712 tx_fifo = default_tid_to_tx_fifo[tid];
4542 else 4713 else
@@ -4545,22 +4716,31 @@ int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid,
4545 IWL_WARNING("iwl-AGG iwl4965_mac_ht_tx_agg_start on da=%s" 4716 IWL_WARNING("iwl-AGG iwl4965_mac_ht_tx_agg_start on da=%s"
4546 " tid=%d\n", print_mac(mac, da), tid); 4717 " tid=%d\n", print_mac(mac, da), tid);
4547 4718
4719 /* Get index into station table */
4548 sta_id = iwl4965_hw_find_station(priv, da); 4720 sta_id = iwl4965_hw_find_station(priv, da);
4549 if (sta_id == IWL_INVALID_STATION) 4721 if (sta_id == IWL_INVALID_STATION)
4550 return -ENXIO; 4722 return -ENXIO;
4551 4723
4724 /* Find available Tx queue for aggregation */
4552 txq_id = iwl4965_txq_ctx_activate_free(priv); 4725 txq_id = iwl4965_txq_ctx_activate_free(priv);
4553 if (txq_id == -1) 4726 if (txq_id == -1)
4554 return -ENXIO; 4727 return -ENXIO;
4555 4728
4556 spin_lock_irqsave(&priv->sta_lock, flags); 4729 spin_lock_irqsave(&priv->sta_lock, flags);
4557 tid_data = &priv->stations[sta_id].tid[tid]; 4730 tid_data = &priv->stations[sta_id].tid[tid];
4731
4732 /* Get starting sequence number for 1st frame in block ack window.
4733 * We'll use least signif byte as 1st frame's index into Tx queue. */
4558 ssn = SEQ_TO_SN(tid_data->seq_number); 4734 ssn = SEQ_TO_SN(tid_data->seq_number);
4559 tid_data->agg.txq_id = txq_id; 4735 tid_data->agg.txq_id = txq_id;
4560 spin_unlock_irqrestore(&priv->sta_lock, flags); 4736 spin_unlock_irqrestore(&priv->sta_lock, flags);
4561 4737
4562 *start_seq_num = ssn; 4738 *start_seq_num = ssn;
4739
4740 /* Update driver's link quality manager */
4563 iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE); 4741 iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE);
4742
4743 /* Set up and enable aggregation for selected Tx queue and FIFO */
4564 return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, 4744 return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4565 sta_id, tid, ssn); 4745 sta_id, tid, ssn);
4566} 4746}