Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl3945-base.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 148
 1 file changed, 103 insertions(+), 45 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index acefc3721267..617c4235d971 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -972,7 +972,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	dma_addr_t phys_addr;
 	dma_addr_t txcmd_phys;
 	int txq_id = skb_get_queue_mapping(skb);
-	u16 len, idx, len_org, hdr_len;
+	u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */
 	u8 id;
 	u8 unicast;
 	u8 sta_id;
@@ -1074,6 +1074,40 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx->hdr, hdr, hdr_len);
 
+
+	if (info->control.hw_key)
+		iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
+
+	/* set is_hcca to 0; it probably will never be implemented */
+	iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
+
+	/* Total # bytes to be transmitted */
+	len = (u16)skb->len;
+	tx->len = cpu_to_le16(len);
+
+
+	tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+	tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+		if (qc)
+			priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+		     le16_to_cpu(out_cmd->hdr.sequence));
+	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
+	iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
+			   ieee80211_hdrlen(fc));
+
 	/*
 	 * Use the first empty entry in this queue's command buffer array
 	 * to contain the Tx command and MAC header concatenated together
@@ -1096,22 +1130,18 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
-	txcmd_phys = pci_map_single(priv->pci_dev,
-				    out_cmd, sizeof(struct iwl_cmd),
-				    PCI_DMA_TODEVICE);
+	txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+				    len, PCI_DMA_TODEVICE);
+	/* we do not map meta data ... so we can safely access address to
+	 * provide to unmap command*/
 	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
-	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
-	/* Add buffer containing Tx command and MAC(!) header to TFD's
-	 * first entry */
-	txcmd_phys += offsetof(struct iwl_cmd, hdr);
+	pci_unmap_len_set(&out_cmd->meta, len, len);
 
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 						   txcmd_phys, len, 1, 0);
 
-	if (info->control.hw_key)
-		iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
@@ -1124,34 +1154,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 							0, U32_PAD(len));
 	}
 
-	/* Total # bytes to be transmitted */
-	len = (u16)skb->len;
-	tx->len = cpu_to_le16(len);
-
-	/* TODO need this for burst mode later on */
-	iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
-	/* set is_hcca to 0; it probably will never be implemented */
-	iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
-	if (!ieee80211_has_morefrags(hdr->frame_control)) {
-		txq->need_update = 1;
-		if (qc)
-			priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
-
-	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
-		     le16_to_cpu(out_cmd->hdr.sequence));
-	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
-	iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
-			   ieee80211_hdrlen(fc));
 
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -1663,6 +1665,37 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
+void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].skb != NULL) {
+			pci_unmap_single(priv->pci_dev,
+					 rxq->pool[i].real_dma_addr,
+					 priv->hw_params.rx_buf_size,
+					 PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_skb--;
+			dev_kfree_skb(rxq->pool[i].skb);
+			rxq->pool[i].skb = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+EXPORT_SYMBOL(iwl3945_rx_queue_reset);
+
 /*
  * this should be called while priv->lock is locked
  */
@@ -1687,6 +1720,34 @@ void iwl3945_rx_replenish(void *data)
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non NULL it is unmapped and freed
+ */
+static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+	int i;
+	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+		if (rxq->pool[i].skb != NULL) {
+			pci_unmap_single(priv->pci_dev,
+					 rxq->pool[i].real_dma_addr,
+					 priv->hw_params.rx_buf_size,
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(rxq->pool[i].skb);
+		}
+	}
+
+	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			    rxq->dma_addr);
+	pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
+			    rxq->rb_stts, rxq->rb_stts_dma);
+	rxq->bd = NULL;
+	rxq->rb_stts = NULL;
+}
+EXPORT_SYMBOL(iwl3945_rx_queue_free);
+
+
 /* Convert linear signal-to-noise ratio into dB */
 static u8 ratio2dB[100] = {
 /*	 0   1   2   3   4   5   6   7   8   9 */
@@ -1804,9 +1865,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
-				priv->hw_params.rx_buf_size,
-				PCI_DMA_FROMDEVICE);
+		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
+				 priv->hw_params.rx_buf_size,
+				 PCI_DMA_FROMDEVICE);
 		pkt = (struct iwl_rx_packet *)rxb->skb->data;
 
 		/* Reclaim a command buffer only if this packet is a response
@@ -1854,9 +1915,6 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 			rxb->skb = NULL;
 		}
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				priv->hw_params.rx_buf_size,
-				PCI_DMA_FROMDEVICE);
 		spin_lock_irqsave(&rxq->lock, flags);
 		list_add_tail(&rxb->list, &priv->rxq.rx_used);
 		spin_unlock_irqrestore(&rxq->lock, flags);
@@ -5203,7 +5261,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
 	iwl3945_dealloc_ucode_pci(priv);
 
 	if (priv->rxq.bd)
-		iwl_rx_queue_free(priv, &priv->rxq);
+		iwl3945_rx_queue_free(priv, &priv->rxq);
 	iwl3945_hw_txq_ctx_free(priv);
 
 	iwl3945_unset_hw_params(priv);