author    Reinette Chatre <reinette.chatre@intel.com>    2009-04-21 13:55:48 -0400
committer John W. Linville <linville@tuxdriver.com>      2009-04-21 16:43:34 -0400
commit    df833b1d73680f9f9dc72cbc3215edbbc6ab740d (patch)
tree      07b4e4c829c8e9c2c31936b4db7ad3553d9dafc6
parent    d2ee9cd2e2bdfa2e5817142d6f044697066d3977 (diff)
iwlwifi: DMA fixes
A few issues wrt DMA were uncovered when using the driver with swiotlb.

- The driver should not use memory after it has been mapped.
- iwl3945's RX queue management cannot reuse all of iwlagn's because the
  RX buffer size differs. Revert back to iwl3945-specific routines that
  map/unmap memory.
- There is no need to call dma_sync_single_range_for_cpu followed by
  pci_unmap_single; we can just call pci_unmap_single initially.
- Only map the memory area that will be used by the device. This is
  especially relevant to the mapping of iwl_cmd. We should not map the
  entire structure because the meta data at the beginning of the structure
  contains the address to be used later for unmapping. If the address to
  be used for unmapping is stored in mapped data it creates a problem.
- Ensure that _if_ memory needs to be modified after it is mapped, we call
  _sync_single_for_cpu first, and then release it back to the device with
  _sync_single_for_device.
- We mapped the wrong length of data for host commands, with the mapped
  length differing from the length provided to the device; fix that.

Thanks to Jason Andryuk <jandryuk@gmail.com> for significant bisecting
help to find these issues.

This fixes http://www.intellinuxwireless.org/bugzilla/show_bug.cgi?id=1964

Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Tested-by: Jason Andryuk <jandryuk@gmail.com>
Tested-by: Ben Gamari <bgamari@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
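For readers unfamiliar with the streaming-DMA rules the commit message relies on, here is a minimal, hypothetical sketch of the pattern the fixes converge on, written against the same legacy PCI DMA API the driver uses (pci_map_single and friends). It is illustration only, not iwlwifi code: struct example_cmd, example_map_and_update and example_unmap are invented names. Map only the region the device will read, keep the unmap bookkeeping outside the mapped area, and bracket any post-mapping CPU writes with the _sync_single_for_cpu/_for_device pair.

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical command layout: the unmap bookkeeping lives in 'meta' and
 * must stay outside the mapped region; only 'hdr' onward is device-visible. */
struct example_cmd {
        struct {
                dma_addr_t mapping;     /* address used later for unmapping */
                size_t len;             /* length used later for unmapping */
        } meta;
        u8 hdr[128];                    /* data the device actually reads */
};

static int example_map_and_update(struct pci_dev *pdev,
                                  struct example_cmd *cmd, size_t dev_len)
{
        dma_addr_t phys;

        /* Map only the device-visible part, never the whole struct, so the
         * address stored in cmd->meta is not itself inside the mapping. */
        phys = pci_map_single(pdev, cmd->hdr, dev_len, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, phys))
                return -ENOMEM;
        cmd->meta.mapping = phys;
        cmd->meta.len = dev_len;

        /* If the buffer must be modified after it is mapped, take ownership
         * back first, update, then hand the buffer back to the device. */
        pci_dma_sync_single_for_cpu(pdev, phys, dev_len,
                                    PCI_DMA_BIDIRECTIONAL);
        cmd->hdr[0] = 0x1;      /* late fix-up of device-visible data */
        pci_dma_sync_single_for_device(pdev, phys, dev_len,
                                       PCI_DMA_BIDIRECTIONAL);
        return 0;
}

static void example_unmap(struct pci_dev *pdev, struct example_cmd *cmd)
{
        /* Unmap with the same length that was mapped; no extra sync is
         * needed first, pci_unmap_single already transfers ownership. */
        pci_unmap_single(pdev, cmd->meta.mapping, cmd->meta.len,
                         PCI_DMA_BIDIRECTIONAL);
}

Under swiotlb the device may see a bounce buffer rather than the driver's own memory, and data only moves between the two at map, sync and unmap time, which is why these bugs surfaced there first.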
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c       |   2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h       |   1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c        |  11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h        |   4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c         |  95
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c   | 148
6 files changed, 165 insertions(+), 96 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 2399328e8de7..527525cc0919 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1192,7 +1192,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
                         return -ENOMEM;
                 }
         } else
-                iwl_rx_queue_reset(priv, rxq);
+                iwl3945_rx_queue_reset(priv, rxq);
 
         iwl3945_rx_replenish(priv);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index ab7aaf6872c7..55188844657b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -215,6 +215,7 @@ extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
 extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
                                  struct iwl_tx_queue *txq, int count, u32 id);
 extern void iwl3945_rx_replenish(void *data);
+extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
 extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
                                 const void *data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 3889158b359c..1ef4192207a5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -976,11 +976,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
 
                 rxq->queue[i] = NULL;
 
-                dma_sync_single_range_for_cpu(
-                        &priv->pci_dev->dev, rxb->real_dma_addr,
-                        rxb->aligned_dma_addr - rxb->real_dma_addr,
-                        priv->hw_params.rx_buf_size,
-                        PCI_DMA_FROMDEVICE);
+                pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
+                                 priv->hw_params.rx_buf_size + 256,
+                                 PCI_DMA_FROMDEVICE);
                 pkt = (struct iwl_rx_packet *)rxb->skb->data;
 
                 /* Reclaim a command buffer only if this packet is a response
@@ -1031,9 +1029,6 @@ void iwl_rx_handle(struct iwl_priv *priv)
                         rxb->skb = NULL;
                 }
 
-                pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-                                 priv->hw_params.rx_buf_size + 256,
-                                 PCI_DMA_FROMDEVICE);
                 spin_lock_irqsave(&rxq->lock, flags);
                 list_add_tail(&rxb->list, &priv->rxq.rx_used);
                 spin_unlock_irqrestore(&rxq->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index ec9a13846edd..cf7f0db58fcf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -360,12 +360,16 @@ struct iwl_host_cmd {
 
 /**
  * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
  * @rx_free: list of free SKBs for use
  * @rx_used: List of Rx buffers with no SKB
  * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
  *
  * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8ef53e28176f..71d5b8a1a73e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -799,6 +799,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         /* Copy MAC header from skb into command buffer */
         memcpy(tx_cmd->hdr, hdr, hdr_len);
 
+
+        /* Total # bytes to be transmitted */
+        len = (u16)skb->len;
+        tx_cmd->len = cpu_to_le16(len);
+
+        if (info->control.hw_key)
+                iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+        /* TODO need this for burst mode later on */
+        iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+
+        /* set is_hcca to 0; it probably will never be implemented */
+        iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
+
+        iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
+
         /*
          * Use the first empty entry in this queue's command buffer array
          * to contain the Tx command and MAC header concatenated together
@@ -819,21 +835,30 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         else
                 len_org = 0;
 
+        /* Tell NIC about any 2-byte padding after MAC header */
+        if (len_org)
+                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
         /* Physical address of this Tx command's header (not MAC header!),
          * within command buffer array. */
         txcmd_phys = pci_map_single(priv->pci_dev,
-                                    out_cmd, sizeof(struct iwl_cmd),
+                                    &out_cmd->hdr, len,
                                     PCI_DMA_BIDIRECTIONAL);
         pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
-        pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
+        pci_unmap_len_set(&out_cmd->meta, len, len);
         /* Add buffer containing Tx command and MAC(!) header to TFD's
          * first entry */
-        txcmd_phys += offsetof(struct iwl_cmd, hdr);
         priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                    txcmd_phys, len, 1, 0);
 
-        if (info->control.hw_key)
-                iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+        if (!ieee80211_has_morefrags(hdr->frame_control)) {
+                txq->need_update = 1;
+                if (qc)
+                        priv->stations[sta_id].tid[tid].seq_number = seq_number;
+        } else {
+                wait_write_ptr = 1;
+                txq->need_update = 0;
+        }
 
         /* Set up TFD's 2nd entry to point directly to remainder of skb,
          * if any (802.11 null frames have no payload). */
@@ -846,35 +871,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                                                            0, 0);
         }
 
-        /* Tell NIC about any 2-byte padding after MAC header */
-        if (len_org)
-                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
-        /* Total # bytes to be transmitted */
-        len = (u16)skb->len;
-        tx_cmd->len = cpu_to_le16(len);
-        /* TODO need this for burst mode later on */
-        iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
-
-        /* set is_hcca to 0; it probably will never be implemented */
-        iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
-
-        iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
-
         scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                         offsetof(struct iwl_tx_cmd, scratch);
+
+        len = sizeof(struct iwl_tx_cmd) +
+                sizeof(struct iwl_cmd_header) + hdr_len;
+        /* take back ownership of DMA buffer to enable update */
+        pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+                                    len, PCI_DMA_BIDIRECTIONAL);
         tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
         tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
-        if (!ieee80211_has_morefrags(hdr->frame_control)) {
-                txq->need_update = 1;
-                if (qc)
-                        priv->stations[sta_id].tid[tid].seq_number = seq_number;
-        } else {
-                wait_write_ptr = 1;
-                txq->need_update = 0;
-        }
-
         IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
                      le16_to_cpu(out_cmd->hdr.sequence));
         IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
@@ -882,7 +889,11 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
         /* Set up entry for this TFD in Tx byte-count array */
-        priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
+        priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+                                                     le16_to_cpu(tx_cmd->len));
+
+        pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+                                       len, PCI_DMA_BIDIRECTIONAL);
 
         /* Tell device the write index *just past* this latest filled TFD */
         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -970,18 +981,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                         INDEX_TO_SEQ(q->write_ptr));
         if (out_cmd->meta.flags & CMD_SIZE_HUGE)
                 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
-        len = (idx == TFD_CMD_SLOTS) ?
-                        IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
+        len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
+        len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
 
-        phys_addr = pci_map_single(priv->pci_dev, out_cmd,
-                                   len, PCI_DMA_BIDIRECTIONAL);
-        pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
-        pci_unmap_len_set(&out_cmd->meta, len, len);
-        phys_addr += offsetof(struct iwl_cmd, hdr);
-
-        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                   phys_addr, fix_size, 1,
-                                                   U32_PAD(cmd->len));
 
 #ifdef CONFIG_IWLWIFI_DEBUG
         switch (out_cmd->hdr.cmd) {
@@ -1009,6 +1011,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
         /* Set up entry in queue's byte count circular buffer */
         priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
 
+        phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+                                   fix_size, PCI_DMA_BIDIRECTIONAL);
+        pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
+        pci_unmap_len_set(&out_cmd->meta, len, fix_size);
+
+        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+                                                   phys_addr, fix_size, 1,
+                                                   U32_PAD(cmd->len));
+
         /* Increment and update queue's write index */
         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
         ret = iwl_txq_update_write_ptr(priv, txq);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index acefc3721267..617c4235d971 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -972,7 +972,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         dma_addr_t phys_addr;
         dma_addr_t txcmd_phys;
         int txq_id = skb_get_queue_mapping(skb);
-        u16 len, idx, len_org, hdr_len;
+        u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */
         u8 id;
         u8 unicast;
         u8 sta_id;
@@ -1074,6 +1074,40 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         /* Copy MAC header from skb into command buffer */
         memcpy(tx->hdr, hdr, hdr_len);
 
+
+        if (info->control.hw_key)
+                iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
+
+        /* TODO need this for burst mode later on */
+        iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
+
+        /* set is_hcca to 0; it probably will never be implemented */
+        iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
+
+        /* Total # bytes to be transmitted */
+        len = (u16)skb->len;
+        tx->len = cpu_to_le16(len);
+
+
+        tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+        tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+        if (!ieee80211_has_morefrags(hdr->frame_control)) {
+                txq->need_update = 1;
+                if (qc)
+                        priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
+        } else {
+                wait_write_ptr = 1;
+                txq->need_update = 0;
+        }
+
+        IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+                     le16_to_cpu(out_cmd->hdr.sequence));
+        IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
+        iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
+        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
+                           ieee80211_hdrlen(fc));
+
         /*
          * Use the first empty entry in this queue's command buffer array
          * to contain the Tx command and MAC header concatenated together
@@ -1096,22 +1130,18 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
         /* Physical address of this Tx command's header (not MAC header!),
          * within command buffer array. */
-        txcmd_phys = pci_map_single(priv->pci_dev,
-                                    out_cmd, sizeof(struct iwl_cmd),
-                                    PCI_DMA_TODEVICE);
+        txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+                                    len, PCI_DMA_TODEVICE);
+        /* we do not map meta data ... so we can safely access address to
+         * provide to unmap command*/
         pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
-        pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
-        /* Add buffer containing Tx command and MAC(!) header to TFD's
-         * first entry */
-        txcmd_phys += offsetof(struct iwl_cmd, hdr);
+        pci_unmap_len_set(&out_cmd->meta, len, len);
 
         /* Add buffer containing Tx command and MAC(!) header to TFD's
          * first entry */
         priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                    txcmd_phys, len, 1, 0);
 
-        if (info->control.hw_key)
-                iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
 
         /* Set up TFD's 2nd entry to point directly to remainder of skb,
          * if any (802.11 null frames have no payload). */
@@ -1124,34 +1154,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                                                            0, U32_PAD(len));
         }
 
-        /* Total # bytes to be transmitted */
-        len = (u16)skb->len;
-        tx->len = cpu_to_le16(len);
-
-        /* TODO need this for burst mode later on */
-        iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
-        /* set is_hcca to 0; it probably will never be implemented */
-        iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
-        tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-        tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
-        if (!ieee80211_has_morefrags(hdr->frame_control)) {
-                txq->need_update = 1;
-                if (qc)
-                        priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
-        } else {
-                wait_write_ptr = 1;
-                txq->need_update = 0;
-        }
-
-        IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
-                     le16_to_cpu(out_cmd->hdr.sequence));
-        IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
-        iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
-        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
-                           ieee80211_hdrlen(fc));
 
         /* Tell device the write index *just past* this latest filled TFD */
         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -1663,6 +1665,37 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
         spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
+void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+        unsigned long flags;
+        int i;
+        spin_lock_irqsave(&rxq->lock, flags);
+        INIT_LIST_HEAD(&rxq->rx_free);
+        INIT_LIST_HEAD(&rxq->rx_used);
+        /* Fill the rx_used queue with _all_ of the Rx buffers */
+        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+                /* In the reset function, these buffers may have been allocated
+                 * to an SKB, so we need to unmap and free potential storage */
+                if (rxq->pool[i].skb != NULL) {
+                        pci_unmap_single(priv->pci_dev,
+                                         rxq->pool[i].real_dma_addr,
+                                         priv->hw_params.rx_buf_size,
+                                         PCI_DMA_FROMDEVICE);
+                        priv->alloc_rxb_skb--;
+                        dev_kfree_skb(rxq->pool[i].skb);
+                        rxq->pool[i].skb = NULL;
+                }
+                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+        }
+
+        /* Set us so that we have processed and used all buffers, but have
+         * not restocked the Rx queue with fresh buffers */
+        rxq->read = rxq->write = 0;
+        rxq->free_count = 0;
+        spin_unlock_irqrestore(&rxq->lock, flags);
+}
+EXPORT_SYMBOL(iwl3945_rx_queue_reset);
+
 /*
  * this should be called while priv->lock is locked
  */
@@ -1687,6 +1720,34 @@ void iwl3945_rx_replenish(void *data)
         spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non NULL it is unmapped and freed
+ */
+static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+        int i;
+        for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+                if (rxq->pool[i].skb != NULL) {
+                        pci_unmap_single(priv->pci_dev,
+                                         rxq->pool[i].real_dma_addr,
+                                         priv->hw_params.rx_buf_size,
+                                         PCI_DMA_FROMDEVICE);
+                        dev_kfree_skb(rxq->pool[i].skb);
+                }
+        }
+
+        pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                            rxq->dma_addr);
+        pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
+                            rxq->rb_stts, rxq->rb_stts_dma);
+        rxq->bd = NULL;
+        rxq->rb_stts = NULL;
+}
+EXPORT_SYMBOL(iwl3945_rx_queue_free);
+
+
 /* Convert linear signal-to-noise ratio into dB */
 static u8 ratio2dB[100] = {
 /*       0   1   2   3   4   5   6   7   8   9 */
@@ -1804,9 +1865,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
                 rxq->queue[i] = NULL;
 
-                pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
-                                            priv->hw_params.rx_buf_size,
-                                            PCI_DMA_FROMDEVICE);
+                pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
+                                 priv->hw_params.rx_buf_size,
+                                 PCI_DMA_FROMDEVICE);
                 pkt = (struct iwl_rx_packet *)rxb->skb->data;
 
                 /* Reclaim a command buffer only if this packet is a response
@@ -1854,9 +1915,6 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
                         rxb->skb = NULL;
                 }
 
-                pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-                                 priv->hw_params.rx_buf_size,
-                                 PCI_DMA_FROMDEVICE);
                 spin_lock_irqsave(&rxq->lock, flags);
                 list_add_tail(&rxb->list, &priv->rxq.rx_used);
                 spin_unlock_irqrestore(&rxq->lock, flags);
@@ -5203,7 +5261,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
         iwl3945_dealloc_ucode_pci(priv);
 
         if (priv->rxq.bd)
-                iwl_rx_queue_free(priv, &priv->rxq);
+                iwl3945_rx_queue_free(priv, &priv->rxq);
         iwl3945_hw_txq_ctx_free(priv);
 
         iwl3945_unset_hw_params(priv);