author		Tomas Winkler <tomas.winkler@intel.com>	2008-10-14 15:32:48 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-10-31 19:00:30 -0400
commit		499b1883038a7db2dcf8b64229f8533ce2c8f0fc (patch)
tree		34e5682d4e7e682412c1ffb5c2ff06a991c7ecbd /drivers
parent		76eff18bdc5feaa53f1be33709b67df02f1d55e9 (diff)
iwlwifi: fix TX cmd dma unmapping
This patch:

1. fixes command DMA unmapping; this might be visible only on platforms
   where DMA unmapping is not a no-op, such as PPC64 (not tested)
2. attaches correctly the high-memory part of the host command buffer
3. changes the structure of the TFD TB: instead of describing a transmit
   buffer (TB) tuple, it describes a single TB, which makes the code more
   readable at the price of one unaligned access
4. eliminates the use of IWL_GET/SET_BITS for TFD handling
5. renames the TFD structures to match the HW spec
6. reduces iwl_tx_info size by reserving the first TB for the host command

This patch should not have any visible effect on x86 32.

This patch is a rework of "iwlwifi: fix DMA code and bugs" from
Johannes Berg <johannes@sipsolutions.net>.

Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
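For reference, the TB layout introduced in item 3 stores a 36-bit DMA address plus a 12-bit length in six bytes. Below is a minimal standalone model of that packing (illustrative names; plain C99 integer types stand in for the kernel's dma_addr_t, __le32 and __le16, and the endianness/unaligned-access handling of the real helpers is omitted):

#include <assert.h>
#include <stdint.h>

struct tb_model {
	uint32_t lo;       /* DMA address bits [31:0] */
	uint16_t hi_n_len; /* bits 0-3: address [35:32]; bits 4-15: length */
};

static void tb_set(struct tb_model *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_addr(const struct tb_model *tb)
{
	return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_len(const struct tb_model *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_model tb;

	/* A 36-bit address and a 2 KB length round-trip intact. */
	tb_set(&tb, 0xA12345678ULL, 0x800);
	assert(tb_addr(&tb) == 0xA12345678ULL);
	assert(tb_len(&tb) == 0x800);
	return 0;
}

The actual helpers added to iwl-tx.c below implement the same scheme with put_unaligned_le32() and cpu_to_le16().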
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-4965-hw.h	100
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-5000.c		2
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-dev.h		9
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-helpers.h	5
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c		231
5 files changed, 163 insertions, 184 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index f4793a609443..b66dd093084d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -822,94 +822,62 @@ enum {
 #define IWL49_NUM_QUEUES 16
 #define IWL49_NUM_AMPDU_QUEUES	8
 
+#define IWL_TX_DMA_MASK        (DMA_BIT_MASK(36) & ~0x3)
+#define IWL_NUM_OF_TBS		20
+
+static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
+{
+	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
 /**
- * struct iwl_tfd_frame_data
+ * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
  *
- * Describes up to 2 buffers containing (contiguous) portions of a Tx frame.
- * Each buffer must be on dword boundary.
- * Up to 10 iwl_tfd_frame_data structures, describing up to 20 buffers,
- * may be filled within a TFD (iwl_tfd_frame).
+ * This structure contains dma address and length of transmission address
  *
- * Bit fields in tb1_addr:
- *   31- 0: Tx buffer 1 address bits [31:0]
- *
- * Bit fields in val1:
- *   31-16: Tx buffer 2 address bits [15:0]
- *   15- 4: Tx buffer 1 length (bytes)
- *    3- 0: Tx buffer 1 address bits [32:32]
- *
- * Bit fields in val2:
- *   31-20: Tx buffer 2 length (bytes)
- *   19- 0: Tx buffer 2 address bits [35:16]
+ * @lo: low [31:0] portion of the dma address of TX buffer
+ *	every even is unaligned on 16 bit boundary
+ * @hi_n_len 0-3 [35:32] portion of dma
+ *	     4-16 length of the tx buffer
  */
-struct iwl_tfd_frame_data {
-	__le32 tb1_addr;
-
-	__le32 val1;
-	/* __le32 ptb1_32_35:4; */
-#define IWL_tb1_addr_hi_POS 0
-#define IWL_tb1_addr_hi_LEN 4
-#define IWL_tb1_addr_hi_SYM val1
-	/* __le32 tb_len1:12; */
-#define IWL_tb1_len_POS 4
-#define IWL_tb1_len_LEN 12
-#define IWL_tb1_len_SYM val1
-	/* __le32 ptb2_0_15:16; */
-#define IWL_tb2_addr_lo16_POS 16
-#define IWL_tb2_addr_lo16_LEN 16
-#define IWL_tb2_addr_lo16_SYM val1
-
-	__le32 val2;
-	/* __le32 ptb2_16_35:20; */
-#define IWL_tb2_addr_hi20_POS 0
-#define IWL_tb2_addr_hi20_LEN 20
-#define IWL_tb2_addr_hi20_SYM val2
-	/* __le32 tb_len2:12; */
-#define IWL_tb2_len_POS 20
-#define IWL_tb2_len_LEN 12
-#define IWL_tb2_len_SYM val2
-} __attribute__ ((packed));
-
+struct iwl_tfd_tb {
+	__le32 lo;
+	__le16 hi_n_len;
+} __attribute__((packed));
 
 /**
- * struct iwl_tfd_frame
+ * struct iwl_tfd
  *
  * Transmit Frame Descriptor (TFD)
  *
- * 4965 supports up to 16 Tx queues resident in host DRAM.
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-5 number of active tbs
+ *	     6-7 padding (not used)
+ * @ tbs[20]	transmit frame buffer descriptors
+ * @ __pad	padding
+ *
  * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
  * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes for 4965.
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
  *
  * Driver must indicate the physical address of the base of each
- * circular buffer via the 4965's FH_MEM_CBBC_QUEUE registers.
+ * circular buffer via the FH_MEM_CBBC_QUEUE registers.
  *
  * Each TFD contains pointer/size information for up to 20 data buffers
  * in host DRAM.  These buffers collectively contain the (one) frame described
  * by the TFD.  Each buffer must be a single contiguous block of memory within
  * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
- * of (4K - 4).  The 4965 concatenates all of a TFD's buffers into a single
+ * of (4K - 4).  The concatenates all of a TFD's buffers into a single
  * Tx frame, up to 8 KBytes in size.
  *
- * Bit fields in the control dword (val0):
- *   31-30: # dwords (0-3) of padding required at end of frame for 16-byte bound
- *      29: reserved
- *   28-24: # Transmit Buffer Descriptors in TFD
- *   23- 0: reserved
- *
  * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ *
+ * Bit fields in the control dword (val0):
  */
-struct iwl_tfd_frame {
-	__le32 val0;
-	/* __le32 rsvd1:24; */
-	/* __le32 num_tbs:5; */
-#define IWL_num_tbs_POS 24
-#define IWL_num_tbs_LEN 5
-#define IWL_num_tbs_SYM val0
-	/* __le32 rsvd2:1; */
-	/* __le32 padding:2; */
-	struct iwl_tfd_frame_data pa[10];
-	__le32 reserved;
+struct iwl_tfd {
+	u8 __reserved1[3];
+	u8 num_tbs;
+	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
+	__le32 __pad;
 } __attribute__ ((packed));
 
 
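The sizes in the new struct iwl_tfd work out to 3 + 1 + 20 * 6 + 4 = 128 bytes, exactly the 128 bytes-per-TFD the comment above requires (256 TFDs x 128 bytes = 32 KBytes per queue). A standalone sketch of that arithmetic, using model types rather than the driver's own:

#include <stdint.h>

#define NUM_OF_TBS 20	/* IWL_NUM_OF_TBS in the patch */

struct tb {
	uint32_t lo;
	uint16_t hi_n_len;
} __attribute__((packed));	/* 6 bytes */

struct tfd {
	uint8_t  reserved1[3];
	uint8_t  num_tbs;
	struct tb tbs[NUM_OF_TBS];
	uint32_t pad;
} __attribute__((packed));

/* 3 + 1 + 20 * 6 + 4 = 128 */
_Static_assert(sizeof(struct tfd) == 128, "TFD must stay 128 bytes");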
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 0fc6757cb901..ad566ef233b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -536,7 +536,7 @@ static int iwl5000_load_section(struct iwl_priv *priv,
 
 	iwl_write_direct32(priv,
 		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
-		(iwl_get_dma_hi_address(phy_addr)
+		(iwl_get_dma_hi_addr(phy_addr)
 			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
 
 	iwl_write_direct32(priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 0f57bf40e8d7..1f42e90c5f30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -112,11 +112,9 @@ struct iwl_queue {
 	 * space less than this */
 } __attribute__ ((packed));
 
-#define MAX_NUM_OF_TBS          (20)
-
 /* One for each TFD */
 struct iwl_tx_info {
-	struct sk_buff *skb[MAX_NUM_OF_TBS];
+	struct sk_buff *skb[IWL_NUM_OF_TBS - 1];
 };
 
 /**
@@ -134,7 +132,7 @@ struct iwl_tx_info {
  */
 struct iwl_tx_queue {
 	struct iwl_queue q;
-	struct iwl_tfd_frame *bd;
+	struct iwl_tfd *tfds;
 	struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
 	struct iwl_tx_info *txb;
 	int need_update;
@@ -252,7 +250,8 @@ struct iwl_cmd_meta {
 	/* The CMD_SIZE_HUGE flag bit indicates that the command
 	 * structure is stored at the end of the shared queue memory. */
 	u32 flags;
-
+	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DECLARE_PCI_UNMAP_LEN(len)
 } __attribute__ ((packed));
 
 #define IWL_CMD_MAX_PAYLOAD 320
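The two DECLARE_PCI_UNMAP_* fields are what let the free path hand pci_unmap_single() exactly the cookie and length that pci_map_single() returned. On platforms where DMA unmapping is a no-op the macros expand to nothing, which is why the patch should have no visible effect on x86 32. Roughly, the arch headers of that era define them along these lines (a paraphrased sketch from memory, not copied from any particular tree):

/* No-op variant (e.g. x86 32 without an IOMMU): the struct gains no
 * fields and the set/get calls vanish at compile time. */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)

/* Real variant (e.g. PPC64): the mapping cookie and length are stored
 * in the struct so the later unmap sees the original values. */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))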
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 41eed6793328..029d19c7075c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -159,11 +159,6 @@ static inline unsigned long elapsed_jiffies(unsigned long start,
 	return end + (MAX_JIFFY_OFFSET - start) + 1;
 }
 
-static inline u8 iwl_get_dma_hi_address(dma_addr_t addr)
-{
-	return sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0;
-}
-
 /**
  * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
  * @index -- current index
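Apart from the rename, the helper that replaces this one (added to iwl-4965-hw.h above) masks the result with 0xF, so only address bits [35:32] can ever reach the hardware even if a bogus address were passed in. A standalone comparison, with uint64_t standing in for dma_addr_t and a contrived above-2^36 address that normal operation never produces:

#include <assert.h>
#include <stdint.h>

static uint8_t old_hi(uint64_t a)
{
	return (uint8_t)((a >> 16) >> 16);       /* removed helper */
}

static uint8_t new_hi(uint64_t a)
{
	return (uint8_t)(((a >> 16) >> 16) & 0xF); /* replacement  */
}

int main(void)
{
	/* The old helper leaked bits above the 36-bit DMA space. */
	assert(old_hi(0x1A00000000ULL) == 0x1A);
	assert(new_hi(0x1A00000000ULL) == 0x0A);
	return 0;
}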
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b047fd156c0b..c3656c46f55f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -56,92 +56,112 @@ static const u16 default_tid_to_tx_fifo[] = {
 	IWL_TX_FIFO_AC3
 };
 
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+	return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				  dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
 
 /**
  * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
  *
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
-	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
-	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
+	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
+	struct iwl_tfd *tfd;
 	struct pci_dev *dev = priv->pci_dev;
+	int index = txq->q.read_ptr;
 	int i;
-	int counter = 0;
-	int index, is_odd;
+	int num_tbs;
+
+	tfd = &tfd_tmp[index];
 
 	/* Sanity check on number of chunks */
-	counter = IWL_GET_BITS(*bd, num_tbs);
-	if (counter > MAX_NUM_OF_TBS) {
-		IWL_ERROR("Too many chunks: %i\n", counter);
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERROR("Too many chunks: %i\n", num_tbs);
 		/* @todo issue fatal error, it is quite serious situation */
-		return 0;
+		return;
 	}
 
-	/* Unmap chunks, if any.
-	 * TFD info for odd chunks is different format than for even chunks. */
-	for (i = 0; i < counter; i++) {
-		index = i / 2;
-		is_odd = i & 0x1;
-
-		if (is_odd)
-			pci_unmap_single(
-				dev,
-				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
-				(IWL_GET_BITS(bd->pa[index],
-					      tb2_addr_hi20) << 16),
-				IWL_GET_BITS(bd->pa[index], tb2_len),
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev,
+			pci_unmap_addr(&txq->cmd[index]->meta, mapping),
+			pci_unmap_len(&txq->cmd[index]->meta, len),
 			PCI_DMA_TODEVICE);
 
-		else if (i > 0)
-			pci_unmap_single(dev,
-					 le32_to_cpu(bd->pa[index].tb1_addr),
-					 IWL_GET_BITS(bd->pa[index], tb1_len),
-					 PCI_DMA_TODEVICE);
-
-		/* Free SKB, if any, for this chunk */
-		if (txq->txb[txq->q.read_ptr].skb[i]) {
-			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++) {
+		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
 
-			dev_kfree_skb(skb);
-			txq->txb[txq->q.read_ptr].skb[i] = NULL;
+		if (txq->txb) {
+			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
+			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
 		}
 	}
-	return 0;
 }
 
-static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
-					dma_addr_t addr, u16 len)
+static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+					struct iwl_tfd *tfd,
+					dma_addr_t addr, u16 len)
 {
-	int index, is_odd;
-	struct iwl_tfd_frame *tfd = ptr;
-	u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
+
+	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
 
 	/* Each TFD can point to a maximum 20 Tx buffers */
-	if (num_tbs >= MAX_NUM_OF_TBS) {
+	if (num_tbs >= IWL_NUM_OF_TBS) {
 		IWL_ERROR("Error can not send more than %d chunks\n",
-			  MAX_NUM_OF_TBS);
+			  IWL_NUM_OF_TBS);
 		return -EINVAL;
 	}
 
-	index = num_tbs / 2;
-	is_odd = num_tbs & 0x1;
-
-	if (!is_odd) {
-		tfd->pa[index].tb1_addr = cpu_to_le32(addr);
-		IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
-			     iwl_get_dma_hi_address(addr));
-		IWL_SET_BITS(tfd->pa[index], tb1_len, len);
-	} else {
-		IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
-			     (u32) (addr & 0xffff));
-		IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
-		IWL_SET_BITS(tfd->pa[index], tb2_len, len);
-	}
+	BUG_ON(addr & ~DMA_BIT_MASK(36));
+	if (unlikely(addr & ~IWL_TX_DMA_MASK))
+		IWL_ERROR("Unaligned address = %llx\n",
+			  (unsigned long long)addr);
 
-	IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
+	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
 
 	return 0;
 }
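A note on the accessors above: each packed iwl_tfd_tb is 6 bytes, so the 32-bit lo field of every second descriptor starts on a 16-bit rather than a 32-bit boundary (offsets 4, 10, 16, 22, ...). That is the single unaligned access the commit message pays for, and why the helpers go through get_unaligned_le32()/put_unaligned_le32(). A standalone offset check with the same model types as the sketch after the header diff above:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct tb {
	uint32_t lo;
	uint16_t hi_n_len;
} __attribute__((packed));

struct tfd {
	uint8_t  reserved1[3];
	uint8_t  num_tbs;
	struct tb tbs[20];
	uint32_t pad;
} __attribute__((packed));

int main(void)
{
	/* tbs[] starts at offset 4 and each entry is 6 bytes, so the
	 * 4-byte 'lo' fields sit at offsets 4, 10, 16, 22, ... */
	assert(offsetof(struct tfd, tbs[0]) % 4 == 0); /* 32-bit aligned */
	assert(offsetof(struct tfd, tbs[1]) % 4 != 0); /* only 16-bit    */
	return 0;
}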
@@ -224,8 +244,8 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
-				    txq->q.n_bd, txq->bd, txq->q.dma_addr);
+		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
+				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
 	/* De-alloc array of per-TFD driver data */
 	kfree(txq->txb);
@@ -263,8 +283,8 @@ static void iwl_cmd_queue_free(struct iwl_priv *priv)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
-				    txq->q.n_bd, txq->bd, txq->q.dma_addr);
+		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
+				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
@@ -364,13 +384,13 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
-	txq->bd = pci_alloc_consistent(dev,
-			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
+	txq->tfds = pci_alloc_consistent(dev,
+			sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
 			&txq->q.dma_addr);
 
-	if (!txq->bd) {
+	if (!txq->tfds) {
 		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
-			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
+			  sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
 		goto error;
 	}
 	txq->q.id = id;
@@ -394,15 +414,15 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 				struct iwl_tx_queue *txq)
 {
-	int rc;
+	int ret;
 	unsigned long flags;
 	int txq_id = txq->q.id;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl_grab_nic_access(priv);
-	if (rc) {
+	ret = iwl_grab_nic_access(priv);
+	if (ret) {
 		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
+		return ret;
 	}
 
 	/* Circular buffer (TFD queue in DRAM) physical base address */
@@ -410,10 +430,10 @@ static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 			     txq->q.dma_addr >> 8);
 
 	/* Enable DMA channel, using same id as for TFD queue */
-	iwl_write_direct32(
-		priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
+	iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
+
 	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -788,7 +808,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_tfd_frame *tfd;
+	struct iwl_tfd *tfd;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
 	struct iwl_cmd *out_cmd;
@@ -882,7 +902,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Set up first empty TFD within this queue's circular TFD buffer */
-	tfd = &txq->bd[q->write_ptr];
+	tfd = &txq->tfds[q->write_ptr];
 	memset(tfd, 0, sizeof(*tfd));
 	idx = get_cmd_index(q, q->write_ptr, 0);
 
@@ -931,12 +951,14 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
-	txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
-			sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
-	txcmd_phys += offsetof(struct iwl_cmd, hdr);
-
+	txcmd_phys = pci_map_single(priv->pci_dev,
+				    out_cmd, sizeof(struct iwl_cmd),
+				    PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
+	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
+	txcmd_phys += offsetof(struct iwl_cmd, hdr);
 	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
 
 	if (info->control.hw_key)
@@ -969,7 +991,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
 			offsetof(struct iwl_tx_cmd, scratch);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -1030,7 +1052,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
-	struct iwl_tfd_frame *tfd;
+	struct iwl_tfd *tfd;
 	struct iwl_cmd *out_cmd;
 	dma_addr_t phys_addr;
 	unsigned long flags;
@@ -1059,7 +1081,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
-	tfd = &txq->bd[q->write_ptr];
+	tfd = &txq->tfds[q->write_ptr];
 	memset(tfd, 0, sizeof(*tfd));
 
 
@@ -1080,9 +1102,13 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
 	len = (idx == TFD_CMD_SLOTS) ?
 			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
-	phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
-				   PCI_DMA_TODEVICE);
+
+	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
+				   len, PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
+	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
+
 	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -1132,8 +1158,9 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 		return 0;
 	}
 
-	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+	for (index = iwl_queue_inc_wrap(index, q->n_bd);
+	     q->read_ptr != index;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
 		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
@@ -1157,44 +1184,34 @@ EXPORT_SYMBOL(iwl_tx_queue_reclaim);
  * need to be reclaimed. As result, some free space forms. If there is
  * enough free space (> low mark), wake the stack that feeds us.
  */
-static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
+				   int idx, int cmd_idx)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
-	struct iwl_tfd_frame *bd = &txq->bd[index];
-	dma_addr_t dma_addr;
-	int is_odd, buf_len;
 	int nfreed = 0;
 
-	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
 		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
 			  "is out of range [0-%d] %d %d.\n", txq_id,
-			  index, q->n_bd, q->write_ptr, q->read_ptr);
+			  idx, q->n_bd, q->write_ptr, q->read_ptr);
 		return;
 	}
 
-	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+	pci_unmap_single(priv->pci_dev,
+		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
+		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
+		PCI_DMA_TODEVICE);
+
+	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
-		if (nfreed > 1) {
-			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
+		if (nfreed++ > 0) {
+			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx,
 				  q->write_ptr, q->read_ptr);
 			queue_work(priv->workqueue, &priv->restart);
 		}
-		is_odd = (index/2) & 0x1;
-		if (is_odd) {
-			dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
-					(IWL_GET_BITS(bd->pa[index],
-						      tb2_addr_hi20) << 16);
-			buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
-		} else {
-			dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
-			buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
-		}
 
-		pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
-				 PCI_DMA_TODEVICE);
-		nfreed++;
 	}
 }
 
@@ -1234,7 +1251,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	    !cmd->meta.u.callback(priv, cmd, rxb->skb))
 		rxb->skb = NULL;
 
-	iwl_hcmd_queue_reclaim(priv, txq_id, index);
+	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
 	if (!(cmd->meta.flags & CMD_ASYNC)) {
 		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);