Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/tx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/tx.c | 1225
1 file changed, 975 insertions(+), 250 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index db3efbb84d9..6c5b867c353 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,170 @@ | |||
42 | #define IWL_TX_CRC_SIZE 4 | 42 | #define IWL_TX_CRC_SIZE 4 |
43 | #define IWL_TX_DELIMITER_SIZE 4 | 43 | #define IWL_TX_DELIMITER_SIZE 4 |
44 | 44 | ||
45 | /** | 45 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** |
46 | * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | 46 | * DMA services |
47 | * | ||
48 | * Theory of operation | ||
49 | * | ||
50 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer | ||
51 | * of buffer descriptors, each of which points to one or more data buffers for | ||
52 | * the device to read from or fill. Driver and device exchange status of each | ||
53 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty | ||
54 | * entries in each circular buffer, to protect against confusing empty and full | ||
55 | * queue states. | ||
56 | * | ||
57 | * The device reads or writes the data in the queues via the device's several | ||
58 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | ||
59 | * | ||
60 | * For the Tx queue, there are low mark and high mark limits. If, after | ||
61 | * queuing a packet for Tx, free space becomes < low mark, the Tx queue is | ||
62 | * stopped. When reclaiming packets (on the 'tx done' IRQ), if free space | ||
63 | * becomes > high mark, the Tx queue is resumed. | ||
64 | * | ||
65 | ***************************************************/ | ||
66 | static int iwl_queue_space(const struct iwl_queue *q) | ||
67 | { | ||
68 | int s = q->read_ptr - q->write_ptr; | ||
69 | |||
70 | if (q->read_ptr > q->write_ptr) | ||
71 | s -= q->n_bd; | ||
72 | |||
73 | if (s <= 0) | ||
74 | s += q->n_window; | ||
75 | /* keep some reserve to not confuse empty and full situations */ | ||
76 | s -= 2; | ||
77 | if (s < 0) | ||
78 | s = 0; | ||
79 | return s; | ||
80 | } | ||
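A quick standalone check of the free-space rule above (illustrative values, not part of the commit): with n_bd = 256 ring slots but only n_window = 64 usable entries, an empty queue (read == write) reports n_window - 2 = 62 free slots, since two entries stay reserved so that "empty" and "full" never look the same.

    /* Self-contained sketch of the iwl_queue_space() math, illustration only. */
    #include <stdio.h>

    static int queue_space(int read_ptr, int write_ptr, int n_bd, int n_window)
    {
        int s = read_ptr - write_ptr;

        if (read_ptr > write_ptr)
            s -= n_bd;          /* write pointer has wrapped around the ring */
        if (s <= 0)
            s += n_window;      /* only n_window slots are actually usable */
        s -= 2;                 /* keep the reserve, as in iwl_queue_space() */
        return s < 0 ? 0 : s;
    }

    int main(void)
    {
        printf("%d\n", queue_space(0, 0, 256, 64));    /* empty        -> 62 */
        printf("%d\n", queue_space(10, 40, 256, 64));  /* 30 in flight -> 32 */
        printf("%d\n", queue_space(250, 4, 256, 64));  /* wrapped, 10  -> 52 */
        return 0;
    }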
81 | |||
82 | /* | ||
83 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | ||
84 | */ | ||
85 | static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) | ||
86 | { | ||
87 | q->n_bd = count; | ||
88 | q->n_window = slots_num; | ||
89 | q->id = id; | ||
90 | |||
91 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | ||
92 | * and iwl_queue_dec_wrap are broken. */ | ||
93 | if (WARN_ON(!is_power_of_2(count))) | ||
94 | return -EINVAL; | ||
95 | |||
96 | /* slots_num must be power-of-two size, otherwise | ||
97 | * get_cmd_index is broken. */ | ||
98 | if (WARN_ON(!is_power_of_2(slots_num))) | ||
99 | return -EINVAL; | ||
100 | |||
101 | q->low_mark = q->n_window / 4; | ||
102 | if (q->low_mark < 4) | ||
103 | q->low_mark = 4; | ||
104 | |||
105 | q->high_mark = q->n_window / 8; | ||
106 | if (q->high_mark < 2) | ||
107 | q->high_mark = 2; | ||
108 | |||
109 | q->write_ptr = 0; | ||
110 | q->read_ptr = 0; | ||
111 | |||
112 | return 0; | ||
113 | } | ||
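As a quick arithmetic check of the watermarks (slot counts assumed for illustration, not taken from this hunk): a queue initialized with slots_num = 64 gets low_mark = 64/4 = 16 and high_mark = 64/8 = 8, while a small queue with slots_num = 8 hits both floors and ends up with low_mark = 4 and high_mark = 2.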
114 | |||
115 | static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, | ||
116 | struct iwl_dma_ptr *ptr, size_t size) | ||
117 | { | ||
118 | if (WARN_ON(ptr->addr)) | ||
119 | return -EINVAL; | ||
120 | |||
121 | ptr->addr = dma_alloc_coherent(trans->dev, size, | ||
122 | &ptr->dma, GFP_KERNEL); | ||
123 | if (!ptr->addr) | ||
124 | return -ENOMEM; | ||
125 | ptr->size = size; | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, | ||
130 | struct iwl_dma_ptr *ptr) | ||
131 | { | ||
132 | if (unlikely(!ptr->addr)) | ||
133 | return; | ||
134 | |||
135 | dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); | ||
136 | memset(ptr, 0, sizeof(*ptr)); | ||
137 | } | ||
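A minimal usage sketch for this helper pair (hypothetical caller, not in the patch): the zeroing before allocation matters because iwl_pcie_alloc_dma_ptr() refuses, via WARN_ON(ptr->addr), to allocate over a live buffer, and iwl_pcie_free_dma_ptr() re-zeroes the descriptor so the pair can be reused.

    /* Hypothetical caller, illustration only. */
    struct iwl_dma_ptr kw;

    memset(&kw, 0, sizeof(kw));     /* addr == NULL: allocation is allowed */
    if (iwl_pcie_alloc_dma_ptr(trans, &kw, IWL_KW_SIZE))
        return -ENOMEM;
    /* ... hand kw.dma to the device ... */
    iwl_pcie_free_dma_ptr(trans, &kw);  /* frees and re-zeroes kw */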
138 | |||
139 | static void iwl_pcie_txq_stuck_timer(unsigned long data) | ||
140 | { | ||
141 | struct iwl_txq *txq = (void *)data; | ||
142 | struct iwl_queue *q = &txq->q; | ||
143 | struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; | ||
144 | struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); | ||
145 | u32 scd_sram_addr = trans_pcie->scd_base_addr + | ||
146 | SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); | ||
147 | u8 buf[16]; | ||
148 | int i; | ||
149 | |||
150 | spin_lock(&txq->lock); | ||
151 | /* check if triggered erroneously */ | ||
152 | if (txq->q.read_ptr == txq->q.write_ptr) { | ||
153 | spin_unlock(&txq->lock); | ||
154 | return; | ||
155 | } | ||
156 | spin_unlock(&txq->lock); | ||
157 | |||
158 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, | ||
159 | jiffies_to_msecs(trans_pcie->wd_timeout)); | ||
160 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", | ||
161 | txq->q.read_ptr, txq->q.write_ptr); | ||
162 | |||
163 | iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); | ||
164 | |||
165 | iwl_print_hex_error(trans, buf, sizeof(buf)); | ||
166 | |||
167 | for (i = 0; i < FH_TCSR_CHNL_NUM; i++) | ||
168 | IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, | ||
169 | iwl_read_direct32(trans, FH_TX_TRB_REG(i))); | ||
170 | |||
171 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { | ||
172 | u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); | ||
173 | u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; | ||
174 | bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); | ||
175 | u32 tbl_dw = | ||
176 | iwl_read_targ_mem(trans, | ||
177 | trans_pcie->scd_base_addr + | ||
178 | SCD_TRANS_TBL_OFFSET_QUEUE(i)); | ||
179 | |||
180 | if (i & 0x1) | ||
181 | tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; | ||
182 | else | ||
183 | tbl_dw = tbl_dw & 0x0000FFFF; | ||
184 | |||
185 | IWL_ERR(trans, | ||
186 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", | ||
187 | i, active ? "" : "in", fifo, tbl_dw, | ||
188 | iwl_read_prph(trans, | ||
189 | SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), | ||
190 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); | ||
191 | } | ||
192 | |||
193 | for (i = q->read_ptr; i != q->write_ptr; | ||
194 | i = iwl_queue_inc_wrap(i, q->n_bd)) { | ||
195 | struct iwl_tx_cmd *tx_cmd = | ||
196 | (struct iwl_tx_cmd *)txq->entries[i].cmd->payload; | ||
197 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, | ||
198 | get_unaligned_le32(&tx_cmd->scratch)); | ||
199 | } | ||
200 | |||
201 | iwl_op_mode_nic_error(trans->op_mode); | ||
202 | } | ||
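The tbl_dw masking in the loop above relies on the scheduler translation table packing two queues per 32-bit word: even-numbered queues in the low 16 bits, odd-numbered queues in the high 16. A one-liner sketch of that extraction (layout inferred from the code, helper name made up):

    /* Pick the 16-bit ra_tid entry for queue i out of a packed dword. */
    static u16 scd_ratid_of_queue(u32 tbl_dw, int i)
    {
        return (i & 0x1) ? (tbl_dw >> 16) & 0xFFFF : tbl_dw & 0xFFFF;
    }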
203 | |||
204 | /* | ||
205 | * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | ||
47 | */ | 206 | */ |
48 | void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | 207 | static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, |
49 | struct iwl_tx_queue *txq, | 208 | struct iwl_txq *txq, u16 byte_cnt) |
50 | u16 byte_cnt) | ||
51 | { | 209 | { |
52 | struct iwlagn_scd_bc_tbl *scd_bc_tbl; | 210 | struct iwlagn_scd_bc_tbl *scd_bc_tbl; |
53 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 211 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -88,10 +246,36 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | |||
88 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; | 246 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; |
89 | } | 247 | } |
90 | 248 | ||
91 | /** | 249 | static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, |
92 | * iwl_txq_update_write_ptr - Send new write index to hardware | 250 | struct iwl_txq *txq) |
251 | { | ||
252 | struct iwl_trans_pcie *trans_pcie = | ||
253 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
254 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
255 | int txq_id = txq->q.id; | ||
256 | int read_ptr = txq->q.read_ptr; | ||
257 | u8 sta_id = 0; | ||
258 | __le16 bc_ent; | ||
259 | struct iwl_tx_cmd *tx_cmd = | ||
260 | (void *)txq->entries[txq->q.read_ptr].cmd->payload; | ||
261 | |||
262 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | ||
263 | |||
264 | if (txq_id != trans_pcie->cmd_queue) | ||
265 | sta_id = tx_cmd->sta_id; | ||
266 | |||
267 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | ||
268 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
269 | |||
270 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
271 | scd_bc_tbl[txq_id]. | ||
272 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | ||
273 | } | ||
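Both byte-count helpers write entries with the same packing, as the invalidate path's 1 | (sta_id << 12) suggests: a count in the low 12 bits and the station id in the top four. A sketch of that layout (an assumed encoding; the update path's exact count computation is outside this hunk):

    /* Scheduler byte-count entry: [11:0] count, [15:12] sta_id. */
    static __le16 scd_bc_entry(u16 cnt, u8 sta_id)
    {
        return cpu_to_le16((cnt & 0xFFF) | ((sta_id & 0xF) << 12));
    }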
274 | |||
275 | /* | ||
276 | * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware | ||
93 | */ | 277 | */ |
94 | void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) | 278 | void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) |
95 | { | 279 | { |
96 | u32 reg = 0; | 280 | u32 reg = 0; |
97 | int txq_id = txq->q.id; | 281 | int txq_id = txq->q.id; |
@@ -137,7 +321,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) | |||
137 | txq->need_update = 0; | 321 | txq->need_update = 0; |
138 | } | 322 | } |
139 | 323 | ||
140 | static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) | 324 | static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) |
141 | { | 325 | { |
142 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | 326 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; |
143 | 327 | ||
@@ -149,15 +333,15 @@ static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) | |||
149 | return addr; | 333 | return addr; |
150 | } | 334 | } |
151 | 335 | ||
152 | static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) | 336 | static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) |
153 | { | 337 | { |
154 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | 338 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; |
155 | 339 | ||
156 | return le16_to_cpu(tb->hi_n_len) >> 4; | 340 | return le16_to_cpu(tb->hi_n_len) >> 4; |
157 | } | 341 | } |
158 | 342 | ||
159 | static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, | 343 | static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, |
160 | dma_addr_t addr, u16 len) | 344 | dma_addr_t addr, u16 len) |
161 | { | 345 | { |
162 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | 346 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; |
163 | u16 hi_n_len = len << 4; | 347 | u16 hi_n_len = len << 4; |
@@ -171,19 +355,20 @@ static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, | |||
171 | tfd->num_tbs = idx + 1; | 355 | tfd->num_tbs = idx + 1; |
172 | } | 356 | } |
173 | 357 | ||
174 | static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) | 358 | static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) |
175 | { | 359 | { |
176 | return tfd->num_tbs & 0x1f; | 360 | return tfd->num_tbs & 0x1f; |
177 | } | 361 | } |
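These accessors share a single 16-bit hi_n_len field per TB: the low nibble carries bits 35:32 of the DMA address (for 36-bit addressing) and the 12-bit length sits above it, hence the len << 4 in the setter and the >> 4 in the getter. A round-trip sketch of that encoding (layout inferred from the accessors shown, helper names made up):

    /* hi_n_len layout: [15:4] TB length, [3:0] DMA address bits 35:32. */
    static u16 tb_pack_hi_n_len(u64 addr, u16 len)
    {
        return (len << 4) | ((addr >> 32) & 0xF);
    }

    static u16 tb_len(u16 hi_n_len)     { return hi_n_len >> 4; }
    static u8  tb_addr_hi(u16 hi_n_len) { return hi_n_len & 0xF; }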
178 | 362 | ||
179 | static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | 363 | static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, |
180 | struct iwl_tfd *tfd, enum dma_data_direction dma_dir) | 364 | struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, |
365 | enum dma_data_direction dma_dir) | ||
181 | { | 366 | { |
182 | int i; | 367 | int i; |
183 | int num_tbs; | 368 | int num_tbs; |
184 | 369 | ||
185 | /* Sanity check on number of chunks */ | 370 | /* Sanity check on number of chunks */ |
186 | num_tbs = iwl_tfd_get_num_tbs(tfd); | 371 | num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); |
187 | 372 | ||
188 | if (num_tbs >= IWL_NUM_OF_TBS) { | 373 | if (num_tbs >= IWL_NUM_OF_TBS) { |
189 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); | 374 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); |
@@ -200,14 +385,14 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | |||
200 | 385 | ||
201 | /* Unmap chunks, if any. */ | 386 | /* Unmap chunks, if any. */ |
202 | for (i = 1; i < num_tbs; i++) | 387 | for (i = 1; i < num_tbs; i++) |
203 | dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i), | 388 | dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), |
204 | iwl_tfd_tb_get_len(tfd, i), dma_dir); | 389 | iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); |
205 | 390 | ||
206 | tfd->num_tbs = 0; | 391 | tfd->num_tbs = 0; |
207 | } | 392 | } |
208 | 393 | ||
209 | /** | 394 | /* |
210 | * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] | 395 | * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] |
211 | * @trans - transport private data | 396 | * @trans - transport private data |
212 | * @txq - tx queue | 397 | * @txq - tx queue |
213 | * @dma_dir - the direction of the DMA mapping | 398 | * @dma_dir - the direction of the DMA mapping |
@@ -215,8 +400,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | |||
215 | * Does NOT advance any TFD circular buffer read/write indexes | 400 | * Does NOT advance any TFD circular buffer read/write indexes |
216 | * Does NOT free the TFD itself (which is within circular buffer) | 401 | * Does NOT free the TFD itself (which is within circular buffer) |
217 | */ | 402 | */ |
218 | void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | 403 | static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, |
219 | enum dma_data_direction dma_dir) | 404 | enum dma_data_direction dma_dir) |
220 | { | 405 | { |
221 | struct iwl_tfd *tfd_tmp = txq->tfds; | 406 | struct iwl_tfd *tfd_tmp = txq->tfds; |
222 | 407 | ||
@@ -227,8 +412,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | |||
227 | lockdep_assert_held(&txq->lock); | 412 | lockdep_assert_held(&txq->lock); |
228 | 413 | ||
229 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ | 414 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ |
230 | iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], | 415 | iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], |
231 | dma_dir); | 416 | dma_dir); |
232 | 417 | ||
233 | /* free SKB */ | 418 | /* free SKB */ |
234 | if (txq->entries) { | 419 | if (txq->entries) { |
@@ -247,10 +432,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | |||
247 | } | 432 | } |
248 | } | 433 | } |
249 | 434 | ||
250 | int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | 435 | static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, |
251 | struct iwl_tx_queue *txq, | 436 | dma_addr_t addr, u16 len, u8 reset) |
252 | dma_addr_t addr, u16 len, | ||
253 | u8 reset) | ||
254 | { | 437 | { |
255 | struct iwl_queue *q; | 438 | struct iwl_queue *q; |
256 | struct iwl_tfd *tfd, *tfd_tmp; | 439 | struct iwl_tfd *tfd, *tfd_tmp; |
@@ -263,7 +446,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | |||
263 | if (reset) | 446 | if (reset) |
264 | memset(tfd, 0, sizeof(*tfd)); | 447 | memset(tfd, 0, sizeof(*tfd)); |
265 | 448 | ||
266 | num_tbs = iwl_tfd_get_num_tbs(tfd); | 449 | num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); |
267 | 450 | ||
268 | /* Each TFD can point to a maximum 20 Tx buffers */ | 451 | /* Each TFD can point to a maximum 20 Tx buffers */ |
269 | if (num_tbs >= IWL_NUM_OF_TBS) { | 452 | if (num_tbs >= IWL_NUM_OF_TBS) { |
@@ -279,108 +462,534 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | |||
279 | IWL_ERR(trans, "Unaligned address = %llx\n", | 462 | IWL_ERR(trans, "Unaligned address = %llx\n", |
280 | (unsigned long long)addr); | 463 | (unsigned long long)addr); |
281 | 464 | ||
282 | iwl_tfd_set_tb(tfd, num_tbs, addr, len); | 465 | iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); |
283 | 466 | ||
284 | return 0; | 467 | return 0; |
285 | } | 468 | } |
286 | 469 | ||
287 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** | 470 | static int iwl_pcie_txq_alloc(struct iwl_trans *trans, |
288 | * DMA services | 471 | struct iwl_txq *txq, int slots_num, |
289 | * | 472 | u32 txq_id) |
290 | * Theory of operation | 473 | { |
291 | * | 474 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
292 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer | 475 | size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; |
293 | * of buffer descriptors, each of which points to one or more data buffers for | 476 | int i; |
294 | * the device to read from or fill. Driver and device exchange status of each | 477 | |
295 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty | 478 | if (WARN_ON(txq->entries || txq->tfds)) |
296 | * entries in each circular buffer, to protect against confusing empty and full | 479 | return -EINVAL; |
297 | * queue states. | 480 | |
298 | * | 481 | setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, |
299 | * The device reads or writes the data in the queues via the device's several | 482 | (unsigned long)txq); |
300 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | 483 | txq->trans_pcie = trans_pcie; |
301 | * | 484 | |
302 | * For Tx queue, there are low mark and high mark limits. If, after queuing | 485 | txq->q.n_window = slots_num; |
303 | * the packet for Tx, free space become < low mark, Tx queue stopped. When | 486 | |
304 | * reclaiming packets (on 'tx done IRQ), if free space become > high mark, | 487 | txq->entries = kcalloc(slots_num, |
305 | * Tx queue resumed. | 488 | sizeof(struct iwl_pcie_txq_entry), |
489 | GFP_KERNEL); | ||
490 | |||
491 | if (!txq->entries) | ||
492 | goto error; | ||
493 | |||
494 | if (txq_id == trans_pcie->cmd_queue) | ||
495 | for (i = 0; i < slots_num; i++) { | ||
496 | txq->entries[i].cmd = | ||
497 | kmalloc(sizeof(struct iwl_device_cmd), | ||
498 | GFP_KERNEL); | ||
499 | if (!txq->entries[i].cmd) | ||
500 | goto error; | ||
501 | } | ||
502 | |||
503 | /* Circular buffer of transmit frame descriptors (TFDs), | ||
504 | * shared with device */ | ||
505 | txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, | ||
506 | &txq->q.dma_addr, GFP_KERNEL); | ||
507 | if (!txq->tfds) { | ||
508 | IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); | ||
509 | goto error; | ||
510 | } | ||
511 | txq->q.id = txq_id; | ||
512 | |||
513 | return 0; | ||
514 | error: | ||
515 | if (txq->entries && txq_id == trans_pcie->cmd_queue) | ||
516 | for (i = 0; i < slots_num; i++) | ||
517 | kfree(txq->entries[i].cmd); | ||
518 | kfree(txq->entries); | ||
519 | txq->entries = NULL; | ||
520 | |||
521 | return -ENOMEM; | ||
522 | |||
523 | } | ||
524 | |||
525 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | ||
526 | int slots_num, u32 txq_id) | ||
527 | { | ||
528 | int ret; | ||
529 | |||
530 | txq->need_update = 0; | ||
531 | |||
532 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | ||
533 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | ||
534 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | ||
535 | |||
536 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
537 | ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, | ||
538 | txq_id); | ||
539 | if (ret) | ||
540 | return ret; | ||
541 | |||
542 | spin_lock_init(&txq->lock); | ||
543 | |||
544 | /* | ||
545 | * Tell nic where to find circular buffer of Tx Frame Descriptors for | ||
546 | * given Tx queue, and enable the DMA channel used for that queue. | ||
547 | * Circular buffer (TFD queue in DRAM) physical base address */ | ||
548 | iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), | ||
549 | txq->q.dma_addr >> 8); | ||
550 | |||
551 | return 0; | ||
552 | } | ||
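Note the txq->q.dma_addr >> 8 write just above: FH_MEM_CBBC_QUEUE stores the TFD ring base in 256-byte units, so the ring base must be 256-byte aligned; the dma_alloc_coherent() call in iwl_pcie_txq_alloc() satisfies this, since coherent allocations are at least page aligned.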
553 | |||
554 | /* | ||
555 | * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's | ||
556 | */ | ||
557 | static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) | ||
558 | { | ||
559 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
560 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
561 | struct iwl_queue *q = &txq->q; | ||
562 | enum dma_data_direction dma_dir; | ||
563 | |||
564 | if (!q->n_bd) | ||
565 | return; | ||
566 | |||
567 | /* In the command queue, all the TBs are mapped as BIDI | ||
568 | * so unmap them as such. | ||
569 | */ | ||
570 | if (txq_id == trans_pcie->cmd_queue) | ||
571 | dma_dir = DMA_BIDIRECTIONAL; | ||
572 | else | ||
573 | dma_dir = DMA_TO_DEVICE; | ||
574 | |||
575 | spin_lock_bh(&txq->lock); | ||
576 | while (q->write_ptr != q->read_ptr) { | ||
577 | iwl_pcie_txq_free_tfd(trans, txq, dma_dir); | ||
578 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | ||
579 | } | ||
580 | spin_unlock_bh(&txq->lock); | ||
581 | } | ||
582 | |||
583 | /* | ||
584 | * iwl_pcie_txq_free - Deallocate DMA queue. | ||
585 | * @txq: Transmit queue to deallocate. | ||
306 | * | 586 | * |
307 | ***************************************************/ | 587 | * Empty queue by removing and destroying all BD's. |
588 | * Free all buffers. | ||
589 | * 0-fill, but do not free "txq" descriptor structure. | ||
590 | */ | ||
591 | static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) | ||
592 | { | ||
593 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
594 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
595 | struct device *dev = trans->dev; | ||
596 | int i; | ||
308 | 597 | ||
309 | int iwl_queue_space(const struct iwl_queue *q) | 598 | if (WARN_ON(!txq)) |
599 | return; | ||
600 | |||
601 | iwl_pcie_txq_unmap(trans, txq_id); | ||
602 | |||
603 | /* De-alloc array of command/tx buffers */ | ||
604 | if (txq_id == trans_pcie->cmd_queue) | ||
605 | for (i = 0; i < txq->q.n_window; i++) { | ||
606 | kfree(txq->entries[i].cmd); | ||
607 | kfree(txq->entries[i].copy_cmd); | ||
608 | kfree(txq->entries[i].free_buf); | ||
609 | } | ||
610 | |||
611 | /* De-alloc circular buffer of TFDs */ | ||
612 | if (txq->q.n_bd) { | ||
613 | dma_free_coherent(dev, sizeof(struct iwl_tfd) * | ||
614 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); | ||
615 | memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); | ||
616 | } | ||
617 | |||
618 | kfree(txq->entries); | ||
619 | txq->entries = NULL; | ||
620 | |||
621 | del_timer_sync(&txq->stuck_timer); | ||
622 | |||
623 | /* 0-fill queue descriptor structure */ | ||
624 | memset(txq, 0, sizeof(*txq)); | ||
625 | } | ||
626 | |||
627 | /* | ||
628 | * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask | ||
629 | */ | ||
630 | static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask) | ||
310 | { | 631 | { |
311 | int s = q->read_ptr - q->write_ptr; | 632 | struct iwl_trans_pcie __maybe_unused *trans_pcie = |
633 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
312 | 634 | ||
313 | if (q->read_ptr > q->write_ptr) | 635 | iwl_write_prph(trans, SCD_TXFACT, mask); |
314 | s -= q->n_bd; | 636 | } |
315 | 637 | ||
316 | if (s <= 0) | 638 | void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) |
317 | s += q->n_window; | 639 | { |
318 | /* keep some reserve to not confuse empty and full situations */ | 640 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
319 | s -= 2; | 641 | u32 a; |
320 | if (s < 0) | 642 | int chan; |
321 | s = 0; | 643 | u32 reg_val; |
322 | return s; | 644 | |
645 | /* make sure all queues are not stopped/used */ | ||
646 | memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); | ||
647 | memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); | ||
648 | |||
649 | trans_pcie->scd_base_addr = | ||
650 | iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); | ||
651 | |||
652 | WARN_ON(scd_base_addr != 0 && | ||
653 | scd_base_addr != trans_pcie->scd_base_addr); | ||
654 | |||
655 | a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; | ||
656 | /* reset context data memory */ | ||
657 | for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; | ||
658 | a += 4) | ||
659 | iwl_write_targ_mem(trans, a, 0); | ||
660 | /* reset tx status memory */ | ||
661 | for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; | ||
662 | a += 4) | ||
663 | iwl_write_targ_mem(trans, a, 0); | ||
664 | for (; a < trans_pcie->scd_base_addr + | ||
665 | SCD_TRANS_TBL_OFFSET_QUEUE( | ||
666 | trans->cfg->base_params->num_of_queues); | ||
667 | a += 4) | ||
668 | iwl_write_targ_mem(trans, a, 0); | ||
669 | |||
670 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, | ||
671 | trans_pcie->scd_bc_tbls.dma >> 10); | ||
672 | |||
673 | /* The chain extension of the SCD doesn't work well. This feature is | ||
674 | * enabled by default by the HW, so we need to disable it manually. | ||
675 | */ | ||
676 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | ||
677 | |||
678 | iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, | ||
679 | trans_pcie->cmd_fifo); | ||
680 | |||
681 | /* Activate all Tx DMA/FIFO channels */ | ||
682 | iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7)); | ||
683 | |||
684 | /* Enable DMA channel */ | ||
685 | for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) | ||
686 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | ||
687 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
688 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | ||
689 | |||
690 | /* Update FH chicken bits */ | ||
691 | reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); | ||
692 | iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, | ||
693 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); | ||
694 | |||
695 | /* Enable L1-Active */ | ||
696 | iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, | ||
697 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | ||
323 | } | 698 | } |
324 | 699 | ||
325 | /** | 700 | /* |
326 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | 701 | * iwl_pcie_tx_stop - Stop all Tx DMA channels |
327 | */ | 702 | */ |
328 | int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) | 703 | int iwl_pcie_tx_stop(struct iwl_trans *trans) |
329 | { | 704 | { |
330 | q->n_bd = count; | 705 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
331 | q->n_window = slots_num; | 706 | int ch, txq_id, ret; |
332 | q->id = id; | 707 | unsigned long flags; |
333 | 708 | ||
334 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | 709 | /* Turn off all Tx DMA fifos */ |
335 | * and iwl_queue_dec_wrap are broken. */ | 710 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); |
336 | if (WARN_ON(!is_power_of_2(count))) | ||
337 | return -EINVAL; | ||
338 | 711 | ||
339 | /* slots_num must be power-of-two size, otherwise | 712 | iwl_pcie_txq_set_sched(trans, 0); |
340 | * get_cmd_index is broken. */ | ||
341 | if (WARN_ON(!is_power_of_2(slots_num))) | ||
342 | return -EINVAL; | ||
343 | 713 | ||
344 | q->low_mark = q->n_window / 4; | 714 | /* Stop each Tx DMA channel, and wait for it to be idle */ |
345 | if (q->low_mark < 4) | 715 | for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { |
346 | q->low_mark = 4; | 716 | iwl_write_direct32(trans, |
717 | FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
718 | ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, | ||
719 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000); | ||
720 | if (ret < 0) | ||
721 | IWL_ERR(trans, | ||
722 | "Failing on timeout while stopping DMA channel %d [0x%08x]\n", | ||
723 | ch, | ||
724 | iwl_read_direct32(trans, | ||
725 | FH_TSSR_TX_STATUS_REG)); | ||
726 | } | ||
727 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
347 | 728 | ||
348 | q->high_mark = q->n_window / 8; | 729 | if (!trans_pcie->txq) { |
349 | if (q->high_mark < 2) | 730 | IWL_WARN(trans, |
350 | q->high_mark = 2; | 731 | "Stopping tx queues that aren't allocated...\n"); |
732 | return 0; | ||
733 | } | ||
351 | 734 | ||
352 | q->write_ptr = q->read_ptr = 0; | 735 | /* Unmap DMA from host system and free skb's */ |
736 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
737 | txq_id++) | ||
738 | iwl_pcie_txq_unmap(trans, txq_id); | ||
353 | 739 | ||
354 | return 0; | 740 | return 0; |
355 | } | 741 | } |
356 | 742 | ||
357 | static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, | 743 | /* |
358 | struct iwl_tx_queue *txq) | 744 | * iwl_trans_tx_free - Free TXQ Context |
745 | * | ||
746 | * Destroy all TX DMA queues and structures | ||
747 | */ | ||
748 | void iwl_pcie_tx_free(struct iwl_trans *trans) | ||
359 | { | 749 | { |
360 | struct iwl_trans_pcie *trans_pcie = | 750 | int txq_id; |
361 | IWL_TRANS_GET_PCIE_TRANS(trans); | 751 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
362 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
363 | int txq_id = txq->q.id; | ||
364 | int read_ptr = txq->q.read_ptr; | ||
365 | u8 sta_id = 0; | ||
366 | __le16 bc_ent; | ||
367 | struct iwl_tx_cmd *tx_cmd = | ||
368 | (void *)txq->entries[txq->q.read_ptr].cmd->payload; | ||
369 | 752 | ||
370 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | 753 | /* Tx queues */ |
754 | if (trans_pcie->txq) { | ||
755 | for (txq_id = 0; | ||
756 | txq_id < trans->cfg->base_params->num_of_queues; txq_id++) | ||
757 | iwl_pcie_txq_free(trans, txq_id); | ||
758 | } | ||
371 | 759 | ||
372 | if (txq_id != trans_pcie->cmd_queue) | 760 | kfree(trans_pcie->txq); |
373 | sta_id = tx_cmd->sta_id; | 761 | trans_pcie->txq = NULL; |
374 | 762 | ||
375 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | 763 | iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); |
376 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
377 | 764 | ||
378 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | 765 | iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); |
379 | scd_bc_tbl[txq_id]. | 766 | } |
380 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | 767 | |
768 | /* | ||
769 | * iwl_pcie_tx_alloc - allocate TX context | ||
770 | * Allocate all Tx DMA structures and initialize them | ||
771 | */ | ||
772 | static int iwl_pcie_tx_alloc(struct iwl_trans *trans) | ||
773 | { | ||
774 | int ret; | ||
775 | int txq_id, slots_num; | ||
776 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
777 | |||
778 | u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * | ||
779 | sizeof(struct iwlagn_scd_bc_tbl); | ||
780 | |||
781 | /* It is not allowed to alloc twice, so warn when this happens. | ||
782 | * We cannot rely on the previous allocation, so free and fail */ | ||
783 | if (WARN_ON(trans_pcie->txq)) { | ||
784 | ret = -EINVAL; | ||
785 | goto error; | ||
786 | } | ||
787 | |||
788 | ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, | ||
789 | scd_bc_tbls_size); | ||
790 | if (ret) { | ||
791 | IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); | ||
792 | goto error; | ||
793 | } | ||
794 | |||
795 | /* Alloc keep-warm buffer */ | ||
796 | ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); | ||
797 | if (ret) { | ||
798 | IWL_ERR(trans, "Keep Warm allocation failed\n"); | ||
799 | goto error; | ||
800 | } | ||
801 | |||
802 | trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, | ||
803 | sizeof(struct iwl_txq), GFP_KERNEL); | ||
804 | if (!trans_pcie->txq) { | ||
805 | IWL_ERR(trans, "Not enough memory for txq\n"); | ||
806 | ret = -ENOMEM; | ||
807 | goto error; | ||
808 | } | ||
809 | |||
810 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
811 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
812 | txq_id++) { | ||
813 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
814 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
815 | ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], | ||
816 | slots_num, txq_id); | ||
817 | if (ret) { | ||
818 | IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); | ||
819 | goto error; | ||
820 | } | ||
821 | } | ||
822 | |||
823 | return 0; | ||
824 | |||
825 | error: | ||
826 | iwl_pcie_tx_free(trans); | ||
827 | |||
828 | return ret; | ||
829 | } | ||
830 | int iwl_pcie_tx_init(struct iwl_trans *trans) | ||
831 | { | ||
832 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
833 | int ret; | ||
834 | int txq_id, slots_num; | ||
835 | unsigned long flags; | ||
836 | bool alloc = false; | ||
837 | |||
838 | if (!trans_pcie->txq) { | ||
839 | ret = iwl_pcie_tx_alloc(trans); | ||
840 | if (ret) | ||
841 | goto error; | ||
842 | alloc = true; | ||
843 | } | ||
844 | |||
845 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
846 | |||
847 | /* Turn off all Tx DMA fifos */ | ||
848 | iwl_write_prph(trans, SCD_TXFACT, 0); | ||
849 | |||
850 | /* Tell NIC where to find the "keep warm" buffer */ | ||
851 | iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, | ||
852 | trans_pcie->kw.dma >> 4); | ||
853 | |||
854 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
855 | |||
856 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
857 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
858 | txq_id++) { | ||
859 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
860 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
861 | ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], | ||
862 | slots_num, txq_id); | ||
863 | if (ret) { | ||
864 | IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); | ||
865 | goto error; | ||
866 | } | ||
867 | } | ||
868 | |||
869 | return 0; | ||
870 | error: | ||
871 | /* Upon error, free only if we allocated something */ | ||
872 | if (alloc) | ||
873 | iwl_pcie_tx_free(trans); | ||
874 | return ret; | ||
875 | } | ||
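The alloc flag above reflects a deliberate split: iwl_pcie_tx_alloc() runs only when the queues do not exist yet, while iwl_pcie_tx_init() reruns on every firmware (re)start. Freeing only what this call allocated keeps a failed re-init after a firmware restart from tearing down queues the caller still owns.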
876 | |||
877 | static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie, | ||
878 | struct iwl_txq *txq) | ||
879 | { | ||
880 | if (!trans_pcie->wd_timeout) | ||
881 | return; | ||
882 | |||
883 | /* | ||
884 | * if empty delete timer, otherwise move timer forward | ||
885 | * since we're making progress on this queue | ||
886 | */ | ||
887 | if (txq->q.read_ptr == txq->q.write_ptr) | ||
888 | del_timer(&txq->stuck_timer); | ||
889 | else | ||
890 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
891 | } | ||
892 | |||
893 | /* Frees buffers until index _not_ inclusive */ | ||
894 | void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | ||
895 | struct sk_buff_head *skbs) | ||
896 | { | ||
897 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
898 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
899 | /* n_bd is usually 256 => n_bd - 1 = 0xff */ | ||
900 | int tfd_num = ssn & (txq->q.n_bd - 1); | ||
901 | struct iwl_queue *q = &txq->q; | ||
902 | int last_to_free; | ||
903 | |||
904 | /* This function is not meant to release the cmd queue */ | ||
905 | if (WARN_ON(txq_id == trans_pcie->cmd_queue)) | ||
906 | return; | ||
907 | |||
908 | spin_lock(&txq->lock); | ||
909 | |||
910 | if (txq->q.read_ptr == tfd_num) | ||
911 | goto out; | ||
912 | |||
913 | IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", | ||
914 | txq_id, txq->q.read_ptr, tfd_num, ssn); | ||
915 | |||
916 | /* Since we free until index _not_ inclusive, the one before index is | ||
917 | * the last we will free. This one must be used */ | ||
918 | last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd); | ||
919 | |||
920 | if (!iwl_queue_used(q, last_to_free)) { | ||
921 | IWL_ERR(trans, | ||
922 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", | ||
923 | __func__, txq_id, last_to_free, q->n_bd, | ||
924 | q->write_ptr, q->read_ptr); | ||
925 | goto out; | ||
926 | } | ||
927 | |||
928 | if (WARN_ON(!skb_queue_empty(skbs))) | ||
929 | goto out; | ||
930 | |||
931 | for (; | ||
932 | q->read_ptr != tfd_num; | ||
933 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
934 | |||
935 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) | ||
936 | continue; | ||
937 | |||
938 | __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); | ||
939 | |||
940 | txq->entries[txq->q.read_ptr].skb = NULL; | ||
941 | |||
942 | iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); | ||
943 | |||
944 | iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); | ||
945 | } | ||
946 | |||
947 | iwl_pcie_txq_progress(trans_pcie, txq); | ||
948 | |||
949 | if (iwl_queue_space(&txq->q) > txq->q.low_mark) | ||
950 | iwl_wake_queue(trans, txq); | ||
951 | out: | ||
952 | spin_unlock(&txq->lock); | ||
953 | } | ||
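The tfd_num computation at the top of this function is worth a concrete value (illustrative): ssn is a free-running counter, so the ring index is just its low bits.

    /* With n_bd = 256: ring index = low 8 bits of the free-running ssn. */
    int tfd_num = ssn & (256 - 1);  /* e.g. ssn = 0x105 -> tfd_num = 0x05 */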
954 | |||
955 | /* | ||
956 | * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd | ||
957 | * | ||
958 | * When FW advances 'R' index, all entries between old and new 'R' index | ||
959 | * need to be reclaimed. As a result, some free space forms. If there is | ||
960 | * enough free space (> low mark), wake the stack that feeds us. | ||
961 | */ | ||
962 | static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) | ||
963 | { | ||
964 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
965 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
966 | struct iwl_queue *q = &txq->q; | ||
967 | int nfreed = 0; | ||
968 | |||
969 | lockdep_assert_held(&txq->lock); | ||
970 | |||
971 | if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) { | ||
972 | IWL_ERR(trans, | ||
973 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", | ||
974 | __func__, txq_id, idx, q->n_bd, | ||
975 | q->write_ptr, q->read_ptr); | ||
976 | return; | ||
977 | } | ||
978 | |||
979 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | ||
980 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
981 | |||
982 | if (nfreed++ > 0) { | ||
983 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | ||
984 | idx, q->write_ptr, q->read_ptr); | ||
985 | iwl_op_mode_nic_error(trans->op_mode); | ||
986 | } | ||
987 | } | ||
988 | |||
989 | iwl_pcie_txq_progress(trans_pcie, txq); | ||
381 | } | 990 | } |
382 | 991 | ||
383 | static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | 992 | static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, |
384 | u16 txq_id) | 993 | u16 txq_id) |
385 | { | 994 | { |
386 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 995 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -405,7 +1014,8 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | |||
405 | return 0; | 1014 | return 0; |
406 | } | 1015 | } |
407 | 1016 | ||
408 | static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) | 1017 | static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, |
1018 | u16 txq_id) | ||
409 | { | 1019 | { |
410 | /* Simply stop the queue, but don't change any configuration; | 1020 | /* Simply stop the queue, but don't change any configuration; |
411 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | 1021 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ |
@@ -424,7 +1034,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | |||
424 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); | 1034 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); |
425 | 1035 | ||
426 | /* Stop this Tx queue before configuring it */ | 1036 | /* Stop this Tx queue before configuring it */ |
427 | iwl_txq_set_inactive(trans, txq_id); | 1037 | iwl_pcie_txq_set_inactive(trans, txq_id); |
428 | 1038 | ||
429 | /* Set this queue as a chain-building queue unless it is CMD queue */ | 1039 | /* Set this queue as a chain-building queue unless it is CMD queue */ |
430 | if (txq_id != trans_pcie->cmd_queue) | 1040 | if (txq_id != trans_pcie->cmd_queue) |
@@ -435,7 +1045,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | |||
435 | u16 ra_tid = BUILD_RAxTID(sta_id, tid); | 1045 | u16 ra_tid = BUILD_RAxTID(sta_id, tid); |
436 | 1046 | ||
437 | /* Map receiver-address / traffic-ID to this queue */ | 1047 | /* Map receiver-address / traffic-ID to this queue */ |
438 | iwl_txq_set_ratid_map(trans, ra_tid, txq_id); | 1048 | iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); |
439 | 1049 | ||
440 | /* enable aggregations for the queue */ | 1050 | /* enable aggregations for the queue */ |
441 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); | 1051 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); |
@@ -489,18 +1099,20 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | |||
489 | return; | 1099 | return; |
490 | } | 1100 | } |
491 | 1101 | ||
492 | iwl_txq_set_inactive(trans, txq_id); | 1102 | iwl_pcie_txq_set_inactive(trans, txq_id); |
493 | 1103 | ||
494 | _iwl_write_targ_mem_dwords(trans, stts_addr, | 1104 | _iwl_write_targ_mem_dwords(trans, stts_addr, |
495 | zero_val, ARRAY_SIZE(zero_val)); | 1105 | zero_val, ARRAY_SIZE(zero_val)); |
496 | 1106 | ||
1107 | iwl_pcie_txq_unmap(trans, txq_id); | ||
1108 | |||
497 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); | 1109 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); |
498 | } | 1110 | } |
499 | 1111 | ||
500 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | 1112 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ |
501 | 1113 | ||
502 | /** | 1114 | /* |
503 | * iwl_enqueue_hcmd - enqueue a uCode command | 1115 | * iwl_pcie_enqueue_hcmd - enqueue a uCode command |
504 | * @priv: device private data pointer | 1116 | * @priv: device private data pointer |
505 | * @cmd: a pointer to the ucode command structure | 1117 | * @cmd: a pointer to the ucode command structure |
506 | * | 1118 | * |
@@ -508,15 +1120,17 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | |||
508 | * failed. On success, it returns the index (> 0) of command in the | 1120 | * failed. On success, it returns the index (> 0) of command in the |
509 | * command queue. | 1121 | * command queue. |
510 | */ | 1122 | */ |
511 | static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1123 | static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, |
1124 | struct iwl_host_cmd *cmd) | ||
512 | { | 1125 | { |
513 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1126 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
514 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | 1127 | struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; |
515 | struct iwl_queue *q = &txq->q; | 1128 | struct iwl_queue *q = &txq->q; |
516 | struct iwl_device_cmd *out_cmd; | 1129 | struct iwl_device_cmd *out_cmd; |
517 | struct iwl_cmd_meta *out_meta; | 1130 | struct iwl_cmd_meta *out_meta; |
1131 | void *dup_buf = NULL; | ||
518 | dma_addr_t phys_addr; | 1132 | dma_addr_t phys_addr; |
519 | u32 idx; | 1133 | int idx; |
520 | u16 copy_size, cmd_size; | 1134 | u16 copy_size, cmd_size; |
521 | bool had_nocopy = false; | 1135 | bool had_nocopy = false; |
522 | int i; | 1136 | int i; |
@@ -533,10 +1147,33 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
533 | continue; | 1147 | continue; |
534 | if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { | 1148 | if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { |
535 | had_nocopy = true; | 1149 | had_nocopy = true; |
1150 | if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { | ||
1151 | idx = -EINVAL; | ||
1152 | goto free_dup_buf; | ||
1153 | } | ||
1154 | } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { | ||
1155 | /* | ||
1156 | * This is also a chunk that isn't copied | ||
1157 | * to the static buffer so set had_nocopy. | ||
1158 | */ | ||
1159 | had_nocopy = true; | ||
1160 | |||
1161 | /* only allowed once */ | ||
1162 | if (WARN_ON(dup_buf)) { | ||
1163 | idx = -EINVAL; | ||
1164 | goto free_dup_buf; | ||
1165 | } | ||
1166 | |||
1167 | dup_buf = kmemdup(cmd->data[i], cmd->len[i], | ||
1168 | GFP_ATOMIC); | ||
1169 | if (!dup_buf) | ||
1170 | return -ENOMEM; | ||
536 | } else { | 1171 | } else { |
537 | /* NOCOPY must not be followed by normal! */ | 1172 | /* NOCOPY must not be followed by normal! */ |
538 | if (WARN_ON(had_nocopy)) | 1173 | if (WARN_ON(had_nocopy)) { |
539 | return -EINVAL; | 1174 | idx = -EINVAL; |
1175 | goto free_dup_buf; | ||
1176 | } | ||
540 | copy_size += cmd->len[i]; | 1177 | copy_size += cmd->len[i]; |
541 | } | 1178 | } |
542 | cmd_size += cmd->len[i]; | 1179 | cmd_size += cmd->len[i]; |
@@ -550,9 +1187,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
550 | */ | 1187 | */ |
551 | if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, | 1188 | if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, |
552 | "Command %s (%#x) is too large (%d bytes)\n", | 1189 | "Command %s (%#x) is too large (%d bytes)\n", |
553 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), | 1190 | get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) { |
554 | cmd->id, copy_size)) | 1191 | idx = -EINVAL; |
555 | return -EINVAL; | 1192 | goto free_dup_buf; |
1193 | } | ||
556 | 1194 | ||
557 | spin_lock_bh(&txq->lock); | 1195 | spin_lock_bh(&txq->lock); |
558 | 1196 | ||
@@ -561,7 +1199,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
561 | 1199 | ||
562 | IWL_ERR(trans, "No space in command queue\n"); | 1200 | IWL_ERR(trans, "No space in command queue\n"); |
563 | iwl_op_mode_cmd_queue_full(trans->op_mode); | 1201 | iwl_op_mode_cmd_queue_full(trans->op_mode); |
564 | return -ENOSPC; | 1202 | idx = -ENOSPC; |
1203 | goto free_dup_buf; | ||
565 | } | 1204 | } |
566 | 1205 | ||
567 | idx = get_cmd_index(q, q->write_ptr); | 1206 | idx = get_cmd_index(q, q->write_ptr); |
@@ -585,7 +1224,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
585 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | 1224 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { |
586 | if (!cmd->len[i]) | 1225 | if (!cmd->len[i]) |
587 | continue; | 1226 | continue; |
588 | if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) | 1227 | if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | |
1228 | IWL_HCMD_DFL_DUP)) | ||
589 | break; | 1229 | break; |
590 | memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); | 1230 | memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); |
591 | cmd_pos += cmd->len[i]; | 1231 | cmd_pos += cmd->len[i]; |
@@ -610,7 +1250,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
610 | 1250 | ||
611 | IWL_DEBUG_HC(trans, | 1251 | IWL_DEBUG_HC(trans, |
612 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", | 1252 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", |
613 | trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), | 1253 | get_cmd_string(trans_pcie, out_cmd->hdr.cmd), |
614 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), | 1254 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), |
615 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); | 1255 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); |
616 | 1256 | ||
@@ -624,28 +1264,35 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
624 | dma_unmap_addr_set(out_meta, mapping, phys_addr); | 1264 | dma_unmap_addr_set(out_meta, mapping, phys_addr); |
625 | dma_unmap_len_set(out_meta, len, copy_size); | 1265 | dma_unmap_len_set(out_meta, len, copy_size); |
626 | 1266 | ||
627 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); | 1267 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); |
628 | 1268 | ||
629 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | 1269 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { |
1270 | const void *data = cmd->data[i]; | ||
1271 | |||
630 | if (!cmd->len[i]) | 1272 | if (!cmd->len[i]) |
631 | continue; | 1273 | continue; |
632 | if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) | 1274 | if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | |
1275 | IWL_HCMD_DFL_DUP))) | ||
633 | continue; | 1276 | continue; |
634 | phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i], | 1277 | if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) |
1278 | data = dup_buf; | ||
1279 | phys_addr = dma_map_single(trans->dev, (void *)data, | ||
635 | cmd->len[i], DMA_BIDIRECTIONAL); | 1280 | cmd->len[i], DMA_BIDIRECTIONAL); |
636 | if (dma_mapping_error(trans->dev, phys_addr)) { | 1281 | if (dma_mapping_error(trans->dev, phys_addr)) { |
637 | iwl_unmap_tfd(trans, out_meta, | 1282 | iwl_pcie_tfd_unmap(trans, out_meta, |
638 | &txq->tfds[q->write_ptr], | 1283 | &txq->tfds[q->write_ptr], |
639 | DMA_BIDIRECTIONAL); | 1284 | DMA_BIDIRECTIONAL); |
640 | idx = -ENOMEM; | 1285 | idx = -ENOMEM; |
641 | goto out; | 1286 | goto out; |
642 | } | 1287 | } |
643 | 1288 | ||
644 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, | 1289 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0); |
645 | cmd->len[i], 0); | ||
646 | } | 1290 | } |
647 | 1291 | ||
648 | out_meta->flags = cmd->flags; | 1292 | out_meta->flags = cmd->flags; |
1293 | if (WARN_ON_ONCE(txq->entries[idx].free_buf)) | ||
1294 | kfree(txq->entries[idx].free_buf); | ||
1295 | txq->entries[idx].free_buf = dup_buf; | ||
649 | 1296 | ||
650 | txq->need_update = 1; | 1297 | txq->need_update = 1; |
651 | 1298 | ||
@@ -658,70 +1305,18 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
658 | 1305 | ||
659 | /* Increment and update queue's write index */ | 1306 | /* Increment and update queue's write index */ |
660 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | 1307 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); |
661 | iwl_txq_update_write_ptr(trans, txq); | 1308 | iwl_pcie_txq_inc_wr_ptr(trans, txq); |
662 | 1309 | ||
663 | out: | 1310 | out: |
664 | spin_unlock_bh(&txq->lock); | 1311 | spin_unlock_bh(&txq->lock); |
1312 | free_dup_buf: | ||
1313 | if (idx < 0) | ||
1314 | kfree(dup_buf); | ||
665 | return idx; | 1315 | return idx; |
666 | } | 1316 | } |
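From the caller's side, the new IWL_HCMD_DFL_DUP flag asks the transport to kmemdup() a chunk (freed later through entries[idx].free_buf) instead of either copying it into the command buffer or DMA-mapping it in place. A hypothetical caller for illustration (command id, buffers and lengths are made up):

    /* Hypothetical caller, illustration only. */
    struct iwl_host_cmd hcmd = {
        .id = REPLY_EXAMPLE,            /* made-up command id */
        .data = { hdr, payload },
        .len = { sizeof(*hdr), payload_len },
        /* payload lives on the caller's stack, so have the
         * transport duplicate it rather than map it in place: */
        .dataflags = { 0, IWL_HCMD_DFL_DUP },
    };

    ret = iwl_trans_send_cmd(trans, &hcmd);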
667 | 1317 | ||
668 | static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie, | 1318 | /* |
669 | struct iwl_tx_queue *txq) | 1319 | * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them |
670 | { | ||
671 | if (!trans_pcie->wd_timeout) | ||
672 | return; | ||
673 | |||
674 | /* | ||
675 | * if empty delete timer, otherwise move timer forward | ||
676 | * since we're making progress on this queue | ||
677 | */ | ||
678 | if (txq->q.read_ptr == txq->q.write_ptr) | ||
679 | del_timer(&txq->stuck_timer); | ||
680 | else | ||
681 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
682 | } | ||
683 | |||
684 | /** | ||
685 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | ||
686 | * | ||
687 | * When FW advances 'R' index, all entries between old and new 'R' index | ||
688 | * need to be reclaimed. As result, some free space forms. If there is | ||
689 | * enough free space (> low mark), wake the stack that feeds us. | ||
690 | */ | ||
691 | static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, | ||
692 | int idx) | ||
693 | { | ||
694 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
695 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
696 | struct iwl_queue *q = &txq->q; | ||
697 | int nfreed = 0; | ||
698 | |||
699 | lockdep_assert_held(&txq->lock); | ||
700 | |||
701 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { | ||
702 | IWL_ERR(trans, | ||
703 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", | ||
704 | __func__, txq_id, idx, q->n_bd, | ||
705 | q->write_ptr, q->read_ptr); | ||
706 | return; | ||
707 | } | ||
708 | |||
709 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | ||
710 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
711 | |||
712 | if (nfreed++ > 0) { | ||
713 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | ||
714 | idx, q->write_ptr, q->read_ptr); | ||
715 | iwl_op_mode_nic_error(trans->op_mode); | ||
716 | } | ||
717 | |||
718 | } | ||
719 | |||
720 | iwl_queue_progress(trans_pcie, txq); | ||
721 | } | ||
722 | |||
723 | /** | ||
724 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | ||
725 | * @rxb: Rx buffer to reclaim | 1320 | * @rxb: Rx buffer to reclaim |
726 | * @handler_status: return value of the handler of the command | 1321 | * @handler_status: return value of the handler of the command |
727 | * (put in setup_rx_handlers) | 1322 | * (put in setup_rx_handlers) |
@@ -730,8 +1325,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, | |||
730 | * will be executed. The attached skb (if present) will only be freed | 1325 | * will be executed. The attached skb (if present) will only be freed |
731 | * if the callback returns 1 | 1326 | * if the callback returns 1 |
732 | */ | 1327 | */ |
733 | void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | 1328 | void iwl_pcie_hcmd_complete(struct iwl_trans *trans, |
734 | int handler_status) | 1329 | struct iwl_rx_cmd_buffer *rxb, int handler_status) |
735 | { | 1330 | { |
736 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | 1331 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
737 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | 1332 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
@@ -741,7 +1336,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
741 | struct iwl_device_cmd *cmd; | 1336 | struct iwl_device_cmd *cmd; |
742 | struct iwl_cmd_meta *meta; | 1337 | struct iwl_cmd_meta *meta; |
743 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1338 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
744 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | 1339 | struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; |
745 | 1340 | ||
746 | /* If a Tx command is being handled and it isn't in the actual | 1341 | /* If a Tx command is being handled and it isn't in the actual |
747 | * command queue then there a command routing bug has been introduced | 1342 | * command queue then there a command routing bug has been introduced |
@@ -761,7 +1356,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
761 | cmd = txq->entries[cmd_index].cmd; | 1356 | cmd = txq->entries[cmd_index].cmd; |
762 | meta = &txq->entries[cmd_index].meta; | 1357 | meta = &txq->entries[cmd_index].meta; |
763 | 1358 | ||
764 | iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); | 1359 | iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); |
765 | 1360 | ||
766 | /* Input error checking is done when commands are added to queue. */ | 1361 | /* Input error checking is done when commands are added to queue. */ |
767 | if (meta->flags & CMD_WANT_SKB) { | 1362 | if (meta->flags & CMD_WANT_SKB) { |
@@ -773,20 +1368,18 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
773 | meta->source->handler_status = handler_status; | 1368 | meta->source->handler_status = handler_status; |
774 | } | 1369 | } |
775 | 1370 | ||
776 | iwl_hcmd_queue_reclaim(trans, txq_id, index); | 1371 | iwl_pcie_cmdq_reclaim(trans, txq_id, index); |
777 | 1372 | ||
778 | if (!(meta->flags & CMD_ASYNC)) { | 1373 | if (!(meta->flags & CMD_ASYNC)) { |
779 | if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | 1374 | if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { |
780 | IWL_WARN(trans, | 1375 | IWL_WARN(trans, |
781 | "HCMD_ACTIVE already clear for command %s\n", | 1376 | "HCMD_ACTIVE already clear for command %s\n", |
782 | trans_pcie_get_cmd_string(trans_pcie, | 1377 | get_cmd_string(trans_pcie, cmd->hdr.cmd)); |
783 | cmd->hdr.cmd)); | ||
784 | } | 1378 | } |
785 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 1379 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
786 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", | 1380 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", |
787 | trans_pcie_get_cmd_string(trans_pcie, | 1381 | get_cmd_string(trans_pcie, cmd->hdr.cmd)); |
788 | cmd->hdr.cmd)); | 1382 | wake_up(&trans_pcie->wait_command_queue); |
789 | wake_up(&trans->wait_command_queue); | ||
790 | } | 1383 | } |
791 | 1384 | ||
792 | meta->flags = 0; | 1385 | meta->flags = 0; |
@@ -796,7 +1389,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
796 | 1389 | ||
797 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) | 1390 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) |
798 | 1391 | ||
799 | static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1392 | static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, |
1393 | struct iwl_host_cmd *cmd) | ||
800 | { | 1394 | { |
801 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1395 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
802 | int ret; | 1396 | int ret; |
@@ -805,59 +1399,59 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
805 | if (WARN_ON(cmd->flags & CMD_WANT_SKB)) | 1399 | if (WARN_ON(cmd->flags & CMD_WANT_SKB)) |
806 | return -EINVAL; | 1400 | return -EINVAL; |
807 | 1401 | ||
808 | 1402 | ret = iwl_pcie_enqueue_hcmd(trans, cmd); | |
809 | ret = iwl_enqueue_hcmd(trans, cmd); | ||
810 | if (ret < 0) { | 1403 | if (ret < 0) { |
811 | IWL_ERR(trans, | 1404 | IWL_ERR(trans, |
812 | "Error sending %s: enqueue_hcmd failed: %d\n", | 1405 | "Error sending %s: enqueue_hcmd failed: %d\n", |
813 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | 1406 | get_cmd_string(trans_pcie, cmd->id), ret); |
814 | return ret; | 1407 | return ret; |
815 | } | 1408 | } |
816 | return 0; | 1409 | return 0; |
817 | } | 1410 | } |
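A minimal caller-side sketch of the asynchronous path (the command ID is hypothetical; the flag handling is the part mirrored from the code above):

	struct iwl_host_cmd cmd = {
		.id = REPLY_ALIVE,	/* hypothetical command ID */
		.flags = CMD_ASYNC,	/* must not be combined with CMD_WANT_SKB */
	};
	int ret = iwl_trans_pcie_send_hcmd(trans, &cmd);
	/* returns as soon as the command is queued; completion is signalled
	 * later on the Rx path via the command-complete handler above */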
818 | 1411 | ||
819 | static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1412 | static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, |
1413 | struct iwl_host_cmd *cmd) | ||
820 | { | 1414 | { |
821 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1415 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
822 | int cmd_idx; | 1416 | int cmd_idx; |
823 | int ret; | 1417 | int ret; |
824 | 1418 | ||
825 | IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", | 1419 | IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", |
826 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1420 | get_cmd_string(trans_pcie, cmd->id)); |
827 | 1421 | ||
828 | if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, | 1422 | if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, |
829 | &trans_pcie->status))) { | 1423 | &trans_pcie->status))) { |
830 | IWL_ERR(trans, "Command %s: a command is already active!\n", | 1424 | IWL_ERR(trans, "Command %s: a command is already active!\n", |
831 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1425 | get_cmd_string(trans_pcie, cmd->id)); |
832 | return -EIO; | 1426 | return -EIO; |
833 | } | 1427 | } |
834 | 1428 | ||
835 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", | 1429 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", |
836 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1430 | get_cmd_string(trans_pcie, cmd->id)); |
837 | 1431 | ||
838 | cmd_idx = iwl_enqueue_hcmd(trans, cmd); | 1432 | cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); |
839 | if (cmd_idx < 0) { | 1433 | if (cmd_idx < 0) { |
840 | ret = cmd_idx; | 1434 | ret = cmd_idx; |
841 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 1435 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
842 | IWL_ERR(trans, | 1436 | IWL_ERR(trans, |
843 | "Error sending %s: enqueue_hcmd failed: %d\n", | 1437 | "Error sending %s: enqueue_hcmd failed: %d\n", |
844 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | 1438 | get_cmd_string(trans_pcie, cmd->id), ret); |
845 | return ret; | 1439 | return ret; |
846 | } | 1440 | } |
847 | 1441 | ||
848 | ret = wait_event_timeout(trans->wait_command_queue, | 1442 | ret = wait_event_timeout(trans_pcie->wait_command_queue, |
849 | !test_bit(STATUS_HCMD_ACTIVE, | 1443 | !test_bit(STATUS_HCMD_ACTIVE, |
850 | &trans_pcie->status), | 1444 | &trans_pcie->status), |
851 | HOST_COMPLETE_TIMEOUT); | 1445 | HOST_COMPLETE_TIMEOUT); |
852 | if (!ret) { | 1446 | if (!ret) { |
853 | if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | 1447 | if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { |
854 | struct iwl_tx_queue *txq = | 1448 | struct iwl_txq *txq = |
855 | &trans_pcie->txq[trans_pcie->cmd_queue]; | 1449 | &trans_pcie->txq[trans_pcie->cmd_queue]; |
856 | struct iwl_queue *q = &txq->q; | 1450 | struct iwl_queue *q = &txq->q; |
857 | 1451 | ||
858 | IWL_ERR(trans, | 1452 | IWL_ERR(trans, |
859 | "Error sending %s: time out after %dms.\n", | 1453 | "Error sending %s: time out after %dms.\n", |
860 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), | 1454 | get_cmd_string(trans_pcie, cmd->id), |
861 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | 1455 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); |
862 | 1456 | ||
863 | IWL_ERR(trans, | 1457 | IWL_ERR(trans, |
@@ -867,16 +1461,28 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
867 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 1461 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
868 | IWL_DEBUG_INFO(trans, | 1462 | IWL_DEBUG_INFO(trans, |
869 | "Clearing HCMD_ACTIVE for command %s\n", | 1463 | "Clearing HCMD_ACTIVE for command %s\n", |
870 | trans_pcie_get_cmd_string(trans_pcie, | 1464 | get_cmd_string(trans_pcie, cmd->id)); |
871 | cmd->id)); | ||
872 | ret = -ETIMEDOUT; | 1465 | ret = -ETIMEDOUT; |
873 | goto cancel; | 1466 | goto cancel; |
874 | } | 1467 | } |
875 | } | 1468 | } |
876 | 1469 | ||
1470 | if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { | ||
1471 | IWL_ERR(trans, "FW error in SYNC CMD %s\n", | ||
1472 | get_cmd_string(trans_pcie, cmd->id)); | ||
1473 | ret = -EIO; | ||
1474 | goto cancel; | ||
1475 | } | ||
1476 | |||
1477 | if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { | ||
1478 | IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); | ||
1479 | ret = -ERFKILL; | ||
1480 | goto cancel; | ||
1481 | } | ||
1482 | |||
877 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { | 1483 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { |
878 | IWL_ERR(trans, "Error: Response NULL in '%s'\n", | 1484 | IWL_ERR(trans, "Error: Response NULL in '%s'\n", |
879 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1485 | get_cmd_string(trans_pcie, cmd->id)); |
880 | ret = -EIO; | 1486 | ret = -EIO; |
881 | goto cancel; | 1487 | goto cancel; |
882 | } | 1488 | } |
@@ -903,64 +1509,183 @@ cancel: | |||
903 | return ret; | 1509 | return ret; |
904 | } | 1510 | } |
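The timeout branch in the sync path leans on wait_event_timeout() semantics: a return of 0 means the full timeout elapsed with the condition still false, anything else is the number of jiffies left over. A condensed sketch (generic names, not from this driver):

	long remaining = wait_event_timeout(wq, condition, timeout);
	if (!remaining) {
		/* 'timeout' jiffies passed with 'condition' still false --
		 * this is why the code above re-checks STATUS_HCMD_ACTIVE */
	} else {
		/* 'condition' became true with 'remaining' jiffies unused */
	}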
905 | 1511 | ||
906 | int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1512 | int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) |
907 | { | 1513 | { |
1514 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1515 | |||
1516 | if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) | ||
1517 | return -EIO; | ||
1518 | |||
1519 | if (test_bit(STATUS_RFKILL, &trans_pcie->status)) | ||
1520 | return -ERFKILL; | ||
1521 | |||
908 | if (cmd->flags & CMD_ASYNC) | 1522 | if (cmd->flags & CMD_ASYNC) |
909 | return iwl_send_cmd_async(trans, cmd); | 1523 | return iwl_pcie_send_hcmd_async(trans, cmd); |
910 | 1524 | ||
911 | return iwl_send_cmd_sync(trans, cmd); | 1525 | /* We can still fail on RFKILL, which can be asserted while we wait */ |
1526 | return iwl_pcie_send_hcmd_sync(trans, cmd); | ||
912 | } | 1527 | } |
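A hypothetical synchronous caller that wants the response packet would handle the result like this (error values as produced above):

	int ret = iwl_trans_pcie_send_hcmd(trans, &cmd);	/* cmd.flags has CMD_WANT_SKB */
	if (ret)		/* -EIO, -ERFKILL or -ETIMEDOUT */
		return ret;
	/* cmd.resp_pkt is valid here and must be released by the caller */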
913 | 1528 | ||
914 | /* Frees buffers until index _not_ inclusive */ | 1529 | int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, |
915 | int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, | 1530 | struct iwl_device_cmd *dev_cmd, int txq_id) |
916 | struct sk_buff_head *skbs) | ||
917 | { | 1531 | { |
918 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1532 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
919 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | 1533 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
920 | struct iwl_queue *q = &txq->q; | 1534 | struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; |
921 | int last_to_free; | 1535 | struct iwl_cmd_meta *out_meta; |
922 | int freed = 0; | 1536 | struct iwl_txq *txq; |
1537 | struct iwl_queue *q; | ||
1538 | dma_addr_t phys_addr = 0; | ||
1539 | dma_addr_t txcmd_phys; | ||
1540 | dma_addr_t scratch_phys; | ||
1541 | u16 len, firstlen, secondlen; | ||
1542 | u8 wait_write_ptr = 0; | ||
1543 | __le16 fc = hdr->frame_control; | ||
1544 | u8 hdr_len = ieee80211_hdrlen(fc); | ||
1545 | u16 __maybe_unused wifi_seq; | ||
1546 | |||
1547 | txq = &trans_pcie->txq[txq_id]; | ||
1548 | q = &txq->q; | ||
923 | 1549 | ||
924 | /* This function is not meant to release the cmd queue */ | 1550 | if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { |
925 | if (WARN_ON(txq_id == trans_pcie->cmd_queue)) | 1551 | WARN_ON_ONCE(1); |
926 | return 0; | 1552 | return -EINVAL; |
1553 | } | ||
927 | 1554 | ||
928 | lockdep_assert_held(&txq->lock); | 1555 | spin_lock(&txq->lock); |
929 | 1556 | ||
930 | /* Since we free until index _not_ inclusive, the one before index is | 1557 | /* In AGG mode, the index in the ring must correspond to the WiFi |
931 | * the last one we will free; that entry must be in use */ | 1558 | * sequence number. This is a HW requirement that helps the SCD parse |
932 | last_to_free = iwl_queue_dec_wrap(index, q->n_bd); | 1559 | * the BA. |
1560 | * Check here that the packets are in the right place on the ring. | ||
1561 | */ | ||
1562 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1563 | wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); | ||
1564 | WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && | ||
1565 | ((wifi_seq & 0xff) != q->write_ptr), | ||
1566 | "Q: %d WiFi Seq %d tfdNum %d", | ||
1567 | txq_id, wifi_seq, q->write_ptr); | ||
1568 | #endif | ||
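	/*
	 * Illustrative numbers (hypothetical): on an aggregation queue, a
	 * frame whose seq_ctrl carries SN 309 (0x135) must sit at ring
	 * index 309 & 0xff == 53, so the check above requires that
	 * q->write_ptr be 53 when the frame is enqueued.
	 */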
1569 | |||
1570 | /* Set up driver data for this TFD */ | ||
1571 | txq->entries[q->write_ptr].skb = skb; | ||
1572 | txq->entries[q->write_ptr].cmd = dev_cmd; | ||
1573 | |||
1574 | dev_cmd->hdr.cmd = REPLY_TX; | ||
1575 | dev_cmd->hdr.sequence = | ||
1576 | cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
1577 | INDEX_TO_SEQ(q->write_ptr))); | ||
1578 | |||
1579 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
1580 | out_meta = &txq->entries[q->write_ptr].meta; | ||
933 | 1581 | ||
934 | if ((index >= q->n_bd) || | 1582 | /* |
935 | (iwl_queue_used(q, last_to_free) == 0)) { | 1583 | * Use the first empty entry in this queue's command buffer array |
936 | IWL_ERR(trans, | 1584 | * to contain the Tx command and MAC header concatenated together |
937 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", | 1585 | * (payload data will be in another buffer). |
938 | __func__, txq_id, last_to_free, q->n_bd, | 1586 | * Size of this varies, due to varying MAC header length. |
939 | q->write_ptr, q->read_ptr); | 1587 | * If end is not dword aligned, we'll have 2 extra bytes at the end |
940 | return 0; | 1588 | * of the MAC header (device reads on dword boundaries). |
1589 | * We'll tell device about this padding later. | ||
1590 | */ | ||
1591 | len = sizeof(struct iwl_tx_cmd) + | ||
1592 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
1593 | firstlen = (len + 3) & ~3; | ||
1594 | |||
1595 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
1596 | if (firstlen != len) | ||
1597 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
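	/*
	 * Worked example with hypothetical sizes: if the Tx command plus
	 * command header came to 36 bytes and hdr_len were 26, then
	 * len == 62 and firstlen == (62 + 3) & ~3 == 64, i.e. 2 bytes of
	 * padding, which is exactly what the MH_PAD flag announces.
	 */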
1598 | |||
1599 | /* Physical address of this Tx command's header (not MAC header!), | ||
1600 | * within command buffer array. */ | ||
1601 | txcmd_phys = dma_map_single(trans->dev, | ||
1602 | &dev_cmd->hdr, firstlen, | ||
1603 | DMA_BIDIRECTIONAL); | ||
1604 | if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) | ||
1605 | goto out_err; | ||
1606 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
1607 | dma_unmap_len_set(out_meta, len, firstlen); | ||
1608 | |||
1609 | if (!ieee80211_has_morefrags(fc)) { | ||
1610 | txq->need_update = 1; | ||
1611 | } else { | ||
1612 | wait_write_ptr = 1; | ||
1613 | txq->need_update = 0; | ||
941 | } | 1614 | } |
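	/*
	 * With more fragments pending, wait_write_ptr defers the doorbell:
	 * need_update stays clear, so (assuming iwl_pcie_txq_inc_wr_ptr()
	 * skips the register write while need_update is 0) the device only
	 * learns of new TFDs once the burst ends or the queue nears its
	 * high mark at the tail of this function.
	 */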
942 | 1615 | ||
943 | if (WARN_ON(!skb_queue_empty(skbs))) | 1616 | /* Set up TFD's 2nd entry to point directly to remainder of skb, |
944 | return 0; | 1617 | * if any (802.11 null frames have no payload). */ |
1618 | secondlen = skb->len - hdr_len; | ||
1619 | if (secondlen > 0) { | ||
1620 | phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, | ||
1621 | secondlen, DMA_TO_DEVICE); | ||
1622 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | ||
1623 | dma_unmap_single(trans->dev, | ||
1624 | dma_unmap_addr(out_meta, mapping), | ||
1625 | dma_unmap_len(out_meta, len), | ||
1626 | DMA_BIDIRECTIONAL); | ||
1627 | goto out_err; | ||
1628 | } | ||
1629 | } | ||
945 | 1630 | ||
946 | for (; | 1631 | /* Attach buffers to TFD */ |
947 | q->read_ptr != index; | 1632 | iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1); |
948 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | 1633 | if (secondlen > 0) |
1634 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0); | ||
949 | 1635 | ||
950 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) | 1636 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + |
951 | continue; | 1637 | offsetof(struct iwl_tx_cmd, scratch); |
952 | 1638 | ||
953 | __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); | 1639 | /* take back ownership of DMA buffer to enable update */ |
1640 | dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, | ||
1641 | DMA_BIDIRECTIONAL); | ||
1642 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1643 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
954 | 1644 | ||
955 | txq->entries[txq->q.read_ptr].skb = NULL; | 1645 | IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n", |
1646 | le16_to_cpu(dev_cmd->hdr.sequence)); | ||
1647 | IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
956 | 1648 | ||
957 | iwlagn_txq_inval_byte_cnt_tbl(trans, txq); | 1649 | /* Set up entry for this TFD in Tx byte-count array */ |
1650 | iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); | ||
958 | 1651 | ||
959 | iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE); | 1652 | dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, |
960 | freed++; | 1653 | DMA_BIDIRECTIONAL); |
961 | } | 1654 | |
1655 | trace_iwlwifi_dev_tx(trans->dev, skb, | ||
1656 | &txq->tfds[txq->q.write_ptr], | ||
1657 | sizeof(struct iwl_tfd), | ||
1658 | &dev_cmd->hdr, firstlen, | ||
1659 | skb->data + hdr_len, secondlen); | ||
1660 | trace_iwlwifi_dev_tx_data(trans->dev, skb, | ||
1661 | skb->data + hdr_len, secondlen); | ||
962 | 1662 | ||
963 | iwl_queue_progress(trans_pcie, txq); | 1663 | /* start timer if queue currently empty */ |
1664 | if (txq->need_update && q->read_ptr == q->write_ptr && | ||
1665 | trans_pcie->wd_timeout) | ||
1666 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
1667 | |||
1668 | /* Tell device the write index *just past* this latest filled TFD */ | ||
1669 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
1670 | iwl_pcie_txq_inc_wr_ptr(trans, txq); | ||
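	/*
	 * iwl_queue_inc_wrap() advances the index modulo the ring size
	 * n_bd, e.g. with n_bd == 256 a write_ptr of 255 wraps to 0 here.
	 */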
964 | 1671 | ||
965 | return freed; | 1672 | /* |
1673 | * At this point the frame is "transmitted" successfully | ||
1674 | * and we will get a TX status notification eventually. | ||
1675 | * The only remaining decision is whether and when to push | ||
1676 | * the updated write pointer to the device. | ||
1677 | */ | ||
1678 | if (iwl_queue_space(q) < q->high_mark) { | ||
1679 | if (wait_write_ptr) { | ||
1680 | txq->need_update = 1; | ||
1681 | iwl_pcie_txq_inc_wr_ptr(trans, txq); | ||
1682 | } else { | ||
1683 | iwl_stop_queue(trans, txq); | ||
1684 | } | ||
1685 | } | ||
1686 | spin_unlock(&txq->lock); | ||
1687 | return 0; | ||
1688 | out_err: | ||
1689 | spin_unlock(&txq->lock); | ||
1690 | return -1; | ||
966 | } | 1691 | } |
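In short, the contract this function exports (hypothetical caller; return values as coded above):

	/* 0: skb/dev_cmd are now owned by the ring and a Tx status
	 * notification will follow; non-zero (-EINVAL for an unused queue,
	 * -1 on a DMA mapping error): nothing was consumed */
	if (iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id))
		goto drop;	/* hypothetical error label */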