path: root/drivers/net/wireless/iwlwifi/pcie/tx.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2013-03-11 10:51:59 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-03-11 10:51:59 -0400
commit		0cb77508252e2d0e00c5ec7e57b4be9b3f7eb24d (patch)
tree		cb7ed9fbc7f8fdc6e727d60e2627799a7fd7a8cc /drivers/net/wireless/iwlwifi/pcie/tx.c
parent		ffb6a445e7cdc03d67f8b9fb2f5afaafd8260b4b (diff)
parent		9026c4927254f5bea695cc3ef2e255280e6a3011 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Missing cancel of work items in mac80211 MLME, from Ben Greear.
 2) Fix DMA mapping handling in iwlwifi by using coherent DMA for
    command headers, from Johannes Berg.
 3) Decrease the amount of pressure on the page allocator by using
    order 1 pages less in iwlwifi, from Emmanuel Grumbach.
 4) Fix mesh PS broadcast OOPS in mac80211, from Marco Porsch.
 5) Don't forget to recalculate idle state in mac80211 monitor
    interface, from Felix Fietkau.
 6) Fix varargs in netfilter conntrack handler, from Joe Perches.
 7) Need to reset entire chip when command queue fills up in iwlwifi,
    from Emmanuel Grumbach.
 8) The TX antenna value must be valid when calibrations are performed
    in iwlwifi, fix from Dor Shaish.
 9) Don't generate netfilter audit log entries when audit is disabled,
    from Gao Feng.
10) Deal with DMA unit hang on e1000e during power state transitions,
    from Bruce Allan.
11) Remove BUILD_BUG_ON check from igb driver, from Alexander Duyck.
12) Fix lockdep warning on i2c handling of igb driver, from Carolyn
    Wyborny.
13) Fix several TTY handling issues in IRDA ircomm tty driver, from
    Peter Hurley.
14) Several QFQ packet scheduler fixes from Paolo Valente.
15) When VXLAN encapsulates on transmit, we have to reset the netfilter
    state. From Zang MingJie.
16) Fix jiffies check in net_rx_action() so that we really cap the
    processing at 2HZ. From Eric Dumazet.
17) Fix erroneous trigger of IP option space exhaustion: when routers
    are pre-specified and we are looking to see if we can insert a
    timestamp, we will have the space. From David Ward.
18) Fix various issues in the benet driver wrt waiting for firmware to
    finish POST after resets or errors. From Gavin Shan and Sathya
    Perla.
19) Fix TX locking in SFC driver, from Ben Hutchings.
20) Like the VXLAN fix above, when we encap in a TUN device we have to
    reset the netfilter state. This should fix several strange crashes
    reported by Dave Jones and others. From Eric Dumazet.
21) Don't forget to clean up MAC address resources when shutting down a
    port in the mlx4 driver, from Yan Burman.
22) Fix divide by zero in vmxnet3 driver, from Bhavesh Davda.
23) Fix device statistics regression in tg3 when the driver is using
    phylib, from Nithin Sujir.
24) Fix info leak in several netlink handlers, from Mathias Krause.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (79 commits)
  6lowpan: Fix endianness issue in is_addr_link_local().
  rrunner.c: fix possible memory leak in rr_init_one()
  dcbnl: fix various netlink info leaks
  rtnl: fix info leak on RTM_GETLINK request for VF devices
  bridge: fix mdb info leaks
  tg3: Update link_up flag for phylib devices
  ipv6: stop multicast forwarding to process interface scoped addresses
  bridging: fix rx_handlers return code
  netlabel: fix build problems when CONFIG_IPV6=n
  drivers/isdn: checkng length to be sure not memory overflow
  net/rds: zero last byte for strncpy
  bnx2x: Fix SFP+ misconfiguration in iSCSI boot scenario
  bnx2x: Fix intermittent long KR2 link up time
  macvlan: Set IFF_UNICAST_FLT flag to prevent unnecessary promisc mode.
  team: unsyc the devices addresses when port is removed
  bridge: add missing vid to br_mdb_get()
  Fix: sparse warning in inet_csk_prepare_forced_close
  afkey: fix a typo
  MAINTAINERS: Update qlcnic maintainers list
  netlabel: correctly list all the static label mappings
  ...
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/tx.c	266
1 file changed, 124 insertions(+), 142 deletions(-)
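Fix (2) above is the subject of the diff that follows: command headers move from per-command streaming DMA mappings (dma_map_single() with DMA_BIDIRECTIONAL plus ownership-passing sync calls) to one coherent allocation that backs a small per-slot "scratch buffer" for every queue entry. A minimal sketch of that allocation pattern, with hypothetical names (my_queue, my_slot and MY_SLOT_SIZE are illustrative stand-ins, not the driver's identifiers):

  #include <linux/dma-mapping.h>
  #include <linux/gfp.h>
  #include <linux/types.h>

  #define MY_SLOT_SIZE 16	/* stands in for IWL_HCMD_SCRATCHBUF_SIZE */

  struct my_slot {
  	u8 data[MY_SLOT_SIZE];
  };

  struct my_queue {
  	struct my_slot *slots;	/* CPU view of the coherent block */
  	dma_addr_t slots_dma;	/* device (bus) view of the same block */
  	int n_slots;
  };

  /* One coherent allocation backs every slot: both CPU and device may
   * touch it at any time, so per-command map/unmap/sync goes away. */
  static int my_queue_alloc(struct device *dev, struct my_queue *q, int n)
  {
  	q->slots = dma_alloc_coherent(dev, sizeof(*q->slots) * n,
  				      &q->slots_dma, GFP_KERNEL);
  	if (!q->slots)
  		return -ENOMEM;
  	q->n_slots = n;
  	return 0;
  }

  /* Device address of slot i: same offset in both address spaces. */
  static dma_addr_t my_slot_dma(struct my_queue *q, int i)
  {
  	return q->slots_dma + i * sizeof(struct my_slot);
  }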
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8b625a7f5685..8595c16f74de 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
 	}
 
 	for (i = q->read_ptr; i != q->write_ptr;
-	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
-		struct iwl_tx_cmd *tx_cmd =
-			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+	     i = iwl_queue_inc_wrap(i, q->n_bd))
 		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
-			get_unaligned_le32(&tx_cmd->scratch));
-	}
+			le32_to_cpu(txq->scratchbufs[i].scratch));
 
 	iwl_op_mode_nic_error(trans->op_mode);
 }
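With the scratch copy in coherent memory, the stuck-queue dump above no longer digs the value out of the command payload with get_unaligned_le32(); it reads the device-visible copy directly, with no unmap or sync step needed first:

  u32 scratch = le32_to_cpu(txq->scratchbufs[i].scratch);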
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
-			       struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
-			       enum dma_data_direction dma_dir)
+			       struct iwl_cmd_meta *meta,
+			       struct iwl_tfd *tfd)
 {
 	int i;
 	int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 		return;
 	}
 
-	/* Unmap tx_cmd */
-	if (num_tbs)
-		dma_unmap_single(trans->dev,
-				 dma_unmap_addr(meta, mapping),
-				 dma_unmap_len(meta, len),
-				 DMA_BIDIRECTIONAL);
+	/* first TB is never freed - it's the scratchbuf data */
 
-	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
 		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-				 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
+				 iwl_pcie_tfd_tb_get_len(tfd, i),
+				 DMA_TO_DEVICE);
 
 	tfd->num_tbs = 0;
 }
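Because TB 0 of each TFD now points into the queue's coherent scratch block, the unmap path above starts its loop at 1 and the meta mapping/len bookkeeping disappears entirely; only TBs 1 and up were streaming-mapped, all host-to-device. The shape of the rule, continuing the earlier sketch against a hypothetical TFD layout (the 20-entry array mirrors IWL_NUM_OF_TBS but is illustrative):

  struct my_tb { dma_addr_t addr; u16 len; };

  struct my_tfd {
  	u8 num_tbs;
  	struct my_tb tb[20];
  };

  static void my_tfd_unmap(struct device *dev, struct my_tfd *tfd)
  {
  	int i;

  	/* i starts at 1: TB 0 is coherent scratch, released only at
  	 * queue teardown via dma_free_coherent(), never here. */
  	for (i = 1; i < tfd->num_tbs; i++)
  		dma_unmap_single(dev, tfd->tb[i].addr, tfd->tb[i].len,
  				 DMA_TO_DEVICE);
  	tfd->num_tbs = 0;
  }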
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-				  enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 	lockdep_assert_held(&txq->lock);
 
 	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
-	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
-			   dma_dir);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+	size_t scratchbuf_sz;
 	int i;
 
 	if (WARN_ON(txq->entries || txq->tfds))
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
 		goto error;
 	}
+
+	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
+	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
+			sizeof(struct iwl_cmd_header) +
+			offsetof(struct iwl_tx_cmd, scratch));
+
+	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
+
+	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
+					      &txq->scratchbufs_dma,
+					      GFP_KERNEL);
+	if (!txq->scratchbufs)
+		goto err_free_tfds;
+
 	txq->q.id = txq_id;
 
 	return 0;
+err_free_tfds:
+	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
 error:
 	if (txq->entries && txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < slots_num; i++)
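The second coherent allocation gets its own unwind label: if the scratch block cannot be allocated, the already-allocated TFD array must be freed before falling through to the common error path. That is the standard kernel goto ladder, roughly (same hypothetical queue as earlier, now assumed to also carry tfds/tfds_dma fields):

  static int my_queue_alloc_all(struct device *dev, struct my_queue *q,
  			      size_t tfd_sz, int n)
  {
  	q->tfds = dma_alloc_coherent(dev, tfd_sz, &q->tfds_dma, GFP_KERNEL);
  	if (!q->tfds)
  		goto error;

  	q->slots = dma_alloc_coherent(dev, sizeof(*q->slots) * n,
  				      &q->slots_dma, GFP_KERNEL);
  	if (!q->slots)
  		goto err_free_tfds;	/* unwind the earlier allocation */

  	q->n_slots = n;
  	return 0;

  err_free_tfds:
  	dma_free_coherent(dev, tfd_sz, q->tfds, q->tfds_dma);
  error:
  	return -ENOMEM;
  }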
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
-	enum dma_data_direction dma_dir;
 
 	if (!q->n_bd)
 		return;
 
-	/* In the command queue, all the TBs are mapped as BIDI
-	 * so unmap them as such.
-	 */
-	if (txq_id == trans_pcie->cmd_queue)
-		dma_dir = DMA_BIDIRECTIONAL;
-	else
-		dma_dir = DMA_TO_DEVICE;
-
 	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
-		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+		iwl_pcie_txq_free_tfd(trans, txq);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 	spin_unlock_bh(&txq->lock);
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	if (txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < txq->q.n_window; i++) {
 			kfree(txq->entries[i].cmd);
-			kfree(txq->entries[i].copy_cmd);
 			kfree(txq->entries[i].free_buf);
 		}
 
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
 				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 		txq->q.dma_addr = 0;
+
+		dma_free_coherent(dev,
+				  sizeof(*txq->scratchbufs) * txq->q.n_window,
+				  txq->scratchbufs, txq->scratchbufs_dma);
 	}
 
 	kfree(txq->entries);
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
-		iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+		iwl_pcie_txq_free_tfd(trans, txq);
 	}
 
 	iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	void *dup_buf = NULL;
 	dma_addr_t phys_addr;
 	int idx;
-	u16 copy_size, cmd_size, dma_size;
+	u16 copy_size, cmd_size, scratch_size;
 	bool had_nocopy = false;
 	int i;
 	u32 cmd_pos;
-	const u8 *cmddata[IWL_MAX_CMD_TFDS];
-	u16 cmdlen[IWL_MAX_CMD_TFDS];
+	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
 	copy_size = sizeof(out_cmd->hdr);
 	cmd_size = sizeof(out_cmd->hdr);
 
 	/* need one for the header if the first is NOCOPY */
-	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
 
-	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		cmddata[i] = cmd->data[i];
 		cmdlen[i] = cmd->len[i];
 
 		if (!cmd->len[i])
 			continue;
 
-		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
-		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
-			int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
 
 			if (copy > cmdlen[i])
 				copy = cmdlen[i];
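Both copy loops in iwl_pcie_enqueue_hcmd() enforce the same invariant under the new name: at least IWL_HCMD_SCRATCHBUF_SIZE bytes of every command must land in the copied prefix, even when the caller marked fragments NOCOPY/DUP, because TB 0 is always filled from that prefix. The clamp, reduced to a hypothetical helper (not driver code, reusing MY_SLOT_SIZE from the first sketch):

  /* Grow the copied prefix by up to 'len' bytes of the next fragment,
   * until the prefix covers one scratch slot. Returns the new size. */
  static u16 my_forced_copy(u16 copy_size, u16 len)
  {
  	if (copy_size < MY_SLOT_SIZE) {
  		u16 copy = MY_SLOT_SIZE - copy_size;

  		if (copy > len)
  			copy = len;
  		copy_size += copy;
  	}
  	return copy_size;
  }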
@@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	/* and copy the data that needs to be copied */
 	cmd_pos = offsetof(struct iwl_device_cmd, payload);
 	copy_size = sizeof(out_cmd->hdr);
-	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		int copy = 0;
 
 		if (!cmd->len)
 			continue;
 
-		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
-		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
-			copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
 
 			if (copy > cmd->len[i])
 				copy = cmd->len[i];
@@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		}
 	}
 
-	WARN_ON_ONCE(txq->entries[idx].copy_cmd);
-
-	/*
-	 * since out_cmd will be the source address of the FH, it will write
-	 * the retry count there. So when the user needs to receive the HCMD
-	 * that corresponds to the response in the response handler, it needs
-	 * to set CMD_WANT_HCMD.
-	 */
-	if (cmd->flags & CMD_WANT_HCMD) {
-		txq->entries[idx].copy_cmd =
-			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
-		if (unlikely(!txq->entries[idx].copy_cmd)) {
-			idx = -ENOMEM;
-			goto out;
-		}
-	}
-
 	IWL_DEBUG_HC(trans,
 		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
 		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
 		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
-	/*
-	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
-	 * still map at least that many bytes for the hardware to write back to.
-	 * We have enough space, so that's not a problem.
-	 */
-	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
+	/* start the TFD with the scratchbuf */
+	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+	iwl_pcie_txq_build_tfd(trans, txq,
+			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
+			       scratch_size, 1);
+
+	/* map first command fragment, if any remains */
+	if (copy_size > scratch_size) {
+		phys_addr = dma_map_single(trans->dev,
+					   ((u8 *)&out_cmd->hdr) + scratch_size,
+					   copy_size - scratch_size,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(trans->dev, phys_addr)) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			idx = -ENOMEM;
+			goto out;
+		}
 
-	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
-				   DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-		idx = -ENOMEM;
-		goto out;
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+				       copy_size - scratch_size, 0);
 	}
 
-	dma_unmap_addr_set(out_meta, mapping, phys_addr);
-	dma_unmap_len_set(out_meta, len, dma_size);
-
-	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
-
 	/* map the remaining (adjusted) nocopy/dup fragments */
-	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		const void *data = cmddata[i];
 
 		if (!cmdlen[i])
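Command enqueue now always opens the TFD with the scratch slot and maps only the remainder of the copied command as a second, host-to-device TB; DMA_BIDIRECTIONAL is gone because the device no longer writes anything back into host command buffers. In outline, continuing the sketch (my_build_tb() is a stand-in for iwl_pcie_txq_build_tfd(), stubbed out here; min_t comes from linux/kernel.h):

  /* Stand-in for iwl_pcie_txq_build_tfd(); a real driver would record
   * the TB's address and length in the TFD here. */
  static void my_build_tb(struct my_queue *q, int idx,
  			dma_addr_t addr, u16 len) { }

  static int my_enqueue_cmd(struct device *dev, struct my_queue *q,
  			  int idx, void *cmd, u16 copy_size)
  {
  	u16 slot = min_t(u16, copy_size, MY_SLOT_SIZE);
  	dma_addr_t phys;

  	/* TB 0: first bytes of the command, copied into coherent memory */
  	memcpy(&q->slots[idx], cmd, slot);
  	my_build_tb(q, idx, my_slot_dma(q, idx), slot);

  	/* TB 1: rest of the copied command, streaming, to-device only */
  	if (copy_size > slot) {
  		phys = dma_map_single(dev, (u8 *)cmd + slot,
  				      copy_size - slot, DMA_TO_DEVICE);
  		if (dma_mapping_error(dev, phys))
  			return -ENOMEM;
  		my_build_tb(q, idx, phys, copy_size - slot);
  	}
  	return 0;
  }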
@@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 			data = dup_buf;
 		phys_addr = dma_map_single(trans->dev, (void *)data,
-					   cmdlen[i], DMA_BIDIRECTIONAL);
+					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr],
-					   DMA_BIDIRECTIONAL);
+					   &txq->tfds[q->write_ptr]);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	cmd = txq->entries[cmd_index].cmd;
 	meta = &txq->entries[cmd_index].meta;
 
-	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
 	struct iwl_queue *q;
-	dma_addr_t phys_addr = 0;
-	dma_addr_t txcmd_phys;
-	dma_addr_t scratch_phys;
-	u16 len, firstlen, secondlen;
+	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+	void *tb1_addr;
+	u16 len, tb1_len, tb2_len;
 	u8 wait_write_ptr = 0;
 	__le16 fc = hdr->frame_control;
 	u8 hdr_len = ieee80211_hdrlen(fc);
@@ -1638,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
 			    INDEX_TO_SEQ(q->write_ptr)));
 
+	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+		       offsetof(struct iwl_tx_cmd, scratch);
+
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_meta = &txq->entries[q->write_ptr].meta;
 
 	/*
-	 * Use the first empty entry in this queue's command buffer array
-	 * to contain the Tx command and MAC header concatenated together
-	 * (payload data will be in another buffer).
-	 * Size of this varies, due to varying MAC header length.
-	 * If end is not dword aligned, we'll have 2 extra bytes at the end
-	 * of the MAC header (device reads on dword boundaries).
-	 * We'll tell device about this padding later.
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
 	 */
-	len = sizeof(struct iwl_tx_cmd) +
-		sizeof(struct iwl_cmd_header) + hdr_len;
-	firstlen = (len + 3) & ~3;
+	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+	tb1_len = (len + 3) & ~3;
 
 	/* Tell NIC about any 2-byte padding after MAC header */
-	if (firstlen != len)
+	if (tb1_len != len)
 		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
 
-	/* Physical address of this Tx command's header (not MAC header!),
-	 * within command buffer array. */
-	txcmd_phys = dma_map_single(trans->dev,
-				    &dev_cmd->hdr, firstlen,
-				    DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-		goto out_err;
-	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, firstlen);
+	/* The first TB points to the scratchbuf data - min_copy bytes */
+	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
+	       IWL_HCMD_SCRATCHBUF_SIZE);
+	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+			       IWL_HCMD_SCRATCHBUF_SIZE, 1);
 
-	if (!ieee80211_has_morefrags(fc)) {
-		txq->need_update = 1;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
+	/* there must be data left over for TB1 or this code must be changed */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
 
-	/* Set up TFD's 2nd entry to point directly to remainder of skb,
-	 * if any (802.11 null frames have no payload). */
-	secondlen = skb->len - hdr_len;
-	if (secondlen > 0) {
-		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
-					   secondlen, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-			dma_unmap_single(trans->dev,
-					 dma_unmap_addr(out_meta, mapping),
-					 dma_unmap_len(out_meta, len),
-					 DMA_BIDIRECTIONAL);
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+		goto out_err;
+	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+
+	/*
+	 * Set up TFD's third entry to point directly to remainder
+	 * of skb, if any (802.11 null frames have no payload).
+	 */
+	tb2_len = skb->len - hdr_len;
+	if (tb2_len > 0) {
+		dma_addr_t tb2_phys = dma_map_single(trans->dev,
+						     skb->data + hdr_len,
+						     tb2_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
 			goto out_err;
 		}
+		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
 	}
 
-	/* Attach buffers to TFD */
-	iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
-	if (secondlen > 0)
-		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
-
-	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-		       offsetof(struct iwl_tx_cmd, scratch);
-
-	/* take back ownership of DMA buffer to enable update */
-	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
-				DMA_BIDIRECTIONAL);
-	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
-	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
-				   DMA_BIDIRECTIONAL);
-
 	trace_iwlwifi_dev_tx(trans->dev, skb,
 			     &txq->tfds[txq->q.write_ptr],
 			     sizeof(struct iwl_tfd),
-			     &dev_cmd->hdr, firstlen,
-			     skb->data + hdr_len, secondlen);
+			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, secondlen);
+				  skb->data + hdr_len, tb2_len);
+
+	if (!ieee80211_has_morefrags(fc)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
 
 	/* start timer if queue currently empty */
 	if (txq->need_update && q->read_ptr == q->write_ptr &&
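Data frames thus end up with a fixed three-TB layout: TB 0 is the first IWL_HCMD_SCRATCHBUF_SIZE bytes of the TX command in coherent scratch (which is why the scratch DRAM pointers can be written before any mapping happens), TB 1 is the rest of the command plus the 802.11 header (dword-padded, with the pad flagged to the NIC), and TB 2 is the skb payload. The TB 1 length arithmetic from the hunk, restated as a hypothetical helper:

  /* Length of TB 1: command + 802.11 header minus what TB 0 already
   * carries, rounded up to a dword; *padded tells the caller whether
   * TX_CMD_FLG_MH_PAD_MSK must be set so the NIC skips the pad bytes. */
  static u16 my_tb1_len(u16 cmd_len, u16 hdr_len, bool *padded)
  {
  	u16 len = cmd_len + hdr_len - MY_SLOT_SIZE;
  	u16 aligned = (len + 3) & ~3;

  	*padded = aligned != len;
  	return aligned;
  }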