diff options
author | Johannes Berg <johannes.berg@intel.com> | 2012-03-05 14:24:24 -0500 |
---|---|---|
committer | John W. Linville <linville@tuxdriver.com> | 2012-03-06 15:16:08 -0500 |
commit | 015c15e1067c988fc87fb550b222f075c8d3f47c (patch) | |
tree | 20ebc3c951e8d9e531410d3f0a116ea39405f752 /drivers/net | |
parent | 9451ca1a31be44f8235c3f8f998ff27fc7a8395e (diff) |
iwlwifi: introduce per-queue locks
Instead of (ab)using the sta_lock, make the
transport layer lock its own TX queue data
structures with a lock per queue. This also
unifies with the cmd queue lock.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h | 2 | ||||
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c | 20 | ||||
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 31 | ||||
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans.h | 2 |
4 files changed, 33 insertions, 22 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h index 5b26b71ae3d5..b1029468ccbd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h | |||
@@ -169,6 +169,7 @@ struct iwl_queue { | |||
169 | * @meta: array of meta data for each command/tx buffer | 169 | * @meta: array of meta data for each command/tx buffer |
170 | * @dma_addr_cmd: physical address of cmd/tx buffer array | 170 | * @dma_addr_cmd: physical address of cmd/tx buffer array |
171 | * @txb: array of per-TFD driver data | 171 | * @txb: array of per-TFD driver data |
172 | * @lock: queue lock | ||
172 | * @time_stamp: time (in jiffies) of last read_ptr change | 173 | * @time_stamp: time (in jiffies) of last read_ptr change |
173 | * @need_update: indicates need to update read/write index | 174 | * @need_update: indicates need to update read/write index |
174 | * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled | 175 | * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled |
@@ -187,6 +188,7 @@ struct iwl_tx_queue { | |||
187 | struct iwl_device_cmd **cmd; | 188 | struct iwl_device_cmd **cmd; |
188 | struct iwl_cmd_meta *meta; | 189 | struct iwl_cmd_meta *meta; |
189 | struct sk_buff **skbs; | 190 | struct sk_buff **skbs; |
191 | spinlock_t lock; | ||
190 | unsigned long time_stamp; | 192 | unsigned long time_stamp; |
191 | u8 need_update; | 193 | u8 need_update; |
192 | u8 sched_retry; | 194 | u8 sched_retry; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c index 82e34484fa5e..1cb1dd29b3fe 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c | |||
@@ -217,6 +217,8 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | |||
217 | { | 217 | { |
218 | struct iwl_tfd *tfd_tmp = txq->tfds; | 218 | struct iwl_tfd *tfd_tmp = txq->tfds; |
219 | 219 | ||
220 | lockdep_assert_held(&txq->lock); | ||
221 | |||
220 | iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir); | 222 | iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir); |
221 | 223 | ||
222 | /* free SKB */ | 224 | /* free SKB */ |
@@ -621,7 +623,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
621 | struct iwl_device_cmd *out_cmd; | 623 | struct iwl_device_cmd *out_cmd; |
622 | struct iwl_cmd_meta *out_meta; | 624 | struct iwl_cmd_meta *out_meta; |
623 | dma_addr_t phys_addr; | 625 | dma_addr_t phys_addr; |
624 | unsigned long flags; | ||
625 | u32 idx; | 626 | u32 idx; |
626 | u16 copy_size, cmd_size; | 627 | u16 copy_size, cmd_size; |
627 | bool is_ct_kill = false; | 628 | bool is_ct_kill = false; |
@@ -680,10 +681,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
680 | return -EIO; | 681 | return -EIO; |
681 | } | 682 | } |
682 | 683 | ||
683 | spin_lock_irqsave(&trans->hcmd_lock, flags); | 684 | spin_lock_bh(&txq->lock); |
684 | 685 | ||
685 | if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { | 686 | if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { |
686 | spin_unlock_irqrestore(&trans->hcmd_lock, flags); | 687 | spin_unlock_bh(&txq->lock); |
687 | 688 | ||
688 | IWL_ERR(trans, "No space in command queue\n"); | 689 | IWL_ERR(trans, "No space in command queue\n"); |
689 | is_ct_kill = iwl_check_for_ct_kill(priv(trans)); | 690 | is_ct_kill = iwl_check_for_ct_kill(priv(trans)); |
@@ -790,7 +791,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
790 | iwl_txq_update_write_ptr(trans, txq); | 791 | iwl_txq_update_write_ptr(trans, txq); |
791 | 792 | ||
792 | out: | 793 | out: |
793 | spin_unlock_irqrestore(&trans->hcmd_lock, flags); | 794 | spin_unlock_bh(&txq->lock); |
794 | return idx; | 795 | return idx; |
795 | } | 796 | } |
796 | 797 | ||
@@ -809,6 +810,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, | |||
809 | struct iwl_queue *q = &txq->q; | 810 | struct iwl_queue *q = &txq->q; |
810 | int nfreed = 0; | 811 | int nfreed = 0; |
811 | 812 | ||
813 | lockdep_assert_held(&txq->lock); | ||
814 | |||
812 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { | 815 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { |
813 | IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), " | 816 | IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), " |
814 | "index %d is out of range [0-%d] %d %d.\n", __func__, | 817 | "index %d is out of range [0-%d] %d %d.\n", __func__, |
@@ -850,7 +853,6 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb, | |||
850 | struct iwl_cmd_meta *meta; | 853 | struct iwl_cmd_meta *meta; |
851 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 854 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
852 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue]; | 855 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue]; |
853 | unsigned long flags; | ||
854 | 856 | ||
855 | /* If a Tx command is being handled and it isn't in the actual | 857 | /* If a Tx command is being handled and it isn't in the actual |
856 | * command queue then a command routing bug has been introduced | 858 | * command queue then a command routing bug has been introduced |
@@ -864,6 +866,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb, | |||
864 | return; | 866 | return; |
865 | } | 867 | } |
866 | 868 | ||
869 | spin_lock(&txq->lock); | ||
870 | |||
867 | cmd_index = get_cmd_index(&txq->q, index); | 871 | cmd_index = get_cmd_index(&txq->q, index); |
868 | cmd = txq->cmd[cmd_index]; | 872 | cmd = txq->cmd[cmd_index]; |
869 | meta = &txq->meta[cmd_index]; | 873 | meta = &txq->meta[cmd_index]; |
@@ -880,8 +884,6 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb, | |||
880 | rxb->page = NULL; | 884 | rxb->page = NULL; |
881 | } | 885 | } |
882 | 886 | ||
883 | spin_lock_irqsave(&trans->hcmd_lock, flags); | ||
884 | |||
885 | iwl_hcmd_queue_reclaim(trans, txq_id, index); | 887 | iwl_hcmd_queue_reclaim(trans, txq_id, index); |
886 | 888 | ||
887 | if (!(meta->flags & CMD_ASYNC)) { | 889 | if (!(meta->flags & CMD_ASYNC)) { |
@@ -898,7 +900,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb, | |||
898 | 900 | ||
899 | meta->flags = 0; | 901 | meta->flags = 0; |
900 | 902 | ||
901 | spin_unlock_irqrestore(&trans->hcmd_lock, flags); | 903 | spin_unlock(&txq->lock); |
902 | } | 904 | } |
903 | 905 | ||
904 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) | 906 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) |
@@ -1041,6 +1043,8 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, | |||
1041 | if (WARN_ON(txq_id == trans->shrd->cmd_queue)) | 1043 | if (WARN_ON(txq_id == trans->shrd->cmd_queue)) |
1042 | return 0; | 1044 | return 0; |
1043 | 1045 | ||
1046 | lockdep_assert_held(&txq->lock); | ||
1047 | |||
1044 | /*Since we free until index _not_ inclusive, the one before index is | 1048 | /*Since we free until index _not_ inclusive, the one before index is |
1045 | * the last we will free. This one must be used */ | 1049 | * the last we will free. This one must be used */ |
1046 | last_to_free = iwl_queue_dec_wrap(index, q->n_bd); | 1050 | last_to_free = iwl_queue_dec_wrap(index, q->n_bd); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 9f8b23909404..f47426a5ef4d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | |||
@@ -390,6 +390,8 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, | |||
390 | if (ret) | 390 | if (ret) |
391 | return ret; | 391 | return ret; |
392 | 392 | ||
393 | spin_lock_init(&txq->lock); | ||
394 | |||
393 | /* | 395 | /* |
394 | * Tell nic where to find circular buffer of Tx Frame Descriptors for | 396 | * Tell nic where to find circular buffer of Tx Frame Descriptors for |
395 | * given Tx queue, and enable the DMA channel used for that queue. | 397 | * given Tx queue, and enable the DMA channel used for that queue. |
@@ -409,8 +411,6 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) | |||
409 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | 411 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; |
410 | struct iwl_queue *q = &txq->q; | 412 | struct iwl_queue *q = &txq->q; |
411 | enum dma_data_direction dma_dir; | 413 | enum dma_data_direction dma_dir; |
412 | unsigned long flags; | ||
413 | spinlock_t *lock; | ||
414 | 414 | ||
415 | if (!q->n_bd) | 415 | if (!q->n_bd) |
416 | return; | 416 | return; |
@@ -418,22 +418,19 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) | |||
418 | /* In the command queue, all the TBs are mapped as BIDI | 418 | /* In the command queue, all the TBs are mapped as BIDI |
419 | * so unmap them as such. | 419 | * so unmap them as such. |
420 | */ | 420 | */ |
421 | if (txq_id == trans->shrd->cmd_queue) { | 421 | if (txq_id == trans->shrd->cmd_queue) |
422 | dma_dir = DMA_BIDIRECTIONAL; | 422 | dma_dir = DMA_BIDIRECTIONAL; |
423 | lock = &trans->hcmd_lock; | 423 | else |
424 | } else { | ||
425 | dma_dir = DMA_TO_DEVICE; | 424 | dma_dir = DMA_TO_DEVICE; |
426 | lock = &trans->shrd->sta_lock; | ||
427 | } | ||
428 | 425 | ||
429 | spin_lock_irqsave(lock, flags); | 426 | spin_lock_bh(&txq->lock); |
430 | while (q->write_ptr != q->read_ptr) { | 427 | while (q->write_ptr != q->read_ptr) { |
431 | /* The read_ptr needs to be bounded by q->n_window */ | 428 | /* The read_ptr needs to be bounded by q->n_window */ |
432 | iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr), | 429 | iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr), |
433 | dma_dir); | 430 | dma_dir); |
434 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | 431 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); |
435 | } | 432 | } |
436 | spin_unlock_irqrestore(lock, flags); | 433 | spin_unlock_bh(&txq->lock); |
437 | } | 434 | } |
438 | 435 | ||
439 | /** | 436 | /** |
@@ -1358,6 +1355,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1358 | txq = &trans_pcie->txq[txq_id]; | 1355 | txq = &trans_pcie->txq[txq_id]; |
1359 | q = &txq->q; | 1356 | q = &txq->q; |
1360 | 1357 | ||
1358 | spin_lock(&txq->lock); | ||
1359 | |||
1361 | /* In AGG mode, the index in the ring must correspond to the WiFi | 1360 | /* In AGG mode, the index in the ring must correspond to the WiFi |
1362 | * sequence number. This is a HW requirement to help the SCD to parse | 1361 | * sequence number. This is a HW requirement to help the SCD to parse |
1363 | * the BA. | 1362 | * the BA. |
@@ -1404,7 +1403,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1404 | &dev_cmd->hdr, firstlen, | 1403 | &dev_cmd->hdr, firstlen, |
1405 | DMA_BIDIRECTIONAL); | 1404 | DMA_BIDIRECTIONAL); |
1406 | if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) | 1405 | if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) |
1407 | return -1; | 1406 | goto out_err; |
1408 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | 1407 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); |
1409 | dma_unmap_len_set(out_meta, len, firstlen); | 1408 | dma_unmap_len_set(out_meta, len, firstlen); |
1410 | 1409 | ||
@@ -1426,7 +1425,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1426 | dma_unmap_addr(out_meta, mapping), | 1425 | dma_unmap_addr(out_meta, mapping), |
1427 | dma_unmap_len(out_meta, len), | 1426 | dma_unmap_len(out_meta, len), |
1428 | DMA_BIDIRECTIONAL); | 1427 | DMA_BIDIRECTIONAL); |
1429 | return -1; | 1428 | goto out_err; |
1430 | } | 1429 | } |
1431 | } | 1430 | } |
1432 | 1431 | ||
@@ -1481,7 +1480,11 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1481 | iwl_stop_queue(trans, txq, "Queue is full"); | 1480 | iwl_stop_queue(trans, txq, "Queue is full"); |
1482 | } | 1481 | } |
1483 | } | 1482 | } |
1483 | spin_unlock(&txq->lock); | ||
1484 | return 0; | 1484 | return 0; |
1485 | out_err: | ||
1486 | spin_unlock(&txq->lock); | ||
1487 | return -1; | ||
1485 | } | 1488 | } |
1486 | 1489 | ||
1487 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | 1490 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) |
@@ -1560,6 +1563,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, | |||
1560 | int tfd_num = ssn & (txq->q.n_bd - 1); | 1563 | int tfd_num = ssn & (txq->q.n_bd - 1); |
1561 | int freed = 0; | 1564 | int freed = 0; |
1562 | 1565 | ||
1566 | spin_lock(&txq->lock); | ||
1567 | |||
1563 | txq->time_stamp = jiffies; | 1568 | txq->time_stamp = jiffies; |
1564 | 1569 | ||
1565 | if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && | 1570 | if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && |
@@ -1574,6 +1579,7 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, | |||
1574 | IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, " | 1579 | IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, " |
1575 | "agg_txq[sta_id[tid] %d", txq_id, | 1580 | "agg_txq[sta_id[tid] %d", txq_id, |
1576 | trans_pcie->agg_txq[sta_id][tid]); | 1581 | trans_pcie->agg_txq[sta_id][tid]); |
1582 | spin_unlock(&txq->lock); | ||
1577 | return 1; | 1583 | return 1; |
1578 | } | 1584 | } |
1579 | 1585 | ||
@@ -1587,6 +1593,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, | |||
1587 | status != TX_STATUS_FAIL_PASSIVE_NO_RX)) | 1593 | status != TX_STATUS_FAIL_PASSIVE_NO_RX)) |
1588 | iwl_wake_queue(trans, txq, "Packets reclaimed"); | 1594 | iwl_wake_queue(trans, txq, "Packets reclaimed"); |
1589 | } | 1595 | } |
1596 | |||
1597 | spin_unlock(&txq->lock); | ||
1590 | return 0; | 1598 | return 0; |
1591 | } | 1599 | } |
1592 | 1600 | ||
@@ -2267,7 +2275,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, | |||
2267 | trans->ops = &trans_ops_pcie; | 2275 | trans->ops = &trans_ops_pcie; |
2268 | trans->shrd = shrd; | 2276 | trans->shrd = shrd; |
2269 | trans_pcie->trans = trans; | 2277 | trans_pcie->trans = trans; |
2270 | spin_lock_init(&trans->hcmd_lock); | ||
2271 | spin_lock_init(&trans_pcie->irq_lock); | 2278 | spin_lock_init(&trans_pcie->irq_lock); |
2272 | 2279 | ||
2273 | /* W/A - seems to solve weird behavior. We need to remove this if we | 2280 | /* W/A - seems to solve weird behavior. We need to remove this if we |
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 4e7e6c0eede9..e2f21cfc2cd4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h | |||
@@ -309,7 +309,6 @@ enum iwl_trans_state { | |||
309 | * @ops - pointer to iwl_trans_ops | 309 | * @ops - pointer to iwl_trans_ops |
310 | * @op_mode - pointer to the op_mode | 310 | * @op_mode - pointer to the op_mode |
311 | * @shrd - pointer to iwl_shared which holds shared data from the upper layer | 311 | * @shrd - pointer to iwl_shared which holds shared data from the upper layer |
312 | * @hcmd_lock: protects HCMD | ||
313 | * @reg_lock - protect hw register access | 312 | * @reg_lock - protect hw register access |
314 | * @dev - pointer to struct device * that represents the device | 313 | * @dev - pointer to struct device * that represents the device |
315 | * @irq - the irq number for the device | 314 | * @irq - the irq number for the device |
@@ -326,7 +325,6 @@ struct iwl_trans { | |||
326 | struct iwl_op_mode *op_mode; | 325 | struct iwl_op_mode *op_mode; |
327 | struct iwl_shared *shrd; | 326 | struct iwl_shared *shrd; |
328 | enum iwl_trans_state state; | 327 | enum iwl_trans_state state; |
329 | spinlock_t hcmd_lock; | ||
330 | spinlock_t reg_lock; | 328 | spinlock_t reg_lock; |
331 | 329 | ||
332 | struct device *dev; | 330 | struct device *dev; |