author:    Liad Kaufman <liad.kaufman@intel.com>	2015-08-19 10:34:28 -0400
committer: Luca Coelho <luciano.coelho@intel.com>	2016-07-05 17:14:27 -0400
commit:    9794c64f302d6d544acbb5ab69a327d694a70fcb (patch)
tree:      8b3da996e69d2c3257872255f4165320a961f8b8 /drivers/net/wireless/intel/iwlwifi/mvm
parent:    ca221c9b946cd4a9ea67375c8d90379a0e65179d (diff)
iwlwifi: mvm: support dqa queue inactivation upon timeout
Support marking queues as inactive upon a timeout expiring,
and allow inactive queues to be re-assigned to other RA/TIDs
if no other queue is free.
This is done by keeping a timestamp of the latest frame TXed
for every RA/TID, and then, whenever a new queue is needed,
going over the queues currently in use and marking as inactive
any whose timeout has expired.
Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm')
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/mvm.h	|  17
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.c	| 197
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.h	|   7
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/tx.c	|  26
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/utils.c	| 166
5 files changed, 398 insertions(+), 15 deletions(-)
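Before the per-file diffs, it may help to see the aging scheme in miniature: every TX stamps the queue's RA/TID with the current jiffies, and a later pass compares those stamps against IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ). The standalone C sketch below models only that bookkeeping, outside the kernel: time() seconds stand in for jiffies, the struct is a trimmed-down queue_info, and all names here are illustrative rather than driver API.

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MAX_TID_COUNT     8
#define MAX_HW_QUEUES     4
#define DQA_QUEUE_TIMEOUT 5 /* seconds; stands in for (5 * HZ) jiffies */

enum queue_status { QUEUE_FREE, QUEUE_RESERVED, QUEUE_READY, QUEUE_INACTIVE };

struct queue_info {
	enum queue_status status;
	unsigned tid_bitmap;                       /* TIDs mapped here */
	time_t last_frame_time[MAX_TID_COUNT + 1]; /* per-TID TX stamp */
};

static struct queue_info queues[MAX_HW_QUEUES];

/* TX path hook: stamp the RA/TID on every transmitted frame */
static void note_tx(int queue, int tid)
{
	queues[queue].last_frame_time[tid] = time(NULL);
}

/* Periodic check: mark a READY queue INACTIVE once all its TIDs go idle */
static void inactivity_check(void)
{
	time_t now = time(NULL);

	for (int q = 0; q < MAX_HW_QUEUES; q++) {
		struct queue_info *qi = &queues[q];
		bool all_idle = true;

		if (qi->status != QUEUE_READY)
			continue;

		for (int tid = 0; tid <= MAX_TID_COUNT; tid++) {
			if (!(qi->tid_bitmap & (1u << tid)))
				continue;
			if (qi->last_frame_time[tid] + DQA_QUEUE_TIMEOUT > now)
				all_idle = false;
		}

		if (all_idle) {
			qi->status = QUEUE_INACTIVE;
			printf("queue %d marked inactive\n", q);
		}
	}
}

int main(void)
{
	queues[0].status = QUEUE_READY;
	queues[0].tid_bitmap = 1u << 0;
	note_tx(0, 0);

	queues[0].last_frame_time[0] -= 10; /* pretend 10s passed */
	inactivity_check();                 /* queue 0 goes inactive */
	return 0;
}
```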
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4b75b9226898..bf7d78e0e14a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -687,13 +687,22 @@ struct iwl_mvm_baid_data {
  *	This is the state of a queue that has been fully configured (including
  *	SCD pointers, etc), has a specific RA/TID assigned to it, and can be
  *	used to send traffic.
+ * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
+ *	This is a state of a queue that has had traffic on it, but during the
+ *	last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
+ *	it. In this state, when a new queue is needed to be allocated but no
+ *	such free queue exists, an inactive queue might be freed and given to
+ *	the new RA/TID.
  */
 enum iwl_mvm_queue_status {
 	IWL_MVM_QUEUE_FREE,
 	IWL_MVM_QUEUE_RESERVED,
 	IWL_MVM_QUEUE_READY,
+	IWL_MVM_QUEUE_INACTIVE,
 };
 
+#define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
+
 struct iwl_mvm {
 	/* for logger access */
 	struct device *dev;
@@ -750,11 +759,15 @@ struct iwl_mvm {
 		u32 hw_queue_to_mac80211;
 		u8 hw_queue_refcount;
 		u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+		bool reserved; /* Is this the TXQ reserved for a STA */
 		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+		/* Timestamp for inactivation per TID of this queue */
+		unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
 		enum iwl_mvm_queue_status status;
 	} queue_info[IWL_MAX_HW_QUEUES];
 	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
 	struct work_struct add_stream_wk; /* To add streams to queues */
+
 	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
 	const char *nvm_file_name;
@@ -1618,7 +1631,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
  */
 void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			 u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
 
 /* Return a bitmask with all the hw supported queues, except for the
  * command queue, which can't be flushed.
@@ -1725,6 +1738,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 void iwl_mvm_reorder_timer_expired(unsigned long data);
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
 
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
+
 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif,
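With IWL_MVM_QUEUE_INACTIVE added, the queue status values form a small lifecycle. The following sketch distills the transitions implied by the patch into an explicit checker; the driver itself encodes these rules inline rather than in a table, so treat this as a reading aid under stated assumptions, not driver code.

```c
#include <stdbool.h>
#include <stdio.h>

enum q_status { Q_FREE, Q_RESERVED, Q_READY, Q_INACTIVE };

/*
 * Assumed transition table distilled from the patch:
 *   FREE     -> RESERVED   queue set aside for a station
 *   RESERVED -> READY      queue fully configured, traffic flows
 *   READY    -> INACTIVE   no TX on any TID for IWL_MVM_DQA_QUEUE_TIMEOUT
 *   INACTIVE -> RESERVED   inactive queue picked for re-assignment
 *   INACTIVE -> READY      original TID resumes traffic (TX path)
 *   any      -> FREE       queue torn down via iwl_mvm_disable_txq()
 */
static bool can_transition(enum q_status from, enum q_status to)
{
	switch (to) {
	case Q_FREE:     return true;
	case Q_RESERVED: return from == Q_FREE || from == Q_INACTIVE;
	case Q_READY:    return from == Q_RESERVED || from == Q_INACTIVE;
	case Q_INACTIVE: return from == Q_READY;
	}
	return false;
}

int main(void)
{
	printf("%d\n", can_transition(Q_INACTIVE, Q_RESERVED)); /* 1 */
	printf("%d\n", can_transition(Q_FREE, Q_INACTIVE));     /* 0 */
	return 0;
}
```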
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 64b07b114d3c..84384a4321d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -310,6 +310,112 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 	iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
+/* Disable aggregations for a bitmap of TIDs for a given station */
+static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
+					unsigned long disable_agg_tids,
+					bool remove_queue)
+{
+	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	u32 status;
+	u8 sta_id;
+	int ret;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	mvmsta->tid_disable_agg |= disable_agg_tids;
+
+	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+	cmd.sta_id = mvmsta->sta_id;
+	cmd.add_modify = STA_MODE_MODIFY;
+	cmd.modify_mask = STA_MODIFY_QUEUES;
+	if (disable_agg_tids)
+		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+	if (remove_queue)
+		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
+	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+	rcu_read_unlock();
+
+	/* Notify FW of queue removal from the STA queues */
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+
+	return ret;
+}
+
+/*
+ * Remove a queue from a station's resources.
+ * Note that this only marks as free. It DOESN'T delete a BA agreement, and
+ * doesn't disable the queue
+ */
+static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long tid_bitmap;
+	unsigned long disable_agg_tids = 0;
+	u8 sta_id;
+	int tid;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	spin_lock_bh(&mvmsta->lock);
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+
+		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+			disable_agg_tids |= BIT(tid);
+	}
+	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+
+	spin_unlock_bh(&mvmsta->lock);
+
+	rcu_read_unlock();
+
+	spin_lock(&mvm->queue_info_lock);
+	/* Unmap MAC queues and TIDs from this queue */
+	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+	mvm->queue_info[queue].hw_queue_refcount = 0;
+	mvm->queue_info[queue].tid_bitmap = 0;
+	spin_unlock(&mvm->queue_info_lock);
+
+	return disable_agg_tids;
+}
+
 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 				   struct ieee80211_sta *sta, u8 ac, int tid,
 				   struct ieee80211_hdr *hdr)
@@ -325,6 +431,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
 	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 	int queue = -1;
+	bool using_inactive_queue = false;
+	unsigned long disable_agg_tids = 0;
+	enum iwl_mvm_agg_state queue_state;
 	int ssn;
 	int ret;
 
@@ -338,7 +447,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	 */
 	if (!ieee80211_is_data_qos(hdr->frame_control) ||
 	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
-		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_MGMT_QUEUE,
 						IWL_MVM_DQA_MAX_MGMT_QUEUE);
 		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
 			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
@@ -347,16 +457,37 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 		/* If no such queue is found, we'll use a DATA queue instead */
 	}
 
-	if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+	    (mvm->queue_info[mvmsta->reserved_queue].status ==
+	     IWL_MVM_QUEUE_RESERVED ||
+	     mvm->queue_info[mvmsta->reserved_queue].status ==
+	     IWL_MVM_QUEUE_INACTIVE)) {
 		queue = mvmsta->reserved_queue;
+		mvm->queue_info[queue].reserved = true;
 		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
 	}
 
 	if (queue < 0)
-		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_DATA_QUEUE,
 						IWL_MVM_DQA_MAX_DATA_QUEUE);
 
 	/*
+	 * Check if this queue is already allocated but inactive.
+	 * In such a case, we'll need to first free this queue before enabling
+	 * it again, so we'll mark it as reserved to make sure no new traffic
+	 * arrives on it
+	 */
+	if (queue > 0 &&
+	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+		using_inactive_queue = true;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
+				    queue, mvmsta->sta_id, tid);
+	}
+
+	/*
 	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
 	 * to make sure no one else takes it.
 	 * This will allow avoiding re-acquiring the lock at the end of the
@@ -380,6 +511,38 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
 			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
 
+	/*
+	 * If this queue was previously inactive (idle) - we need to free it
+	 * first
+	 */
+	if (using_inactive_queue) {
+		struct iwl_scd_txq_cfg_cmd cmd = {
+			.scd_queue = queue,
+			.enable = 0,
+		};
+
+		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+
+		/* Disable the queue */
+		iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
+					     true);
+		iwl_trans_txq_disable(mvm->trans, queue, false);
+		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+					   &cmd);
+		if (ret) {
+			IWL_ERR(mvm,
+				"Failed to free inactive queue %d (ret=%d)\n",
+				queue, ret);
+
+			/* Re-mark the inactive queue as inactive */
+			spin_lock_bh(&mvm->queue_info_lock);
+			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+			spin_unlock_bh(&mvm->queue_info_lock);
+
+			return ret;
+		}
+	}
+
 	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
 			    queue, mvmsta->sta_id, tid);
 
@@ -389,7 +552,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 
 	spin_lock_bh(&mvmsta->lock);
 	mvmsta->tid_data[tid].txq_id = queue;
+	mvmsta->tid_data[tid].is_tid_active = true;
 	mvmsta->tfd_queue_msk |= BIT(queue);
+	queue_state = mvmsta->tid_data[tid].state;
 
 	if (mvmsta->reserved_queue == queue)
 		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
@@ -399,7 +564,11 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	if (ret)
 		goto out_err;
 
-	return 0;
+	/* If we need to re-enable aggregations... */
+	if (queue_state == IWL_AGG_ON)
+		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+
+	return ret;
 
 out_err:
 	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
@@ -476,6 +645,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 	unsigned long deferred_tid_traffic;
 	int sta_id, tid;
 
+	/* Check inactivity of queues */
+	iwl_mvm_inactivity_check(mvm);
+
 	mutex_lock(&mvm->mutex);
 
 	/* Go over all stations with deferred traffic */
@@ -505,6 +677,12 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	int queue;
 
+	/*
+	 * Check for inactive queues, so we don't reach a situation where we
+	 * can't add a STA due to a shortage in queues that doesn't really exist
+	 */
+	iwl_mvm_inactivity_check(mvm);
+
 	spin_lock_bh(&mvm->queue_info_lock);
 
 	/* Make sure we have free resources for this STA */
@@ -514,7 +692,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 			    IWL_MVM_QUEUE_FREE))
 		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
 	else
-		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_DATA_QUEUE,
 						IWL_MVM_DQA_MAX_DATA_QUEUE);
 	if (queue < 0) {
 		spin_unlock_bh(&mvm->queue_info_lock);
@@ -1403,8 +1582,8 @@ out_free:
 	return ret;
 }
 
-static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			      int tid, u8 queue, bool start)
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		       int tid, u8 queue, bool start)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {};
@@ -1459,6 +1638,7 @@ const u8 tid_to_mac80211_ac[] = {
 	IEEE80211_AC_VI,
 	IEEE80211_AC_VO,
 	IEEE80211_AC_VO,
+	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
 };
 
 static const u8 tid_to_ucode_ac[] = {
@@ -1513,7 +1693,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	txq_id = mvmsta->tid_data[tid].txq_id;
 	if (!iwl_mvm_is_dqa_supported(mvm) ||
 	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
-		txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						 mvm->first_agg_queue,
 						 mvm->last_agg_queue);
 		if (txq_id < 0) {
 			ret = txq_id;
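The takeover path in iwl_mvm_sta_alloc_queue() is easiest to read as an ordered sequence: fence the queue off, unmap the old RA/TIDs, tell the firmware, disable the hardware queue, then reconfigure. A compressed sketch of that ordering, with printouts standing in for the firmware commands and locking that the real code performs:

```c
#include <stdio.h>

enum { Q_RESERVED, Q_READY, Q_INACTIVE };

/*
 * Ordering sketch of the inactive-queue takeover (simplified and
 * illustrative; the driver sends ADD_STA and SCD_QUEUE_CFG commands
 * and takes locks at each step):
 */
static int reassign_inactive_queue(int *status)
{
	if (*status != Q_INACTIVE)
		return -1;

	*status = Q_RESERVED;                           /* 1. fence off new traffic */
	printf("unmap old RA/TIDs\n");                  /* 2. remove_sta_queue_marking */
	printf("notify fw, drop queue from old STA\n"); /* 3. invalidate_sta_queue */
	printf("disable queue in hw\n");                /* 4. txq_disable + SCD_QUEUE_CFG */
	*status = Q_READY;                              /* 5. reconfigure for new RA/TID */
	return 0;
}

int main(void)
{
	int status = Q_INACTIVE;
	return reassign_inactive_queue(&status);
}
```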
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d2c58f134fcf..1588eb63b7ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -321,6 +321,9 @@ enum iwl_mvm_agg_state {
  *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
  *	we are ready to finish the Tx AGG stop / start flow.
  * @tx_time: medium time consumed by this A-MPDU
+ * @is_tid_active: has this TID sent traffic in the last
+ *	%IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
+ *	field should be ignored.
  */
 struct iwl_mvm_tid_data {
 	struct sk_buff_head deferred_tx_frames;
@@ -333,6 +336,7 @@ struct iwl_mvm_tid_data {
 	u16 txq_id;
 	u16 ssn;
 	u16 tx_time;
+	bool is_tid_active;
 };
 
 static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
@@ -509,6 +513,9 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     struct ieee80211_sta *sta, u16 tid);
 
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		       int tid, u8 queue, bool start);
+
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 779bafcbc9a1..9943013dec98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -884,9 +884,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		 * nullfunc frames should go to the MGMT queue regardless of QOS
 		 */
 		tid = IWL_MAX_TID_COUNT;
-		txq_id = mvmsta->tid_data[tid].txq_id;
 	}
 
+	if (iwl_mvm_is_dqa_supported(mvm))
+		txq_id = mvmsta->tid_data[tid].txq_id;
+
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
@@ -905,9 +907,12 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		txq_id = mvmsta->tid_data[tid].txq_id;
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm)) {
-		if (unlikely(mvmsta->tid_data[tid].txq_id ==
-			     IEEE80211_INVAL_HW_QUEUE)) {
+	/* Check if TXQ needs to be allocated or re-activated */
+	if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
+		     !mvmsta->tid_data[tid].is_tid_active) &&
+	    iwl_mvm_is_dqa_supported(mvm)) {
+		/* If TXQ needs to be allocated... */
+		if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
 			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
 
 			/*
@@ -917,11 +922,22 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 			spin_unlock(&mvmsta->lock);
 			return 0;
+
 		}
 
-		txq_id = mvmsta->tid_data[tid].txq_id;
+		/* If we are here - TXQ exists and needs to be re-activated */
+		spin_lock(&mvm->queue_info_lock);
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+		mvmsta->tid_data[tid].is_tid_active = true;
+		spin_unlock(&mvm->queue_info_lock);
+
+		IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
+				    txq_id);
 	}
 
+	/* Keep track of the time of the last frame for this RA/TID */
+	mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
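The TX-path change boils down to a three-way decision per frame. A minimal sketch of that decision, using hypothetical names (INVAL_QUEUE stands in for IEEE80211_INVAL_HW_QUEUE, and the DQA-support check is omitted):

```c
#include <stdbool.h>
#include <stdio.h>

#define INVAL_QUEUE -1

struct tid_data {
	int txq_id;
	bool is_tid_active;
};

/*
 * Decision mirroring the patched iwl_mvm_tx_mpdu() path (simplified):
 * allocate when no queue is assigned, re-activate when one is assigned
 * but has been marked idle, else just transmit.
 */
static const char *tx_path(struct tid_data *td)
{
	if (td->txq_id == INVAL_QUEUE)
		return "defer frame, allocate queue";

	if (!td->is_tid_active) {
		td->is_tid_active = true; /* queue status -> READY */
		return "re-activate queue, then transmit";
	}

	return "transmit";
}

int main(void)
{
	struct tid_data td = { INVAL_QUEUE, false };
	printf("%s\n", tx_path(&td)); /* allocate */
	td.txq_id = 10;
	printf("%s\n", tx_path(&td)); /* re-activate */
	printf("%s\n", tx_path(&td)); /* transmit */
	return 0;
}
```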
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 161b99efd63d..a0cb5ca4c9b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -579,17 +579,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 	iwl_mvm_dump_umac_error_log(mvm);
 }
 
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
 {
 	int i;
 
 	lockdep_assert_held(&mvm->queue_info_lock);
 
+	/* Start by looking for a free queue */
 	for (i = minq; i <= maxq; i++)
 		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
 		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
 			return i;
 
+	/*
+	 * If no free queue found - settle for an inactive one to reconfigure
+	 * Make sure that the inactive queue either already belongs to this STA,
+	 * or that if it belongs to another one - it isn't the reserved queue
+	 */
+	for (i = minq; i <= maxq; i++)
+		if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
+		    (sta_id == mvm->queue_info[i].ra_sta_id ||
+		     !mvm->queue_info[i].reserved))
+			return i;
+
 	return -ENOSPC;
 }
 
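The fallback pass above is self-contained enough to exercise in userspace. This sketch mirrors the two-pass policy: prefer a free queue, otherwise settle for an inactive one, but only if it already belongs to the requesting station or is not reserved for another. Struct and names are simplified stand-ins, not the driver's:

```c
#include <stdbool.h>
#include <stdio.h>

enum status { Q_FREE, Q_RESERVED, Q_READY, Q_INACTIVE };

struct q {
	enum status status;
	int refcount;
	int ra_sta_id; /* station the queue is mapped to */
	bool reserved; /* reserved as some station's dedicated TXQ */
};

/* Two-pass search mirroring the patched iwl_mvm_find_free_queue() */
static int find_queue(struct q *qs, int n, int sta_id)
{
	for (int i = 0; i < n; i++)
		if (qs[i].refcount == 0 && qs[i].status == Q_FREE)
			return i;

	for (int i = 0; i < n; i++)
		if (qs[i].status == Q_INACTIVE &&
		    (qs[i].ra_sta_id == sta_id || !qs[i].reserved))
			return i;

	return -1; /* -ENOSPC in the driver */
}

int main(void)
{
	struct q qs[2] = {
		{ Q_READY,    1, 0, false },
		{ Q_INACTIVE, 1, 7, true  }, /* reserved for station 7 */
	};

	printf("%d\n", find_queue(qs, 2, 3)); /* -1: inactive q is reserved */
	printf("%d\n", find_queue(qs, 2, 7)); /*  1: owner may reuse it */
	return 0;
}
```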
@@ -650,6 +662,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	else
 		mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
 	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
+	mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
 
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
@@ -752,6 +765,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	mvm->queue_info[queue].tid_bitmap = 0;
 	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
 
+	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
+	mvm->queue_info[queue].reserved = false;
+
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	iwl_trans_txq_disable(mvm->trans, queue, false);
@@ -1039,6 +1055,154 @@ out:
 	ieee80211_connection_loss(vif);
 }
 
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive
+ * If only some the queue TIDs are inactive - unmap them from the queue
+ */
+static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+					 struct iwl_mvm_sta *mvmsta, int queue,
+					 unsigned long tid_bitmap)
+{
+	int tid;
+
+	lockdep_assert_held(&mvmsta->lock);
+	lockdep_assert_held(&mvm->queue_info_lock);
+
+	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		/* If some TFDs are still queued - don't mark TID as inactive */
+		if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid]))
+			tid_bitmap &= ~BIT(tid);
+	}
+
+	/* If all TIDs in the queue are inactive - mark queue as inactive. */
+	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+
+		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
+			mvmsta->tid_data[tid].is_tid_active = false;
+
+		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
+				    queue);
+		return;
+	}
+
+	/*
+	 * If we are here, this is a shared queue and not all TIDs timed-out.
+	 * Remove the ones that did.
+	 */
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue);
+		mvm->queue_info[queue].hw_queue_refcount--;
+		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+		mvmsta->tid_data[tid].is_tid_active = false;
+
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Removing inactive TID %d from shared Q:%d\n",
+				    tid, queue);
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
+			    mvm->queue_info[queue].tid_bitmap);
+
+	/*
+	 * There may be different TIDs with the same mac queues, so make
+	 * sure all TIDs have existing corresponding mac queues enabled
+	 */
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		mvm->queue_info[queue].hw_queue_to_mac80211 |=
+			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+	}
+
+	/* TODO: if queue was shared - need to re-enable AGGs */
+}
+
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
+{
+	unsigned long timeout_queues_map = 0;
+	unsigned long now = jiffies;
+	int i;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
+		if (mvm->queue_info[i].hw_queue_refcount > 0)
+			timeout_queues_map |= BIT(i);
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	/*
+	 * If a queue times out - mark it as INACTIVE (don't remove right away
+	 * if we don't have to.) This is an optimization in case traffic comes
+	 * later, and we don't HAVE to use a currently-inactive queue
+	 */
+	for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
+		struct ieee80211_sta *sta;
+		struct iwl_mvm_sta *mvmsta;
+		u8 sta_id;
+		int tid;
+		unsigned long inactive_tid_bitmap = 0;
+		unsigned long queue_tid_bitmap;
+
+		spin_lock_bh(&mvm->queue_info_lock);
+		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+
+		/* If TXQ isn't in active use anyway - nothing to do here... */
+		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY) {
+			spin_unlock_bh(&mvm->queue_info_lock);
+			continue;
+		}
+
+		/* Check to see if there are inactive TIDs on this queue */
+		for_each_set_bit(tid, &queue_tid_bitmap,
+				 IWL_MAX_TID_COUNT + 1) {
+			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+				continue;
+
+			inactive_tid_bitmap |= BIT(tid);
+		}
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		/* If all TIDs are active - finish check on this queue */
+		if (!inactive_tid_bitmap)
+			continue;
+
+		/*
+		 * If we are here - the queue hadn't been served recently and is
+		 * in use
+		 */
+
+		sta_id = mvm->queue_info[i].ra_sta_id;
+		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+		/*
+		 * If the STA doesn't exist anymore, it isn't an error. It could
+		 * be that it was removed since getting the queues, and in this
+		 * case it should've inactivated its queues anyway.
+		 */
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		spin_lock_bh(&mvmsta->lock);
+		spin_lock(&mvm->queue_info_lock);
+		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+					     inactive_tid_bitmap);
+		spin_unlock(&mvm->queue_info_lock);
+		spin_unlock_bh(&mvmsta->lock);
+	}
+
+	rcu_read_unlock();
+}
+
 int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
 			 enum iwl_lqm_cmd_operatrions operation,
 			 u32 duration, u32 timeout)
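For the shared-queue case in iwl_mvm_remove_inactive_tids(), the decision reduces to bitmap arithmetic: if every mapped TID idled out, the whole queue goes INACTIVE, otherwise only the idle TIDs are unmapped. A tiny sketch of just that arithmetic, with the locking and mac-queue bookkeeping left out:

```c
#include <stdio.h>

/*
 * remove_idle_tids() is a simplified stand-in: it returns the queue's
 * remaining TID bitmap, or 0 when the caller should instead mark the
 * whole queue INACTIVE.
 */
static unsigned remove_idle_tids(unsigned queue_tids, unsigned idle_tids)
{
	if (idle_tids == queue_tids)
		return 0; /* whole queue would be marked INACTIVE */

	return queue_tids & ~idle_tids; /* unmap just the idle ones */
}

int main(void)
{
	/* TIDs 0 and 3 mapped; TID 3 idle -> only TID 0 remains */
	printf("0x%x\n", remove_idle_tids(0x9, 0x8)); /* 0x1 */
	/* both idle -> 0, caller marks queue INACTIVE instead */
	printf("0x%x\n", remove_idle_tids(0x9, 0x9)); /* 0x0 */
	return 0;
}
```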