author     John W. Linville <linville@tuxdriver.com>	2013-12-09 15:30:27 -0500
committer  John W. Linville <linville@tuxdriver.com>	2013-12-09 15:30:27 -0500
commit     145babc68eebc4d72cd1a92d92e91739b905b7df
tree       192088d4211b3f60c1aaa231029b519bf2fbc591 /drivers/net
parent     e08fd975bf26aa8063cadd245817e042f570472d
parent     cfb27d29b61cc32c0bb75f741aeabb9c6e6af742
Merge tag 'for-linville-20131203' of git://github.com/kvalo/ath
Conflicts:
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/mac.c
Diffstat (limited to 'drivers/net')
 drivers/net/wireless/ath/ath10k/ce.c     |  53
 drivers/net/wireless/ath/ath10k/ce.h     |   2
 drivers/net/wireless/ath/ath10k/core.c   |  43
 drivers/net/wireless/ath/ath10k/core.h   |  23
 drivers/net/wireless/ath/ath10k/debug.c  |  94
 drivers/net/wireless/ath/ath10k/debug.h  |   6
 drivers/net/wireless/ath/ath10k/htc.c    |  31
 drivers/net/wireless/ath/ath10k/htt.c    |   4
 drivers/net/wireless/ath/ath10k/htt.h    |   1
 drivers/net/wireless/ath/ath10k/htt_rx.c |  25
 drivers/net/wireless/ath/ath10k/htt_tx.c |  11
 drivers/net/wireless/ath/ath10k/hw.h     |   1
 drivers/net/wireless/ath/ath10k/mac.c    | 316
 drivers/net/wireless/ath/ath10k/pci.c    | 791
 drivers/net/wireless/ath/ath10k/pci.h    |  14
 drivers/net/wireless/ath/ath10k/txrx.c   |   6
 drivers/net/wireless/ath/ath10k/wmi.c    | 302
 drivers/net/wireless/ath/ath10k/wmi.h    |  96
18 files changed, 1413 insertions(+), 406 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e46951b8fb92..d44d618b05f9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -243,6 +243,16 @@ static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, | |||
243 | misc_ie_addr | CE_ERROR_MASK); | 243 | misc_ie_addr | CE_ERROR_MASK); |
244 | } | 244 | } |
245 | 245 | ||
246 | static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, | ||
247 | u32 ce_ctrl_addr) | ||
248 | { | ||
249 | u32 misc_ie_addr = ath10k_pci_read32(ar, | ||
250 | ce_ctrl_addr + MISC_IE_ADDRESS); | ||
251 | |||
252 | ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, | ||
253 | misc_ie_addr & ~CE_ERROR_MASK); | ||
254 | } | ||
255 | |||
246 | static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, | 256 | static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, |
247 | u32 ce_ctrl_addr, | 257 | u32 ce_ctrl_addr, |
248 | unsigned int mask) | 258 | unsigned int mask) |
@@ -731,7 +741,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) | |||
731 | 741 | ||
732 | void ath10k_ce_per_engine_service_any(struct ath10k *ar) | 742 | void ath10k_ce_per_engine_service_any(struct ath10k *ar) |
733 | { | 743 | { |
734 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
735 | int ce_id, ret; | 744 | int ce_id, ret; |
736 | u32 intr_summary; | 745 | u32 intr_summary; |
737 | 746 | ||
@@ -741,7 +750,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) | |||
741 | 750 | ||
742 | intr_summary = CE_INTERRUPT_SUMMARY(ar); | 751 | intr_summary = CE_INTERRUPT_SUMMARY(ar); |
743 | 752 | ||
744 | for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) { | 753 | for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { |
745 | if (intr_summary & (1 << ce_id)) | 754 | if (intr_summary & (1 << ce_id)) |
746 | intr_summary &= ~(1 << ce_id); | 755 | intr_summary &= ~(1 << ce_id); |
747 | else | 756 | else |
@@ -783,22 +792,25 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state, | |||
783 | ath10k_pci_sleep(ar); | 792 | ath10k_pci_sleep(ar); |
784 | } | 793 | } |
785 | 794 | ||
786 | void ath10k_ce_disable_interrupts(struct ath10k *ar) | 795 | int ath10k_ce_disable_interrupts(struct ath10k *ar) |
787 | { | 796 | { |
788 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
789 | int ce_id, ret; | 797 | int ce_id, ret; |
790 | 798 | ||
791 | ret = ath10k_pci_wake(ar); | 799 | ret = ath10k_pci_wake(ar); |
792 | if (ret) | 800 | if (ret) |
793 | return; | 801 | return ret; |
794 | 802 | ||
795 | for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) { | 803 | for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { |
796 | struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; | 804 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); |
797 | u32 ctrl_addr = ce_state->ctrl_addr; | ||
798 | 805 | ||
799 | ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); | 806 | ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); |
807 | ath10k_ce_error_intr_disable(ar, ctrl_addr); | ||
808 | ath10k_ce_watermark_intr_disable(ar, ctrl_addr); | ||
800 | } | 809 | } |
810 | |||
801 | ath10k_pci_sleep(ar); | 811 | ath10k_pci_sleep(ar); |
812 | |||
813 | return 0; | ||
802 | } | 814 | } |
803 | 815 | ||
804 | void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, | 816 | void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, |
@@ -1047,9 +1059,19 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, | |||
1047 | const struct ce_attr *attr) | 1059 | const struct ce_attr *attr) |
1048 | { | 1060 | { |
1049 | struct ath10k_ce_pipe *ce_state; | 1061 | struct ath10k_ce_pipe *ce_state; |
1050 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | ||
1051 | int ret; | 1062 | int ret; |
1052 | 1063 | ||
1064 | /* | ||
1065 | * Make sure there's enough CE ringbuffer entries for HTT TX to avoid | ||
1066 | * additional TX locking checks. | ||
1067 | * | ||
1068 | * For the lack of a better place do the check here. | ||
1069 | */ | ||
1070 | BUILD_BUG_ON(TARGET_NUM_MSDU_DESC > | ||
1071 | (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); | ||
1072 | BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC > | ||
1073 | (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); | ||
1074 | |||
1053 | ret = ath10k_pci_wake(ar); | 1075 | ret = ath10k_pci_wake(ar); |
1054 | if (ret) | 1076 | if (ret) |
1055 | return NULL; | 1077 | return NULL; |
@@ -1057,7 +1079,7 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, | |||
1057 | ce_state = ath10k_ce_init_state(ar, ce_id, attr); | 1079 | ce_state = ath10k_ce_init_state(ar, ce_id, attr); |
1058 | if (!ce_state) { | 1080 | if (!ce_state) { |
1059 | ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); | 1081 | ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); |
1060 | return NULL; | 1082 | goto out; |
1061 | } | 1083 | } |
1062 | 1084 | ||
1063 | if (attr->src_nentries) { | 1085 | if (attr->src_nentries) { |
@@ -1066,7 +1088,8 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, | |||
1066 | ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", | 1088 | ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", |
1067 | ce_id, ret); | 1089 | ce_id, ret); |
1068 | ath10k_ce_deinit(ce_state); | 1090 | ath10k_ce_deinit(ce_state); |
1069 | return NULL; | 1091 | ce_state = NULL; |
1092 | goto out; | ||
1070 | } | 1093 | } |
1071 | } | 1094 | } |
1072 | 1095 | ||
@@ -1076,15 +1099,13 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar, | |||
1076 | ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", | 1099 | ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", |
1077 | ce_id, ret); | 1100 | ce_id, ret); |
1078 | ath10k_ce_deinit(ce_state); | 1101 | ath10k_ce_deinit(ce_state); |
1079 | return NULL; | 1102 | ce_state = NULL; |
1103 | goto out; | ||
1080 | } | 1104 | } |
1081 | } | 1105 | } |
1082 | 1106 | ||
1083 | /* Enable CE error interrupts */ | 1107 | out: |
1084 | ath10k_ce_error_intr_enable(ar, ctrl_addr); | ||
1085 | |||
1086 | ath10k_pci_sleep(ar); | 1108 | ath10k_pci_sleep(ar); |
1087 | |||
1088 | return ce_state; | 1109 | return ce_state; |
1089 | } | 1110 | } |
1090 | 1111 | ||
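The BUILD_BUG_ON() pair added above bakes the "HTT TX descriptors must fit in the CE source ring" requirement into the build itself, which is why the new comment can promise no additional TX locking checks at runtime. A minimal standalone sketch of the same idea using C11 static_assert; the constant values below are placeholders for illustration, not ath10k's real definitions:

#include <assert.h>

/* Placeholder values for illustration only; the real constants live in
 * ath10k's hw.h and ce.h and may differ from these. */
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
#define TARGET_NUM_MSDU_DESC        1424
#define TARGET_10X_NUM_MSDU_DESC    1424

/* Fails the build (not the boot) if the MSDU descriptor pool could ever
 * overflow the host-to-target copy-engine source ring. */
static_assert(TARGET_NUM_MSDU_DESC <= CE_HTT_H2T_MSG_SRC_NENTRIES - 1,
	      "MSDU descriptors must fit in the CE source ring");
static_assert(TARGET_10X_NUM_MSDU_DESC <= CE_HTT_H2T_MSG_SRC_NENTRIES - 1,
	      "10.x MSDU descriptors must fit in the CE source ring");

int main(void) { return 0; }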
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 15d45b5b7615..67dbde6a5c74 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -234,7 +234,7 @@ void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state); | |||
234 | /*==================CE Interrupt Handlers====================*/ | 234 | /*==================CE Interrupt Handlers====================*/ |
235 | void ath10k_ce_per_engine_service_any(struct ath10k *ar); | 235 | void ath10k_ce_per_engine_service_any(struct ath10k *ar); |
236 | void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); | 236 | void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); |
237 | void ath10k_ce_disable_interrupts(struct ath10k *ar); | 237 | int ath10k_ce_disable_interrupts(struct ath10k *ar); |
238 | 238 | ||
239 | /* ce_attr.flags values */ | 239 | /* ce_attr.flags values */ |
240 | /* Use NonSnooping PCIe accesses? */ | 240 | /* Use NonSnooping PCIe accesses? */ |
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 1129994fb105..3b59af3bddf4 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -597,10 +597,8 @@ static int ath10k_init_uart(struct ath10k *ar) | |||
597 | return ret; | 597 | return ret; |
598 | } | 598 | } |
599 | 599 | ||
600 | if (!uart_print) { | 600 | if (!uart_print) |
601 | ath10k_info("UART prints disabled\n"); | ||
602 | return 0; | 601 | return 0; |
603 | } | ||
604 | 602 | ||
605 | ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); | 603 | ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); |
606 | if (ret) { | 604 | if (ret) { |
@@ -645,8 +643,8 @@ static int ath10k_init_hw_params(struct ath10k *ar) | |||
645 | 643 | ||
646 | ar->hw_params = *hw_params; | 644 | ar->hw_params = *hw_params; |
647 | 645 | ||
648 | ath10k_info("Hardware name %s version 0x%x\n", | 646 | ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", |
649 | ar->hw_params.name, ar->target_version); | 647 | ar->hw_params.name, ar->target_version); |
650 | 648 | ||
651 | return 0; | 649 | return 0; |
652 | } | 650 | } |
@@ -664,7 +662,8 @@ static void ath10k_core_restart(struct work_struct *work) | |||
664 | ieee80211_restart_hw(ar->hw); | 662 | ieee80211_restart_hw(ar->hw); |
665 | break; | 663 | break; |
666 | case ATH10K_STATE_OFF: | 664 | case ATH10K_STATE_OFF: |
667 | /* this can happen if driver is being unloaded */ | 665 | /* this can happen if driver is being unloaded |
666 | * or if the crash happens during FW probing */ | ||
668 | ath10k_warn("cannot restart a device that hasn't been started\n"); | 667 | ath10k_warn("cannot restart a device that hasn't been started\n"); |
669 | break; | 668 | break; |
670 | case ATH10K_STATE_RESTARTING: | 669 | case ATH10K_STATE_RESTARTING: |
@@ -737,8 +736,6 @@ EXPORT_SYMBOL(ath10k_core_create); | |||
737 | 736 | ||
738 | void ath10k_core_destroy(struct ath10k *ar) | 737 | void ath10k_core_destroy(struct ath10k *ar) |
739 | { | 738 | { |
740 | ath10k_debug_destroy(ar); | ||
741 | |||
742 | flush_workqueue(ar->workqueue); | 739 | flush_workqueue(ar->workqueue); |
743 | destroy_workqueue(ar->workqueue); | 740 | destroy_workqueue(ar->workqueue); |
744 | 741 | ||
@@ -786,21 +783,30 @@ int ath10k_core_start(struct ath10k *ar) | |||
786 | goto err; | 783 | goto err; |
787 | } | 784 | } |
788 | 785 | ||
789 | status = ath10k_htc_wait_target(&ar->htc); | 786 | status = ath10k_hif_start(ar); |
790 | if (status) | 787 | if (status) { |
788 | ath10k_err("could not start HIF: %d\n", status); | ||
791 | goto err_wmi_detach; | 789 | goto err_wmi_detach; |
790 | } | ||
791 | |||
792 | status = ath10k_htc_wait_target(&ar->htc); | ||
793 | if (status) { | ||
794 | ath10k_err("failed to connect to HTC: %d\n", status); | ||
795 | goto err_hif_stop; | ||
796 | } | ||
792 | 797 | ||
793 | status = ath10k_htt_attach(ar); | 798 | status = ath10k_htt_attach(ar); |
794 | if (status) { | 799 | if (status) { |
795 | ath10k_err("could not attach htt (%d)\n", status); | 800 | ath10k_err("could not attach htt (%d)\n", status); |
796 | goto err_wmi_detach; | 801 | goto err_hif_stop; |
797 | } | 802 | } |
798 | 803 | ||
799 | status = ath10k_init_connect_htc(ar); | 804 | status = ath10k_init_connect_htc(ar); |
800 | if (status) | 805 | if (status) |
801 | goto err_htt_detach; | 806 | goto err_htt_detach; |
802 | 807 | ||
803 | ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version); | 808 | ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n", |
809 | ar->hw->wiphy->fw_version); | ||
804 | 810 | ||
805 | status = ath10k_wmi_cmd_init(ar); | 811 | status = ath10k_wmi_cmd_init(ar); |
806 | if (status) { | 812 | if (status) { |
@@ -826,12 +832,23 @@ int ath10k_core_start(struct ath10k *ar) | |||
826 | ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; | 832 | ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; |
827 | INIT_LIST_HEAD(&ar->arvifs); | 833 | INIT_LIST_HEAD(&ar->arvifs); |
828 | 834 | ||
835 | if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) | ||
836 | ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n", | ||
837 | ar->hw_params.name, ar->target_version, | ||
838 | ar->hw->wiphy->fw_version, ar->fw_api, | ||
839 | ar->htt.target_version_major, | ||
840 | ar->htt.target_version_minor); | ||
841 | |||
842 | __set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags); | ||
843 | |||
829 | return 0; | 844 | return 0; |
830 | 845 | ||
831 | err_disconnect_htc: | 846 | err_disconnect_htc: |
832 | ath10k_htc_stop(&ar->htc); | 847 | ath10k_htc_stop(&ar->htc); |
833 | err_htt_detach: | 848 | err_htt_detach: |
834 | ath10k_htt_detach(&ar->htt); | 849 | ath10k_htt_detach(&ar->htt); |
850 | err_hif_stop: | ||
851 | ath10k_hif_stop(ar); | ||
835 | err_wmi_detach: | 852 | err_wmi_detach: |
836 | ath10k_wmi_detach(ar); | 853 | ath10k_wmi_detach(ar); |
837 | err: | 854 | err: |
@@ -985,6 +1002,8 @@ void ath10k_core_unregister(struct ath10k *ar) | |||
985 | ath10k_mac_unregister(ar); | 1002 | ath10k_mac_unregister(ar); |
986 | 1003 | ||
987 | ath10k_core_free_firmware_files(ar); | 1004 | ath10k_core_free_firmware_files(ar); |
1005 | |||
1006 | ath10k_debug_destroy(ar); | ||
988 | } | 1007 | } |
989 | EXPORT_SYMBOL(ath10k_core_unregister); | 1008 | EXPORT_SYMBOL(ath10k_core_unregister); |
990 | 1009 | ||
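The reshuffled error labels in ath10k_core_start() above follow the usual kernel goto-unwind idiom: since ath10k_hif_start() now runs before the HTC/HTT setup, its teardown gets the new err_hif_stop label placed between err_htt_detach and err_wmi_detach so cleanup runs in reverse order of setup. A minimal sketch of that idiom with made-up helper names, not the driver's:

#include <stdio.h>

/* Hypothetical stand-ins for the wmi/hif/htt setup and teardown calls. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend this step fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int example_start(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_a;

	ret = step_c();
	if (ret)
		goto err_b;	/* later failures jump further down the list... */

	return 0;

err_b:				/* ...so teardown happens in reverse order */
	undo_b();
err_a:
	undo_a();
	return ret;
}

int main(void) { return example_start() ? 1 : 0; }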
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0934f7633de3..79726e0fe2f0 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -30,6 +30,7 @@ | |||
30 | #include "wmi.h" | 30 | #include "wmi.h" |
31 | #include "../ath.h" | 31 | #include "../ath.h" |
32 | #include "../regd.h" | 32 | #include "../regd.h" |
33 | #include "../dfs_pattern_detector.h" | ||
33 | 34 | ||
34 | #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) | 35 | #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) |
35 | #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) | 36 | #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) |
@@ -43,7 +44,7 @@ | |||
43 | /* Antenna noise floor */ | 44 | /* Antenna noise floor */ |
44 | #define ATH10K_DEFAULT_NOISE_FLOOR -95 | 45 | #define ATH10K_DEFAULT_NOISE_FLOOR -95 |
45 | 46 | ||
46 | #define ATH10K_MAX_NUM_MGMT_PENDING 16 | 47 | #define ATH10K_MAX_NUM_MGMT_PENDING 128 |
47 | 48 | ||
48 | struct ath10k; | 49 | struct ath10k; |
49 | 50 | ||
@@ -192,6 +193,14 @@ struct ath10k_target_stats { | |||
192 | 193 | ||
193 | }; | 194 | }; |
194 | 195 | ||
196 | struct ath10k_dfs_stats { | ||
197 | u32 phy_errors; | ||
198 | u32 pulses_total; | ||
199 | u32 pulses_detected; | ||
200 | u32 pulses_discarded; | ||
201 | u32 radar_detected; | ||
202 | }; | ||
203 | |||
195 | #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */ | 204 | #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */ |
196 | 205 | ||
197 | struct ath10k_peer { | 206 | struct ath10k_peer { |
@@ -261,6 +270,8 @@ struct ath10k_debug { | |||
261 | 270 | ||
262 | unsigned long htt_stats_mask; | 271 | unsigned long htt_stats_mask; |
263 | struct delayed_work htt_stats_dwork; | 272 | struct delayed_work htt_stats_dwork; |
273 | struct ath10k_dfs_stats dfs_stats; | ||
274 | struct ath_dfs_pool_stats dfs_pool_stats; | ||
264 | }; | 275 | }; |
265 | 276 | ||
266 | enum ath10k_state { | 277 | enum ath10k_state { |
@@ -299,6 +310,12 @@ enum ath10k_fw_features { | |||
299 | ATH10K_FW_FEATURE_COUNT, | 310 | ATH10K_FW_FEATURE_COUNT, |
300 | }; | 311 | }; |
301 | 312 | ||
313 | enum ath10k_dev_flags { | ||
314 | /* Indicates that ath10k device is during CAC phase of DFS */ | ||
315 | ATH10K_CAC_RUNNING, | ||
316 | ATH10K_FLAG_FIRST_BOOT_DONE, | ||
317 | }; | ||
318 | |||
302 | struct ath10k { | 319 | struct ath10k { |
303 | struct ath_common ath_common; | 320 | struct ath_common ath_common; |
304 | struct ieee80211_hw *hw; | 321 | struct ieee80211_hw *hw; |
@@ -392,6 +409,8 @@ struct ath10k { | |||
392 | bool monitor_enabled; | 409 | bool monitor_enabled; |
393 | bool monitor_present; | 410 | bool monitor_present; |
394 | unsigned int filter_flags; | 411 | unsigned int filter_flags; |
412 | unsigned long dev_flags; | ||
413 | u32 dfs_block_radar_events; | ||
395 | 414 | ||
396 | struct wmi_pdev_set_wmm_params_arg wmm_params; | 415 | struct wmi_pdev_set_wmm_params_arg wmm_params; |
397 | struct completion install_key_done; | 416 | struct completion install_key_done; |
@@ -428,6 +447,8 @@ struct ath10k { | |||
428 | u32 survey_last_cycle_count; | 447 | u32 survey_last_cycle_count; |
429 | struct survey_info survey[ATH10K_NUM_CHANS]; | 448 | struct survey_info survey[ATH10K_NUM_CHANS]; |
430 | 449 | ||
450 | struct dfs_pattern_detector *dfs_detector; | ||
451 | |||
431 | #ifdef CONFIG_ATH10K_DEBUGFS | 452 | #ifdef CONFIG_ATH10K_DEBUGFS |
432 | struct ath10k_debug debug; | 453 | struct ath10k_debug debug; |
433 | #endif | 454 | #endif |
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 760ff2289e3c..6bdfad3144af 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -639,6 +639,86 @@ void ath10k_debug_stop(struct ath10k *ar) | |||
639 | cancel_delayed_work(&ar->debug.htt_stats_dwork); | 639 | cancel_delayed_work(&ar->debug.htt_stats_dwork); |
640 | } | 640 | } |
641 | 641 | ||
642 | static ssize_t ath10k_write_simulate_radar(struct file *file, | ||
643 | const char __user *user_buf, | ||
644 | size_t count, loff_t *ppos) | ||
645 | { | ||
646 | struct ath10k *ar = file->private_data; | ||
647 | |||
648 | ieee80211_radar_detected(ar->hw); | ||
649 | |||
650 | return count; | ||
651 | } | ||
652 | |||
653 | static const struct file_operations fops_simulate_radar = { | ||
654 | .write = ath10k_write_simulate_radar, | ||
655 | .open = simple_open, | ||
656 | .owner = THIS_MODULE, | ||
657 | .llseek = default_llseek, | ||
658 | }; | ||
659 | |||
660 | #define ATH10K_DFS_STAT(s, p) (\ | ||
661 | len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ | ||
662 | ar->debug.dfs_stats.p)) | ||
663 | |||
664 | #define ATH10K_DFS_POOL_STAT(s, p) (\ | ||
665 | len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ | ||
666 | ar->debug.dfs_pool_stats.p)) | ||
667 | |||
668 | static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf, | ||
669 | size_t count, loff_t *ppos) | ||
670 | { | ||
671 | int retval = 0, len = 0; | ||
672 | const int size = 8000; | ||
673 | struct ath10k *ar = file->private_data; | ||
674 | char *buf; | ||
675 | |||
676 | buf = kzalloc(size, GFP_KERNEL); | ||
677 | if (buf == NULL) | ||
678 | return -ENOMEM; | ||
679 | |||
680 | if (!ar->dfs_detector) { | ||
681 | len += scnprintf(buf + len, size - len, "DFS not enabled\n"); | ||
682 | goto exit; | ||
683 | } | ||
684 | |||
685 | ar->debug.dfs_pool_stats = | ||
686 | ar->dfs_detector->get_stats(ar->dfs_detector); | ||
687 | |||
688 | len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); | ||
689 | |||
690 | ATH10K_DFS_STAT("reported phy errors", phy_errors); | ||
691 | ATH10K_DFS_STAT("pulse events reported", pulses_total); | ||
692 | ATH10K_DFS_STAT("DFS pulses detected", pulses_detected); | ||
693 | ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded); | ||
694 | ATH10K_DFS_STAT("Radars detected", radar_detected); | ||
695 | |||
696 | len += scnprintf(buf + len, size - len, "Global Pool statistics:\n"); | ||
697 | ATH10K_DFS_POOL_STAT("Pool references", pool_reference); | ||
698 | ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated); | ||
699 | ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error); | ||
700 | ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used); | ||
701 | ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated); | ||
702 | ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error); | ||
703 | ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used); | ||
704 | |||
705 | exit: | ||
706 | if (len > size) | ||
707 | len = size; | ||
708 | |||
709 | retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); | ||
710 | kfree(buf); | ||
711 | |||
712 | return retval; | ||
713 | } | ||
714 | |||
715 | static const struct file_operations fops_dfs_stats = { | ||
716 | .read = ath10k_read_dfs_stats, | ||
717 | .open = simple_open, | ||
718 | .owner = THIS_MODULE, | ||
719 | .llseek = default_llseek, | ||
720 | }; | ||
721 | |||
642 | int ath10k_debug_create(struct ath10k *ar) | 722 | int ath10k_debug_create(struct ath10k *ar) |
643 | { | 723 | { |
644 | ar->debug.debugfs_phy = debugfs_create_dir("ath10k", | 724 | ar->debug.debugfs_phy = debugfs_create_dir("ath10k", |
@@ -667,6 +747,20 @@ int ath10k_debug_create(struct ath10k *ar) | |||
667 | debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy, | 747 | debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy, |
668 | ar, &fops_htt_stats_mask); | 748 | ar, &fops_htt_stats_mask); |
669 | 749 | ||
750 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { | ||
751 | debugfs_create_file("dfs_simulate_radar", S_IWUSR, | ||
752 | ar->debug.debugfs_phy, ar, | ||
753 | &fops_simulate_radar); | ||
754 | |||
755 | debugfs_create_bool("dfs_block_radar_events", S_IWUSR, | ||
756 | ar->debug.debugfs_phy, | ||
757 | &ar->dfs_block_radar_events); | ||
758 | |||
759 | debugfs_create_file("dfs_stats", S_IRUSR, | ||
760 | ar->debug.debugfs_phy, ar, | ||
761 | &fops_dfs_stats); | ||
762 | } | ||
763 | |||
670 | return 0; | 764 | return 0; |
671 | } | 765 | } |
672 | 766 | ||
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 3cfe3ee90dbe..1773c36c71a0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -33,6 +33,7 @@ enum ath10k_debug_mask { | |||
33 | ATH10K_DBG_MGMT = 0x00000100, | 33 | ATH10K_DBG_MGMT = 0x00000100, |
34 | ATH10K_DBG_DATA = 0x00000200, | 34 | ATH10K_DBG_DATA = 0x00000200, |
35 | ATH10K_DBG_BMI = 0x00000400, | 35 | ATH10K_DBG_BMI = 0x00000400, |
36 | ATH10K_DBG_REGULATORY = 0x00000800, | ||
36 | ATH10K_DBG_ANY = 0xffffffff, | 37 | ATH10K_DBG_ANY = 0xffffffff, |
37 | }; | 38 | }; |
38 | 39 | ||
@@ -53,6 +54,8 @@ void ath10k_debug_read_service_map(struct ath10k *ar, | |||
53 | void ath10k_debug_read_target_stats(struct ath10k *ar, | 54 | void ath10k_debug_read_target_stats(struct ath10k *ar, |
54 | struct wmi_stats_event *ev); | 55 | struct wmi_stats_event *ev); |
55 | 56 | ||
57 | #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) | ||
58 | |||
56 | #else | 59 | #else |
57 | static inline int ath10k_debug_start(struct ath10k *ar) | 60 | static inline int ath10k_debug_start(struct ath10k *ar) |
58 | { | 61 | { |
@@ -82,6 +85,9 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar, | |||
82 | struct wmi_stats_event *ev) | 85 | struct wmi_stats_event *ev) |
83 | { | 86 | { |
84 | } | 87 | } |
88 | |||
89 | #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) | ||
90 | |||
85 | #endif /* CONFIG_ATH10K_DEBUGFS */ | 91 | #endif /* CONFIG_ATH10K_DEBUGFS */ |
86 | 92 | ||
87 | #ifdef CONFIG_ATH10K_DEBUG | 93 | #ifdef CONFIG_ATH10K_DEBUG |
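The CONFIG_ATH10K_DEBUGFS=n stub above defines ATH10K_DFS_STAT_INC() as `do { } while (0)` rather than as nothing, so the no-op still behaves as exactly one C statement at every call site. A small illustration of why that matters; the macro names here are hypothetical:

#include <stdio.h>

/* Hypothetical names; only the expansion technique mirrors the header. */
#define STAT_INC_NOP(c)   do { } while (0)	/* stub when debugfs is off */
#define STAT_INC_REAL(c)  ((c)++)		/* counting version */

int main(void)
{
	int radar_detected = 0;
	int radar = 1;

	if (radar)
		STAT_INC_REAL(radar_detected);	/* expands to one statement */
	else
		printf("no radar\n");

	if (radar)
		STAT_INC_NOP(radar_detected);	/* still exactly one statement */
	else
		printf("no radar\n");

	/* An empty expansion would leave a bare `if (radar) ;` -- legal but
	 * warning-prone, and broken if the macro ever grows to more than one
	 * statement. The do { } while (0) form always acts as a single
	 * statement, so if/else pairing never changes. */
	return radar_detected;
}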
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edae50b52806..edc57ab505c8 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -191,6 +191,11 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar, | |||
191 | struct ath10k_htc *htc = &ar->htc; | 191 | struct ath10k_htc *htc = &ar->htc; |
192 | struct ath10k_htc_ep *ep = &htc->endpoint[eid]; | 192 | struct ath10k_htc_ep *ep = &htc->endpoint[eid]; |
193 | 193 | ||
194 | if (!skb) { | ||
195 | ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n"); | ||
196 | return 0; | ||
197 | } | ||
198 | |||
194 | ath10k_htc_notify_tx_completion(ep, skb); | 199 | ath10k_htc_notify_tx_completion(ep, skb); |
195 | /* the skb now belongs to the completion handler */ | 200 | /* the skb now belongs to the completion handler */ |
196 | 201 | ||
@@ -534,14 +539,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) | |||
534 | u16 credit_count; | 539 | u16 credit_count; |
535 | u16 credit_size; | 540 | u16 credit_size; |
536 | 541 | ||
537 | reinit_completion(&htc->ctl_resp); | ||
538 | |||
539 | status = ath10k_hif_start(htc->ar); | ||
540 | if (status) { | ||
541 | ath10k_err("could not start HIF (%d)\n", status); | ||
542 | goto err_start; | ||
543 | } | ||
544 | |||
545 | status = wait_for_completion_timeout(&htc->ctl_resp, | 542 | status = wait_for_completion_timeout(&htc->ctl_resp, |
546 | ATH10K_HTC_WAIT_TIMEOUT_HZ); | 543 | ATH10K_HTC_WAIT_TIMEOUT_HZ); |
547 | if (status <= 0) { | 544 | if (status <= 0) { |
@@ -549,15 +546,13 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) | |||
549 | status = -ETIMEDOUT; | 546 | status = -ETIMEDOUT; |
550 | 547 | ||
551 | ath10k_err("ctl_resp never came in (%d)\n", status); | 548 | ath10k_err("ctl_resp never came in (%d)\n", status); |
552 | goto err_target; | 549 | return status; |
553 | } | 550 | } |
554 | 551 | ||
555 | if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) { | 552 | if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) { |
556 | ath10k_err("Invalid HTC ready msg len:%d\n", | 553 | ath10k_err("Invalid HTC ready msg len:%d\n", |
557 | htc->control_resp_len); | 554 | htc->control_resp_len); |
558 | 555 | return -ECOMM; | |
559 | status = -ECOMM; | ||
560 | goto err_target; | ||
561 | } | 556 | } |
562 | 557 | ||
563 | msg = (struct ath10k_htc_msg *)htc->control_resp_buffer; | 558 | msg = (struct ath10k_htc_msg *)htc->control_resp_buffer; |
@@ -567,8 +562,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) | |||
567 | 562 | ||
568 | if (message_id != ATH10K_HTC_MSG_READY_ID) { | 563 | if (message_id != ATH10K_HTC_MSG_READY_ID) { |
569 | ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id); | 564 | ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id); |
570 | status = -ECOMM; | 565 | return -ECOMM; |
571 | goto err_target; | ||
572 | } | 566 | } |
573 | 567 | ||
574 | htc->total_transmit_credits = credit_count; | 568 | htc->total_transmit_credits = credit_count; |
@@ -581,9 +575,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) | |||
581 | 575 | ||
582 | if ((htc->total_transmit_credits == 0) || | 576 | if ((htc->total_transmit_credits == 0) || |
583 | (htc->target_credit_size == 0)) { | 577 | (htc->target_credit_size == 0)) { |
584 | status = -ECOMM; | ||
585 | ath10k_err("Invalid credit size received\n"); | 578 | ath10k_err("Invalid credit size received\n"); |
586 | goto err_target; | 579 | return -ECOMM; |
587 | } | 580 | } |
588 | 581 | ||
589 | ath10k_htc_setup_target_buffer_assignments(htc); | 582 | ath10k_htc_setup_target_buffer_assignments(htc); |
@@ -600,14 +593,10 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) | |||
600 | status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp); | 593 | status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp); |
601 | if (status) { | 594 | if (status) { |
602 | ath10k_err("could not connect to htc service (%d)\n", status); | 595 | ath10k_err("could not connect to htc service (%d)\n", status); |
603 | goto err_target; | 596 | return status; |
604 | } | 597 | } |
605 | 598 | ||
606 | return 0; | 599 | return 0; |
607 | err_target: | ||
608 | ath10k_hif_stop(htc->ar); | ||
609 | err_start: | ||
610 | return status; | ||
611 | } | 600 | } |
612 | 601 | ||
613 | int ath10k_htc_connect_service(struct ath10k_htc *htc, | 602 | int ath10k_htc_connect_service(struct ath10k_htc *htc, |
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 5f7eeebc5432..69697af59ce0 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,8 +104,8 @@ err_htc_attach: | |||
104 | 104 | ||
105 | static int ath10k_htt_verify_version(struct ath10k_htt *htt) | 105 | static int ath10k_htt_verify_version(struct ath10k_htt *htt) |
106 | { | 106 | { |
107 | ath10k_info("htt target version %d.%d\n", | 107 | ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n", |
108 | htt->target_version_major, htt->target_version_minor); | 108 | htt->target_version_major, htt->target_version_minor); |
109 | 109 | ||
110 | if (htt->target_version_major != 2 && | 110 | if (htt->target_version_major != 2 && |
111 | htt->target_version_major != 3) { | 111 | htt->target_version_major != 3) { |
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 1a337e93b7e9..7fc7919ea5f5 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1182,6 +1182,7 @@ struct htt_rx_info { | |||
1182 | u32 info2; | 1182 | u32 info2; |
1183 | } rate; | 1183 | } rate; |
1184 | bool fcs_err; | 1184 | bool fcs_err; |
1185 | bool amsdu_more; | ||
1185 | }; | 1186 | }; |
1186 | 1187 | ||
1187 | struct ath10k_htt { | 1188 | struct ath10k_htt { |
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 90d4f74c28d7..fcb534f2f28f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -659,23 +659,6 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, | |||
659 | memcpy(hdr_buf, hdr, hdr_len); | 659 | memcpy(hdr_buf, hdr, hdr_len); |
660 | hdr = (struct ieee80211_hdr *)hdr_buf; | 660 | hdr = (struct ieee80211_hdr *)hdr_buf; |
661 | 661 | ||
662 | /* FIXME: Hopefully this is a temporary measure. | ||
663 | * | ||
664 | * Reporting individual A-MSDU subframes means each reported frame | ||
665 | * shares the same sequence number. | ||
666 | * | ||
667 | * mac80211 drops frames it recognizes as duplicates, i.e. | ||
668 | * retransmission flag is set and sequence number matches sequence | ||
669 | * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10 | ||
670 | * "Duplicate detection and recovery") | ||
671 | * | ||
672 | * To avoid frames being dropped clear retransmission flag for all | ||
673 | * received A-MSDUs. | ||
674 | * | ||
675 | * Worst case: actual duplicate frames will be reported but this should | ||
676 | * still be handled gracefully by other OSI/ISO layers. */ | ||
677 | hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY); | ||
678 | |||
679 | first = skb; | 662 | first = skb; |
680 | while (skb) { | 663 | while (skb) { |
681 | void *decap_hdr; | 664 | void *decap_hdr; |
@@ -746,6 +729,9 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, | |||
746 | skb = skb->next; | 729 | skb = skb->next; |
747 | info->skb->next = NULL; | 730 | info->skb->next = NULL; |
748 | 731 | ||
732 | if (skb) | ||
733 | info->amsdu_more = true; | ||
734 | |||
749 | ath10k_process_rx(htt->ar, info); | 735 | ath10k_process_rx(htt->ar, info); |
750 | } | 736 | } |
751 | 737 | ||
@@ -959,6 +945,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, | |||
959 | continue; | 945 | continue; |
960 | } | 946 | } |
961 | 947 | ||
948 | if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { | ||
949 | ath10k_htt_rx_free_msdu_chain(msdu_head); | ||
950 | continue; | ||
951 | } | ||
952 | |||
962 | /* FIXME: we do not support chaining yet. | 953 | /* FIXME: we do not support chaining yet. |
963 | * this needs investigation */ | 954 | * this needs investigation */ |
964 | if (msdu_chaining) { | 955 | if (msdu_chaining) { |
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index d9335e9d0d04..f1d36d2d2723 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -85,16 +85,13 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) | |||
85 | 85 | ||
86 | int ath10k_htt_tx_attach(struct ath10k_htt *htt) | 86 | int ath10k_htt_tx_attach(struct ath10k_htt *htt) |
87 | { | 87 | { |
88 | u8 pipe; | ||
89 | |||
90 | spin_lock_init(&htt->tx_lock); | 88 | spin_lock_init(&htt->tx_lock); |
91 | init_waitqueue_head(&htt->empty_tx_wq); | 89 | init_waitqueue_head(&htt->empty_tx_wq); |
92 | 90 | ||
93 | /* At the beginning free queue number should hint us the maximum | 91 | if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features)) |
94 | * queue length */ | 92 | htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; |
95 | pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id; | 93 | else |
96 | htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar, | 94 | htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC; |
97 | pipe); | ||
98 | 95 | ||
99 | ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", | 96 | ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", |
100 | htt->max_num_pending_tx); | 97 | htt->max_num_pending_tx); |
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 8aeb46d9b534..9535eaa09f09 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -269,6 +269,7 @@ enum ath10k_mcast2ucast_mode { | |||
269 | #define CORE_CTRL_CPU_INTR_MASK 0x00002000 | 269 | #define CORE_CTRL_CPU_INTR_MASK 0x00002000 |
270 | #define CORE_CTRL_ADDRESS 0x0000 | 270 | #define CORE_CTRL_ADDRESS 0x0000 |
271 | #define PCIE_INTR_ENABLE_ADDRESS 0x0008 | 271 | #define PCIE_INTR_ENABLE_ADDRESS 0x0008 |
272 | #define PCIE_INTR_CAUSE_ADDRESS 0x000c | ||
272 | #define PCIE_INTR_CLR_ADDRESS 0x0014 | 273 | #define PCIE_INTR_CLR_ADDRESS 0x0014 |
273 | #define SCRATCH_3_ADDRESS 0x0030 | 274 | #define SCRATCH_3_ADDRESS 0x0030 |
274 | 275 | ||
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 06fe2b8fa22d..ce9ef3499ecb 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -322,12 +322,16 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) | |||
322 | lockdep_assert_held(&ar->conf_mutex); | 322 | lockdep_assert_held(&ar->conf_mutex); |
323 | 323 | ||
324 | ret = ath10k_wmi_peer_create(ar, vdev_id, addr); | 324 | ret = ath10k_wmi_peer_create(ar, vdev_id, addr); |
325 | if (ret) | 325 | if (ret) { |
326 | ath10k_warn("Failed to create wmi peer: %i\n", ret); | ||
326 | return ret; | 327 | return ret; |
328 | } | ||
327 | 329 | ||
328 | ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); | 330 | ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); |
329 | if (ret) | 331 | if (ret) { |
332 | ath10k_warn("Failed to wait for created wmi peer: %i\n", ret); | ||
330 | return ret; | 333 | return ret; |
334 | } | ||
331 | 335 | ||
332 | return 0; | 336 | return 0; |
333 | } | 337 | } |
@@ -450,15 +454,19 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif) | |||
450 | 454 | ||
451 | arg.channel.mode = chan_to_phymode(&conf->chandef); | 455 | arg.channel.mode = chan_to_phymode(&conf->chandef); |
452 | 456 | ||
453 | arg.channel.min_power = channel->max_power * 3; | 457 | arg.channel.min_power = 0; |
454 | arg.channel.max_power = channel->max_power * 4; | 458 | arg.channel.max_power = channel->max_power * 2; |
455 | arg.channel.max_reg_power = channel->max_reg_power * 4; | 459 | arg.channel.max_reg_power = channel->max_reg_power * 2; |
456 | arg.channel.max_antenna_gain = channel->max_antenna_gain; | 460 | arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; |
457 | 461 | ||
458 | if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { | 462 | if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { |
459 | arg.ssid = arvif->u.ap.ssid; | 463 | arg.ssid = arvif->u.ap.ssid; |
460 | arg.ssid_len = arvif->u.ap.ssid_len; | 464 | arg.ssid_len = arvif->u.ap.ssid_len; |
461 | arg.hidden_ssid = arvif->u.ap.hidden_ssid; | 465 | arg.hidden_ssid = arvif->u.ap.hidden_ssid; |
466 | |||
467 | /* For now allow DFS for AP mode */ | ||
468 | arg.channel.chan_radar = | ||
469 | !!(channel->flags & IEEE80211_CHAN_RADAR); | ||
462 | } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { | 470 | } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { |
463 | arg.ssid = arvif->vif->bss_conf.ssid; | 471 | arg.ssid = arvif->vif->bss_conf.ssid; |
464 | arg.ssid_len = arvif->vif->bss_conf.ssid_len; | 472 | arg.ssid_len = arvif->vif->bss_conf.ssid_len; |
@@ -516,6 +524,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) | |||
516 | 524 | ||
517 | lockdep_assert_held(&ar->conf_mutex); | 525 | lockdep_assert_held(&ar->conf_mutex); |
518 | 526 | ||
527 | if (!ar->monitor_present) { | ||
528 | ath10k_warn("mac montor stop -- monitor is not present\n"); | ||
529 | return -EINVAL; | ||
530 | } | ||
531 | |||
519 | arg.vdev_id = vdev_id; | 532 | arg.vdev_id = vdev_id; |
520 | arg.channel.freq = channel->center_freq; | 533 | arg.channel.freq = channel->center_freq; |
521 | arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; | 534 | arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; |
@@ -523,11 +536,13 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) | |||
523 | /* TODO setup this dynamically, what in case we | 536 | /* TODO setup this dynamically, what in case we |
524 | don't have any vifs? */ | 537 | don't have any vifs? */ |
525 | arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef); | 538 | arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef); |
539 | arg.channel.chan_radar = | ||
540 | !!(channel->flags & IEEE80211_CHAN_RADAR); | ||
526 | 541 | ||
527 | arg.channel.min_power = channel->max_power * 3; | 542 | arg.channel.min_power = 0; |
528 | arg.channel.max_power = channel->max_power * 4; | 543 | arg.channel.max_power = channel->max_power * 2; |
529 | arg.channel.max_reg_power = channel->max_reg_power * 4; | 544 | arg.channel.max_reg_power = channel->max_reg_power * 2; |
530 | arg.channel.max_antenna_gain = channel->max_antenna_gain; | 545 | arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; |
531 | 546 | ||
532 | ret = ath10k_wmi_vdev_start(ar, &arg); | 547 | ret = ath10k_wmi_vdev_start(ar, &arg); |
533 | if (ret) { | 548 | if (ret) { |
@@ -566,6 +581,16 @@ static int ath10k_monitor_stop(struct ath10k *ar) | |||
566 | 581 | ||
567 | lockdep_assert_held(&ar->conf_mutex); | 582 | lockdep_assert_held(&ar->conf_mutex); |
568 | 583 | ||
584 | if (!ar->monitor_present) { | ||
585 | ath10k_warn("mac montor stop -- monitor is not present\n"); | ||
586 | return -EINVAL; | ||
587 | } | ||
588 | |||
589 | if (!ar->monitor_enabled) { | ||
590 | ath10k_warn("mac montor stop -- monitor is not enabled\n"); | ||
591 | return -EINVAL; | ||
592 | } | ||
593 | |||
569 | ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); | 594 | ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); |
570 | if (ret) | 595 | if (ret) |
571 | ath10k_warn("Monitor vdev down failed: %d\n", ret); | 596 | ath10k_warn("Monitor vdev down failed: %d\n", ret); |
@@ -647,6 +672,107 @@ static int ath10k_monitor_destroy(struct ath10k *ar) | |||
647 | return ret; | 672 | return ret; |
648 | } | 673 | } |
649 | 674 | ||
675 | static int ath10k_start_cac(struct ath10k *ar) | ||
676 | { | ||
677 | int ret; | ||
678 | |||
679 | lockdep_assert_held(&ar->conf_mutex); | ||
680 | |||
681 | set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); | ||
682 | |||
683 | ret = ath10k_monitor_create(ar); | ||
684 | if (ret) { | ||
685 | clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); | ||
686 | return ret; | ||
687 | } | ||
688 | |||
689 | ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); | ||
690 | if (ret) { | ||
691 | clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); | ||
692 | ath10k_monitor_destroy(ar); | ||
693 | return ret; | ||
694 | } | ||
695 | |||
696 | ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", | ||
697 | ar->monitor_vdev_id); | ||
698 | |||
699 | return 0; | ||
700 | } | ||
701 | |||
702 | static int ath10k_stop_cac(struct ath10k *ar) | ||
703 | { | ||
704 | lockdep_assert_held(&ar->conf_mutex); | ||
705 | |||
706 | /* CAC is not running - do nothing */ | ||
707 | if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) | ||
708 | return 0; | ||
709 | |||
710 | ath10k_monitor_stop(ar); | ||
711 | ath10k_monitor_destroy(ar); | ||
712 | clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); | ||
713 | |||
714 | ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n"); | ||
715 | |||
716 | return 0; | ||
717 | } | ||
718 | |||
719 | static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state) | ||
720 | { | ||
721 | switch (dfs_state) { | ||
722 | case NL80211_DFS_USABLE: | ||
723 | return "USABLE"; | ||
724 | case NL80211_DFS_UNAVAILABLE: | ||
725 | return "UNAVAILABLE"; | ||
726 | case NL80211_DFS_AVAILABLE: | ||
727 | return "AVAILABLE"; | ||
728 | default: | ||
729 | WARN_ON(1); | ||
730 | return "bug"; | ||
731 | } | ||
732 | } | ||
733 | |||
734 | static void ath10k_config_radar_detection(struct ath10k *ar) | ||
735 | { | ||
736 | struct ieee80211_channel *chan = ar->hw->conf.chandef.chan; | ||
737 | bool radar = ar->hw->conf.radar_enabled; | ||
738 | bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR); | ||
739 | enum nl80211_dfs_state dfs_state = chan->dfs_state; | ||
740 | int ret; | ||
741 | |||
742 | lockdep_assert_held(&ar->conf_mutex); | ||
743 | |||
744 | ath10k_dbg(ATH10K_DBG_MAC, | ||
745 | "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n", | ||
746 | chan->center_freq, radar, chan_radar, | ||
747 | ath10k_dfs_state(dfs_state)); | ||
748 | |||
749 | /* | ||
750 | * It's safe to call it even if CAC is not started. | ||
751 | * This call here guarantees changing channel, etc. will stop CAC. | ||
752 | */ | ||
753 | ath10k_stop_cac(ar); | ||
754 | |||
755 | if (!radar) | ||
756 | return; | ||
757 | |||
758 | if (!chan_radar) | ||
759 | return; | ||
760 | |||
761 | if (dfs_state != NL80211_DFS_USABLE) | ||
762 | return; | ||
763 | |||
764 | ret = ath10k_start_cac(ar); | ||
765 | if (ret) { | ||
766 | /* | ||
767 | * Not possible to start CAC on current channel so starting | ||
768 | * radiation is not allowed, make this channel DFS_UNAVAILABLE | ||
769 | * by indicating that radar was detected. | ||
770 | */ | ||
771 | ath10k_warn("failed to start CAC (%d)\n", ret); | ||
772 | ieee80211_radar_detected(ar->hw); | ||
773 | } | ||
774 | } | ||
775 | |||
650 | static void ath10k_control_beaconing(struct ath10k_vif *arvif, | 776 | static void ath10k_control_beaconing(struct ath10k_vif *arvif, |
651 | struct ieee80211_bss_conf *info) | 777 | struct ieee80211_bss_conf *info) |
652 | { | 778 | { |
@@ -1356,14 +1482,17 @@ static int ath10k_update_channel_list(struct ath10k *ar) | |||
1356 | ch->ht40plus = | 1482 | ch->ht40plus = |
1357 | !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); | 1483 | !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); |
1358 | 1484 | ||
1485 | ch->chan_radar = | ||
1486 | !!(channel->flags & IEEE80211_CHAN_RADAR); | ||
1487 | |||
1359 | passive = channel->flags & IEEE80211_CHAN_NO_IR; | 1488 | passive = channel->flags & IEEE80211_CHAN_NO_IR; |
1360 | ch->passive = passive; | 1489 | ch->passive = passive; |
1361 | 1490 | ||
1362 | ch->freq = channel->center_freq; | 1491 | ch->freq = channel->center_freq; |
1363 | ch->min_power = channel->max_power * 3; | 1492 | ch->min_power = 0; |
1364 | ch->max_power = channel->max_power * 4; | 1493 | ch->max_power = channel->max_power * 2; |
1365 | ch->max_reg_power = channel->max_reg_power * 4; | 1494 | ch->max_reg_power = channel->max_reg_power * 2; |
1366 | ch->max_antenna_gain = channel->max_antenna_gain; | 1495 | ch->max_antenna_gain = channel->max_antenna_gain * 2; |
1367 | ch->reg_class_id = 0; /* FIXME */ | 1496 | ch->reg_class_id = 0; /* FIXME */ |
1368 | 1497 | ||
1369 | /* FIXME: why use only legacy modes, why not any | 1498 | /* FIXME: why use only legacy modes, why not any |
@@ -1423,9 +1552,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, | |||
1423 | { | 1552 | { |
1424 | struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); | 1553 | struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); |
1425 | struct ath10k *ar = hw->priv; | 1554 | struct ath10k *ar = hw->priv; |
1555 | bool result; | ||
1426 | 1556 | ||
1427 | ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); | 1557 | ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); |
1428 | 1558 | ||
1559 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { | ||
1560 | ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", | ||
1561 | request->dfs_region); | ||
1562 | result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, | ||
1563 | request->dfs_region); | ||
1564 | if (!result) | ||
1565 | ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n", | ||
1566 | request->dfs_region); | ||
1567 | } | ||
1568 | |||
1429 | mutex_lock(&ar->conf_mutex); | 1569 | mutex_lock(&ar->conf_mutex); |
1430 | if (ar->state == ATH10K_STATE_ON) | 1570 | if (ar->state == ATH10K_STATE_ON) |
1431 | ath10k_regd_update(ar); | 1571 | ath10k_regd_update(ar); |
@@ -1714,8 +1854,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) | |||
1714 | break; | 1854 | break; |
1715 | 1855 | ||
1716 | ret = ath10k_wmi_mgmt_tx(ar, skb); | 1856 | ret = ath10k_wmi_mgmt_tx(ar, skb); |
1717 | if (ret) | 1857 | if (ret) { |
1718 | ath10k_warn("wmi mgmt_tx failed (%d)\n", ret); | 1858 | ath10k_warn("wmi mgmt_tx failed (%d)\n", ret); |
1859 | ieee80211_free_txskb(ar->hw, skb); | ||
1860 | } | ||
1719 | } | 1861 | } |
1720 | } | 1862 | } |
1721 | 1863 | ||
@@ -1889,6 +2031,7 @@ void ath10k_halt(struct ath10k *ar) | |||
1889 | { | 2031 | { |
1890 | lockdep_assert_held(&ar->conf_mutex); | 2032 | lockdep_assert_held(&ar->conf_mutex); |
1891 | 2033 | ||
2034 | ath10k_stop_cac(ar); | ||
1892 | del_timer_sync(&ar->scan.timeout); | 2035 | del_timer_sync(&ar->scan.timeout); |
1893 | ath10k_offchan_tx_purge(ar); | 2036 | ath10k_offchan_tx_purge(ar); |
1894 | ath10k_mgmt_over_wmi_tx_purge(ar); | 2037 | ath10k_mgmt_over_wmi_tx_purge(ar); |
@@ -1943,7 +2086,7 @@ static int ath10k_start(struct ieee80211_hw *hw) | |||
1943 | ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", | 2086 | ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", |
1944 | ret); | 2087 | ret); |
1945 | 2088 | ||
1946 | ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0); | 2089 | ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); |
1947 | if (ret) | 2090 | if (ret) |
1948 | ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", | 2091 | ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", |
1949 | ret); | 2092 | ret); |
@@ -1998,15 +2141,40 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) | |||
1998 | struct ath10k *ar = hw->priv; | 2141 | struct ath10k *ar = hw->priv; |
1999 | struct ieee80211_conf *conf = &hw->conf; | 2142 | struct ieee80211_conf *conf = &hw->conf; |
2000 | int ret = 0; | 2143 | int ret = 0; |
2144 | u32 param; | ||
2001 | 2145 | ||
2002 | mutex_lock(&ar->conf_mutex); | 2146 | mutex_lock(&ar->conf_mutex); |
2003 | 2147 | ||
2004 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { | 2148 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { |
2005 | ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n", | 2149 | ath10k_dbg(ATH10K_DBG_MAC, |
2006 | conf->chandef.chan->center_freq); | 2150 | "mac config channel %d mhz flags 0x%x\n", |
2151 | conf->chandef.chan->center_freq, | ||
2152 | conf->chandef.chan->flags); | ||
2153 | |||
2007 | spin_lock_bh(&ar->data_lock); | 2154 | spin_lock_bh(&ar->data_lock); |
2008 | ar->rx_channel = conf->chandef.chan; | 2155 | ar->rx_channel = conf->chandef.chan; |
2009 | spin_unlock_bh(&ar->data_lock); | 2156 | spin_unlock_bh(&ar->data_lock); |
2157 | |||
2158 | ath10k_config_radar_detection(ar); | ||
2159 | } | ||
2160 | |||
2161 | if (changed & IEEE80211_CONF_CHANGE_POWER) { | ||
2162 | ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n", | ||
2163 | hw->conf.power_level); | ||
2164 | |||
2165 | param = ar->wmi.pdev_param->txpower_limit2g; | ||
2166 | ret = ath10k_wmi_pdev_set_param(ar, param, | ||
2167 | hw->conf.power_level * 2); | ||
2168 | if (ret) | ||
2169 | ath10k_warn("mac failed to set 2g txpower %d (%d)\n", | ||
2170 | hw->conf.power_level, ret); | ||
2171 | |||
2172 | param = ar->wmi.pdev_param->txpower_limit5g; | ||
2173 | ret = ath10k_wmi_pdev_set_param(ar, param, | ||
2174 | hw->conf.power_level * 2); | ||
2175 | if (ret) | ||
2176 | ath10k_warn("mac failed to set 5g txpower %d (%d)\n", | ||
2177 | hw->conf.power_level, ret); | ||
2010 | } | 2178 | } |
2011 | 2179 | ||
2012 | if (changed & IEEE80211_CONF_CHANGE_PS) | 2180 | if (changed & IEEE80211_CONF_CHANGE_PS) |
@@ -2049,6 +2217,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, | |||
2049 | arvif->vif = vif; | 2217 | arvif->vif = vif; |
2050 | 2218 | ||
2051 | INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); | 2219 | INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); |
2220 | INIT_LIST_HEAD(&arvif->list); | ||
2052 | 2221 | ||
2053 | if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { | 2222 | if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { |
2054 | ath10k_warn("Only one monitor interface allowed\n"); | 2223 | ath10k_warn("Only one monitor interface allowed\n"); |
@@ -2265,8 +2434,14 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw, | |||
2265 | *total_flags &= SUPPORTED_FILTERS; | 2434 | *total_flags &= SUPPORTED_FILTERS; |
2266 | ar->filter_flags = *total_flags; | 2435 | ar->filter_flags = *total_flags; |
2267 | 2436 | ||
2437 | /* Monitor must not be started if it wasn't created first. | ||
2438 | * Promiscuous mode may be started on a non-monitor interface - in | ||
2439 | * such case the monitor vdev is not created so starting the | ||
2440 | * monitor makes no sense. Since ath10k uses no special RX filters | ||
2441 | * (only BSS filter in STA mode) there's no need for any special | ||
2442 | * action here. */ | ||
2268 | if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && | 2443 | if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && |
2269 | !ar->monitor_enabled) { | 2444 | !ar->monitor_enabled && ar->monitor_present) { |
2270 | ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n", | 2445 | ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n", |
2271 | ar->monitor_vdev_id); | 2446 | ar->monitor_vdev_id); |
2272 | 2447 | ||
@@ -2274,7 +2449,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw, | |||
2274 | if (ret) | 2449 | if (ret) |
2275 | ath10k_warn("Unable to start monitor mode\n"); | 2450 | ath10k_warn("Unable to start monitor mode\n"); |
2276 | } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && | 2451 | } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && |
2277 | ar->monitor_enabled) { | 2452 | ar->monitor_enabled && ar->monitor_present) { |
2278 | ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n", | 2453 | ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n", |
2279 | ar->monitor_vdev_id); | 2454 | ar->monitor_vdev_id); |
2280 | 2455 | ||
@@ -2360,8 +2535,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, | |||
2360 | ret = ath10k_peer_create(ar, arvif->vdev_id, | 2535 | ret = ath10k_peer_create(ar, arvif->vdev_id, |
2361 | info->bssid); | 2536 | info->bssid); |
2362 | if (ret) | 2537 | if (ret) |
2363 | ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", | 2538 | ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n", |
2364 | info->bssid, arvif->vdev_id); | 2539 | info->bssid, arvif->vdev_id, ret); |
2365 | 2540 | ||
2366 | if (vif->type == NL80211_IFTYPE_STATION) { | 2541 | if (vif->type == NL80211_IFTYPE_STATION) { |
2367 | /* | 2542 | /* |
@@ -2542,6 +2717,44 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, | |||
2542 | mutex_unlock(&ar->conf_mutex); | 2717 | mutex_unlock(&ar->conf_mutex); |
2543 | } | 2718 | } |
2544 | 2719 | ||
2720 | static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, | ||
2721 | struct ath10k_vif *arvif, | ||
2722 | enum set_key_cmd cmd, | ||
2723 | struct ieee80211_key_conf *key) | ||
2724 | { | ||
2725 | u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; | ||
2726 | int ret; | ||
2727 | |||
2728 | /* 10.1 firmware branch requires default key index to be set to group | ||
2729 | * key index after installing it. Otherwise FW/HW Txes corrupted | ||
2730 | * frames with multi-vif APs. This is not required for main firmware | ||
2731 | * branch (e.g. 636). | ||
2732 | * | ||
2733 | * FIXME: This has been tested only in AP. It remains unknown if this | ||
2734 | * is required for multi-vif STA interfaces on 10.1 */ | ||
2735 | |||
2736 | if (arvif->vdev_type != WMI_VDEV_TYPE_AP) | ||
2737 | return; | ||
2738 | |||
2739 | if (key->cipher == WLAN_CIPHER_SUITE_WEP40) | ||
2740 | return; | ||
2741 | |||
2742 | if (key->cipher == WLAN_CIPHER_SUITE_WEP104) | ||
2743 | return; | ||
2744 | |||
2745 | if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) | ||
2746 | return; | ||
2747 | |||
2748 | if (cmd != SET_KEY) | ||
2749 | return; | ||
2750 | |||
2751 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, | ||
2752 | key->keyidx); | ||
2753 | if (ret) | ||
2754 | ath10k_warn("failed to set group key as default key: %d\n", | ||
2755 | ret); | ||
2756 | } | ||
2757 | |||
2545 | static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | 2758 | static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, |
2546 | struct ieee80211_vif *vif, struct ieee80211_sta *sta, | 2759 | struct ieee80211_vif *vif, struct ieee80211_sta *sta, |
2547 | struct ieee80211_key_conf *key) | 2760 | struct ieee80211_key_conf *key) |
@@ -2603,6 +2816,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
2603 | goto exit; | 2816 | goto exit; |
2604 | } | 2817 | } |
2605 | 2818 | ||
2819 | ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); | ||
2820 | |||
2606 | spin_lock_bh(&ar->data_lock); | 2821 | spin_lock_bh(&ar->data_lock); |
2607 | peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); | 2822 | peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); |
2608 | if (peer && cmd == SET_KEY) | 2823 | if (peer && cmd == SET_KEY) |
@@ -2643,8 +2858,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, | |||
2643 | 2858 | ||
2644 | ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); | 2859 | ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); |
2645 | if (ret) | 2860 | if (ret) |
2646 | ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", | 2861 | ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n", |
2647 | sta->addr, arvif->vdev_id); | 2862 | sta->addr, arvif->vdev_id, ret); |
2648 | } else if ((old_state == IEEE80211_STA_NONE && | 2863 | } else if ((old_state == IEEE80211_STA_NONE && |
2649 | new_state == IEEE80211_STA_NOTEXIST)) { | 2864 | new_state == IEEE80211_STA_NOTEXIST)) { |
2650 | /* | 2865 | /* |
@@ -3249,12 +3464,36 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = { | |||
3249 | }, | 3464 | }, |
3250 | }; | 3465 | }; |
3251 | 3466 | ||
3252 | static const struct ieee80211_iface_combination ath10k_if_comb = { | 3467 | #ifdef CONFIG_ATH10K_DFS_CERTIFIED |
3253 | .limits = ath10k_if_limits, | 3468 | static const struct ieee80211_iface_limit ath10k_if_dfs_limits[] = { |
3254 | .n_limits = ARRAY_SIZE(ath10k_if_limits), | 3469 | { |
3255 | .max_interfaces = 8, | 3470 | .max = 8, |
3256 | .num_different_channels = 1, | 3471 | .types = BIT(NL80211_IFTYPE_AP) |
3257 | .beacon_int_infra_match = true, | 3472 | }, |
3473 | }; | ||
3474 | #endif | ||
3475 | |||
3476 | static const struct ieee80211_iface_combination ath10k_if_comb[] = { | ||
3477 | { | ||
3478 | .limits = ath10k_if_limits, | ||
3479 | .n_limits = ARRAY_SIZE(ath10k_if_limits), | ||
3480 | .max_interfaces = 8, | ||
3481 | .num_different_channels = 1, | ||
3482 | .beacon_int_infra_match = true, | ||
3483 | }, | ||
3484 | #ifdef CONFIG_ATH10K_DFS_CERTIFIED | ||
3485 | { | ||
3486 | .limits = ath10k_if_dfs_limits, | ||
3487 | .n_limits = ARRAY_SIZE(ath10k_if_dfs_limits), | ||
3488 | .max_interfaces = 8, | ||
3489 | .num_different_channels = 1, | ||
3490 | .beacon_int_infra_match = true, | ||
3491 | .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | | ||
3492 | BIT(NL80211_CHAN_WIDTH_20) | | ||
3493 | BIT(NL80211_CHAN_WIDTH_40) | | ||
3494 | BIT(NL80211_CHAN_WIDTH_80), | ||
3495 | } | ||
3496 | #endif | ||
3258 | }; | 3497 | }; |
3259 | 3498 | ||
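Note: the combination entries above are only declarative; cfg80211/mac80211 enforces them when interfaces are created. The standalone C sketch below is a simplified userspace model of the check one such entry implies (per-type limits plus the overall max_interfaces cap). All names are hypothetical and this is not the cfg80211 implementation, which additionally groups counts across limits.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical, simplified model of one interface-combination entry. */
	struct iface_limit { unsigned int type_bits; unsigned int max; };
	struct iface_comb {
		const struct iface_limit *limits;
		size_t n_limits;
		unsigned int max_interfaces;
	};

	/* counts[] is indexed by interface-type bit position. */
	static bool comb_allows(const struct iface_comb *c,
				const unsigned int *counts, size_t n_types)
	{
		unsigned int total = 0;
		size_t t, l;

		for (t = 0; t < n_types; t++) {
			total += counts[t];
			if (!counts[t])
				continue;
			/* Some limit entry must cover this type and count. */
			for (l = 0; l < c->n_limits; l++)
				if ((c->limits[l].type_bits & (1u << t)) &&
				    counts[t] <= c->limits[l].max)
					break;
			if (l == c->n_limits)
				return false;
		}
		return total <= c->max_interfaces;
	}

	int main(void)
	{
		static const struct iface_limit limits[] = {
			{ .type_bits = 1u << 0, .max = 8 },	/* e.g. AP-like type */
		};
		struct iface_comb comb = { limits, 1, 8 };
		unsigned int counts[1] = { 3 };			/* three vifs of that type */

		printf("allowed: %d\n", comb_allows(&comb, counts, 1));
		return 0;
	}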
3260 | static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) | 3499 | static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) |
@@ -3478,11 +3717,21 @@ int ath10k_mac_register(struct ath10k *ar) | |||
3478 | */ | 3717 | */ |
3479 | ar->hw->queues = 4; | 3718 | ar->hw->queues = 4; |
3480 | 3719 | ||
3481 | ar->hw->wiphy->iface_combinations = &ath10k_if_comb; | 3720 | ar->hw->wiphy->iface_combinations = ath10k_if_comb; |
3482 | ar->hw->wiphy->n_iface_combinations = 1; | 3721 | ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath10k_if_comb); |
3483 | 3722 | ||
3484 | ar->hw->netdev_features = NETIF_F_HW_CSUM; | 3723 | ar->hw->netdev_features = NETIF_F_HW_CSUM; |
3485 | 3724 | ||
3725 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { | ||
3726 | /* Init ath dfs pattern detector */ | ||
3727 | ar->ath_common.debug_mask = ATH_DBG_DFS; | ||
3728 | ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, | ||
3729 | NL80211_DFS_UNSET); | ||
3730 | |||
3731 | if (!ar->dfs_detector) | ||
3732 | ath10k_warn("dfs pattern detector init failed\n"); | ||
3733 | } | ||
3734 | |||
3486 | ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, | 3735 | ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, |
3487 | ath10k_reg_notifier); | 3736 | ath10k_reg_notifier); |
3488 | if (ret) { | 3737 | if (ret) { |
@@ -3518,6 +3767,9 @@ void ath10k_mac_unregister(struct ath10k *ar) | |||
3518 | { | 3767 | { |
3519 | ieee80211_unregister_hw(ar->hw); | 3768 | ieee80211_unregister_hw(ar->hw); |
3520 | 3769 | ||
3770 | if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) | ||
3771 | ar->dfs_detector->exit(ar->dfs_detector); | ||
3772 | |||
3521 | kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); | 3773 | kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); |
3522 | kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); | 3774 | kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); |
3523 | 3775 | ||
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 9e86a811086f..29fd197d1fd8 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/bitops.h> | ||
22 | 23 | ||
23 | #include "core.h" | 24 | #include "core.h" |
24 | #include "debug.h" | 25 | #include "debug.h" |
@@ -32,10 +33,21 @@ | |||
32 | #include "ce.h" | 33 | #include "ce.h" |
33 | #include "pci.h" | 34 | #include "pci.h" |
34 | 35 | ||
36 | enum ath10k_pci_irq_mode { | ||
37 | ATH10K_PCI_IRQ_AUTO = 0, | ||
38 | ATH10K_PCI_IRQ_LEGACY = 1, | ||
39 | ATH10K_PCI_IRQ_MSI = 2, | ||
40 | }; | ||
41 | |||
35 | static unsigned int ath10k_target_ps; | 42 | static unsigned int ath10k_target_ps; |
43 | static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; | ||
44 | |||
36 | module_param(ath10k_target_ps, uint, 0644); | 45 | module_param(ath10k_target_ps, uint, 0644); |
37 | MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); | 46 | MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); |
38 | 47 | ||
48 | module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); | ||
49 | MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); | ||
50 | |||
39 | #define QCA988X_2_0_DEVICE_ID (0x003c) | 51 | #define QCA988X_2_0_DEVICE_ID (0x003c) |
40 | 52 | ||
41 | static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { | 53 | static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { |
@@ -52,10 +64,16 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, | |||
52 | int num); | 64 | int num); |
53 | static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); | 65 | static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); |
54 | static void ath10k_pci_stop_ce(struct ath10k *ar); | 66 | static void ath10k_pci_stop_ce(struct ath10k *ar); |
55 | static void ath10k_pci_device_reset(struct ath10k *ar); | 67 | static int ath10k_pci_device_reset(struct ath10k *ar); |
56 | static int ath10k_pci_reset_target(struct ath10k *ar); | 68 | static int ath10k_pci_wait_for_target_init(struct ath10k *ar); |
57 | static int ath10k_pci_start_intr(struct ath10k *ar); | 69 | static int ath10k_pci_init_irq(struct ath10k *ar); |
58 | static void ath10k_pci_stop_intr(struct ath10k *ar); | 70 | static int ath10k_pci_deinit_irq(struct ath10k *ar); |
71 | static int ath10k_pci_request_irq(struct ath10k *ar); | ||
72 | static void ath10k_pci_free_irq(struct ath10k *ar); | ||
73 | static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, | ||
74 | struct ath10k_ce_pipe *rx_pipe, | ||
75 | struct bmi_xfer *xfer); | ||
76 | static void ath10k_pci_cleanup_ce(struct ath10k *ar); | ||
59 | 77 | ||
60 | static const struct ce_attr host_ce_config_wlan[] = { | 78 | static const struct ce_attr host_ce_config_wlan[] = { |
61 | /* CE0: host->target HTC control and raw streams */ | 79 | /* CE0: host->target HTC control and raw streams */ |
@@ -200,6 +218,87 @@ static const struct ce_pipe_config target_ce_config_wlan[] = { | |||
200 | /* CE7 used only by Host */ | 218 | /* CE7 used only by Host */ |
201 | }; | 219 | }; |
202 | 220 | ||
221 | static bool ath10k_pci_irq_pending(struct ath10k *ar) | ||
222 | { | ||
223 | u32 cause; | ||
224 | |||
225 | /* Check if the shared legacy irq is for us */ | ||
226 | cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + | ||
227 | PCIE_INTR_CAUSE_ADDRESS); | ||
228 | if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) | ||
229 | return true; | ||
230 | |||
231 | return false; | ||
232 | } | ||
233 | |||
234 | static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) | ||
235 | { | ||
236 | /* IMPORTANT: INTR_CLR register has to be set after | ||
237 | * INTR_ENABLE is set to 0, otherwise the interrupt cannot | ||
238 | * be cleared properly. */ | ||
239 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, | ||
240 | 0); | ||
241 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, | ||
242 | PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); | ||
243 | |||
244 | /* IMPORTANT: this extra read transaction is required to | ||
245 | * flush the posted write buffer. */ | ||
246 | (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + | ||
247 | PCIE_INTR_ENABLE_ADDRESS); | ||
248 | } | ||
249 | |||
250 | static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) | ||
251 | { | ||
252 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + | ||
253 | PCIE_INTR_ENABLE_ADDRESS, | ||
254 | PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); | ||
255 | |||
256 | /* IMPORTANT: this extra read transaction is required to | ||
257 | * flush the posted write buffer. */ | ||
258 | (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + | ||
259 | PCIE_INTR_ENABLE_ADDRESS); | ||
260 | } | ||
261 | |||
262 | static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg) | ||
263 | { | ||
264 | struct ath10k *ar = arg; | ||
265 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
266 | |||
267 | if (ar_pci->num_msi_intrs == 0) { | ||
268 | if (!ath10k_pci_irq_pending(ar)) | ||
269 | return IRQ_NONE; | ||
270 | |||
271 | ath10k_pci_disable_and_clear_legacy_irq(ar); | ||
272 | } | ||
273 | |||
274 | tasklet_schedule(&ar_pci->early_irq_tasklet); | ||
275 | |||
276 | return IRQ_HANDLED; | ||
277 | } | ||
278 | |||
279 | static int ath10k_pci_request_early_irq(struct ath10k *ar) | ||
280 | { | ||
281 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
282 | int ret; | ||
283 | |||
284 | /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the | ||
285 | * first interrupt from the irq vector is triggered in all cases for FW | ||
286 | * indication/errors */ | ||
287 | ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler, | ||
288 | IRQF_SHARED, "ath10k_pci (early)", ar); | ||
289 | if (ret) { | ||
290 | ath10k_warn("failed to request early irq: %d\n", ret); | ||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | static void ath10k_pci_free_early_irq(struct ath10k *ar) | ||
298 | { | ||
299 | free_irq(ath10k_pci_priv(ar)->pdev->irq, ar); | ||
300 | } | ||
301 | |||
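The comments in the helpers above encode an ordering requirement: INTR_ENABLE must be written to 0 before INTR_CLR, and a read-back is needed to flush PCIe posted writes. The sketch below is a userspace model of that write/write/read-back sequence using a fake register file; the register names and mask are placeholders, and no real MMIO is performed.

	#include <stdint.h>
	#include <stdio.h>

	/* Fake "register file" standing in for the SoC core register window. */
	static volatile uint32_t regs[2];
	#define REG_INTR_ENABLE 0
	#define REG_INTR_CLR    1
	#define INTR_MASK_ALL   0x000fffffu	/* placeholder mask */

	static void reg_write(unsigned int r, uint32_t val) { regs[r] = val; }
	static uint32_t reg_read(unsigned int r) { return regs[r]; }

	/* Disable-and-clear sequence: enable=0 first, then clear, then read
	 * back so the (posted) writes are known to have reached the device
	 * before the caller schedules any deferred work. */
	static void disable_and_clear_irq(void)
	{
		reg_write(REG_INTR_ENABLE, 0);
		reg_write(REG_INTR_CLR, INTR_MASK_ALL);
		(void)reg_read(REG_INTR_ENABLE);	/* flush posted writes */
	}

	int main(void)
	{
		reg_write(REG_INTR_ENABLE, INTR_MASK_ALL);
		disable_and_clear_irq();
		printf("enable=0x%08x\n", reg_read(REG_INTR_ENABLE));
		return 0;
	}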
203 | /* | 302 | /* |
204 | * Diagnostic read/write access is provided for startup/config/debug usage. | 303 | * Diagnostic read/write access is provided for startup/config/debug usage. |
205 | * Caller must guarantee proper alignment, when applicable, and single user | 304 | * Caller must guarantee proper alignment, when applicable, and single user |
@@ -526,17 +625,6 @@ static bool ath10k_pci_target_is_awake(struct ath10k *ar) | |||
526 | return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); | 625 | return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); |
527 | } | 626 | } |
528 | 627 | ||
529 | static void ath10k_pci_wait(struct ath10k *ar) | ||
530 | { | ||
531 | int n = 100; | ||
532 | |||
533 | while (n-- && !ath10k_pci_target_is_awake(ar)) | ||
534 | msleep(10); | ||
535 | |||
536 | if (n < 0) | ||
537 | ath10k_warn("Unable to wakeup target\n"); | ||
538 | } | ||
539 | |||
540 | int ath10k_do_pci_wake(struct ath10k *ar) | 628 | int ath10k_do_pci_wake(struct ath10k *ar) |
541 | { | 629 | { |
542 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 630 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
@@ -723,7 +811,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, | |||
723 | ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id, | 811 | ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id, |
724 | flags); | 812 | flags); |
725 | if (ret) | 813 | if (ret) |
726 | ath10k_warn("CE send failed: %p\n", nbuf); | 814 | ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf); |
727 | 815 | ||
728 | return ret; | 816 | return ret; |
729 | } | 817 | } |
@@ -750,9 +838,10 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) | |||
750 | ar->fw_version_build); | 838 | ar->fw_version_build); |
751 | 839 | ||
752 | host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); | 840 | host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); |
753 | if (ath10k_pci_diag_read_mem(ar, host_addr, | 841 | ret = ath10k_pci_diag_read_mem(ar, host_addr, |
754 | ®_dump_area, sizeof(u32)) != 0) { | 842 | ®_dump_area, sizeof(u32)); |
755 | ath10k_warn("could not read hi_failure_state\n"); | 843 | if (ret) { |
844 | ath10k_err("failed to read FW dump area address: %d\n", ret); | ||
756 | return; | 845 | return; |
757 | } | 846 | } |
758 | 847 | ||
@@ -762,7 +851,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) | |||
762 | ®_dump_values[0], | 851 | ®_dump_values[0], |
763 | REG_DUMP_COUNT_QCA988X * sizeof(u32)); | 852 | REG_DUMP_COUNT_QCA988X * sizeof(u32)); |
764 | if (ret != 0) { | 853 | if (ret != 0) { |
765 | ath10k_err("could not dump FW Dump Area\n"); | 854 | ath10k_err("failed to read FW dump area: %d\n", ret); |
766 | return; | 855 | return; |
767 | } | 856 | } |
768 | 857 | ||
@@ -777,7 +866,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) | |||
777 | reg_dump_values[i + 2], | 866 | reg_dump_values[i + 2], |
778 | reg_dump_values[i + 3]); | 867 | reg_dump_values[i + 3]); |
779 | 868 | ||
780 | ieee80211_queue_work(ar->hw, &ar->restart_work); | 869 | queue_work(ar->workqueue, &ar->restart_work); |
781 | } | 870 | } |
782 | 871 | ||
783 | static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, | 872 | static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, |
@@ -815,53 +904,41 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, | |||
815 | sizeof(ar_pci->msg_callbacks_current)); | 904 | sizeof(ar_pci->msg_callbacks_current)); |
816 | } | 905 | } |
817 | 906 | ||
818 | static int ath10k_pci_start_ce(struct ath10k *ar) | 907 | static int ath10k_pci_alloc_compl(struct ath10k *ar) |
819 | { | 908 | { |
820 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 909 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
821 | struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag; | ||
822 | const struct ce_attr *attr; | 910 | const struct ce_attr *attr; |
823 | struct ath10k_pci_pipe *pipe_info; | 911 | struct ath10k_pci_pipe *pipe_info; |
824 | struct ath10k_pci_compl *compl; | 912 | struct ath10k_pci_compl *compl; |
825 | int i, pipe_num, completions, disable_interrupts; | 913 | int i, pipe_num, completions; |
826 | 914 | ||
827 | spin_lock_init(&ar_pci->compl_lock); | 915 | spin_lock_init(&ar_pci->compl_lock); |
828 | INIT_LIST_HEAD(&ar_pci->compl_process); | 916 | INIT_LIST_HEAD(&ar_pci->compl_process); |
829 | 917 | ||
830 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | 918 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
831 | pipe_info = &ar_pci->pipe_info[pipe_num]; | 919 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
832 | 920 | ||
833 | spin_lock_init(&pipe_info->pipe_lock); | 921 | spin_lock_init(&pipe_info->pipe_lock); |
834 | INIT_LIST_HEAD(&pipe_info->compl_free); | 922 | INIT_LIST_HEAD(&pipe_info->compl_free); |
835 | 923 | ||
836 | /* Handle Diagnostic CE specially */ | 924 | /* Handle Diagnostic CE specially */ |
837 | if (pipe_info->ce_hdl == ce_diag) | 925 | if (pipe_info->ce_hdl == ar_pci->ce_diag) |
838 | continue; | 926 | continue; |
839 | 927 | ||
840 | attr = &host_ce_config_wlan[pipe_num]; | 928 | attr = &host_ce_config_wlan[pipe_num]; |
841 | completions = 0; | 929 | completions = 0; |
842 | 930 | ||
843 | if (attr->src_nentries) { | 931 | if (attr->src_nentries) |
844 | disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; | ||
845 | ath10k_ce_send_cb_register(pipe_info->ce_hdl, | ||
846 | ath10k_pci_ce_send_done, | ||
847 | disable_interrupts); | ||
848 | completions += attr->src_nentries; | 932 | completions += attr->src_nentries; |
849 | } | ||
850 | 933 | ||
851 | if (attr->dest_nentries) { | 934 | if (attr->dest_nentries) |
852 | ath10k_ce_recv_cb_register(pipe_info->ce_hdl, | ||
853 | ath10k_pci_ce_recv_data); | ||
854 | completions += attr->dest_nentries; | 935 | completions += attr->dest_nentries; |
855 | } | ||
856 | |||
857 | if (completions == 0) | ||
858 | continue; | ||
859 | 936 | ||
860 | for (i = 0; i < completions; i++) { | 937 | for (i = 0; i < completions; i++) { |
861 | compl = kmalloc(sizeof(*compl), GFP_KERNEL); | 938 | compl = kmalloc(sizeof(*compl), GFP_KERNEL); |
862 | if (!compl) { | 939 | if (!compl) { |
863 | ath10k_warn("No memory for completion state\n"); | 940 | ath10k_warn("No memory for completion state\n"); |
864 | ath10k_pci_stop_ce(ar); | 941 | ath10k_pci_cleanup_ce(ar); |
865 | return -ENOMEM; | 942 | return -ENOMEM; |
866 | } | 943 | } |
867 | 944 | ||
@@ -873,20 +950,55 @@ static int ath10k_pci_start_ce(struct ath10k *ar) | |||
873 | return 0; | 950 | return 0; |
874 | } | 951 | } |
875 | 952 | ||
876 | static void ath10k_pci_stop_ce(struct ath10k *ar) | 953 | static int ath10k_pci_setup_ce_irq(struct ath10k *ar) |
877 | { | 954 | { |
878 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 955 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
879 | struct ath10k_pci_compl *compl; | 956 | const struct ce_attr *attr; |
880 | struct sk_buff *skb; | 957 | struct ath10k_pci_pipe *pipe_info; |
881 | int i; | 958 | int pipe_num, disable_interrupts; |
882 | 959 | ||
883 | ath10k_ce_disable_interrupts(ar); | 960 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
961 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
962 | |||
963 | /* Handle Diagnostic CE specially */ | ||
964 | if (pipe_info->ce_hdl == ar_pci->ce_diag) | ||
965 | continue; | ||
966 | |||
967 | attr = &host_ce_config_wlan[pipe_num]; | ||
968 | |||
969 | if (attr->src_nentries) { | ||
970 | disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; | ||
971 | ath10k_ce_send_cb_register(pipe_info->ce_hdl, | ||
972 | ath10k_pci_ce_send_done, | ||
973 | disable_interrupts); | ||
974 | } | ||
975 | |||
976 | if (attr->dest_nentries) | ||
977 | ath10k_ce_recv_cb_register(pipe_info->ce_hdl, | ||
978 | ath10k_pci_ce_recv_data); | ||
979 | } | ||
980 | |||
981 | return 0; | ||
982 | } | ||
983 | |||
984 | static void ath10k_pci_kill_tasklet(struct ath10k *ar) | ||
985 | { | ||
986 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
987 | int i; | ||
884 | 988 | ||
885 | /* Cancel the pending tasklet */ | ||
886 | tasklet_kill(&ar_pci->intr_tq); | 989 | tasklet_kill(&ar_pci->intr_tq); |
990 | tasklet_kill(&ar_pci->msi_fw_err); | ||
991 | tasklet_kill(&ar_pci->early_irq_tasklet); | ||
887 | 992 | ||
888 | for (i = 0; i < CE_COUNT; i++) | 993 | for (i = 0; i < CE_COUNT; i++) |
889 | tasklet_kill(&ar_pci->pipe_info[i].intr); | 994 | tasklet_kill(&ar_pci->pipe_info[i].intr); |
995 | } | ||
996 | |||
997 | static void ath10k_pci_stop_ce(struct ath10k *ar) | ||
998 | { | ||
999 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1000 | struct ath10k_pci_compl *compl; | ||
1001 | struct sk_buff *skb; | ||
890 | 1002 | ||
891 | /* Mark pending completions as aborted, so that upper layers free up | 1003 | /* Mark pending completions as aborted, so that upper layers free up |
892 | * their associated resources */ | 1004 | * their associated resources */ |
@@ -920,7 +1032,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar) | |||
920 | spin_unlock_bh(&ar_pci->compl_lock); | 1032 | spin_unlock_bh(&ar_pci->compl_lock); |
921 | 1033 | ||
922 | /* Free unused completions for each pipe. */ | 1034 | /* Free unused completions for each pipe. */ |
923 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | 1035 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
924 | pipe_info = &ar_pci->pipe_info[pipe_num]; | 1036 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
925 | 1037 | ||
926 | spin_lock_bh(&pipe_info->pipe_lock); | 1038 | spin_lock_bh(&pipe_info->pipe_lock); |
@@ -974,8 +1086,8 @@ static void ath10k_pci_process_ce(struct ath10k *ar) | |||
974 | case ATH10K_PCI_COMPL_RECV: | 1086 | case ATH10K_PCI_COMPL_RECV: |
975 | ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); | 1087 | ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); |
976 | if (ret) { | 1088 | if (ret) { |
977 | ath10k_warn("Unable to post recv buffer for pipe: %d\n", | 1089 | ath10k_warn("failed to post RX buffer for pipe %d: %d\n", |
978 | compl->pipe_info->pipe_num); | 1090 | compl->pipe_info->pipe_num, ret); |
979 | break; | 1091 | break; |
980 | } | 1092 | } |
981 | 1093 | ||
@@ -1114,7 +1226,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, | |||
1114 | for (i = 0; i < num; i++) { | 1226 | for (i = 0; i < num; i++) { |
1115 | skb = dev_alloc_skb(pipe_info->buf_sz); | 1227 | skb = dev_alloc_skb(pipe_info->buf_sz); |
1116 | if (!skb) { | 1228 | if (!skb) { |
1117 | ath10k_warn("could not allocate skbuff for pipe %d\n", | 1229 | ath10k_warn("failed to allocate skbuff for pipe %d\n", |
1118 | num); | 1230 | num); |
1119 | ret = -ENOMEM; | 1231 | ret = -ENOMEM; |
1120 | goto err; | 1232 | goto err; |
@@ -1127,7 +1239,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, | |||
1127 | DMA_FROM_DEVICE); | 1239 | DMA_FROM_DEVICE); |
1128 | 1240 | ||
1129 | if (unlikely(dma_mapping_error(ar->dev, ce_data))) { | 1241 | if (unlikely(dma_mapping_error(ar->dev, ce_data))) { |
1130 | ath10k_warn("could not dma map skbuff\n"); | 1242 | ath10k_warn("failed to DMA map sk_buff\n"); |
1131 | dev_kfree_skb_any(skb); | 1243 | dev_kfree_skb_any(skb); |
1132 | ret = -EIO; | 1244 | ret = -EIO; |
1133 | goto err; | 1245 | goto err; |
@@ -1142,7 +1254,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, | |||
1142 | ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, | 1254 | ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, |
1143 | ce_data); | 1255 | ce_data); |
1144 | if (ret) { | 1256 | if (ret) { |
1145 | ath10k_warn("could not enqueue to pipe %d (%d)\n", | 1257 | ath10k_warn("failed to enqueue to pipe %d: %d\n", |
1146 | num, ret); | 1258 | num, ret); |
1147 | goto err; | 1259 | goto err; |
1148 | } | 1260 | } |
@@ -1162,7 +1274,7 @@ static int ath10k_pci_post_rx(struct ath10k *ar) | |||
1162 | const struct ce_attr *attr; | 1274 | const struct ce_attr *attr; |
1163 | int pipe_num, ret = 0; | 1275 | int pipe_num, ret = 0; |
1164 | 1276 | ||
1165 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | 1277 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
1166 | pipe_info = &ar_pci->pipe_info[pipe_num]; | 1278 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
1167 | attr = &host_ce_config_wlan[pipe_num]; | 1279 | attr = &host_ce_config_wlan[pipe_num]; |
1168 | 1280 | ||
@@ -1172,8 +1284,8 @@ static int ath10k_pci_post_rx(struct ath10k *ar) | |||
1172 | ret = ath10k_pci_post_rx_pipe(pipe_info, | 1284 | ret = ath10k_pci_post_rx_pipe(pipe_info, |
1173 | attr->dest_nentries - 1); | 1285 | attr->dest_nentries - 1); |
1174 | if (ret) { | 1286 | if (ret) { |
1175 | ath10k_warn("Unable to replenish recv buffers for pipe: %d\n", | 1287 | ath10k_warn("failed to post RX buffer for pipe %d: %d\n", |
1176 | pipe_num); | 1288 | pipe_num, ret); |
1177 | 1289 | ||
1178 | for (; pipe_num >= 0; pipe_num--) { | 1290 | for (; pipe_num >= 0; pipe_num--) { |
1179 | pipe_info = &ar_pci->pipe_info[pipe_num]; | 1291 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
@@ -1189,23 +1301,58 @@ static int ath10k_pci_post_rx(struct ath10k *ar) | |||
1189 | static int ath10k_pci_hif_start(struct ath10k *ar) | 1301 | static int ath10k_pci_hif_start(struct ath10k *ar) |
1190 | { | 1302 | { |
1191 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 1303 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1192 | int ret; | 1304 | int ret, ret_early; |
1193 | 1305 | ||
1194 | ret = ath10k_pci_start_ce(ar); | 1306 | ath10k_pci_free_early_irq(ar); |
1307 | ath10k_pci_kill_tasklet(ar); | ||
1308 | |||
1309 | ret = ath10k_pci_alloc_compl(ar); | ||
1195 | if (ret) { | 1310 | if (ret) { |
1196 | ath10k_warn("could not start CE (%d)\n", ret); | 1311 | ath10k_warn("failed to allocate CE completions: %d\n", ret); |
1197 | return ret; | 1312 | goto err_early_irq; |
1313 | } | ||
1314 | |||
1315 | ret = ath10k_pci_request_irq(ar); | ||
1316 | if (ret) { | ||
1317 | ath10k_warn("failed to post RX buffers for all pipes: %d\n", | ||
1318 | ret); | ||
1319 | goto err_free_compl; | ||
1320 | } | ||
1321 | |||
1322 | ret = ath10k_pci_setup_ce_irq(ar); | ||
1323 | if (ret) { | ||
1324 | ath10k_warn("failed to setup CE interrupts: %d\n", ret); | ||
1325 | goto err_stop; | ||
1198 | } | 1326 | } |
1199 | 1327 | ||
1200 | /* Post buffers once to start things off. */ | 1328 | /* Post buffers once to start things off. */ |
1201 | ret = ath10k_pci_post_rx(ar); | 1329 | ret = ath10k_pci_post_rx(ar); |
1202 | if (ret) { | 1330 | if (ret) { |
1203 | ath10k_warn("could not post rx pipes (%d)\n", ret); | 1331 | ath10k_warn("failed to post RX buffers for all pipes: %d\n", |
1204 | return ret; | 1332 | ret); |
1333 | goto err_stop; | ||
1205 | } | 1334 | } |
1206 | 1335 | ||
1207 | ar_pci->started = 1; | 1336 | ar_pci->started = 1; |
1208 | return 0; | 1337 | return 0; |
1338 | |||
1339 | err_stop: | ||
1340 | ath10k_ce_disable_interrupts(ar); | ||
1341 | ath10k_pci_free_irq(ar); | ||
1342 | ath10k_pci_kill_tasklet(ar); | ||
1343 | ath10k_pci_stop_ce(ar); | ||
1344 | ath10k_pci_process_ce(ar); | ||
1345 | err_free_compl: | ||
1346 | ath10k_pci_cleanup_ce(ar); | ||
1347 | err_early_irq: | ||
1348 | /* Though there should be no interrupts (device was reset), | ||
1349 | * power_down() expects the early IRQ to be installed as per the | ||
1350 | * driver lifecycle. */ | ||
1351 | ret_early = ath10k_pci_request_early_irq(ar); | ||
1352 | if (ret_early) | ||
1353 | ath10k_warn("failed to re-enable early irq: %d\n", ret_early); | ||
1354 | |||
1355 | return ret; | ||
1209 | } | 1356 | } |
1210 | 1357 | ||
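hif_start() above now unwinds in reverse order on failure through a chain of goto labels. A minimal standalone sketch of that unwind idiom follows; the step/undo functions are stubs invented for illustration and do not correspond to real ath10k helpers.

	#include <stdio.h>

	static int step_a(void) { puts("a: ok");   return 0; }
	static int step_b(void) { puts("b: fail"); return -1; }
	static void undo_a(void) { puts("undo a"); }

	/* Acquire resources in order; on failure, unwind only what succeeded,
	 * mirroring the err_stop/err_free_compl/err_early_irq labels above. */
	static int start(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			goto err;

		ret = step_b();
		if (ret)
			goto err_undo_a;

		return 0;

	err_undo_a:
		undo_a();
	err:
		return ret;
	}

	int main(void)
	{
		printf("start() = %d\n", start());
		return 0;
	}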
1211 | static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) | 1358 | static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) |
@@ -1271,6 +1418,13 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) | |||
1271 | * Indicate the completion to higher layer to free | 1418 | * Indicate the completion to higher layer to free
1272 | * the buffer | 1419 | * the buffer |
1273 | */ | 1420 | */ |
1421 | |||
1422 | if (!netbuf) { | ||
1423 | ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n", | ||
1424 | ce_hdl->id); | ||
1425 | continue; | ||
1426 | } | ||
1427 | |||
1274 | ATH10K_SKB_CB(netbuf)->is_aborted = true; | 1428 | ATH10K_SKB_CB(netbuf)->is_aborted = true; |
1275 | ar_pci->msg_callbacks_current.tx_completion(ar, | 1429 | ar_pci->msg_callbacks_current.tx_completion(ar, |
1276 | netbuf, | 1430 | netbuf, |
@@ -1291,7 +1445,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) | |||
1291 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 1445 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1292 | int pipe_num; | 1446 | int pipe_num; |
1293 | 1447 | ||
1294 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | 1448 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
1295 | struct ath10k_pci_pipe *pipe_info; | 1449 | struct ath10k_pci_pipe *pipe_info; |
1296 | 1450 | ||
1297 | pipe_info = &ar_pci->pipe_info[pipe_num]; | 1451 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
@@ -1306,7 +1460,7 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar) | |||
1306 | struct ath10k_pci_pipe *pipe_info; | 1460 | struct ath10k_pci_pipe *pipe_info; |
1307 | int pipe_num; | 1461 | int pipe_num; |
1308 | 1462 | ||
1309 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | 1463 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
1310 | pipe_info = &ar_pci->pipe_info[pipe_num]; | 1464 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
1311 | if (pipe_info->ce_hdl) { | 1465 | if (pipe_info->ce_hdl) { |
1312 | ath10k_ce_deinit(pipe_info->ce_hdl); | 1466 | ath10k_ce_deinit(pipe_info->ce_hdl); |
@@ -1316,27 +1470,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar) | |||
1316 | } | 1470 | } |
1317 | } | 1471 | } |
1318 | 1472 | ||
1319 | static void ath10k_pci_disable_irqs(struct ath10k *ar) | ||
1320 | { | ||
1321 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1322 | int i; | ||
1323 | |||
1324 | for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) | ||
1325 | disable_irq(ar_pci->pdev->irq + i); | ||
1326 | } | ||
1327 | |||
1328 | static void ath10k_pci_hif_stop(struct ath10k *ar) | 1473 | static void ath10k_pci_hif_stop(struct ath10k *ar) |
1329 | { | 1474 | { |
1330 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 1475 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1476 | int ret; | ||
1331 | 1477 | ||
1332 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | 1478 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); |
1333 | 1479 | ||
1334 | /* Irqs are never explicitly re-enabled. They are implicitly re-enabled | 1480 | ret = ath10k_ce_disable_interrupts(ar); |
1335 | * by ath10k_pci_start_intr(). */ | 1481 | if (ret) |
1336 | ath10k_pci_disable_irqs(ar); | 1482 | ath10k_warn("failed to disable CE interrupts: %d\n", ret); |
1337 | 1483 | ||
1484 | ath10k_pci_free_irq(ar); | ||
1485 | ath10k_pci_kill_tasklet(ar); | ||
1338 | ath10k_pci_stop_ce(ar); | 1486 | ath10k_pci_stop_ce(ar); |
1339 | 1487 | ||
1488 | ret = ath10k_pci_request_early_irq(ar); | ||
1489 | if (ret) | ||
1490 | ath10k_warn("failed to re-enable early irq: %d\n", ret); | ||
1491 | |||
1340 | /* At this point, asynchronous threads are stopped, the target should | 1492 | /* At this point, asynchronous threads are stopped, the target should |
1341 | * not DMA nor interrupt. We process the leftovers and then free | 1493 | * not DMA nor interrupt. We process the leftovers and then free |
1342 | * everything else up. */ | 1494 | * everything else up. */ |
@@ -1345,6 +1497,13 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) | |||
1345 | ath10k_pci_cleanup_ce(ar); | 1497 | ath10k_pci_cleanup_ce(ar); |
1346 | ath10k_pci_buffer_cleanup(ar); | 1498 | ath10k_pci_buffer_cleanup(ar); |
1347 | 1499 | ||
1500 | /* Make sure the device won't access any structures on the host by | ||
1501 | * resetting it. The device was fed with PCI CE ringbuffer | ||
1502 | * configuration during init. If ringbuffers are freed and the device | ||
1503 | * were to access them this could lead to memory corruption on the | ||
1504 | * host. */ | ||
1505 | ath10k_pci_device_reset(ar); | ||
1506 | |||
1348 | ar_pci->started = 0; | 1507 | ar_pci->started = 0; |
1349 | } | 1508 | } |
1350 | 1509 | ||
@@ -1363,6 +1522,8 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, | |||
1363 | void *treq, *tresp = NULL; | 1522 | void *treq, *tresp = NULL; |
1364 | int ret = 0; | 1523 | int ret = 0; |
1365 | 1524 | ||
1525 | might_sleep(); | ||
1526 | |||
1366 | if (resp && !resp_len) | 1527 | if (resp && !resp_len) |
1367 | return -EINVAL; | 1528 | return -EINVAL; |
1368 | 1529 | ||
@@ -1403,14 +1564,12 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, | |||
1403 | if (ret) | 1564 | if (ret) |
1404 | goto err_resp; | 1565 | goto err_resp; |
1405 | 1566 | ||
1406 | ret = wait_for_completion_timeout(&xfer.done, | 1567 | ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer); |
1407 | BMI_COMMUNICATION_TIMEOUT_HZ); | 1568 | if (ret) { |
1408 | if (ret <= 0) { | ||
1409 | u32 unused_buffer; | 1569 | u32 unused_buffer; |
1410 | unsigned int unused_nbytes; | 1570 | unsigned int unused_nbytes; |
1411 | unsigned int unused_id; | 1571 | unsigned int unused_id; |
1412 | 1572 | ||
1413 | ret = -ETIMEDOUT; | ||
1414 | ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, | 1573 | ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, |
1415 | &unused_nbytes, &unused_id); | 1574 | &unused_nbytes, &unused_id); |
1416 | } else { | 1575 | } else { |
@@ -1478,6 +1637,25 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) | |||
1478 | complete(&xfer->done); | 1637 | complete(&xfer->done); |
1479 | } | 1638 | } |
1480 | 1639 | ||
1640 | static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, | ||
1641 | struct ath10k_ce_pipe *rx_pipe, | ||
1642 | struct bmi_xfer *xfer) | ||
1643 | { | ||
1644 | unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; | ||
1645 | |||
1646 | while (time_before_eq(jiffies, timeout)) { | ||
1647 | ath10k_pci_bmi_send_done(tx_pipe); | ||
1648 | ath10k_pci_bmi_recv_data(rx_pipe); | ||
1649 | |||
1650 | if (completion_done(&xfer->done)) | ||
1651 | return 0; | ||
1652 | |||
1653 | schedule(); | ||
1654 | } | ||
1655 | |||
1656 | return -ETIMEDOUT; | ||
1657 | } | ||
1658 | |||
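ath10k_pci_bmi_wait() replaces a sleeping wait_for_completion_timeout() with an explicit polling loop bounded by jiffies, servicing both BMI pipes each iteration. The userspace sketch below models the same shape with CLOCK_MONOTONIC; the service/completion helpers are made up and only stand in for the pipe-processing calls.

	#define _POSIX_C_SOURCE 199309L
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	static bool work_done;			/* stands in for completion_done() */

	static void service_tx(void) { /* poll TX pipe; stub */ }
	static void service_rx(void) { work_done = true; /* pretend RX completed */ }

	/* Poll until work is done or timeout_ms elapses; 0 on success, -1 on timeout. */
	static int poll_wait(long timeout_ms)
	{
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			service_tx();
			service_rx();
			if (work_done)
				return 0;

			clock_gettime(CLOCK_MONOTONIC, &now);
			if ((now.tv_sec - start.tv_sec) * 1000 +
			    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
				return -1;	/* -ETIMEDOUT equivalent */
		}
	}

	int main(void)
	{
		printf("poll_wait() = %d\n", poll_wait(1000));
		return 0;
	}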
1481 | /* | 1659 | /* |
1482 | * Map from service/endpoint to Copy Engine. | 1660 | * Map from service/endpoint to Copy Engine. |
1483 | * This table is derived from the CE_PCI TABLE, above. | 1661 | * This table is derived from the CE_PCI TABLE, above. |
@@ -1587,7 +1765,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) | |||
1587 | CORE_CTRL_ADDRESS, | 1765 | CORE_CTRL_ADDRESS, |
1588 | &core_ctrl); | 1766 | &core_ctrl); |
1589 | if (ret) { | 1767 | if (ret) { |
1590 | ath10k_warn("Unable to read core ctrl\n"); | 1768 | ath10k_warn("failed to read core_ctrl: %d\n", ret); |
1591 | return ret; | 1769 | return ret; |
1592 | } | 1770 | } |
1593 | 1771 | ||
@@ -1597,10 +1775,13 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) | |||
1597 | ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS | | 1775 | ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS | |
1598 | CORE_CTRL_ADDRESS, | 1776 | CORE_CTRL_ADDRESS, |
1599 | core_ctrl); | 1777 | core_ctrl); |
1600 | if (ret) | 1778 | if (ret) { |
1601 | ath10k_warn("Unable to set interrupt mask\n"); | 1779 | ath10k_warn("failed to set target CPU interrupt mask: %d\n", |
1780 | ret); | ||
1781 | return ret; | ||
1782 | } | ||
1602 | 1783 | ||
1603 | return ret; | 1784 | return 0; |
1604 | } | 1785 | } |
1605 | 1786 | ||
1606 | static int ath10k_pci_init_config(struct ath10k *ar) | 1787 | static int ath10k_pci_init_config(struct ath10k *ar) |
@@ -1751,7 +1932,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar) | |||
1751 | const struct ce_attr *attr; | 1932 | const struct ce_attr *attr; |
1752 | int pipe_num; | 1933 | int pipe_num; |
1753 | 1934 | ||
1754 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | 1935 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
1755 | pipe_info = &ar_pci->pipe_info[pipe_num]; | 1936 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
1756 | pipe_info->pipe_num = pipe_num; | 1937 | pipe_info->pipe_num = pipe_num; |
1757 | pipe_info->hif_ce_state = ar; | 1938 | pipe_info->hif_ce_state = ar; |
@@ -1759,7 +1940,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar) | |||
1759 | 1940 | ||
1760 | pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); | 1941 | pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); |
1761 | if (pipe_info->ce_hdl == NULL) { | 1942 | if (pipe_info->ce_hdl == NULL) { |
1762 | ath10k_err("Unable to initialize CE for pipe: %d\n", | 1943 | ath10k_err("failed to initialize CE for pipe: %d\n", |
1763 | pipe_num); | 1944 | pipe_num); |
1764 | 1945 | ||
1765 | /* It is safe to call it here. It checks if ce_hdl is | 1946 | /* It is safe to call it here. It checks if ce_hdl is |
@@ -1768,31 +1949,18 @@ static int ath10k_pci_ce_init(struct ath10k *ar) | |||
1768 | return -1; | 1949 | return -1; |
1769 | } | 1950 | } |
1770 | 1951 | ||
1771 | if (pipe_num == ar_pci->ce_count - 1) { | 1952 | if (pipe_num == CE_COUNT - 1) { |
1772 | /* | 1953 | /* |
1773 | * Reserve the ultimate CE for | 1954 | * Reserve the ultimate CE for |
1774 | * diagnostic Window support | 1955 | * diagnostic Window support |
1775 | */ | 1956 | */ |
1776 | ar_pci->ce_diag = | 1957 | ar_pci->ce_diag = pipe_info->ce_hdl; |
1777 | ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl; | ||
1778 | continue; | 1958 | continue; |
1779 | } | 1959 | } |
1780 | 1960 | ||
1781 | pipe_info->buf_sz = (size_t) (attr->src_sz_max); | 1961 | pipe_info->buf_sz = (size_t) (attr->src_sz_max); |
1782 | } | 1962 | } |
1783 | 1963 | ||
1784 | /* | ||
1785 | * Initially, establish CE completion handlers for use with BMI. | ||
1786 | * These are overwritten with generic handlers after we exit BMI phase. | ||
1787 | */ | ||
1788 | pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; | ||
1789 | ath10k_ce_send_cb_register(pipe_info->ce_hdl, | ||
1790 | ath10k_pci_bmi_send_done, 0); | ||
1791 | |||
1792 | pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; | ||
1793 | ath10k_ce_recv_cb_register(pipe_info->ce_hdl, | ||
1794 | ath10k_pci_bmi_recv_data); | ||
1795 | |||
1796 | return 0; | 1964 | return 0; |
1797 | } | 1965 | } |
1798 | 1966 | ||
@@ -1828,14 +1996,9 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) | |||
1828 | static int ath10k_pci_hif_power_up(struct ath10k *ar) | 1996 | static int ath10k_pci_hif_power_up(struct ath10k *ar) |
1829 | { | 1997 | { |
1830 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 1998 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1999 | const char *irq_mode; | ||
1831 | int ret; | 2000 | int ret; |
1832 | 2001 | ||
1833 | ret = ath10k_pci_start_intr(ar); | ||
1834 | if (ret) { | ||
1835 | ath10k_err("could not start interrupt handling (%d)\n", ret); | ||
1836 | goto err; | ||
1837 | } | ||
1838 | |||
1839 | /* | 2002 | /* |
1840 | * Bring the target up cleanly. | 2003 | * Bring the target up cleanly. |
1841 | * | 2004 | * |
@@ -1846,39 +2009,80 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) | |||
1846 | * is in an unexpected state. We try to catch that here in order to | 2009 | * is in an unexpected state. We try to catch that here in order to |
1847 | * reset the Target and retry the probe. | 2010 | * reset the Target and retry the probe. |
1848 | */ | 2011 | */ |
1849 | ath10k_pci_device_reset(ar); | 2012 | ret = ath10k_pci_device_reset(ar); |
1850 | 2013 | if (ret) { | |
1851 | ret = ath10k_pci_reset_target(ar); | 2014 | ath10k_err("failed to reset target: %d\n", ret); |
1852 | if (ret) | 2015 | goto err; |
1853 | goto err_irq; | 2016 | } |
1854 | 2017 | ||
1855 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) | 2018 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
1856 | /* Force AWAKE forever */ | 2019 | /* Force AWAKE forever */ |
1857 | ath10k_do_pci_wake(ar); | 2020 | ath10k_do_pci_wake(ar); |
1858 | 2021 | ||
1859 | ret = ath10k_pci_ce_init(ar); | 2022 | ret = ath10k_pci_ce_init(ar); |
1860 | if (ret) | 2023 | if (ret) { |
2024 | ath10k_err("failed to initialize CE: %d\n", ret); | ||
1861 | goto err_ps; | 2025 | goto err_ps; |
2026 | } | ||
1862 | 2027 | ||
1863 | ret = ath10k_pci_init_config(ar); | 2028 | ret = ath10k_ce_disable_interrupts(ar); |
1864 | if (ret) | 2029 | if (ret) { |
2030 | ath10k_err("failed to disable CE interrupts: %d\n", ret); | ||
1865 | goto err_ce; | 2031 | goto err_ce; |
2032 | } | ||
1866 | 2033 | ||
1867 | ret = ath10k_pci_wake_target_cpu(ar); | 2034 | ret = ath10k_pci_init_irq(ar); |
1868 | if (ret) { | 2035 | if (ret) { |
1869 | ath10k_err("could not wake up target CPU (%d)\n", ret); | 2036 | ath10k_err("failed to init irqs: %d\n", ret); |
1870 | goto err_ce; | 2037 | goto err_ce; |
1871 | } | 2038 | } |
1872 | 2039 | ||
2040 | ret = ath10k_pci_request_early_irq(ar); | ||
2041 | if (ret) { | ||
2042 | ath10k_err("failed to request early irq: %d\n", ret); | ||
2043 | goto err_deinit_irq; | ||
2044 | } | ||
2045 | |||
2046 | ret = ath10k_pci_wait_for_target_init(ar); | ||
2047 | if (ret) { | ||
2048 | ath10k_err("failed to wait for target to init: %d\n", ret); | ||
2049 | goto err_free_early_irq; | ||
2050 | } | ||
2051 | |||
2052 | ret = ath10k_pci_init_config(ar); | ||
2053 | if (ret) { | ||
2054 | ath10k_err("failed to setup init config: %d\n", ret); | ||
2055 | goto err_free_early_irq; | ||
2056 | } | ||
2057 | |||
2058 | ret = ath10k_pci_wake_target_cpu(ar); | ||
2059 | if (ret) { | ||
2060 | ath10k_err("could not wake up target CPU: %d\n", ret); | ||
2061 | goto err_free_early_irq; | ||
2062 | } | ||
2063 | |||
2064 | if (ar_pci->num_msi_intrs > 1) | ||
2065 | irq_mode = "MSI-X"; | ||
2066 | else if (ar_pci->num_msi_intrs == 1) | ||
2067 | irq_mode = "MSI"; | ||
2068 | else | ||
2069 | irq_mode = "legacy"; | ||
2070 | |||
2071 | if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) | ||
2072 | ath10k_info("pci irq %s\n", irq_mode); | ||
2073 | |||
1873 | return 0; | 2074 | return 0; |
1874 | 2075 | ||
2076 | err_free_early_irq: | ||
2077 | ath10k_pci_free_early_irq(ar); | ||
2078 | err_deinit_irq: | ||
2079 | ath10k_pci_deinit_irq(ar); | ||
1875 | err_ce: | 2080 | err_ce: |
1876 | ath10k_pci_ce_deinit(ar); | 2081 | ath10k_pci_ce_deinit(ar); |
2082 | ath10k_pci_device_reset(ar); | ||
1877 | err_ps: | 2083 | err_ps: |
1878 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) | 2084 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
1879 | ath10k_do_pci_sleep(ar); | 2085 | ath10k_do_pci_sleep(ar); |
1880 | err_irq: | ||
1881 | ath10k_pci_stop_intr(ar); | ||
1882 | err: | 2086 | err: |
1883 | return ret; | 2087 | return ret; |
1884 | } | 2088 | } |
@@ -1887,7 +2091,10 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar) | |||
1887 | { | 2091 | { |
1888 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2092 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
1889 | 2093 | ||
1890 | ath10k_pci_stop_intr(ar); | 2094 | ath10k_pci_free_early_irq(ar); |
2095 | ath10k_pci_kill_tasklet(ar); | ||
2096 | ath10k_pci_deinit_irq(ar); | ||
2097 | ath10k_pci_device_reset(ar); | ||
1891 | 2098 | ||
1892 | ath10k_pci_ce_deinit(ar); | 2099 | ath10k_pci_ce_deinit(ar); |
1893 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) | 2100 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
@@ -2023,25 +2230,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) | |||
2023 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2230 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2024 | 2231 | ||
2025 | if (ar_pci->num_msi_intrs == 0) { | 2232 | if (ar_pci->num_msi_intrs == 0) { |
2026 | /* | 2233 | if (!ath10k_pci_irq_pending(ar)) |
2027 | * IMPORTANT: INTR_CLR regiser has to be set after | 2234 | return IRQ_NONE; |
2028 | * INTR_ENABLE is set to 0, otherwise interrupt can not be | 2235 | |
2029 | * really cleared. | 2236 | ath10k_pci_disable_and_clear_legacy_irq(ar); |
2030 | */ | ||
2031 | iowrite32(0, ar_pci->mem + | ||
2032 | (SOC_CORE_BASE_ADDRESS | | ||
2033 | PCIE_INTR_ENABLE_ADDRESS)); | ||
2034 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | ||
2035 | PCIE_INTR_CE_MASK_ALL, | ||
2036 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | ||
2037 | PCIE_INTR_CLR_ADDRESS)); | ||
2038 | /* | ||
2039 | * IMPORTANT: this extra read transaction is required to | ||
2040 | * flush the posted write buffer. | ||
2041 | */ | ||
2042 | (void) ioread32(ar_pci->mem + | ||
2043 | (SOC_CORE_BASE_ADDRESS | | ||
2044 | PCIE_INTR_ENABLE_ADDRESS)); | ||
2045 | } | 2237 | } |
2046 | 2238 | ||
2047 | tasklet_schedule(&ar_pci->intr_tq); | 2239 | tasklet_schedule(&ar_pci->intr_tq); |
@@ -2049,6 +2241,34 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) | |||
2049 | return IRQ_HANDLED; | 2241 | return IRQ_HANDLED; |
2050 | } | 2242 | } |
2051 | 2243 | ||
2244 | static void ath10k_pci_early_irq_tasklet(unsigned long data) | ||
2245 | { | ||
2246 | struct ath10k *ar = (struct ath10k *)data; | ||
2247 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
2248 | u32 fw_ind; | ||
2249 | int ret; | ||
2250 | |||
2251 | ret = ath10k_pci_wake(ar); | ||
2252 | if (ret) { | ||
2253 | ath10k_warn("failed to wake target in early irq tasklet: %d\n", | ||
2254 | ret); | ||
2255 | return; | ||
2256 | } | ||
2257 | |||
2258 | fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address); | ||
2259 | if (fw_ind & FW_IND_EVENT_PENDING) { | ||
2260 | ath10k_pci_write32(ar, ar_pci->fw_indicator_address, | ||
2261 | fw_ind & ~FW_IND_EVENT_PENDING); | ||
2262 | |||
2263 | /* Some structures are unavailable during early boot or at | ||
2264 | * driver teardown so just print that the device has crashed. */ | ||
2265 | ath10k_warn("device crashed - no diagnostics available\n"); | ||
2266 | } | ||
2267 | |||
2268 | ath10k_pci_sleep(ar); | ||
2269 | ath10k_pci_enable_legacy_irq(ar); | ||
2270 | } | ||
2271 | |||
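The early tasklet above reads the firmware indicator register and acknowledges a pending event by writing the value back with the pending bit cleared. Below is a userspace model of that read-check-ack pattern; the register and bit name are placeholders, not the actual ath10k definitions.

	#include <stdint.h>
	#include <stdio.h>

	static volatile uint32_t fw_indicator;		/* fake indicator register */
	#define FW_IND_EVENT_PENDING_BIT 0x00000001u	/* placeholder bit */

	/* Returns 1 if an event was pending (and acks it), 0 otherwise. */
	static int check_and_ack_fw_event(void)
	{
		uint32_t val = fw_indicator;

		if (!(val & FW_IND_EVENT_PENDING_BIT))
			return 0;

		/* Ack by writing the value back with the pending bit cleared. */
		fw_indicator = val & ~FW_IND_EVENT_PENDING_BIT;
		return 1;
	}

	int main(void)
	{
		fw_indicator = FW_IND_EVENT_PENDING_BIT;
		printf("pending: %d, after ack: 0x%08x\n",
		       check_and_ack_fw_event(), fw_indicator);
		return 0;
	}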
2052 | static void ath10k_pci_tasklet(unsigned long data) | 2272 | static void ath10k_pci_tasklet(unsigned long data) |
2053 | { | 2273 | { |
2054 | struct ath10k *ar = (struct ath10k *)data; | 2274 | struct ath10k *ar = (struct ath10k *)data; |
@@ -2057,40 +2277,22 @@ static void ath10k_pci_tasklet(unsigned long data) | |||
2057 | ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ | 2277 | ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ |
2058 | ath10k_ce_per_engine_service_any(ar); | 2278 | ath10k_ce_per_engine_service_any(ar); |
2059 | 2279 | ||
2060 | if (ar_pci->num_msi_intrs == 0) { | 2280 | /* Re-enable legacy irq that was disabled in the irq handler */ |
2061 | /* Enable Legacy PCI line interrupts */ | 2281 | if (ar_pci->num_msi_intrs == 0) |
2062 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | 2282 | ath10k_pci_enable_legacy_irq(ar); |
2063 | PCIE_INTR_CE_MASK_ALL, | ||
2064 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | ||
2065 | PCIE_INTR_ENABLE_ADDRESS)); | ||
2066 | /* | ||
2067 | * IMPORTANT: this extra read transaction is required to | ||
2068 | * flush the posted write buffer | ||
2069 | */ | ||
2070 | (void) ioread32(ar_pci->mem + | ||
2071 | (SOC_CORE_BASE_ADDRESS | | ||
2072 | PCIE_INTR_ENABLE_ADDRESS)); | ||
2073 | } | ||
2074 | } | 2283 | } |
2075 | 2284 | ||
2076 | static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num) | 2285 | static int ath10k_pci_request_irq_msix(struct ath10k *ar) |
2077 | { | 2286 | { |
2078 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2287 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2079 | int ret; | 2288 | int ret, i; |
2080 | int i; | ||
2081 | |||
2082 | ret = pci_enable_msi_block(ar_pci->pdev, num); | ||
2083 | if (ret) | ||
2084 | return ret; | ||
2085 | 2289 | ||
2086 | ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, | 2290 | ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, |
2087 | ath10k_pci_msi_fw_handler, | 2291 | ath10k_pci_msi_fw_handler, |
2088 | IRQF_SHARED, "ath10k_pci", ar); | 2292 | IRQF_SHARED, "ath10k_pci", ar); |
2089 | if (ret) { | 2293 | if (ret) { |
2090 | ath10k_warn("request_irq(%d) failed %d\n", | 2294 | ath10k_warn("failed to request MSI-X fw irq %d: %d\n", |
2091 | ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); | 2295 | ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); |
2092 | |||
2093 | pci_disable_msi(ar_pci->pdev); | ||
2094 | return ret; | 2296 | return ret; |
2095 | } | 2297 | } |
2096 | 2298 | ||
@@ -2099,44 +2301,38 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num) | |||
2099 | ath10k_pci_per_engine_handler, | 2301 | ath10k_pci_per_engine_handler, |
2100 | IRQF_SHARED, "ath10k_pci", ar); | 2302 | IRQF_SHARED, "ath10k_pci", ar); |
2101 | if (ret) { | 2303 | if (ret) { |
2102 | ath10k_warn("request_irq(%d) failed %d\n", | 2304 | ath10k_warn("failed to request MSI-X ce irq %d: %d\n", |
2103 | ar_pci->pdev->irq + i, ret); | 2305 | ar_pci->pdev->irq + i, ret); |
2104 | 2306 | ||
2105 | for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) | 2307 | for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) |
2106 | free_irq(ar_pci->pdev->irq + i, ar); | 2308 | free_irq(ar_pci->pdev->irq + i, ar); |
2107 | 2309 | ||
2108 | free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); | 2310 | free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); |
2109 | pci_disable_msi(ar_pci->pdev); | ||
2110 | return ret; | 2311 | return ret; |
2111 | } | 2312 | } |
2112 | } | 2313 | } |
2113 | 2314 | ||
2114 | ath10k_info("MSI-X interrupt handling (%d intrs)\n", num); | ||
2115 | return 0; | 2315 | return 0; |
2116 | } | 2316 | } |
2117 | 2317 | ||
2118 | static int ath10k_pci_start_intr_msi(struct ath10k *ar) | 2318 | static int ath10k_pci_request_irq_msi(struct ath10k *ar) |
2119 | { | 2319 | { |
2120 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2320 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2121 | int ret; | 2321 | int ret; |
2122 | 2322 | ||
2123 | ret = pci_enable_msi(ar_pci->pdev); | ||
2124 | if (ret < 0) | ||
2125 | return ret; | ||
2126 | |||
2127 | ret = request_irq(ar_pci->pdev->irq, | 2323 | ret = request_irq(ar_pci->pdev->irq, |
2128 | ath10k_pci_interrupt_handler, | 2324 | ath10k_pci_interrupt_handler, |
2129 | IRQF_SHARED, "ath10k_pci", ar); | 2325 | IRQF_SHARED, "ath10k_pci", ar); |
2130 | if (ret < 0) { | 2326 | if (ret) { |
2131 | pci_disable_msi(ar_pci->pdev); | 2327 | ath10k_warn("failed to request MSI irq %d: %d\n", |
2328 | ar_pci->pdev->irq, ret); | ||
2132 | return ret; | 2329 | return ret; |
2133 | } | 2330 | } |
2134 | 2331 | ||
2135 | ath10k_info("MSI interrupt handling\n"); | ||
2136 | return 0; | 2332 | return 0; |
2137 | } | 2333 | } |
2138 | 2334 | ||
2139 | static int ath10k_pci_start_intr_legacy(struct ath10k *ar) | 2335 | static int ath10k_pci_request_irq_legacy(struct ath10k *ar) |
2140 | { | 2336 | { |
2141 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2337 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2142 | int ret; | 2338 | int ret; |
@@ -2144,112 +2340,165 @@ static int ath10k_pci_start_intr_legacy(struct ath10k *ar) | |||
2144 | ret = request_irq(ar_pci->pdev->irq, | 2340 | ret = request_irq(ar_pci->pdev->irq, |
2145 | ath10k_pci_interrupt_handler, | 2341 | ath10k_pci_interrupt_handler, |
2146 | IRQF_SHARED, "ath10k_pci", ar); | 2342 | IRQF_SHARED, "ath10k_pci", ar); |
2147 | if (ret < 0) | 2343 | if (ret) { |
2344 | ath10k_warn("failed to request legacy irq %d: %d\n", | ||
2345 | ar_pci->pdev->irq, ret); | ||
2148 | return ret; | 2346 | return ret; |
2347 | } | ||
2149 | 2348 | ||
2150 | /* | 2349 | return 0; |
2151 | * Make sure to wake the Target before enabling Legacy | 2350 | } |
2152 | * Interrupt. | 2351 | |
2153 | */ | 2352 | static int ath10k_pci_request_irq(struct ath10k *ar) |
2154 | iowrite32(PCIE_SOC_WAKE_V_MASK, | 2353 | { |
2155 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | 2354 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2156 | PCIE_SOC_WAKE_ADDRESS); | ||
2157 | 2355 | ||
2158 | ath10k_pci_wait(ar); | 2356 | switch (ar_pci->num_msi_intrs) { |
2357 | case 0: | ||
2358 | return ath10k_pci_request_irq_legacy(ar); | ||
2359 | case 1: | ||
2360 | return ath10k_pci_request_irq_msi(ar); | ||
2361 | case MSI_NUM_REQUEST: | ||
2362 | return ath10k_pci_request_irq_msix(ar); | ||
2363 | } | ||
2159 | 2364 | ||
2160 | /* | 2365 | ath10k_warn("unknown irq configuration upon request\n"); |
2161 | * A potential race occurs here: The CORE_BASE write | 2366 | return -EINVAL; |
2162 | * depends on target correctly decoding AXI address but | ||
2163 | * host won't know when target writes BAR to CORE_CTRL. | ||
2164 | * This write might get lost if target has NOT written BAR. | ||
2165 | * For now, fix the race by repeating the write in below | ||
2166 | * synchronization checking. | ||
2167 | */ | ||
2168 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | ||
2169 | PCIE_INTR_CE_MASK_ALL, | ||
2170 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | ||
2171 | PCIE_INTR_ENABLE_ADDRESS)); | ||
2172 | iowrite32(PCIE_SOC_WAKE_RESET, | ||
2173 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | ||
2174 | PCIE_SOC_WAKE_ADDRESS); | ||
2175 | |||
2176 | ath10k_info("legacy interrupt handling\n"); | ||
2177 | return 0; | ||
2178 | } | 2367 | } |
2179 | 2368 | ||
2180 | static int ath10k_pci_start_intr(struct ath10k *ar) | 2369 | static void ath10k_pci_free_irq(struct ath10k *ar) |
2370 | { | ||
2371 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
2372 | int i; | ||
2373 | |||
2374 | /* There's at least one interrupt regardless of whether it's legacy | ||
2375 | * INTR, MSI or MSI-X */ | ||
2376 | for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) | ||
2377 | free_irq(ar_pci->pdev->irq + i, ar); | ||
2378 | } | ||
2379 | |||
2380 | static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) | ||
2181 | { | 2381 | { |
2182 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2382 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2183 | int num = MSI_NUM_REQUEST; | ||
2184 | int ret; | ||
2185 | int i; | 2383 | int i; |
2186 | 2384 | ||
2187 | tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar); | 2385 | tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); |
2188 | tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, | 2386 | tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, |
2189 | (unsigned long) ar); | 2387 | (unsigned long)ar); |
2388 | tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet, | ||
2389 | (unsigned long)ar); | ||
2190 | 2390 | ||
2191 | for (i = 0; i < CE_COUNT; i++) { | 2391 | for (i = 0; i < CE_COUNT; i++) { |
2192 | ar_pci->pipe_info[i].ar_pci = ar_pci; | 2392 | ar_pci->pipe_info[i].ar_pci = ar_pci; |
2193 | tasklet_init(&ar_pci->pipe_info[i].intr, | 2393 | tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, |
2194 | ath10k_pci_ce_tasklet, | ||
2195 | (unsigned long)&ar_pci->pipe_info[i]); | 2394 | (unsigned long)&ar_pci->pipe_info[i]); |
2196 | } | 2395 | } |
2396 | } | ||
2397 | |||
2398 | static int ath10k_pci_init_irq(struct ath10k *ar) | ||
2399 | { | ||
2400 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
2401 | bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X, | ||
2402 | ar_pci->features); | ||
2403 | int ret; | ||
2197 | 2404 | ||
2198 | if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features)) | 2405 | ath10k_pci_init_irq_tasklets(ar); |
2199 | num = 1; | ||
2200 | 2406 | ||
2201 | if (num > 1) { | 2407 | if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO && |
2202 | ret = ath10k_pci_start_intr_msix(ar, num); | 2408 | !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) |
2409 | ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode); | ||
2410 | |||
2411 | /* Try MSI-X */ | ||
2412 | if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) { | ||
2413 | ar_pci->num_msi_intrs = MSI_NUM_REQUEST; | ||
2414 | ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs); | ||
2203 | if (ret == 0) | 2415 | if (ret == 0) |
2204 | goto exit; | 2416 | return 0; |
2417 | if (ret > 0) | ||
2418 | pci_disable_msi(ar_pci->pdev); | ||
2205 | 2419 | ||
2206 | ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret); | 2420 | /* fall-through */ |
2207 | num = 1; | ||
2208 | } | 2421 | } |
2209 | 2422 | ||
2210 | if (num == 1) { | 2423 | /* Try MSI */ |
2211 | ret = ath10k_pci_start_intr_msi(ar); | 2424 | if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { |
2425 | ar_pci->num_msi_intrs = 1; | ||
2426 | ret = pci_enable_msi(ar_pci->pdev); | ||
2212 | if (ret == 0) | 2427 | if (ret == 0) |
2213 | goto exit; | 2428 | return 0; |
2214 | 2429 | ||
2215 | ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n", | 2430 | /* fall-through */ |
2216 | ret); | ||
2217 | num = 0; | ||
2218 | } | 2431 | } |
2219 | 2432 | ||
2220 | ret = ath10k_pci_start_intr_legacy(ar); | 2433 | /* Try legacy irq |
2434 | * | ||
2435 | * A potential race occurs here: The CORE_BASE write | ||
2436 | * depends on target correctly decoding AXI address but | ||
2437 | * host won't know when target writes BAR to CORE_CTRL. | ||
2438 | * This write might get lost if target has NOT written BAR. | ||
2439 | * For now, fix the race by repeating the write in below | ||
2440 | * synchronization checking. */ | ||
2441 | ar_pci->num_msi_intrs = 0; | ||
2221 | 2442 | ||
2222 | exit: | 2443 | ret = ath10k_pci_wake(ar); |
2223 | ar_pci->num_msi_intrs = num; | 2444 | if (ret) { |
2224 | ar_pci->ce_count = CE_COUNT; | 2445 | ath10k_warn("failed to wake target: %d\n", ret); |
2225 | return ret; | 2446 | return ret; |
2447 | } | ||
2448 | |||
2449 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, | ||
2450 | PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); | ||
2451 | ath10k_pci_sleep(ar); | ||
2452 | |||
2453 | return 0; | ||
2226 | } | 2454 | } |
2227 | 2455 | ||
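init_irq() above tries MSI-X, then MSI, then legacy interrupts, optionally constrained by the irq_mode module parameter (for example, loading with ath10k_pci.irq_mode=1 forces legacy). The sketch below models that fallback chain in standalone C with stubbed enable functions; the enum values mirror the ones added at the top of pci.c, while the helpers and vector counts are placeholders for illustration only.

	#include <stdio.h>

	enum irq_mode { IRQ_AUTO = 0, IRQ_LEGACY = 1, IRQ_MSI = 2 };

	/* Stubs standing in for the PCI MSI enable calls. */
	static int enable_msix(void) { return -1; /* pretend MSI-X is unavailable */ }
	static int enable_msi(void)  { return 0; }

	/* Returns the vector count in use (8 is just a placeholder for the
	 * MSI-X case), 1 for MSI, 0 for legacy - mirroring num_msi_intrs. */
	static int init_irq(enum irq_mode requested, int msix_supported)
	{
		if (requested == IRQ_AUTO && msix_supported && enable_msix() == 0)
			return 8;

		if (requested != IRQ_LEGACY && enable_msi() == 0)
			return 1;

		/* Legacy: nothing to enable on the host side in this model. */
		return 0;
	}

	int main(void)
	{
		printf("auto   -> %d vectors\n", init_irq(IRQ_AUTO, 1));
		printf("legacy -> %d vectors\n", init_irq(IRQ_LEGACY, 1));
		return 0;
	}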
2228 | static void ath10k_pci_stop_intr(struct ath10k *ar) | 2456 | static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar) |
2229 | { | 2457 | { |
2230 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2458 | int ret; |
2231 | int i; | ||
2232 | 2459 | ||
2233 | /* There's at least one interrupt irregardless whether its legacy INTR | 2460 | ret = ath10k_pci_wake(ar); |
2234 | * or MSI or MSI-X */ | 2461 | if (ret) { |
2235 | for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) | 2462 | ath10k_warn("failed to wake target: %d\n", ret); |
2236 | free_irq(ar_pci->pdev->irq + i, ar); | 2463 | return ret; |
2464 | } | ||
2237 | 2465 | ||
2238 | if (ar_pci->num_msi_intrs > 0) | 2466 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, |
2467 | 0); | ||
2468 | ath10k_pci_sleep(ar); | ||
2469 | |||
2470 | return 0; | ||
2471 | } | ||
2472 | |||
2473 | static int ath10k_pci_deinit_irq(struct ath10k *ar) | ||
2474 | { | ||
2475 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
2476 | |||
2477 | switch (ar_pci->num_msi_intrs) { | ||
2478 | case 0: | ||
2479 | return ath10k_pci_deinit_irq_legacy(ar); | ||
2480 | case 1: | ||
2481 | /* fall-through */ | ||
2482 | case MSI_NUM_REQUEST: | ||
2239 | pci_disable_msi(ar_pci->pdev); | 2483 | pci_disable_msi(ar_pci->pdev); |
2484 | return 0; | ||
2485 | } | ||
2486 | |||
2487 | ath10k_warn("unknown irq configuration upon deinit\n"); | ||
2488 | return -EINVAL; | ||
2240 | } | 2489 | } |
2241 | 2490 | ||
2242 | static int ath10k_pci_reset_target(struct ath10k *ar) | 2491 | static int ath10k_pci_wait_for_target_init(struct ath10k *ar) |
2243 | { | 2492 | { |
2244 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2493 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2245 | int wait_limit = 300; /* 3 sec */ | 2494 | int wait_limit = 300; /* 3 sec */ |
2495 | int ret; | ||
2246 | 2496 | ||
2247 | /* Wait for Target to finish initialization before we proceed. */ | 2497 | ret = ath10k_pci_wake(ar); |
2248 | iowrite32(PCIE_SOC_WAKE_V_MASK, | 2498 | if (ret) { |
2249 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | 2499 | ath10k_err("failed to wake up target: %d\n", ret); |
2250 | PCIE_SOC_WAKE_ADDRESS); | 2500 | return ret; |
2251 | 2501 | } | |
2252 | ath10k_pci_wait(ar); | ||
2253 | 2502 | ||
2254 | while (wait_limit-- && | 2503 | while (wait_limit-- && |
2255 | !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & | 2504 | !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & |
@@ -2264,34 +2513,26 @@ static int ath10k_pci_reset_target(struct ath10k *ar) | |||
2264 | } | 2513 | } |
2265 | 2514 | ||
2266 | if (wait_limit < 0) { | 2515 | if (wait_limit < 0) { |
2267 | ath10k_err("Target stalled\n"); | 2516 | ath10k_err("target stalled\n"); |
2268 | iowrite32(PCIE_SOC_WAKE_RESET, | 2517 | ret = -EIO; |
2269 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | 2518 | goto out; |
2270 | PCIE_SOC_WAKE_ADDRESS); | ||
2271 | return -EIO; | ||
2272 | } | 2519 | } |
2273 | 2520 | ||
2274 | iowrite32(PCIE_SOC_WAKE_RESET, | 2521 | out: |
2275 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | 2522 | ath10k_pci_sleep(ar); |
2276 | PCIE_SOC_WAKE_ADDRESS); | 2523 | return ret; |
2277 | |||
2278 | return 0; | ||
2279 | } | 2524 | } |
2280 | 2525 | ||
2281 | static void ath10k_pci_device_reset(struct ath10k *ar) | 2526 | static int ath10k_pci_device_reset(struct ath10k *ar) |
2282 | { | 2527 | { |
2283 | int i; | 2528 | int i, ret; |
2284 | u32 val; | 2529 | u32 val; |
2285 | 2530 | ||
2286 | if (!SOC_GLOBAL_RESET_ADDRESS) | 2531 | ret = ath10k_do_pci_wake(ar); |
2287 | return; | 2532 | if (ret) { |
2288 | 2533 | ath10k_err("failed to wake up target: %d\n", | |
2289 | ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, | 2534 | ret); |
2290 | PCIE_SOC_WAKE_V_MASK); | 2535 | return ret; |
2291 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { | ||
2292 | if (ath10k_pci_target_is_awake(ar)) | ||
2293 | break; | ||
2294 | msleep(1); | ||
2295 | } | 2536 | } |
2296 | 2537 | ||
2297 | /* Put Target, including PCIe, into RESET. */ | 2538 | /* Put Target, including PCIe, into RESET. */ |
@@ -2317,7 +2558,8 @@ static void ath10k_pci_device_reset(struct ath10k *ar) | |||
2317 | msleep(1); | 2558 | msleep(1); |
2318 | } | 2559 | } |
2319 | 2560 | ||
2320 | ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); | 2561 | ath10k_do_pci_sleep(ar); |
2562 | return 0; | ||
2321 | } | 2563 | } |
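Several of the reworked helpers above (the legacy IRQ deinit, the target-init wait and the device reset) now share the same bracket: wake the target, perform the MMIO accesses, put the target back to sleep, and propagate a wake failure instead of touching registers. A small stand-alone model of that pattern follows; wake_target(), sleep_target() and do_work() are stubs for illustration, not driver functions.

#include <stdio.h>

/* Stubs standing in for ath10k_pci_wake()/ath10k_pci_sleep(). */
static int  wake_target(void)  { return 0; }
static void sleep_target(void) { }

static int with_target_awake(int (*do_work)(void))
{
	int ret = wake_target();
	if (ret) {
		fprintf(stderr, "failed to wake target: %d\n", ret);
		return ret;	/* never touch MMIO if the wake failed */
	}

	ret = do_work();	/* e.g. poke reset registers or poll an indicator */
	sleep_target();		/* always balance the wake */
	return ret;
}

static int demo_work(void) { return 0; }

int main(void)
{
	return with_target_awake(demo_work);
}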
2322 | 2564 | ||
2323 | static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) | 2565 | static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) |
@@ -2374,7 +2616,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2374 | 2616 | ||
2375 | ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); | 2617 | ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); |
2376 | if (!ar) { | 2618 | if (!ar) { |
2377 | ath10k_err("ath10k_core_create failed!\n"); | 2619 | ath10k_err("failed to create driver core\n"); |
2378 | ret = -EINVAL; | 2620 | ret = -EINVAL; |
2379 | goto err_ar_pci; | 2621 | goto err_ar_pci; |
2380 | } | 2622 | } |
@@ -2393,20 +2635,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2393 | */ | 2635 | */ |
2394 | ret = pci_assign_resource(pdev, BAR_NUM); | 2636 | ret = pci_assign_resource(pdev, BAR_NUM); |
2395 | if (ret) { | 2637 | if (ret) { |
2396 | ath10k_err("cannot assign PCI space: %d\n", ret); | 2638 | ath10k_err("failed to assign PCI space: %d\n", ret); |
2397 | goto err_ar; | 2639 | goto err_ar; |
2398 | } | 2640 | } |
2399 | 2641 | ||
2400 | ret = pci_enable_device(pdev); | 2642 | ret = pci_enable_device(pdev); |
2401 | if (ret) { | 2643 | if (ret) { |
2402 | ath10k_err("cannot enable PCI device: %d\n", ret); | 2644 | ath10k_err("failed to enable PCI device: %d\n", ret); |
2403 | goto err_ar; | 2645 | goto err_ar; |
2404 | } | 2646 | } |
2405 | 2647 | ||
2406 | /* Request MMIO resources */ | 2648 | /* Request MMIO resources */ |
2407 | ret = pci_request_region(pdev, BAR_NUM, "ath"); | 2649 | ret = pci_request_region(pdev, BAR_NUM, "ath"); |
2408 | if (ret) { | 2650 | if (ret) { |
2409 | ath10k_err("PCI MMIO reservation error: %d\n", ret); | 2651 | ath10k_err("failed to request MMIO region: %d\n", ret); |
2410 | goto err_device; | 2652 | goto err_device; |
2411 | } | 2653 | } |
2412 | 2654 | ||
@@ -2416,13 +2658,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2416 | */ | 2658 | */ |
2417 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 2659 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
2418 | if (ret) { | 2660 | if (ret) { |
2419 | ath10k_err("32-bit DMA not available: %d\n", ret); | 2661 | ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret); |
2420 | goto err_region; | 2662 | goto err_region; |
2421 | } | 2663 | } |
2422 | 2664 | ||
2423 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 2665 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
2424 | if (ret) { | 2666 | if (ret) { |
2425 | ath10k_err("cannot enable 32-bit consistent DMA\n"); | 2667 | ath10k_err("failed to set consistent DMA mask to 32-bit\n"); |
2426 | goto err_region; | 2668 | goto err_region; |
2427 | } | 2669 | } |
2428 | 2670 | ||
@@ -2439,7 +2681,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2439 | /* Arrange for access to Target SoC registers. */ | 2681 | /* Arrange for access to Target SoC registers. */ |
2440 | mem = pci_iomap(pdev, BAR_NUM, 0); | 2682 | mem = pci_iomap(pdev, BAR_NUM, 0); |
2441 | if (!mem) { | 2683 | if (!mem) { |
2442 | ath10k_err("PCI iomap error\n"); | 2684 | ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM); |
2443 | ret = -EIO; | 2685 | ret = -EIO; |
2444 | goto err_master; | 2686 | goto err_master; |
2445 | } | 2687 | } |
@@ -2451,11 +2693,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2451 | ret = ath10k_do_pci_wake(ar); | 2693 | ret = ath10k_do_pci_wake(ar); |
2452 | if (ret) { | 2694 | if (ret) { |
2453 | ath10k_err("Failed to get chip id: %d\n", ret); | 2695 | ath10k_err("Failed to get chip id: %d\n", ret); |
2454 | return ret; | 2696 | goto err_iomap; |
2455 | } | 2697 | } |
2456 | 2698 | ||
2457 | chip_id = ath10k_pci_read32(ar, | 2699 | chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); |
2458 | RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS); | ||
2459 | 2700 | ||
2460 | ath10k_do_pci_sleep(ar); | 2701 | ath10k_do_pci_sleep(ar); |
2461 | 2702 | ||
@@ -2463,7 +2704,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2463 | 2704 | ||
2464 | ret = ath10k_core_register(ar, chip_id); | 2705 | ret = ath10k_core_register(ar, chip_id); |
2465 | if (ret) { | 2706 | if (ret) { |
2466 | ath10k_err("could not register driver core (%d)\n", ret); | 2707 | ath10k_err("failed to register driver core: %d\n", ret); |
2467 | goto err_iomap; | 2708 | goto err_iomap; |
2468 | } | 2709 | } |
2469 | 2710 | ||
@@ -2529,7 +2770,7 @@ static int __init ath10k_pci_init(void) | |||
2529 | 2770 | ||
2530 | ret = pci_register_driver(&ath10k_pci_driver); | 2771 | ret = pci_register_driver(&ath10k_pci_driver); |
2531 | if (ret) | 2772 | if (ret) |
2532 | ath10k_err("pci_register_driver failed [%d]\n", ret); | 2773 | ath10k_err("failed to register PCI driver: %d\n", ret); |
2533 | 2774 | ||
2534 | return ret; | 2775 | return ret; |
2535 | } | 2776 | } |
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 52fb7b973571..a4f32038c440 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h | |||
@@ -198,9 +198,7 @@ struct ath10k_pci { | |||
198 | 198 | ||
199 | struct tasklet_struct intr_tq; | 199 | struct tasklet_struct intr_tq; |
200 | struct tasklet_struct msi_fw_err; | 200 | struct tasklet_struct msi_fw_err; |
201 | 201 | struct tasklet_struct early_irq_tasklet; | |
202 | /* Number of Copy Engines supported */ | ||
203 | unsigned int ce_count; | ||
204 | 202 | ||
205 | int started; | 203 | int started; |
206 | 204 | ||
@@ -318,6 +316,16 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) | |||
318 | return ioread32(ar_pci->mem + offset); | 316 | return ioread32(ar_pci->mem + offset); |
319 | } | 317 | } |
320 | 318 | ||
319 | static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) | ||
320 | { | ||
321 | return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); | ||
322 | } | ||
323 | |||
324 | static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) | ||
325 | { | ||
326 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); | ||
327 | } | ||
328 | |||
321 | int ath10k_do_pci_wake(struct ath10k *ar); | 329 | int ath10k_do_pci_wake(struct ath10k *ar); |
322 | void ath10k_do_pci_sleep(struct ath10k *ar); | 330 | void ath10k_do_pci_sleep(struct ath10k *ar); |
323 | 331 | ||
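The new ath10k_pci_soc_read32()/ath10k_pci_soc_write32() wrappers simply fold RTC_SOC_BASE_ADDRESS into the BAR-relative offset, which is what lets the probe path read the chip id with a single ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS) call. Below is a self-contained model of that offset composition; the addresses and the in-memory "BAR" are simulated placeholders, not the real register map.

#include <stdint.h>
#include <stdio.h>

/* Placeholder addresses -- illustration only, not the hardware map. */
#define RTC_SOC_BASE_ADDRESS	0x00004000
#define SOC_CHIP_ID_ADDRESS	0x000000ec

static uint32_t fake_mmio[0x10000 / 4];	/* stands in for the mapped BAR */

static uint32_t pci_read32(uint32_t offset)
{
	return fake_mmio[offset / 4];
}

/* Equivalent of ath10k_pci_soc_read32(): BAR-relative read at the SoC base. */
static uint32_t pci_soc_read32(uint32_t addr)
{
	return pci_read32(RTC_SOC_BASE_ADDRESS + addr);
}

int main(void)
{
	fake_mmio[(RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS) / 4] = 0x043202ff;
	printf("chip_id 0x%08x\n", (unsigned)pci_soc_read32(SOC_CHIP_ID_ADDRESS));
	return 0;
}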
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 5ae373a1e294..22829803f087 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c | |||
@@ -75,6 +75,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, | |||
75 | ath10k_report_offchan_tx(htt->ar, msdu); | 75 | ath10k_report_offchan_tx(htt->ar, msdu); |
76 | 76 | ||
77 | info = IEEE80211_SKB_CB(msdu); | 77 | info = IEEE80211_SKB_CB(msdu); |
78 | memset(&info->status, 0, sizeof(info->status)); | ||
78 | 79 | ||
79 | if (tx_done->discard) { | 80 | if (tx_done->discard) { |
80 | ieee80211_free_txskb(htt->ar->hw, msdu); | 81 | ieee80211_free_txskb(htt->ar->hw, msdu); |
@@ -183,7 +184,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info, | |||
183 | /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 | 184 | /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 |
184 | TODO check this */ | 185 | TODO check this */ |
185 | mcs = (info2 >> 4) & 0x0F; | 186 | mcs = (info2 >> 4) & 0x0F; |
186 | nss = (info1 >> 10) & 0x07; | 187 | nss = ((info1 >> 10) & 0x07) + 1; |
187 | bw = info1 & 3; | 188 | bw = info1 & 3; |
188 | sgi = info2 & 1; | 189 | sgi = info2 & 1; |
189 | 190 | ||
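The one-line rx-rate fix above reflects that the NSS field decoded from VHT-SIG-A1 is zero based (a raw value of 0 means one spatial stream), so the driver now adds 1 after masking. A tiny stand-alone check of that decode, using the same shifts and masks as the hunk; the bit positions are taken from the driver code above, not quoted from the 802.11 specification.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t info1 = 0x0;	/* VHT-SIG-A1: NSS field in bits 10..12, BW in bits 0..1 */
	uint32_t info2 = 0x0;	/* VHT-SIG-A2: MCS in bits 4..7, SGI in bit 0 */

	unsigned mcs = (info2 >> 4) & 0x0F;
	unsigned nss = ((info1 >> 10) & 0x07) + 1;	/* field is 0-based */
	unsigned bw  = info1 & 3;
	unsigned sgi = info2 & 1;

	printf("mcs %u nss %u bw %u sgi %u\n", mcs, nss, bw, sgi);
	return 0;
}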
@@ -236,6 +237,9 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info) | |||
236 | if (info->fcs_err) | 237 | if (info->fcs_err) |
237 | status->flag |= RX_FLAG_FAILED_FCS_CRC; | 238 | status->flag |= RX_FLAG_FAILED_FCS_CRC; |
238 | 239 | ||
240 | if (info->amsdu_more) | ||
241 | status->flag |= RX_FLAG_AMSDU_MORE; | ||
242 | |||
239 | status->signal = info->signal; | 243 | status->signal = info->signal; |
240 | 244 | ||
241 | spin_lock_bh(&ar->data_lock); | 245 | spin_lock_bh(&ar->data_lock); |
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index ccf3597fd9e2..1260a8d15dc3 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c | |||
@@ -674,10 +674,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) | |||
674 | 674 | ||
675 | /* Send the management frame buffer to the target */ | 675 | /* Send the management frame buffer to the target */ |
676 | ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid); | 676 | ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid); |
677 | if (ret) { | 677 | if (ret) |
678 | dev_kfree_skb_any(skb); | ||
679 | return ret; | 678 | return ret; |
680 | } | ||
681 | 679 | ||
682 | /* TODO: report tx status to mac80211 - temporary just ACK */ | 680 | /* TODO: report tx status to mac80211 - temporary just ACK */ |
683 | info->flags |= IEEE80211_TX_STAT_ACK; | 681 | info->flags |= IEEE80211_TX_STAT_ACK; |
@@ -909,6 +907,11 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) | |||
909 | ath10k_dbg(ATH10K_DBG_MGMT, | 907 | ath10k_dbg(ATH10K_DBG_MGMT, |
910 | "event mgmt rx status %08x\n", rx_status); | 908 | "event mgmt rx status %08x\n", rx_status); |
911 | 909 | ||
910 | if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { | ||
911 | dev_kfree_skb(skb); | ||
912 | return 0; | ||
913 | } | ||
914 | |||
912 | if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) { | 915 | if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) { |
913 | dev_kfree_skb(skb); | 916 | dev_kfree_skb(skb); |
914 | return 0; | 917 | return 0; |
@@ -1383,9 +1386,259 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, | |||
1383 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); | 1386 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); |
1384 | } | 1387 | } |
1385 | 1388 | ||
1389 | static void ath10k_dfs_radar_report(struct ath10k *ar, | ||
1390 | struct wmi_single_phyerr_rx_event *event, | ||
1391 | struct phyerr_radar_report *rr, | ||
1392 | u64 tsf) | ||
1393 | { | ||
1394 | u32 reg0, reg1, tsf32l; | ||
1395 | struct pulse_event pe; | ||
1396 | u64 tsf64; | ||
1397 | u8 rssi, width; | ||
1398 | |||
1399 | reg0 = __le32_to_cpu(rr->reg0); | ||
1400 | reg1 = __le32_to_cpu(rr->reg1); | ||
1401 | |||
1402 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1403 | "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", | ||
1404 | MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), | ||
1405 | MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), | ||
1406 | MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), | ||
1407 | MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); | ||
1408 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1409 | "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", | ||
1410 | MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), | ||
1411 | MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), | ||
1412 | MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), | ||
1413 | MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), | ||
1414 | MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); | ||
1415 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1416 | "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n", | ||
1417 | MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), | ||
1418 | MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); | ||
1419 | |||
1420 | if (!ar->dfs_detector) | ||
1421 | return; | ||
1422 | |||
1423 | /* report event to DFS pattern detector */ | ||
1424 | tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp); | ||
1425 | tsf64 = tsf & (~0xFFFFFFFFULL); | ||
1426 | tsf64 |= tsf32l; | ||
1427 | |||
1428 | width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR); | ||
1429 | rssi = event->hdr.rssi_combined; | ||
1430 | |||
1431 | /* hardware stores this as an 8 bit signed value, | ||
1432 | * so clamp it to zero if it is negative | ||
1433 | */ | ||
1434 | if (rssi & 0x80) | ||
1435 | rssi = 0; | ||
1436 | |||
1437 | pe.ts = tsf64; | ||
1438 | pe.freq = ar->hw->conf.chandef.chan->center_freq; | ||
1439 | pe.width = width; | ||
1440 | pe.rssi = rssi; | ||
1441 | |||
1442 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1443 | "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", | ||
1444 | pe.freq, pe.width, pe.rssi, pe.ts); | ||
1445 | |||
1446 | ATH10K_DFS_STAT_INC(ar, pulses_detected); | ||
1447 | |||
1448 | if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { | ||
1449 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1450 | "dfs no pulse pattern detected, yet\n"); | ||
1451 | return; | ||
1452 | } | ||
1453 | |||
1454 | ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n"); | ||
1455 | ATH10K_DFS_STAT_INC(ar, radar_detected); | ||
1456 | |||
1457 | /* Reporting of radar events can be blocked via the debugfs | ||
1458 | * file dfs_block_radar_events */ | ||
1459 | if (ar->dfs_block_radar_events) { | ||
1460 | ath10k_info("DFS Radar detected, but ignored as requested\n"); | ||
1461 | return; | ||
1462 | } | ||
1463 | |||
1464 | ieee80211_radar_detected(ar->hw); | ||
1465 | } | ||
1466 | |||
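ath10k_dfs_radar_report() above pulls the pulse width out of reg1 and clamps the combined RSSI, because the hardware reports RSSI as an 8-bit signed value while the pulse descriptor expects an unsigned one. A minimal illustration of that extraction and clamp with simulated register values follows; it is a sketch of the arithmetic only, not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg1 = 0x00000023;	/* simulated radar report word 1 */
	uint8_t rssi_combined = 0x85;	/* simulated 8-bit signed RSSI (negative) */

	/* pulse duration lives in bits 7..0 of reg1 (RADAR_REPORT_REG1_PULSE_DUR_*) */
	unsigned width = reg1 & 0xFF;

	/* RSSI is reported as 8-bit signed; clamp negative values to zero */
	unsigned rssi = rssi_combined;
	if (rssi & 0x80)
		rssi = 0;

	printf("pulse width %u rssi %u\n", width, rssi);
	return 0;
}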
1467 | static int ath10k_dfs_fft_report(struct ath10k *ar, | ||
1468 | struct wmi_single_phyerr_rx_event *event, | ||
1469 | struct phyerr_fft_report *fftr, | ||
1470 | u64 tsf) | ||
1471 | { | ||
1472 | u32 reg0, reg1; | ||
1473 | u8 rssi, peak_mag; | ||
1474 | |||
1475 | reg0 = __le32_to_cpu(fftr->reg0); | ||
1476 | reg1 = __le32_to_cpu(fftr->reg1); | ||
1477 | rssi = event->hdr.rssi_combined; | ||
1478 | |||
1479 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1480 | "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", | ||
1481 | MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), | ||
1482 | MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), | ||
1483 | MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), | ||
1484 | MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); | ||
1485 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1486 | "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", | ||
1487 | MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), | ||
1488 | MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), | ||
1489 | MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG), | ||
1490 | MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB)); | ||
1491 | |||
1492 | peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); | ||
1493 | |||
1494 | /* false event detection */ | ||
1495 | if (rssi == DFS_RSSI_POSSIBLY_FALSE && | ||
1496 | peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { | ||
1497 | ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); | ||
1498 | ATH10K_DFS_STAT_INC(ar, pulses_discarded); | ||
1499 | return -EINVAL; | ||
1500 | } | ||
1501 | |||
1502 | return 0; | ||
1503 | } | ||
1504 | |||
1505 | static void ath10k_wmi_event_dfs(struct ath10k *ar, | ||
1506 | struct wmi_single_phyerr_rx_event *event, | ||
1507 | u64 tsf) | ||
1508 | { | ||
1509 | int buf_len, tlv_len, res, i = 0; | ||
1510 | struct phyerr_tlv *tlv; | ||
1511 | struct phyerr_radar_report *rr; | ||
1512 | struct phyerr_fft_report *fftr; | ||
1513 | u8 *tlv_buf; | ||
1514 | |||
1515 | buf_len = __le32_to_cpu(event->hdr.buf_len); | ||
1516 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1517 | "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", | ||
1518 | event->hdr.phy_err_code, event->hdr.rssi_combined, | ||
1519 | __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); | ||
1520 | |||
1521 | /* Skip event if DFS disabled */ | ||
1522 | if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) | ||
1523 | return; | ||
1524 | |||
1525 | ATH10K_DFS_STAT_INC(ar, pulses_total); | ||
1526 | |||
1527 | while (i < buf_len) { | ||
1528 | if (i + sizeof(*tlv) > buf_len) { | ||
1529 | ath10k_warn("too short buf for tlv header (%d)\n", i); | ||
1530 | return; | ||
1531 | } | ||
1532 | |||
1533 | tlv = (struct phyerr_tlv *)&event->bufp[i]; | ||
1534 | tlv_len = __le16_to_cpu(tlv->len); | ||
1535 | tlv_buf = &event->bufp[i + sizeof(*tlv)]; | ||
1536 | ath10k_dbg(ATH10K_DBG_REGULATORY, | ||
1537 | "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", | ||
1538 | tlv_len, tlv->tag, tlv->sig); | ||
1539 | |||
1540 | switch (tlv->tag) { | ||
1541 | case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: | ||
1542 | if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { | ||
1543 | ath10k_warn("too short radar pulse summary (%d)\n", | ||
1544 | i); | ||
1545 | return; | ||
1546 | } | ||
1547 | |||
1548 | rr = (struct phyerr_radar_report *)tlv_buf; | ||
1549 | ath10k_dfs_radar_report(ar, event, rr, tsf); | ||
1550 | break; | ||
1551 | case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: | ||
1552 | if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { | ||
1553 | ath10k_warn("too short fft report (%d)\n", i); | ||
1554 | return; | ||
1555 | } | ||
1556 | |||
1557 | fftr = (struct phyerr_fft_report *)tlv_buf; | ||
1558 | res = ath10k_dfs_fft_report(ar, event, fftr, tsf); | ||
1559 | if (res) | ||
1560 | return; | ||
1561 | break; | ||
1562 | } | ||
1563 | |||
1564 | i += sizeof(*tlv) + tlv_len; | ||
1565 | } | ||
1566 | } | ||
1567 | |||
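ath10k_wmi_event_dfs() above walks a packed TLV stream, verifying that the TLV header and then its payload fit within buf_len before dereferencing anything, and advancing by sizeof(header) plus the reported length. Here is a self-contained sketch of the same bounds-checked walk over a plain byte buffer; the 2-byte length / tag / sig layout mirrors struct phyerr_tlv from the wmi.h hunk below, and a little-endian host is assumed for brevity.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {	/* mirrors struct phyerr_tlv: __le16 len; u8 tag; u8 sig; */
	uint16_t len;
	uint8_t tag;
	uint8_t sig;
};

static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
	size_t i = 0;

	while (i < buf_len) {
		struct tlv_hdr tlv;

		if (i + sizeof(tlv) > buf_len) {
			fprintf(stderr, "too short buf for tlv header (%zu)\n", i);
			return;
		}
		memcpy(&tlv, buf + i, sizeof(tlv));	/* avoid unaligned access */

		if (i + sizeof(tlv) + tlv.len > buf_len) {
			fprintf(stderr, "truncated tlv payload (%zu)\n", i);
			return;
		}

		printf("tlv tag 0x%02x sig 0x%02x len %u\n",
		       (unsigned)tlv.tag, (unsigned)tlv.sig, (unsigned)tlv.len);
		i += sizeof(tlv) + tlv.len;	/* advance to the next TLV */
	}
}

int main(void)
{
	/* one TLV: len=2, tag=0xf8, sig=0xbb, payload "ab" */
	uint8_t buf[] = { 0x02, 0x00, 0xf8, 0xbb, 'a', 'b' };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}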
1568 | static void ath10k_wmi_event_spectral_scan(struct ath10k *ar, | ||
1569 | struct wmi_single_phyerr_rx_event *event, | ||
1570 | u64 tsf) | ||
1571 | { | ||
1572 | ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n"); | ||
1573 | } | ||
1574 | |||
1386 | static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) | 1575 | static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) |
1387 | { | 1576 | { |
1388 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n"); | 1577 | struct wmi_comb_phyerr_rx_event *comb_event; |
1578 | struct wmi_single_phyerr_rx_event *event; | ||
1579 | u32 count, i, buf_len, phy_err_code; | ||
1580 | u64 tsf; | ||
1581 | int left_len = skb->len; | ||
1582 | |||
1583 | ATH10K_DFS_STAT_INC(ar, phy_errors); | ||
1584 | |||
1585 | /* Check if combined event available */ | ||
1586 | if (left_len < sizeof(*comb_event)) { | ||
1587 | ath10k_warn("wmi phyerr combined event wrong len\n"); | ||
1588 | return; | ||
1589 | } | ||
1590 | |||
1591 | left_len -= sizeof(*comb_event); | ||
1592 | |||
1593 | /* Check number of included events */ | ||
1594 | comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data; | ||
1595 | count = __le32_to_cpu(comb_event->hdr.num_phyerr_events); | ||
1596 | |||
1597 | tsf = __le32_to_cpu(comb_event->hdr.tsf_u32); | ||
1598 | tsf <<= 32; | ||
1599 | tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); | ||
1600 | |||
1601 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1602 | "wmi event phyerr count %d tsf64 0x%llX\n", | ||
1603 | count, tsf); | ||
1604 | |||
1605 | event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp; | ||
1606 | for (i = 0; i < count; i++) { | ||
1607 | /* Check if we can read event header */ | ||
1608 | if (left_len < sizeof(*event)) { | ||
1609 | ath10k_warn("single event (%d) wrong head len\n", i); | ||
1610 | return; | ||
1611 | } | ||
1612 | |||
1613 | left_len -= sizeof(*event); | ||
1614 | |||
1615 | buf_len = __le32_to_cpu(event->hdr.buf_len); | ||
1616 | phy_err_code = event->hdr.phy_err_code; | ||
1617 | |||
1618 | if (left_len < buf_len) { | ||
1619 | ath10k_warn("single event (%d) wrong buf len\n", i); | ||
1620 | return; | ||
1621 | } | ||
1622 | |||
1623 | left_len -= buf_len; | ||
1624 | |||
1625 | switch (phy_err_code) { | ||
1626 | case PHY_ERROR_RADAR: | ||
1627 | ath10k_wmi_event_dfs(ar, event, tsf); | ||
1628 | break; | ||
1629 | case PHY_ERROR_SPECTRAL_SCAN: | ||
1630 | ath10k_wmi_event_spectral_scan(ar, event, tsf); | ||
1631 | break; | ||
1632 | case PHY_ERROR_FALSE_RADAR_EXT: | ||
1633 | ath10k_wmi_event_dfs(ar, event, tsf); | ||
1634 | ath10k_wmi_event_spectral_scan(ar, event, tsf); | ||
1635 | break; | ||
1636 | default: | ||
1637 | break; | ||
1638 | } | ||
1639 | |||
1640 | event += sizeof(*event) + buf_len; | ||
1641 | } | ||
1389 | } | 1642 | } |
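The combined PHY-error handler rebuilds a 64-bit TSF from tsf_u32/tsf_l32 and, per sub-event, keeps the upper 32 bits while substituting that event's own 32-bit timestamp (the same masking seen in ath10k_dfs_radar_report). A small stand-alone check of that arithmetic with made-up values:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t tsf_u32 = 0x00000012;		/* from the combined event header */
	uint32_t tsf_l32 = 0x34567890;
	uint32_t event_tsf = 0x3456aaaa;	/* per-event lower 32 bits */

	uint64_t tsf = ((uint64_t)tsf_u32 << 32) | tsf_l32;

	/* keep the upper half, substitute the sub-event's lower half */
	uint64_t tsf64 = (tsf & ~0xFFFFFFFFULL) | event_tsf;

	printf("tsf 0x%016" PRIx64 " tsf64 0x%016" PRIx64 "\n", tsf, tsf64);
	return 0;
}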
1390 | 1643 | ||
1391 | static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) | 1644 | static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) |
@@ -2062,6 +2315,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar, | |||
2062 | { | 2315 | { |
2063 | struct wmi_set_channel_cmd *cmd; | 2316 | struct wmi_set_channel_cmd *cmd; |
2064 | struct sk_buff *skb; | 2317 | struct sk_buff *skb; |
2318 | u32 ch_flags = 0; | ||
2065 | 2319 | ||
2066 | if (arg->passive) | 2320 | if (arg->passive) |
2067 | return -EINVAL; | 2321 | return -EINVAL; |
@@ -2070,10 +2324,14 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar, | |||
2070 | if (!skb) | 2324 | if (!skb) |
2071 | return -ENOMEM; | 2325 | return -ENOMEM; |
2072 | 2326 | ||
2327 | if (arg->chan_radar) | ||
2328 | ch_flags |= WMI_CHAN_FLAG_DFS; | ||
2329 | |||
2073 | cmd = (struct wmi_set_channel_cmd *)skb->data; | 2330 | cmd = (struct wmi_set_channel_cmd *)skb->data; |
2074 | cmd->chan.mhz = __cpu_to_le32(arg->freq); | 2331 | cmd->chan.mhz = __cpu_to_le32(arg->freq); |
2075 | cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); | 2332 | cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); |
2076 | cmd->chan.mode = arg->mode; | 2333 | cmd->chan.mode = arg->mode; |
2334 | cmd->chan.flags |= __cpu_to_le32(ch_flags); | ||
2077 | cmd->chan.min_power = arg->min_power; | 2335 | cmd->chan.min_power = arg->min_power; |
2078 | cmd->chan.max_power = arg->max_power; | 2336 | cmd->chan.max_power = arg->max_power; |
2079 | cmd->chan.reg_power = arg->max_reg_power; | 2337 | cmd->chan.reg_power = arg->max_reg_power; |
@@ -2211,7 +2469,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) | |||
2211 | } | 2469 | } |
2212 | 2470 | ||
2213 | ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", | 2471 | ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", |
2214 | __cpu_to_le32(ar->wmi.num_mem_chunks)); | 2472 | ar->wmi.num_mem_chunks); |
2215 | 2473 | ||
2216 | cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); | 2474 | cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); |
2217 | 2475 | ||
@@ -2224,10 +2482,10 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) | |||
2224 | __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); | 2482 | __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); |
2225 | 2483 | ||
2226 | ath10k_dbg(ATH10K_DBG_WMI, | 2484 | ath10k_dbg(ATH10K_DBG_WMI, |
2227 | "wmi chunk %d len %d requested, addr 0x%x\n", | 2485 | "wmi chunk %d len %d requested, addr 0x%llx\n", |
2228 | i, | 2486 | i, |
2229 | cmd->host_mem_chunks[i].size, | 2487 | ar->wmi.mem_chunks[i].len, |
2230 | cmd->host_mem_chunks[i].ptr); | 2488 | (unsigned long long)ar->wmi.mem_chunks[i].paddr); |
2231 | } | 2489 | } |
2232 | out: | 2490 | out: |
2233 | memcpy(&cmd->resource_config, &config, sizeof(config)); | 2491 | memcpy(&cmd->resource_config, &config, sizeof(config)); |
@@ -2302,7 +2560,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) | |||
2302 | } | 2560 | } |
2303 | 2561 | ||
2304 | ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", | 2562 | ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", |
2305 | __cpu_to_le32(ar->wmi.num_mem_chunks)); | 2563 | ar->wmi.num_mem_chunks); |
2306 | 2564 | ||
2307 | cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); | 2565 | cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); |
2308 | 2566 | ||
@@ -2315,10 +2573,10 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) | |||
2315 | __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); | 2573 | __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); |
2316 | 2574 | ||
2317 | ath10k_dbg(ATH10K_DBG_WMI, | 2575 | ath10k_dbg(ATH10K_DBG_WMI, |
2318 | "wmi chunk %d len %d requested, addr 0x%x\n", | 2576 | "wmi chunk %d len %d requested, addr 0x%llx\n", |
2319 | i, | 2577 | i, |
2320 | cmd->host_mem_chunks[i].size, | 2578 | ar->wmi.mem_chunks[i].len, |
2321 | cmd->host_mem_chunks[i].ptr); | 2579 | (unsigned long long)ar->wmi.mem_chunks[i].paddr); |
2322 | } | 2580 | } |
2323 | out: | 2581 | out: |
2324 | memcpy(&cmd->resource_config, &config, sizeof(config)); | 2582 | memcpy(&cmd->resource_config, &config, sizeof(config)); |
@@ -2622,6 +2880,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, | |||
2622 | struct sk_buff *skb; | 2880 | struct sk_buff *skb; |
2623 | const char *cmdname; | 2881 | const char *cmdname; |
2624 | u32 flags = 0; | 2882 | u32 flags = 0; |
2883 | u32 ch_flags = 0; | ||
2625 | 2884 | ||
2626 | if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && | 2885 | if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && |
2627 | cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid) | 2886 | cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid) |
@@ -2648,6 +2907,8 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, | |||
2648 | flags |= WMI_VDEV_START_HIDDEN_SSID; | 2907 | flags |= WMI_VDEV_START_HIDDEN_SSID; |
2649 | if (arg->pmf_enabled) | 2908 | if (arg->pmf_enabled) |
2650 | flags |= WMI_VDEV_START_PMF_ENABLED; | 2909 | flags |= WMI_VDEV_START_PMF_ENABLED; |
2910 | if (arg->channel.chan_radar) | ||
2911 | ch_flags |= WMI_CHAN_FLAG_DFS; | ||
2651 | 2912 | ||
2652 | cmd = (struct wmi_vdev_start_request_cmd *)skb->data; | 2913 | cmd = (struct wmi_vdev_start_request_cmd *)skb->data; |
2653 | cmd->vdev_id = __cpu_to_le32(arg->vdev_id); | 2914 | cmd->vdev_id = __cpu_to_le32(arg->vdev_id); |
@@ -2669,6 +2930,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, | |||
2669 | __cpu_to_le32(arg->channel.band_center_freq1); | 2930 | __cpu_to_le32(arg->channel.band_center_freq1); |
2670 | 2931 | ||
2671 | cmd->chan.mode = arg->channel.mode; | 2932 | cmd->chan.mode = arg->channel.mode; |
2933 | cmd->chan.flags |= __cpu_to_le32(ch_flags); | ||
2672 | cmd->chan.min_power = arg->channel.min_power; | 2934 | cmd->chan.min_power = arg->channel.min_power; |
2673 | cmd->chan.max_power = arg->channel.max_power; | 2935 | cmd->chan.max_power = arg->channel.max_power; |
2674 | cmd->chan.reg_power = arg->channel.max_reg_power; | 2936 | cmd->chan.reg_power = arg->channel.max_reg_power; |
@@ -2676,9 +2938,10 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, | |||
2676 | cmd->chan.antenna_max = arg->channel.max_antenna_gain; | 2938 | cmd->chan.antenna_max = arg->channel.max_antenna_gain; |
2677 | 2939 | ||
2678 | ath10k_dbg(ATH10K_DBG_WMI, | 2940 | ath10k_dbg(ATH10K_DBG_WMI, |
2679 | "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X," | 2941 | "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " |
2680 | "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq, | 2942 | "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, |
2681 | arg->channel.mode, flags, arg->channel.max_power); | 2943 | flags, arg->channel.freq, arg->channel.mode, |
2944 | cmd->chan.flags, arg->channel.max_power); | ||
2682 | 2945 | ||
2683 | return ath10k_wmi_cmd_send(ar, skb, cmd_id); | 2946 | return ath10k_wmi_cmd_send(ar, skb, cmd_id); |
2684 | } | 2947 | } |
@@ -3012,6 +3275,8 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, | |||
3012 | flags |= WMI_CHAN_FLAG_ALLOW_VHT; | 3275 | flags |= WMI_CHAN_FLAG_ALLOW_VHT; |
3013 | if (ch->ht40plus) | 3276 | if (ch->ht40plus) |
3014 | flags |= WMI_CHAN_FLAG_HT40_PLUS; | 3277 | flags |= WMI_CHAN_FLAG_HT40_PLUS; |
3278 | if (ch->chan_radar) | ||
3279 | flags |= WMI_CHAN_FLAG_DFS; | ||
3015 | 3280 | ||
3016 | ci->mhz = __cpu_to_le32(ch->freq); | 3281 | ci->mhz = __cpu_to_le32(ch->freq); |
3017 | ci->band_center_freq1 = __cpu_to_le32(ch->freq); | 3282 | ci->band_center_freq1 = __cpu_to_le32(ch->freq); |
@@ -3094,6 +3359,7 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar, | |||
3094 | { | 3359 | { |
3095 | struct wmi_bcn_tx_cmd *cmd; | 3360 | struct wmi_bcn_tx_cmd *cmd; |
3096 | struct sk_buff *skb; | 3361 | struct sk_buff *skb; |
3362 | int ret; | ||
3097 | 3363 | ||
3098 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len); | 3364 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len); |
3099 | if (!skb) | 3365 | if (!skb) |
@@ -3106,7 +3372,11 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar, | |||
3106 | cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); | 3372 | cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); |
3107 | memcpy(cmd->bcn, arg->bcn, arg->bcn_len); | 3373 | memcpy(cmd->bcn, arg->bcn, arg->bcn_len); |
3108 | 3374 | ||
3109 | return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid); | 3375 | ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid); |
3376 | if (ret) | ||
3377 | dev_kfree_skb(skb); | ||
3378 | |||
3379 | return ret; | ||
3110 | } | 3380 | } |
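Both WMI send-path changes in this file tighten sk_buff ownership on error: ath10k_wmi_mgmt_tx() no longer frees the caller's skb when the send fails, while the beacon helper above now frees its own command buffer if ath10k_wmi_cmd_send_nowait() returns an error. A tiny userspace model of the underlying "free only if ownership was not handed off" rule; wmi_send_stub() and the error codes are placeholders, not driver calls.

#include <stdio.h>
#include <stdlib.h>

struct buf { char data[64]; };

/* Placeholder for the WMI send call; pretend the command queue is full. */
static int wmi_send_stub(struct buf *b) { (void)b; return -11; /* -EAGAIN */ }

static int send_or_free(void)
{
	struct buf *b = malloc(sizeof(*b));
	int ret;

	if (!b)
		return -12;	/* -ENOMEM */

	ret = wmi_send_stub(b);
	if (ret)
		free(b);	/* ownership was not transferred, so free it here */

	return ret;
}

int main(void)
{
	printf("send_or_free() = %d\n", send_or_free());
	return 0;
}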
3111 | 3381 | ||
3112 | static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, | 3382 | static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, |
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 78c991aec7f9..0087d699b85b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h | |||
@@ -893,6 +893,7 @@ struct wmi_channel { | |||
893 | union { | 893 | union { |
894 | __le32 reginfo0; | 894 | __le32 reginfo0; |
895 | struct { | 895 | struct { |
896 | /* note: power unit is 0.5 dBm */ | ||
896 | u8 min_power; | 897 | u8 min_power; |
897 | u8 max_power; | 898 | u8 max_power; |
898 | u8 reg_power; | 899 | u8 reg_power; |
@@ -915,7 +916,8 @@ struct wmi_channel_arg { | |||
915 | bool allow_ht; | 916 | bool allow_ht; |
916 | bool allow_vht; | 917 | bool allow_vht; |
917 | bool ht40plus; | 918 | bool ht40plus; |
918 | /* note: power unit is 1/4th of dBm */ | 919 | bool chan_radar; |
920 | /* note: power unit is 0.5 dBm */ | ||
919 | u32 min_power; | 921 | u32 min_power; |
920 | u32 max_power; | 922 | u32 max_power; |
921 | u32 max_reg_power; | 923 | u32 max_reg_power; |
@@ -1977,6 +1979,10 @@ struct wmi_mgmt_rx_event_v2 { | |||
1977 | #define WMI_RX_STATUS_ERR_MIC 0x10 | 1979 | #define WMI_RX_STATUS_ERR_MIC 0x10 |
1978 | #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 | 1980 | #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 |
1979 | 1981 | ||
1982 | #define PHY_ERROR_SPECTRAL_SCAN 0x26 | ||
1983 | #define PHY_ERROR_FALSE_RADAR_EXT 0x24 | ||
1984 | #define PHY_ERROR_RADAR 0x05 | ||
1985 | |||
1980 | struct wmi_single_phyerr_rx_hdr { | 1986 | struct wmi_single_phyerr_rx_hdr { |
1981 | /* TSF timestamp */ | 1987 | /* TSF timestamp */ |
1982 | __le32 tsf_timestamp; | 1988 | __le32 tsf_timestamp; |
@@ -2068,6 +2074,87 @@ struct wmi_comb_phyerr_rx_event { | |||
2068 | u8 bufp[0]; | 2074 | u8 bufp[0]; |
2069 | } __packed; | 2075 | } __packed; |
2070 | 2076 | ||
2077 | #define PHYERR_TLV_SIG 0xBB | ||
2078 | #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB | ||
2079 | #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8 | ||
2080 | |||
2081 | struct phyerr_radar_report { | ||
2082 | __le32 reg0; /* RADAR_REPORT_REG0_* */ | ||
2083 | __le32 reg1; /* RADAR_REPORT_REG1_* */ | ||
2084 | } __packed; | ||
2085 | |||
2086 | #define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000 | ||
2087 | #define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB 31 | ||
2088 | |||
2089 | #define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK 0x40000000 | ||
2090 | #define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB 30 | ||
2091 | |||
2092 | #define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK 0x3FF00000 | ||
2093 | #define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB 20 | ||
2094 | |||
2095 | #define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK 0x000F0000 | ||
2096 | #define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB 16 | ||
2097 | |||
2098 | #define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK 0x0000FC00 | ||
2099 | #define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB 10 | ||
2100 | |||
2101 | #define RADAR_REPORT_REG0_PULSE_SIDX_MASK 0x000003FF | ||
2102 | #define RADAR_REPORT_REG0_PULSE_SIDX_LSB 0 | ||
2103 | |||
2104 | #define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK 0x80000000 | ||
2105 | #define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB 31 | ||
2106 | |||
2107 | #define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK 0x7F000000 | ||
2108 | #define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB 24 | ||
2109 | |||
2110 | #define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK 0x00FF0000 | ||
2111 | #define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB 16 | ||
2112 | |||
2113 | #define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK 0x0000FF00 | ||
2114 | #define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB 8 | ||
2115 | |||
2116 | #define RADAR_REPORT_REG1_PULSE_DUR_MASK 0x000000FF | ||
2117 | #define RADAR_REPORT_REG1_PULSE_DUR_LSB 0 | ||
2118 | |||
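The *_MASK/*_LSB pairs above are consumed through the driver's MS() field-extraction helper (defined outside this diff), which masks a register word and shifts it down. A stand-alone version of the same idea, using two of the fields defined above; the exact MS() definition shown here is an assumption for illustration, not quoted from the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the driver's MS() helper: pair FOO_MASK with FOO_LSB
 * via token pasting. */
#define MS(_v, _f)	(((_v) & _f##_MASK) >> _f##_LSB)

/* Two of the fields defined in the hunk above. */
#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK	0x80000000
#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB	31
#define RADAR_REPORT_REG0_PULSE_SIDX_MASK	0x000003FF
#define RADAR_REPORT_REG0_PULSE_SIDX_LSB	0

int main(void)
{
	uint32_t reg0 = 0x80000123;	/* simulated radar report word 0 */

	printf("chirp %lu sidx %lu\n",
	       (unsigned long)MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
	       (unsigned long)MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX));
	return 0;
}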
2119 | struct phyerr_fft_report { | ||
2120 | __le32 reg0; /* SEARCH_FFT_REPORT_REG0_* */ | ||
2121 | __le32 reg1; /* SEARCH_FFT_REPORT_REG1_* */ | ||
2122 | } __packed; | ||
2123 | |||
2124 | #define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK 0xFF800000 | ||
2125 | #define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB 23 | ||
2126 | |||
2127 | #define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK 0x007FC000 | ||
2128 | #define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB 14 | ||
2129 | |||
2130 | #define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK 0x00003000 | ||
2131 | #define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB 12 | ||
2132 | |||
2133 | #define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK 0x00000FFF | ||
2134 | #define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB 0 | ||
2135 | |||
2136 | #define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK 0xFC000000 | ||
2137 | #define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB 26 | ||
2138 | |||
2139 | #define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK 0x03FC0000 | ||
2140 | #define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB 18 | ||
2141 | |||
2142 | #define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK 0x0003FF00 | ||
2143 | #define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB 8 | ||
2144 | |||
2145 | #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF | ||
2146 | #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0 | ||
2147 | |||
2148 | |||
2149 | struct phyerr_tlv { | ||
2150 | __le16 len; | ||
2151 | u8 tag; | ||
2152 | u8 sig; | ||
2153 | } __packed; | ||
2154 | |||
2155 | #define DFS_RSSI_POSSIBLY_FALSE 50 | ||
2156 | #define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE 40 | ||
2157 | |||
2071 | struct wmi_mgmt_tx_hdr { | 2158 | struct wmi_mgmt_tx_hdr { |
2072 | __le32 vdev_id; | 2159 | __le32 vdev_id; |
2073 | struct wmi_mac_addr peer_macaddr; | 2160 | struct wmi_mac_addr peer_macaddr; |
@@ -2233,7 +2320,12 @@ enum wmi_pdev_param { | |||
2233 | * 0: no protection 1:use CTS-to-self 2: use RTS/CTS | 2320 | * 0: no protection 1:use CTS-to-self 2: use RTS/CTS |
2234 | */ | 2321 | */ |
2235 | WMI_PDEV_PARAM_PROTECTION_MODE, | 2322 | WMI_PDEV_PARAM_PROTECTION_MODE, |
2236 | /* Dynamic bandwidth 0: disable 1: enable */ | 2323 | /* |
2324 | * Dynamic bandwidth - 0: disable, 1: enable | ||
2325 | * | ||
2326 | * When enabled HW rate control tries different bandwidths when | ||
2327 | * retransmitting frames. | ||
2328 | */ | ||
2237 | WMI_PDEV_PARAM_DYNAMIC_BW, | 2329 | WMI_PDEV_PARAM_DYNAMIC_BW, |
2238 | /* Non-aggregate / 11g sw retry threshold. 0 - disable */ | 2330 |
2239 | WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, | 2331 | WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, |