Diffstat (limited to 'drivers/net/wireless/ath/ath10k/wmi.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 1089
1 file changed, 842 insertions(+), 247 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c2c87c916b5a..e500a3cc905e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -487,6 +487,127 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
487 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, 487 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
488}; 488};
489 489
490/* firmware 10.2 specific mappings */
491static struct wmi_cmd_map wmi_10_2_cmd_map = {
492 .init_cmdid = WMI_10_2_INIT_CMDID,
493 .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
494 .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
495 .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
496 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
497 .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
498 .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
499 .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
500 .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
501 .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
502 .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
503 .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
504 .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
505 .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
506 .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
507 .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
508 .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
509 .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
510 .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
511 .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
512 .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
513 .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
514 .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
515 .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
516 .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
517 .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
518 .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
519 .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
520 .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
521 .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
522 .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
523 .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
524 .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
525 .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
526 .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
527 .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
528 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
529 .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
530 .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
531 .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
532 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
533 .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
534 .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
535 .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
536 .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
537 .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
538 .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
539 .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
540 .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
541 .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
542 .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
543 .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
544 .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
545 .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
546 .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
547 .roam_scan_rssi_change_threshold =
548 WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
549 .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
550 .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
551 .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
552 .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
553 .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
554 .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
555 .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
556 .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
557 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
558 .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
559 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
560 .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
561 .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
562 .wlan_profile_set_hist_intvl_cmdid =
563 WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
564 .wlan_profile_get_profile_data_cmdid =
565 WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
566 .wlan_profile_enable_profile_id_cmdid =
567 WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
568 .wlan_profile_list_profile_id_cmdid =
569 WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
570 .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
571 .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
572 .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
573 .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
574 .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
575 .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
576 .wow_enable_disable_wake_event_cmdid =
577 WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
578 .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
579 .wow_hostwakeup_from_sleep_cmdid =
580 WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
581 .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
582 .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
583 .vdev_spectral_scan_configure_cmdid =
584 WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
585 .vdev_spectral_scan_enable_cmdid =
586 WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
587 .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
588 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
589 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
590 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
591 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
592 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
593 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
594 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
595 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
596 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
597 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
598 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
599 .echo_cmdid = WMI_10_2_ECHO_CMDID,
600 .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
601 .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
602 .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
603 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
604 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
605 .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
606 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
607 .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
608 .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
609};
610
490int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) 611int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
491{ 612{
492 int ret; 613 int ret;
@@ -503,18 +624,18 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
503 return ret; 624 return ret;
504} 625}
505 626
506static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) 627static struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
507{ 628{
508 struct sk_buff *skb; 629 struct sk_buff *skb;
509 u32 round_len = roundup(len, 4); 630 u32 round_len = roundup(len, 4);
510 631
511 skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len); 632 skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
512 if (!skb) 633 if (!skb)
513 return NULL; 634 return NULL;
514 635
515 skb_reserve(skb, WMI_SKB_HEADROOM); 636 skb_reserve(skb, WMI_SKB_HEADROOM);
516 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 637 if (!IS_ALIGNED((unsigned long)skb->data, 4))
517 ath10k_warn("Unaligned WMI skb\n"); 638 ath10k_warn(ar, "Unaligned WMI skb\n");
518 639
519 skb_put(skb, round_len); 640 skb_put(skb, round_len);
520 memset(skb->data, 0, round_len); 641 memset(skb->data, 0, round_len);
@@ -612,7 +733,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
612 might_sleep(); 733 might_sleep();
613 734
614 if (cmd_id == WMI_CMD_UNSUPPORTED) { 735 if (cmd_id == WMI_CMD_UNSUPPORTED) {
615 ath10k_warn("wmi command %d is not supported by firmware\n", 736 ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
616 cmd_id); 737 cmd_id);
617 return ret; 738 return ret;
618 } 739 }
@@ -660,7 +781,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
660 781
661 len = round_up(len, 4); 782 len = round_up(len, 4);
662 783
663 wmi_skb = ath10k_wmi_alloc_skb(len); 784 wmi_skb = ath10k_wmi_alloc_skb(ar, len);
664 if (!wmi_skb) 785 if (!wmi_skb)
665 return -ENOMEM; 786 return -ENOMEM;
666 787
@@ -674,7 +795,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
674 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); 795 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
675 memcpy(cmd->buf, skb->data, skb->len); 796 memcpy(cmd->buf, skb->data, skb->len);
676 797
677 ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", 798 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
678 wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, 799 wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
679 fc & IEEE80211_FCTL_STYPE); 800 fc & IEEE80211_FCTL_STYPE);
680 801
@@ -690,6 +811,130 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
690 return ret; 811 return ret;
691} 812}
692 813
814static void ath10k_wmi_event_scan_started(struct ath10k *ar)
815{
816 lockdep_assert_held(&ar->data_lock);
817
818 switch (ar->scan.state) {
819 case ATH10K_SCAN_IDLE:
820 case ATH10K_SCAN_RUNNING:
821 case ATH10K_SCAN_ABORTING:
822 ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
823 ath10k_scan_state_str(ar->scan.state),
824 ar->scan.state);
825 break;
826 case ATH10K_SCAN_STARTING:
827 ar->scan.state = ATH10K_SCAN_RUNNING;
828
829 if (ar->scan.is_roc)
830 ieee80211_ready_on_channel(ar->hw);
831
832 complete(&ar->scan.started);
833 break;
834 }
835}
836
837static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
838{
839 lockdep_assert_held(&ar->data_lock);
840
841 switch (ar->scan.state) {
842 case ATH10K_SCAN_IDLE:
843 case ATH10K_SCAN_STARTING:
844 /* One suspected reason scan can be completed while starting is
845 * if firmware fails to deliver all scan events to the host,
846 * e.g. when transport pipe is full. This has been observed
847 * with spectral scan phyerr events starving wmi transport
848 * pipe. In such case the "scan completed" event should be (and
849 * is) ignored by the host as it may be just firmware's scan
850 * state machine recovering.
851 */
852 ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
853 ath10k_scan_state_str(ar->scan.state),
854 ar->scan.state);
855 break;
856 case ATH10K_SCAN_RUNNING:
857 case ATH10K_SCAN_ABORTING:
858 __ath10k_scan_finish(ar);
859 break;
860 }
861}
862
863static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
864{
865 lockdep_assert_held(&ar->data_lock);
866
867 switch (ar->scan.state) {
868 case ATH10K_SCAN_IDLE:
869 case ATH10K_SCAN_STARTING:
870 ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
871 ath10k_scan_state_str(ar->scan.state),
872 ar->scan.state);
873 break;
874 case ATH10K_SCAN_RUNNING:
875 case ATH10K_SCAN_ABORTING:
876 ar->scan_channel = NULL;
877 break;
878 }
879}
880
881static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
882{
883 lockdep_assert_held(&ar->data_lock);
884
885 switch (ar->scan.state) {
886 case ATH10K_SCAN_IDLE:
887 case ATH10K_SCAN_STARTING:
888 ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
889 ath10k_scan_state_str(ar->scan.state),
890 ar->scan.state);
891 break;
892 case ATH10K_SCAN_RUNNING:
893 case ATH10K_SCAN_ABORTING:
894 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
895
896 if (ar->scan.is_roc && ar->scan.roc_freq == freq)
897 complete(&ar->scan.on_channel);
898 break;
899 }
900}
901
902static const char *
903ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
904 enum wmi_scan_completion_reason reason)
905{
906 switch (type) {
907 case WMI_SCAN_EVENT_STARTED:
908 return "started";
909 case WMI_SCAN_EVENT_COMPLETED:
910 switch (reason) {
911 case WMI_SCAN_REASON_COMPLETED:
912 return "completed";
913 case WMI_SCAN_REASON_CANCELLED:
914 return "completed [cancelled]";
915 case WMI_SCAN_REASON_PREEMPTED:
916 return "completed [preempted]";
917 case WMI_SCAN_REASON_TIMEDOUT:
918 return "completed [timedout]";
919 case WMI_SCAN_REASON_MAX:
920 break;
921 }
922 return "completed [unknown]";
923 case WMI_SCAN_EVENT_BSS_CHANNEL:
924 return "bss channel";
925 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
926 return "foreign channel";
927 case WMI_SCAN_EVENT_DEQUEUED:
928 return "dequeued";
929 case WMI_SCAN_EVENT_PREEMPTED:
930 return "preempted";
931 case WMI_SCAN_EVENT_START_FAILED:
932 return "start failed";
933 default:
934 return "unknown";
935 }
936}
937
693static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) 938static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
694{ 939{
695 struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; 940 struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
@@ -707,81 +952,32 @@ static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
707 scan_id = __le32_to_cpu(event->scan_id); 952 scan_id = __le32_to_cpu(event->scan_id);
708 vdev_id = __le32_to_cpu(event->vdev_id); 953 vdev_id = __le32_to_cpu(event->vdev_id);
709 954
710 ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
711 ath10k_dbg(ATH10K_DBG_WMI,
712 "scan event type %d reason %d freq %d req_id %d "
713 "scan_id %d vdev_id %d\n",
714 event_type, reason, freq, req_id, scan_id, vdev_id);
715
716 spin_lock_bh(&ar->data_lock); 955 spin_lock_bh(&ar->data_lock);
717 956
957 ath10k_dbg(ar, ATH10K_DBG_WMI,
958 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
959 ath10k_wmi_event_scan_type_str(event_type, reason),
960 event_type, reason, freq, req_id, scan_id, vdev_id,
961 ath10k_scan_state_str(ar->scan.state), ar->scan.state);
962
718 switch (event_type) { 963 switch (event_type) {
719 case WMI_SCAN_EVENT_STARTED: 964 case WMI_SCAN_EVENT_STARTED:
720 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n"); 965 ath10k_wmi_event_scan_started(ar);
721 if (ar->scan.in_progress && ar->scan.is_roc)
722 ieee80211_ready_on_channel(ar->hw);
723
724 complete(&ar->scan.started);
725 break; 966 break;
726 case WMI_SCAN_EVENT_COMPLETED: 967 case WMI_SCAN_EVENT_COMPLETED:
727 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n"); 968 ath10k_wmi_event_scan_completed(ar);
728 switch (reason) {
729 case WMI_SCAN_REASON_COMPLETED:
730 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
731 break;
732 case WMI_SCAN_REASON_CANCELLED:
733 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
734 break;
735 case WMI_SCAN_REASON_PREEMPTED:
736 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
737 break;
738 case WMI_SCAN_REASON_TIMEDOUT:
739 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
740 break;
741 default:
742 break;
743 }
744
745 ar->scan_channel = NULL;
746 if (!ar->scan.in_progress) {
747 ath10k_warn("no scan requested, ignoring\n");
748 break;
749 }
750
751 if (ar->scan.is_roc) {
752 ath10k_offchan_tx_purge(ar);
753
754 if (!ar->scan.aborting)
755 ieee80211_remain_on_channel_expired(ar->hw);
756 } else {
757 ieee80211_scan_completed(ar->hw, ar->scan.aborting);
758 }
759
760 del_timer(&ar->scan.timeout);
761 complete_all(&ar->scan.completed);
762 ar->scan.in_progress = false;
763 break; 969 break;
764 case WMI_SCAN_EVENT_BSS_CHANNEL: 970 case WMI_SCAN_EVENT_BSS_CHANNEL:
765 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n"); 971 ath10k_wmi_event_scan_bss_chan(ar);
766 ar->scan_channel = NULL;
767 break; 972 break;
768 case WMI_SCAN_EVENT_FOREIGN_CHANNEL: 973 case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
769 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n"); 974 ath10k_wmi_event_scan_foreign_chan(ar, freq);
770 ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
771 if (ar->scan.in_progress && ar->scan.is_roc &&
772 ar->scan.roc_freq == freq) {
773 complete(&ar->scan.on_channel);
774 }
775 break;
776 case WMI_SCAN_EVENT_DEQUEUED:
777 ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
778 break;
779 case WMI_SCAN_EVENT_PREEMPTED:
780 ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
781 break; 975 break;
782 case WMI_SCAN_EVENT_START_FAILED: 976 case WMI_SCAN_EVENT_START_FAILED:
783 ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n"); 977 ath10k_warn(ar, "received scan start failure event\n");
784 break; 978 break;
979 case WMI_SCAN_EVENT_DEQUEUED:
980 case WMI_SCAN_EVENT_PREEMPTED:
785 default: 981 default:
786 break; 982 break;
787 } 983 }
@@ -911,7 +1107,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
911 1107
912 memset(status, 0, sizeof(*status)); 1108 memset(status, 0, sizeof(*status));
913 1109
914 ath10k_dbg(ATH10K_DBG_MGMT, 1110 ath10k_dbg(ar, ATH10K_DBG_MGMT,
915 "event mgmt rx status %08x\n", rx_status); 1111 "event mgmt rx status %08x\n", rx_status);
916 1112
917 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 1113 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
@@ -947,9 +1143,9 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
947 1143
948 if (phy_mode == MODE_11B && 1144 if (phy_mode == MODE_11B &&
949 status->band == IEEE80211_BAND_5GHZ) 1145 status->band == IEEE80211_BAND_5GHZ)
950 ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); 1146 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
951 } else { 1147 } else {
952 ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n"); 1148 ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n");
953 status->band = phy_mode_to_band(phy_mode); 1149 status->band = phy_mode_to_band(phy_mode);
954 } 1150 }
955 1151
@@ -979,12 +1175,12 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
979 } 1175 }
980 } 1176 }
981 1177
982 ath10k_dbg(ATH10K_DBG_MGMT, 1178 ath10k_dbg(ar, ATH10K_DBG_MGMT,
983 "event mgmt rx skb %p len %d ftype %02x stype %02x\n", 1179 "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
984 skb, skb->len, 1180 skb, skb->len,
985 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 1181 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
986 1182
987 ath10k_dbg(ATH10K_DBG_MGMT, 1183 ath10k_dbg(ar, ATH10K_DBG_MGMT,
988 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 1184 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
989 status->freq, status->band, status->signal, 1185 status->freq, status->band, status->signal,
990 status->rate_idx); 1186 status->rate_idx);
@@ -1034,21 +1230,26 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
1034 rx_clear_count = __le32_to_cpu(ev->rx_clear_count); 1230 rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
1035 cycle_count = __le32_to_cpu(ev->cycle_count); 1231 cycle_count = __le32_to_cpu(ev->cycle_count);
1036 1232
1037 ath10k_dbg(ATH10K_DBG_WMI, 1233 ath10k_dbg(ar, ATH10K_DBG_WMI,
1038 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", 1234 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
1039 err_code, freq, cmd_flags, noise_floor, rx_clear_count, 1235 err_code, freq, cmd_flags, noise_floor, rx_clear_count,
1040 cycle_count); 1236 cycle_count);
1041 1237
1042 spin_lock_bh(&ar->data_lock); 1238 spin_lock_bh(&ar->data_lock);
1043 1239
1044 if (!ar->scan.in_progress) { 1240 switch (ar->scan.state) {
1045 ath10k_warn("chan info event without a scan request?\n"); 1241 case ATH10K_SCAN_IDLE:
1242 case ATH10K_SCAN_STARTING:
1243 ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
1046 goto exit; 1244 goto exit;
1245 case ATH10K_SCAN_RUNNING:
1246 case ATH10K_SCAN_ABORTING:
1247 break;
1047 } 1248 }
1048 1249
1049 idx = freq_to_idx(ar, freq); 1250 idx = freq_to_idx(ar, freq);
1050 if (idx >= ARRAY_SIZE(ar->survey)) { 1251 if (idx >= ARRAY_SIZE(ar->survey)) {
1051 ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n", 1252 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
1052 freq, idx); 1253 freq, idx);
1053 goto exit; 1254 goto exit;
1054 } 1255 }
@@ -1079,12 +1280,12 @@ exit:
1079 1280
1080static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) 1281static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
1081{ 1282{
1082 ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); 1283 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
1083} 1284}
1084 1285
1085static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) 1286static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
1086{ 1287{
1087 ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", 1288 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
1088 skb->len); 1289 skb->len);
1089 1290
1090 trace_ath10k_wmi_dbglog(skb->data, skb->len); 1291 trace_ath10k_wmi_dbglog(skb->data, skb->len);
@@ -1097,7 +1298,7 @@ static void ath10k_wmi_event_update_stats(struct ath10k *ar,
1097{ 1298{
1098 struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; 1299 struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
1099 1300
1100 ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); 1301 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
1101 1302
1102 ath10k_debug_read_target_stats(ar, ev); 1303 ath10k_debug_read_target_stats(ar, ev);
1103} 1304}
@@ -1107,7 +1308,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
1107{ 1308{
1108 struct wmi_vdev_start_response_event *ev; 1309 struct wmi_vdev_start_response_event *ev;
1109 1310
1110 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); 1311 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
1111 1312
1112 ev = (struct wmi_vdev_start_response_event *)skb->data; 1313 ev = (struct wmi_vdev_start_response_event *)skb->data;
1113 1314
@@ -1120,7 +1321,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
1120static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, 1321static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
1121 struct sk_buff *skb) 1322 struct sk_buff *skb)
1122{ 1323{
1123 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); 1324 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
1124 complete(&ar->vdev_setup_done); 1325 complete(&ar->vdev_setup_done);
1125} 1326}
1126 1327
@@ -1132,14 +1333,14 @@ static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
1132 1333
1133 ev = (struct wmi_peer_sta_kickout_event *)skb->data; 1334 ev = (struct wmi_peer_sta_kickout_event *)skb->data;
1134 1335
1135 ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", 1336 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
1136 ev->peer_macaddr.addr); 1337 ev->peer_macaddr.addr);
1137 1338
1138 rcu_read_lock(); 1339 rcu_read_lock();
1139 1340
1140 sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); 1341 sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
1141 if (!sta) { 1342 if (!sta) {
1142 ath10k_warn("Spurious quick kickout for STA %pM\n", 1343 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
1143 ev->peer_macaddr.addr); 1344 ev->peer_macaddr.addr);
1144 goto exit; 1345 goto exit;
1145 } 1346 }
@@ -1216,7 +1417,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1216 (u8 *)skb_tail_pointer(bcn) - ies); 1417 (u8 *)skb_tail_pointer(bcn) - ies);
1217 if (!ie) { 1418 if (!ie) {
1218 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1419 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1219 ath10k_warn("no tim ie found;\n"); 1420 ath10k_warn(ar, "no tim ie found;\n");
1220 return; 1421 return;
1221 } 1422 }
1222 1423
@@ -1236,12 +1437,12 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1236 ie_len += expand_size; 1437 ie_len += expand_size;
1237 pvm_len += expand_size; 1438 pvm_len += expand_size;
1238 } else { 1439 } else {
1239 ath10k_warn("tim expansion failed\n"); 1440 ath10k_warn(ar, "tim expansion failed\n");
1240 } 1441 }
1241 } 1442 }
1242 1443
1243 if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { 1444 if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
1244 ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); 1445 ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
1245 return; 1446 return;
1246 } 1447 }
1247 1448
@@ -1255,7 +1456,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1255 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; 1456 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
1256 } 1457 }
1257 1458
1258 ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", 1459 ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
1259 tim->dtim_count, tim->dtim_period, 1460 tim->dtim_count, tim->dtim_period,
1260 tim->bitmap_ctrl, pvm_len); 1461 tim->bitmap_ctrl, pvm_len);
1261} 1462}
@@ -1333,7 +1534,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
1333 if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 1534 if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
1334 return; 1535 return;
1335 1536
1336 ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); 1537 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
1337 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { 1538 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
1338 new_len = ath10k_p2p_calc_noa_ie_len(noa); 1539 new_len = ath10k_p2p_calc_noa_ie_len(noa);
1339 if (!new_len) 1540 if (!new_len)
@@ -1381,7 +1582,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1381 ev = (struct wmi_host_swba_event *)skb->data; 1582 ev = (struct wmi_host_swba_event *)skb->data;
1382 map = __le32_to_cpu(ev->vdev_map); 1583 map = __le32_to_cpu(ev->vdev_map);
1383 1584
1384 ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", 1585 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
1385 ev->vdev_map); 1586 ev->vdev_map);
1386 1587
1387 for (; map; map >>= 1, vdev_id++) { 1588 for (; map; map >>= 1, vdev_id++) {
@@ -1391,13 +1592,13 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1391 i++; 1592 i++;
1392 1593
1393 if (i >= WMI_MAX_AP_VDEV) { 1594 if (i >= WMI_MAX_AP_VDEV) {
1394 ath10k_warn("swba has corrupted vdev map\n"); 1595 ath10k_warn(ar, "swba has corrupted vdev map\n");
1395 break; 1596 break;
1396 } 1597 }
1397 1598
1398 bcn_info = &ev->bcn_info[i]; 1599 bcn_info = &ev->bcn_info[i];
1399 1600
1400 ath10k_dbg(ATH10K_DBG_MGMT, 1601 ath10k_dbg(ar, ATH10K_DBG_MGMT,
1401 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n", 1602 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
1402 i, 1603 i,
1403 __le32_to_cpu(bcn_info->tim_info.tim_len), 1604 __le32_to_cpu(bcn_info->tim_info.tim_len),
@@ -1411,7 +1612,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1411 1612
1412 arvif = ath10k_get_arvif(ar, vdev_id); 1613 arvif = ath10k_get_arvif(ar, vdev_id);
1413 if (arvif == NULL) { 1614 if (arvif == NULL) {
1414 ath10k_warn("no vif for vdev_id %d found\n", vdev_id); 1615 ath10k_warn(ar, "no vif for vdev_id %d found\n",
1616 vdev_id);
1415 continue; 1617 continue;
1416 } 1618 }
1417 1619
@@ -1428,7 +1630,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1428 1630
1429 bcn = ieee80211_beacon_get(ar->hw, arvif->vif); 1631 bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
1430 if (!bcn) { 1632 if (!bcn) {
1431 ath10k_warn("could not get mac80211 beacon\n"); 1633 ath10k_warn(ar, "could not get mac80211 beacon\n");
1432 continue; 1634 continue;
1433 } 1635 }
1434 1636
@@ -1440,7 +1642,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1440 1642
1441 if (arvif->beacon) { 1643 if (arvif->beacon) {
1442 if (!arvif->beacon_sent) 1644 if (!arvif->beacon_sent)
1443 ath10k_warn("SWBA overrun on vdev %d\n", 1645 ath10k_warn(ar, "SWBA overrun on vdev %d\n",
1444 arvif->vdev_id); 1646 arvif->vdev_id);
1445 1647
1446 dma_unmap_single(arvif->ar->dev, 1648 dma_unmap_single(arvif->ar->dev,
@@ -1456,7 +1658,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1456 ret = dma_mapping_error(arvif->ar->dev, 1658 ret = dma_mapping_error(arvif->ar->dev,
1457 ATH10K_SKB_CB(bcn)->paddr); 1659 ATH10K_SKB_CB(bcn)->paddr);
1458 if (ret) { 1660 if (ret) {
1459 ath10k_warn("failed to map beacon: %d\n", ret); 1661 ath10k_warn(ar, "failed to map beacon: %d\n", ret);
1460 dev_kfree_skb_any(bcn); 1662 dev_kfree_skb_any(bcn);
1461 goto skip; 1663 goto skip;
1462 } 1664 }
@@ -1473,7 +1675,7 @@ skip:
1473static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, 1675static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
1474 struct sk_buff *skb) 1676 struct sk_buff *skb)
1475{ 1677{
1476 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); 1678 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
1477} 1679}
1478 1680
1479static void ath10k_dfs_radar_report(struct ath10k *ar, 1681static void ath10k_dfs_radar_report(struct ath10k *ar,
@@ -1489,20 +1691,20 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
1489 reg0 = __le32_to_cpu(rr->reg0); 1691 reg0 = __le32_to_cpu(rr->reg0);
1490 reg1 = __le32_to_cpu(rr->reg1); 1692 reg1 = __le32_to_cpu(rr->reg1);
1491 1693
1492 ath10k_dbg(ATH10K_DBG_REGULATORY, 1694 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1493 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", 1695 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
1494 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), 1696 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
1495 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), 1697 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
1496 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), 1698 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
1497 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); 1699 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
1498 ath10k_dbg(ATH10K_DBG_REGULATORY, 1700 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1499 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", 1701 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
1500 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), 1702 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
1501 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), 1703 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
1502 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), 1704 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
1503 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), 1705 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
1504 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); 1706 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
1505 ath10k_dbg(ATH10K_DBG_REGULATORY, 1707 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1506 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n", 1708 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
1507 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), 1709 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
1508 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); 1710 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
@@ -1529,25 +1731,25 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
1529 pe.width = width; 1731 pe.width = width;
1530 pe.rssi = rssi; 1732 pe.rssi = rssi;
1531 1733
1532 ath10k_dbg(ATH10K_DBG_REGULATORY, 1734 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1533 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", 1735 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
1534 pe.freq, pe.width, pe.rssi, pe.ts); 1736 pe.freq, pe.width, pe.rssi, pe.ts);
1535 1737
1536 ATH10K_DFS_STAT_INC(ar, pulses_detected); 1738 ATH10K_DFS_STAT_INC(ar, pulses_detected);
1537 1739
1538 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { 1740 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
1539 ath10k_dbg(ATH10K_DBG_REGULATORY, 1741 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1540 "dfs no pulse pattern detected, yet\n"); 1742 "dfs no pulse pattern detected, yet\n");
1541 return; 1743 return;
1542 } 1744 }
1543 1745
1544 ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n"); 1746 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
1545 ATH10K_DFS_STAT_INC(ar, radar_detected); 1747 ATH10K_DFS_STAT_INC(ar, radar_detected);
1546 1748
1547 /* Control radar events reporting in debugfs file 1749 /* Control radar events reporting in debugfs file
1548 dfs_block_radar_events */ 1750 dfs_block_radar_events */
1549 if (ar->dfs_block_radar_events) { 1751 if (ar->dfs_block_radar_events) {
1550 ath10k_info("DFS Radar detected, but ignored as requested\n"); 1752 ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
1551 return; 1753 return;
1552 } 1754 }
1553 1755
@@ -1566,13 +1768,13 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
1566 reg1 = __le32_to_cpu(fftr->reg1); 1768 reg1 = __le32_to_cpu(fftr->reg1);
1567 rssi = event->hdr.rssi_combined; 1769 rssi = event->hdr.rssi_combined;
1568 1770
1569 ath10k_dbg(ATH10K_DBG_REGULATORY, 1771 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1570 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", 1772 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
1571 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), 1773 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
1572 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), 1774 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
1573 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), 1775 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
1574 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); 1776 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
1575 ath10k_dbg(ATH10K_DBG_REGULATORY, 1777 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1576 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", 1778 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
1577 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), 1779 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
1578 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), 1780 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
@@ -1584,7 +1786,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
1584 /* false event detection */ 1786 /* false event detection */
1585 if (rssi == DFS_RSSI_POSSIBLY_FALSE && 1787 if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
1586 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { 1788 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
1587 ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); 1789 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
1588 ATH10K_DFS_STAT_INC(ar, pulses_discarded); 1790 ATH10K_DFS_STAT_INC(ar, pulses_discarded);
1589 return -EINVAL; 1791 return -EINVAL;
1590 } 1792 }
@@ -1603,7 +1805,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
1603 u8 *tlv_buf; 1805 u8 *tlv_buf;
1604 1806
1605 buf_len = __le32_to_cpu(event->hdr.buf_len); 1807 buf_len = __le32_to_cpu(event->hdr.buf_len);
1606 ath10k_dbg(ATH10K_DBG_REGULATORY, 1808 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1607 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", 1809 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
1608 event->hdr.phy_err_code, event->hdr.rssi_combined, 1810 event->hdr.phy_err_code, event->hdr.rssi_combined,
1609 __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); 1811 __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
@@ -1616,21 +1818,22 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
1616 1818
1617 while (i < buf_len) { 1819 while (i < buf_len) {
1618 if (i + sizeof(*tlv) > buf_len) { 1820 if (i + sizeof(*tlv) > buf_len) {
1619 ath10k_warn("too short buf for tlv header (%d)\n", i); 1821 ath10k_warn(ar, "too short buf for tlv header (%d)\n",
1822 i);
1620 return; 1823 return;
1621 } 1824 }
1622 1825
1623 tlv = (struct phyerr_tlv *)&event->bufp[i]; 1826 tlv = (struct phyerr_tlv *)&event->bufp[i];
1624 tlv_len = __le16_to_cpu(tlv->len); 1827 tlv_len = __le16_to_cpu(tlv->len);
1625 tlv_buf = &event->bufp[i + sizeof(*tlv)]; 1828 tlv_buf = &event->bufp[i + sizeof(*tlv)];
1626 ath10k_dbg(ATH10K_DBG_REGULATORY, 1829 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1627 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", 1830 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
1628 tlv_len, tlv->tag, tlv->sig); 1831 tlv_len, tlv->tag, tlv->sig);
1629 1832
1630 switch (tlv->tag) { 1833 switch (tlv->tag) {
1631 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: 1834 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
1632 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { 1835 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
1633 ath10k_warn("too short radar pulse summary (%d)\n", 1836 ath10k_warn(ar, "too short radar pulse summary (%d)\n",
1634 i); 1837 i);
1635 return; 1838 return;
1636 } 1839 }
@@ -1640,7 +1843,8 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
1640 break; 1843 break;
1641 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: 1844 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
1642 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { 1845 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
1643 ath10k_warn("too short fft report (%d)\n", i); 1846 ath10k_warn(ar, "too short fft report (%d)\n",
1847 i);
1644 return; 1848 return;
1645 } 1849 }
1646 1850
@@ -1659,7 +1863,54 @@ static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
1659 struct wmi_single_phyerr_rx_event *event, 1863 struct wmi_single_phyerr_rx_event *event,
1660 u64 tsf) 1864 u64 tsf)
1661{ 1865{
1662 ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n"); 1866 int buf_len, tlv_len, res, i = 0;
1867 struct phyerr_tlv *tlv;
1868 u8 *tlv_buf;
1869 struct phyerr_fft_report *fftr;
1870 size_t fftr_len;
1871
1872 buf_len = __le32_to_cpu(event->hdr.buf_len);
1873
1874 while (i < buf_len) {
1875 if (i + sizeof(*tlv) > buf_len) {
1876 ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
1877 i);
1878 return;
1879 }
1880
1881 tlv = (struct phyerr_tlv *)&event->bufp[i];
1882 tlv_len = __le16_to_cpu(tlv->len);
1883 tlv_buf = &event->bufp[i + sizeof(*tlv)];
1884
1885 if (i + sizeof(*tlv) + tlv_len > buf_len) {
1886 ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
1887 i);
1888 return;
1889 }
1890
1891 switch (tlv->tag) {
1892 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
1893 if (sizeof(*fftr) > tlv_len) {
1894 ath10k_warn(ar, "failed to parse fft report at byte %d\n",
1895 i);
1896 return;
1897 }
1898
1899 fftr_len = tlv_len - sizeof(*fftr);
1900 fftr = (struct phyerr_fft_report *)tlv_buf;
1901 res = ath10k_spectral_process_fft(ar, event,
1902 fftr, fftr_len,
1903 tsf);
1904 if (res < 0) {
1905 ath10k_warn(ar, "failed to process fft report: %d\n",
1906 res);
1907 return;
1908 }
1909 break;
1910 }
1911
1912 i += sizeof(*tlv) + tlv_len;
1913 }
1663} 1914}
1664 1915
1665static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) 1916static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
@@ -1674,7 +1925,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1674 1925
1675 /* Check if combined event available */ 1926 /* Check if combined event available */
1676 if (left_len < sizeof(*comb_event)) { 1927 if (left_len < sizeof(*comb_event)) {
1677 ath10k_warn("wmi phyerr combined event wrong len\n"); 1928 ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
1678 return; 1929 return;
1679 } 1930 }
1680 1931
@@ -1688,7 +1939,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1688 tsf <<= 32; 1939 tsf <<= 32;
1689 tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); 1940 tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
1690 1941
1691 ath10k_dbg(ATH10K_DBG_WMI, 1942 ath10k_dbg(ar, ATH10K_DBG_WMI,
1692 "wmi event phyerr count %d tsf64 0x%llX\n", 1943 "wmi event phyerr count %d tsf64 0x%llX\n",
1693 count, tsf); 1944 count, tsf);
1694 1945
@@ -1696,7 +1947,8 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1696 for (i = 0; i < count; i++) { 1947 for (i = 0; i < count; i++) {
1697 /* Check if we can read event header */ 1948 /* Check if we can read event header */
1698 if (left_len < sizeof(*event)) { 1949 if (left_len < sizeof(*event)) {
1699 ath10k_warn("single event (%d) wrong head len\n", i); 1950 ath10k_warn(ar, "single event (%d) wrong head len\n",
1951 i);
1700 return; 1952 return;
1701 } 1953 }
1702 1954
@@ -1706,7 +1958,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1706 phy_err_code = event->hdr.phy_err_code; 1958 phy_err_code = event->hdr.phy_err_code;
1707 1959
1708 if (left_len < buf_len) { 1960 if (left_len < buf_len) {
1709 ath10k_warn("single event (%d) wrong buf len\n", i); 1961 ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
1710 return; 1962 return;
1711 } 1963 }
1712 1964
@@ -1733,13 +1985,13 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1733 1985
1734static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) 1986static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
1735{ 1987{
1736 ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); 1988 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
1737} 1989}
1738 1990
1739static void ath10k_wmi_event_profile_match(struct ath10k *ar, 1991static void ath10k_wmi_event_profile_match(struct ath10k *ar,
1740 struct sk_buff *skb) 1992 struct sk_buff *skb)
1741{ 1993{
1742 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); 1994 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
1743} 1995}
1744 1996
1745static void ath10k_wmi_event_debug_print(struct ath10k *ar, 1997static void ath10k_wmi_event_debug_print(struct ath10k *ar,
@@ -1764,7 +2016,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
1764 } 2016 }
1765 2017
1766 if (i == sizeof(buf) - 1) 2018 if (i == sizeof(buf) - 1)
1767 ath10k_warn("wmi debug print truncated: %d\n", skb->len); 2019 ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
1768 2020
1769 /* for some reason the debug prints end with \n, remove that */ 2021 /* for some reason the debug prints end with \n, remove that */
1770 if (skb->data[i - 1] == '\n') 2022 if (skb->data[i - 1] == '\n')
@@ -1773,108 +2025,108 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
1773 /* the last byte is always reserved for the null character */ 2025 /* the last byte is always reserved for the null character */
1774 buf[i] = '\0'; 2026 buf[i] = '\0';
1775 2027
1776 ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); 2028 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
1777} 2029}
1778 2030
1779static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) 2031static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
1780{ 2032{
1781 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); 2033 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
1782} 2034}
1783 2035
1784static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, 2036static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
1785 struct sk_buff *skb) 2037 struct sk_buff *skb)
1786{ 2038{
1787 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); 2039 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
1788} 2040}
1789 2041
1790static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, 2042static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
1791 struct sk_buff *skb) 2043 struct sk_buff *skb)
1792{ 2044{
1793 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); 2045 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
1794} 2046}
1795 2047
1796static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, 2048static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
1797 struct sk_buff *skb) 2049 struct sk_buff *skb)
1798{ 2050{
1799 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); 2051 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
1800} 2052}
1801 2053
1802static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, 2054static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
1803 struct sk_buff *skb) 2055 struct sk_buff *skb)
1804{ 2056{
1805 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); 2057 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
1806} 2058}
1807 2059
1808static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, 2060static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
1809 struct sk_buff *skb) 2061 struct sk_buff *skb)
1810{ 2062{
1811 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); 2063 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
1812} 2064}
1813 2065
1814static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, 2066static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
1815 struct sk_buff *skb) 2067 struct sk_buff *skb)
1816{ 2068{
1817 ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); 2069 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
1818} 2070}
1819 2071
1820static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, 2072static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
1821 struct sk_buff *skb) 2073 struct sk_buff *skb)
1822{ 2074{
1823 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); 2075 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
1824} 2076}
1825 2077
1826static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, 2078static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
1827 struct sk_buff *skb) 2079 struct sk_buff *skb)
1828{ 2080{
1829 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); 2081 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
1830} 2082}
1831 2083
1832static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, 2084static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
1833 struct sk_buff *skb) 2085 struct sk_buff *skb)
1834{ 2086{
1835 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); 2087 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
1836} 2088}
1837 2089
1838static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, 2090static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
1839 struct sk_buff *skb) 2091 struct sk_buff *skb)
1840{ 2092{
1841 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); 2093 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
1842} 2094}
1843 2095
1844static void ath10k_wmi_event_delba_complete(struct ath10k *ar, 2096static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
1845 struct sk_buff *skb) 2097 struct sk_buff *skb)
1846{ 2098{
1847 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); 2099 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
1848} 2100}
1849 2101
1850static void ath10k_wmi_event_addba_complete(struct ath10k *ar, 2102static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
1851 struct sk_buff *skb) 2103 struct sk_buff *skb)
1852{ 2104{
1853 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); 2105 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
1854} 2106}
1855 2107
1856static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, 2108static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
1857 struct sk_buff *skb) 2109 struct sk_buff *skb)
1858{ 2110{
1859 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 2111 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
1860} 2112}
1861 2113
1862static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, 2114static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
1863 struct sk_buff *skb) 2115 struct sk_buff *skb)
1864{ 2116{
1865 ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); 2117 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
1866} 2118}
1867 2119
1868static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, 2120static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
1869 struct sk_buff *skb) 2121 struct sk_buff *skb)
1870{ 2122{
1871 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); 2123 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
1872} 2124}
1873 2125
1874static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, 2126static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
1875 struct sk_buff *skb) 2127 struct sk_buff *skb)
1876{ 2128{
1877 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); 2129 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
1878} 2130}
1879 2131
1880static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, 2132static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
@@ -1894,7 +2146,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
1894 &paddr, 2146 &paddr,
1895 GFP_ATOMIC); 2147 GFP_ATOMIC);
1896 if (!ar->wmi.mem_chunks[idx].vaddr) { 2148 if (!ar->wmi.mem_chunks[idx].vaddr) {
1897 ath10k_warn("failed to allocate memory chunk\n"); 2149 ath10k_warn(ar, "failed to allocate memory chunk\n");
1898 return -ENOMEM; 2150 return -ENOMEM;
1899 } 2151 }
1900 2152
@@ -1912,9 +2164,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
1912 struct sk_buff *skb) 2164 struct sk_buff *skb)
1913{ 2165{
1914 struct wmi_service_ready_event *ev = (void *)skb->data; 2166 struct wmi_service_ready_event *ev = (void *)skb->data;
2167 DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {};
1915 2168
1916 if (skb->len < sizeof(*ev)) { 2169 if (skb->len < sizeof(*ev)) {
1917 ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", 2170 ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
1918 skb->len, sizeof(*ev)); 2171 skb->len, sizeof(*ev));
1919 return; 2172 return;
1920 } 2173 }
@@ -1937,7 +2190,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
1937 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); 2190 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
1938 2191
1939 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 2192 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
1940 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 2193 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
1941 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 2194 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
1942 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 2195 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
1943 } 2196 }
@@ -1945,8 +2198,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
1945 ar->ath_common.regulatory.current_rd = 2198 ar->ath_common.regulatory.current_rd =
1946 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); 2199 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
1947 2200
1948 ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, 2201 wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap);
1949 sizeof(ev->wmi_service_bitmap)); 2202 ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
2203 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
2204 ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
1950 2205
1951 if (strlen(ar->hw->wiphy->fw_version) == 0) { 2206 if (strlen(ar->hw->wiphy->fw_version) == 0) {
1952 snprintf(ar->hw->wiphy->fw_version, 2207 snprintf(ar->hw->wiphy->fw_version,
@@ -1960,11 +2215,11 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
1960 2215
1961 /* FIXME: it probably should be better to support this */ 2216 /* FIXME: it probably should be better to support this */
1962 if (__le32_to_cpu(ev->num_mem_reqs) > 0) { 2217 if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
1963 ath10k_warn("target requested %d memory chunks; ignoring\n", 2218 ath10k_warn(ar, "target requested %d memory chunks; ignoring\n",
1964 __le32_to_cpu(ev->num_mem_reqs)); 2219 __le32_to_cpu(ev->num_mem_reqs));
1965 } 2220 }
1966 2221
1967 ath10k_dbg(ATH10K_DBG_WMI, 2222 ath10k_dbg(ar, ATH10K_DBG_WMI,
1968 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 2223 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
1969 __le32_to_cpu(ev->sw_version), 2224 __le32_to_cpu(ev->sw_version),
1970 __le32_to_cpu(ev->sw_version_1), 2225 __le32_to_cpu(ev->sw_version_1),
@@ -1986,9 +2241,10 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
1986 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; 2241 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
1987 int ret; 2242 int ret;
1988 struct wmi_service_ready_event_10x *ev = (void *)skb->data; 2243 struct wmi_service_ready_event_10x *ev = (void *)skb->data;
2244 DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {};
1989 2245
1990 if (skb->len < sizeof(*ev)) { 2246 if (skb->len < sizeof(*ev)) {
1991 ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", 2247 ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
1992 skb->len, sizeof(*ev)); 2248 skb->len, sizeof(*ev));
1993 return; 2249 return;
1994 } 2250 }
@@ -2004,7 +2260,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2004 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 2260 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
2005 2261
2006 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 2262 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
2007 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 2263 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
2008 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 2264 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
2009 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 2265 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
2010 } 2266 }
@@ -2012,8 +2268,10 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2012 ar->ath_common.regulatory.current_rd = 2268 ar->ath_common.regulatory.current_rd =
2013 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); 2269 __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
2014 2270
2015 ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, 2271 wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap);
2016 sizeof(ev->wmi_service_bitmap)); 2272 ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
2273 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
2274 ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
2017 2275
2018 if (strlen(ar->hw->wiphy->fw_version) == 0) { 2276 if (strlen(ar->hw->wiphy->fw_version) == 0) {
2019 snprintf(ar->hw->wiphy->fw_version, 2277 snprintf(ar->hw->wiphy->fw_version,
@@ -2026,7 +2284,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2026 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); 2284 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
2027 2285
2028 if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { 2286 if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
2029 ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", 2287 ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
2030 num_mem_reqs); 2288 num_mem_reqs);
2031 return; 2289 return;
2032 } 2290 }
@@ -2034,7 +2292,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2034 if (!num_mem_reqs) 2292 if (!num_mem_reqs)
2035 goto exit; 2293 goto exit;
2036 2294
2037 ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", 2295 ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
2038 num_mem_reqs); 2296 num_mem_reqs);
2039 2297
2040 for (i = 0; i < num_mem_reqs; ++i) { 2298 for (i = 0; i < num_mem_reqs; ++i) {
@@ -2052,7 +2310,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2052 else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) 2310 else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
2053 num_units = TARGET_10X_NUM_VDEVS + 1; 2311 num_units = TARGET_10X_NUM_VDEVS + 1;
2054 2312
2055 ath10k_dbg(ATH10K_DBG_WMI, 2313 ath10k_dbg(ar, ATH10K_DBG_WMI,
2056 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", 2314 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
2057 req_id, 2315 req_id,
2058 __le32_to_cpu(ev->mem_reqs[i].num_units), 2316 __le32_to_cpu(ev->mem_reqs[i].num_units),
@@ -2067,7 +2325,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2067 } 2325 }
2068 2326
2069exit: 2327exit:
2070 ath10k_dbg(ATH10K_DBG_WMI, 2328 ath10k_dbg(ar, ATH10K_DBG_WMI,
2071 "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 2329 "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
2072 __le32_to_cpu(ev->sw_version), 2330 __le32_to_cpu(ev->sw_version),
2073 __le32_to_cpu(ev->abi_version), 2331 __le32_to_cpu(ev->abi_version),
@@ -2091,7 +2349,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
2091 2349
2092 memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); 2350 memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
2093 2351
2094 ath10k_dbg(ATH10K_DBG_WMI, 2352 ath10k_dbg(ar, ATH10K_DBG_WMI,
2095 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n", 2353 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
2096 __le32_to_cpu(ev->sw_version), 2354 __le32_to_cpu(ev->sw_version),
2097 __le32_to_cpu(ev->abi_version), 2355 __le32_to_cpu(ev->abi_version),
@@ -2211,7 +2469,7 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
2211 ath10k_wmi_ready_event_rx(ar, skb); 2469 ath10k_wmi_ready_event_rx(ar, skb);
2212 break; 2470 break;
2213 default: 2471 default:
2214 ath10k_warn("Unknown eventid: %d\n", id); 2472 ath10k_warn(ar, "Unknown eventid: %d\n", id);
2215 break; 2473 break;
2216 } 2474 }
2217 2475
@@ -2318,27 +2576,151 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
2318 ath10k_wmi_ready_event_rx(ar, skb); 2576 ath10k_wmi_ready_event_rx(ar, skb);
2319 break; 2577 break;
2320 default: 2578 default:
2321 ath10k_warn("Unknown eventid: %d\n", id); 2579 ath10k_warn(ar, "Unknown eventid: %d\n", id);
2322 break; 2580 break;
2323 } 2581 }
2324 2582
2325 dev_kfree_skb(skb); 2583 dev_kfree_skb(skb);
2326} 2584}
2327 2585
2586static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
2587{
2588 struct wmi_cmd_hdr *cmd_hdr;
2589 enum wmi_10_2_event_id id;
2590
2591 cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
2592 id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
2593
2594 if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
2595 return;
2596
2597 trace_ath10k_wmi_event(id, skb->data, skb->len);
2598
2599 switch (id) {
2600 case WMI_10_2_MGMT_RX_EVENTID:
2601 ath10k_wmi_event_mgmt_rx(ar, skb);
2602 /* mgmt_rx() owns the skb now! */
2603 return;
2604 case WMI_10_2_SCAN_EVENTID:
2605 ath10k_wmi_event_scan(ar, skb);
2606 break;
2607 case WMI_10_2_CHAN_INFO_EVENTID:
2608 ath10k_wmi_event_chan_info(ar, skb);
2609 break;
2610 case WMI_10_2_ECHO_EVENTID:
2611 ath10k_wmi_event_echo(ar, skb);
2612 break;
2613 case WMI_10_2_DEBUG_MESG_EVENTID:
2614 ath10k_wmi_event_debug_mesg(ar, skb);
2615 break;
2616 case WMI_10_2_UPDATE_STATS_EVENTID:
2617 ath10k_wmi_event_update_stats(ar, skb);
2618 break;
2619 case WMI_10_2_VDEV_START_RESP_EVENTID:
2620 ath10k_wmi_event_vdev_start_resp(ar, skb);
2621 break;
2622 case WMI_10_2_VDEV_STOPPED_EVENTID:
2623 ath10k_wmi_event_vdev_stopped(ar, skb);
2624 break;
2625 case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
2626 ath10k_wmi_event_peer_sta_kickout(ar, skb);
2627 break;
2628 case WMI_10_2_HOST_SWBA_EVENTID:
2629 ath10k_wmi_event_host_swba(ar, skb);
2630 break;
2631 case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
2632 ath10k_wmi_event_tbttoffset_update(ar, skb);
2633 break;
2634 case WMI_10_2_PHYERR_EVENTID:
2635 ath10k_wmi_event_phyerr(ar, skb);
2636 break;
2637 case WMI_10_2_ROAM_EVENTID:
2638 ath10k_wmi_event_roam(ar, skb);
2639 break;
2640 case WMI_10_2_PROFILE_MATCH:
2641 ath10k_wmi_event_profile_match(ar, skb);
2642 break;
2643 case WMI_10_2_DEBUG_PRINT_EVENTID:
2644 ath10k_wmi_event_debug_print(ar, skb);
2645 break;
2646 case WMI_10_2_PDEV_QVIT_EVENTID:
2647 ath10k_wmi_event_pdev_qvit(ar, skb);
2648 break;
2649 case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
2650 ath10k_wmi_event_wlan_profile_data(ar, skb);
2651 break;
2652 case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
2653 ath10k_wmi_event_rtt_measurement_report(ar, skb);
2654 break;
2655 case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
2656 ath10k_wmi_event_tsf_measurement_report(ar, skb);
2657 break;
2658 case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
2659 ath10k_wmi_event_rtt_error_report(ar, skb);
2660 break;
2661 case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
2662 ath10k_wmi_event_wow_wakeup_host(ar, skb);
2663 break;
2664 case WMI_10_2_DCS_INTERFERENCE_EVENTID:
2665 ath10k_wmi_event_dcs_interference(ar, skb);
2666 break;
2667 case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
2668 ath10k_wmi_event_pdev_tpc_config(ar, skb);
2669 break;
2670 case WMI_10_2_INST_RSSI_STATS_EVENTID:
2671 ath10k_wmi_event_inst_rssi_stats(ar, skb);
2672 break;
2673 case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
2674 ath10k_wmi_event_vdev_standby_req(ar, skb);
2675 break;
2676 case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
2677 ath10k_wmi_event_vdev_resume_req(ar, skb);
2678 break;
2679 case WMI_10_2_SERVICE_READY_EVENTID:
2680 ath10k_wmi_10x_service_ready_event_rx(ar, skb);
2681 break;
2682 case WMI_10_2_READY_EVENTID:
2683 ath10k_wmi_ready_event_rx(ar, skb);
2684 break;
2685 case WMI_10_2_RTT_KEEPALIVE_EVENTID:
2686 case WMI_10_2_GPIO_INPUT_EVENTID:
2687 case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
2688 case WMI_10_2_GENERIC_BUFFER_EVENTID:
2689 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
2690 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
2691 case WMI_10_2_WDS_PEER_EVENTID:
2692 ath10k_dbg(ar, ATH10K_DBG_WMI,
2693 "received event id %d not implemented\n", id);
2694 break;
2695 default:
2696 ath10k_warn(ar, "Unknown eventid: %d\n", id);
2697 break;
2698 }
2699
2700 dev_kfree_skb(skb);
2701}
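The 10.2 rx path, like the existing ones, pulls the event id out of the leading wmi_cmd_hdr word with the MS() mask/shift helper before stripping the header. A small standalone sketch of that extraction, assuming a 24-bit command-id field in the low bits of a little-endian header word (the exact mask/shift values are an assumption, not taken from this hunk):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <endian.h>

	/* Assumed field layout, mirroring the driver's MS() helper. */
	#define WMI_CMD_HDR_CMD_ID_MASK 0x00ffffff
	#define WMI_CMD_HDR_CMD_ID_LSB  0

	#define MS(val, field) (((val) & field##_MASK) >> field##_LSB)

	static uint32_t wmi_event_id(const uint8_t *skb_data)
	{
		uint32_t hdr;

		/* the header travels little-endian on the bus */
		memcpy(&hdr, skb_data, sizeof(hdr));
		return MS(le32toh(hdr), WMI_CMD_HDR_CMD_ID);
	}

	int main(void)
	{
		uint8_t frame[4] = { 0x47, 0x90, 0x00, 0x00 };	/* id 0x9047 in LE */

		printf("event id: 0x%x\n", wmi_event_id(frame));
		return 0;
	}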
2328 2702
2329static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) 2703static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
2330{ 2704{
2331 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 2705 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
2332 ath10k_wmi_10x_process_rx(ar, skb); 2706 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
2333 else 2707 ath10k_wmi_10_2_process_rx(ar, skb);
2708 else
2709 ath10k_wmi_10x_process_rx(ar, skb);
2710 } else {
2334 ath10k_wmi_main_process_rx(ar, skb); 2711 ath10k_wmi_main_process_rx(ar, skb);
2712 }
2335} 2713}
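The dispatcher refines the existing 10.x branch: as the nested test_bit() calls suggest, firmware advertising WMI_10_2 is expected to advertise WMI_10X as well, so the 10.2 handler is selected first inside the 10.x branch. A compilable toy model of that selection, with invented feature names:

	#include <stdio.h>

	/* Stand-ins for the ath10k feature bits and rx handlers. */
	enum fw_feature { FW_WMI_10X, FW_WMI_10_2 };

	static int test_feat(unsigned long features, enum fw_feature f)
	{
		return (features >> f) & 1;
	}

	static void rx_main(void) { puts("main WMI rx"); }
	static void rx_10x(void)  { puts("10.x WMI rx"); }
	static void rx_10_2(void) { puts("10.2 WMI rx"); }

	static void process_rx(unsigned long features)
	{
		if (test_feat(features, FW_WMI_10X)) {
			if (test_feat(features, FW_WMI_10_2))
				rx_10_2();
			else
				rx_10x();
		} else {
			rx_main();
		}
	}

	int main(void)
	{
		process_rx((1UL << FW_WMI_10X) | (1UL << FW_WMI_10_2)); /* 10.2 */
		process_rx(1UL << FW_WMI_10X);                          /* 10.x */
		process_rx(0);                                          /* main */
		return 0;
	}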
2336 2714
2337/* WMI Initialization functions */ 2715/* WMI Initialization functions */
2338int ath10k_wmi_attach(struct ath10k *ar) 2716int ath10k_wmi_attach(struct ath10k *ar)
2339{ 2717{
2340 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 2718 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
2341 ar->wmi.cmd = &wmi_10x_cmd_map; 2719 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
2720 ar->wmi.cmd = &wmi_10_2_cmd_map;
2721 else
2722 ar->wmi.cmd = &wmi_10x_cmd_map;
2723
2342 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; 2724 ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
2343 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; 2725 ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
2344 } else { 2726 } else {
@@ -2388,7 +2770,7 @@ int ath10k_wmi_connect(struct ath10k *ar)
2388 2770
2389 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); 2771 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
2390 if (status) { 2772 if (status) {
2391 ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", 2773 ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
2392 status); 2774 status);
2393 return status; 2775 return status;
2394 } 2776 }
@@ -2404,7 +2786,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2404 struct wmi_pdev_set_regdomain_cmd *cmd; 2786 struct wmi_pdev_set_regdomain_cmd *cmd;
2405 struct sk_buff *skb; 2787 struct sk_buff *skb;
2406 2788
2407 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2789 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2408 if (!skb) 2790 if (!skb)
2409 return -ENOMEM; 2791 return -ENOMEM;
2410 2792
@@ -2415,7 +2797,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2415 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); 2797 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
2416 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 2798 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
2417 2799
2418 ath10k_dbg(ATH10K_DBG_WMI, 2800 ath10k_dbg(ar, ATH10K_DBG_WMI,
2419 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", 2801 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
2420 rd, rd2g, rd5g, ctl2g, ctl5g); 2802 rd, rd2g, rd5g, ctl2g, ctl5g);
2421 2803
@@ -2431,7 +2813,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2431 struct wmi_pdev_set_regdomain_cmd_10x *cmd; 2813 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
2432 struct sk_buff *skb; 2814 struct sk_buff *skb;
2433 2815
2434 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2816 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2435 if (!skb) 2817 if (!skb)
2436 return -ENOMEM; 2818 return -ENOMEM;
2437 2819
@@ -2443,7 +2825,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2443 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 2825 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
2444 cmd->dfs_domain = __cpu_to_le32(dfs_reg); 2826 cmd->dfs_domain = __cpu_to_le32(dfs_reg);
2445 2827
2446 ath10k_dbg(ATH10K_DBG_WMI, 2828 ath10k_dbg(ar, ATH10K_DBG_WMI,
2447 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", 2829 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
2448 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); 2830 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
2449 2831
@@ -2473,7 +2855,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2473 if (arg->passive) 2855 if (arg->passive)
2474 return -EINVAL; 2856 return -EINVAL;
2475 2857
2476 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2858 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2477 if (!skb) 2859 if (!skb)
2478 return -ENOMEM; 2860 return -ENOMEM;
2479 2861
@@ -2491,7 +2873,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2491 cmd->chan.reg_classid = arg->reg_class_id; 2873 cmd->chan.reg_classid = arg->reg_class_id;
2492 cmd->chan.antenna_max = arg->max_antenna_gain; 2874 cmd->chan.antenna_max = arg->max_antenna_gain;
2493 2875
2494 ath10k_dbg(ATH10K_DBG_WMI, 2876 ath10k_dbg(ar, ATH10K_DBG_WMI,
2495 "wmi set channel mode %d freq %d\n", 2877 "wmi set channel mode %d freq %d\n",
2496 arg->mode, arg->freq); 2878 arg->mode, arg->freq);
2497 2879
@@ -2504,7 +2886,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
2504 struct wmi_pdev_suspend_cmd *cmd; 2886 struct wmi_pdev_suspend_cmd *cmd;
2505 struct sk_buff *skb; 2887 struct sk_buff *skb;
2506 2888
2507 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2889 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2508 if (!skb) 2890 if (!skb)
2509 return -ENOMEM; 2891 return -ENOMEM;
2510 2892
@@ -2518,7 +2900,7 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
2518{ 2900{
2519 struct sk_buff *skb; 2901 struct sk_buff *skb;
2520 2902
2521 skb = ath10k_wmi_alloc_skb(0); 2903 skb = ath10k_wmi_alloc_skb(ar, 0);
2522 if (skb == NULL) 2904 if (skb == NULL)
2523 return -ENOMEM; 2905 return -ENOMEM;
2524 2906
@@ -2531,11 +2913,12 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
2531 struct sk_buff *skb; 2913 struct sk_buff *skb;
2532 2914
2533 if (id == WMI_PDEV_PARAM_UNSUPPORTED) { 2915 if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
2534 ath10k_warn("pdev param %d not supported by firmware\n", id); 2916 ath10k_warn(ar, "pdev param %d not supported by firmware\n",
2917 id);
2535 return -EOPNOTSUPP; 2918 return -EOPNOTSUPP;
2536 } 2919 }
2537 2920
2538 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2921 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2539 if (!skb) 2922 if (!skb)
2540 return -ENOMEM; 2923 return -ENOMEM;
2541 2924
@@ -2543,7 +2926,7 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
2543 cmd->param_id = __cpu_to_le32(id); 2926 cmd->param_id = __cpu_to_le32(id);
2544 cmd->param_value = __cpu_to_le32(value); 2927 cmd->param_value = __cpu_to_le32(value);
2545 2928
2546 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", 2929 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
2547 id, value); 2930 id, value);
2548 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); 2931 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
2549} 2932}
@@ -2610,7 +2993,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
2610 len = sizeof(*cmd) + 2993 len = sizeof(*cmd) +
2611 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 2994 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
2612 2995
2613 buf = ath10k_wmi_alloc_skb(len); 2996 buf = ath10k_wmi_alloc_skb(ar, len);
2614 if (!buf) 2997 if (!buf)
2615 return -ENOMEM; 2998 return -ENOMEM;
2616 2999
@@ -2621,7 +3004,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
2621 goto out; 3004 goto out;
2622 } 3005 }
2623 3006
2624 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 3007 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
2625 ar->wmi.num_mem_chunks); 3008 ar->wmi.num_mem_chunks);
2626 3009
2627 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 3010 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -2634,7 +3017,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
2634 cmd->host_mem_chunks[i].req_id = 3017 cmd->host_mem_chunks[i].req_id =
2635 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 3018 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
2636 3019
2637 ath10k_dbg(ATH10K_DBG_WMI, 3020 ath10k_dbg(ar, ATH10K_DBG_WMI,
2638 "wmi chunk %d len %d requested, addr 0x%llx\n", 3021 "wmi chunk %d len %d requested, addr 0x%llx\n",
2639 i, 3022 i,
2640 ar->wmi.mem_chunks[i].len, 3023 ar->wmi.mem_chunks[i].len,
@@ -2643,7 +3026,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
2643out: 3026out:
2644 memcpy(&cmd->resource_config, &config, sizeof(config)); 3027 memcpy(&cmd->resource_config, &config, sizeof(config));
2645 3028
2646 ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); 3029 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
2647 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3030 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
2648} 3031}
2649 3032
@@ -2701,7 +3084,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
2701 len = sizeof(*cmd) + 3084 len = sizeof(*cmd) +
2702 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 3085 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
2703 3086
2704 buf = ath10k_wmi_alloc_skb(len); 3087 buf = ath10k_wmi_alloc_skb(ar, len);
2705 if (!buf) 3088 if (!buf)
2706 return -ENOMEM; 3089 return -ENOMEM;
2707 3090
@@ -2712,7 +3095,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
2712 goto out; 3095 goto out;
2713 } 3096 }
2714 3097
2715 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 3098 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
2716 ar->wmi.num_mem_chunks); 3099 ar->wmi.num_mem_chunks);
2717 3100
2718 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 3101 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -2725,7 +3108,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
2725 cmd->host_mem_chunks[i].req_id = 3108 cmd->host_mem_chunks[i].req_id =
2726 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 3109 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
2727 3110
2728 ath10k_dbg(ATH10K_DBG_WMI, 3111 ath10k_dbg(ar, ATH10K_DBG_WMI,
2729 "wmi chunk %d len %d requested, addr 0x%llx\n", 3112 "wmi chunk %d len %d requested, addr 0x%llx\n",
2730 i, 3113 i,
2731 ar->wmi.mem_chunks[i].len, 3114 ar->wmi.mem_chunks[i].len,
@@ -2734,7 +3117,98 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
2734out: 3117out:
2735 memcpy(&cmd->resource_config, &config, sizeof(config)); 3118 memcpy(&cmd->resource_config, &config, sizeof(config));
2736 3119
2737 ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n"); 3120 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
3121 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
3122}
3123
3124static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
3125{
3126 struct wmi_init_cmd_10_2 *cmd;
3127 struct sk_buff *buf;
3128 struct wmi_resource_config_10x config = {};
3129 u32 len, val;
3130 int i;
3131
3132 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
3133 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
3134 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
3135 config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
3136 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
3137 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
3138 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
3139 config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
3140 config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
3141 config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
3142 config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
3143 config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
3144
3145 config.scan_max_pending_reqs =
3146 __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
3147
3148 config.bmiss_offload_max_vdev =
3149 __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
3150
3151 config.roam_offload_max_vdev =
3152 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
3153
3154 config.roam_offload_max_ap_profiles =
3155 __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
3156
3157 config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
3158 config.num_mcast_table_elems =
3159 __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
3160
3161 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
3162 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
3163 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
3164 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
3165 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
3166
3167 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
3168 config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
3169
3170 config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
3171
3172 config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
3173 config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
3174
3175 len = sizeof(*cmd) +
3176 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
3177
3178 buf = ath10k_wmi_alloc_skb(ar, len);
3179 if (!buf)
3180 return -ENOMEM;
3181
3182 cmd = (struct wmi_init_cmd_10_2 *)buf->data;
3183
3184 if (ar->wmi.num_mem_chunks == 0) {
3185 cmd->num_host_mem_chunks = 0;
3186 goto out;
3187 }
3188
3189 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
3190 ar->wmi.num_mem_chunks);
3191
3192 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
3193
3194 for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
3195 cmd->host_mem_chunks[i].ptr =
3196 __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
3197 cmd->host_mem_chunks[i].size =
3198 __cpu_to_le32(ar->wmi.mem_chunks[i].len);
3199 cmd->host_mem_chunks[i].req_id =
3200 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
3201
3202 ath10k_dbg(ar, ATH10K_DBG_WMI,
3203 "wmi chunk %d len %d requested, addr 0x%llx\n",
3204 i,
3205 ar->wmi.mem_chunks[i].len,
3206 (unsigned long long)ar->wmi.mem_chunks[i].paddr);
3207 }
3208out:
3209 memcpy(&cmd->resource_config.common, &config, sizeof(config));
3210
3211 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
2738 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3212 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
2739} 3213}
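The 10.2 init command reuses the 10.x resource config and appends the same host memory chunk descriptors (physical address, length and request id, all little-endian). A standalone sketch of that packing, using illustrative field and struct names rather than the real struct host_memory_chunk:

	#include <stdint.h>
	#include <stdio.h>
	#include <endian.h>

	/* Assumed on-the-wire layout of one chunk descriptor. */
	struct chunk_desc {
		uint32_t ptr;		/* physical address, little-endian */
		uint32_t size;		/* length in bytes, little-endian */
		uint32_t req_id;	/* request id from the service ready event */
	} __attribute__((packed));

	struct chunk {
		uint64_t paddr;
		uint32_t len;
		uint32_t req_id;
	};

	static void fill_chunks(struct chunk_desc *out, const struct chunk *in, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			out[i].ptr    = htole32((uint32_t)in[i].paddr);
			out[i].size   = htole32(in[i].len);
			out[i].req_id = htole32(in[i].req_id);
		}
	}

	int main(void)
	{
		struct chunk chunks[2] = {
			{ .paddr = 0x12340000, .len = 0x20000, .req_id = 1 },
			{ .paddr = 0x56780000, .len = 0x10000, .req_id = 2 },
		};
		struct chunk_desc wire[2];

		fill_chunks(wire, chunks, 2);
		printf("chunk0 req_id: 0x%x\n", le32toh(wire[0].req_id));
		return 0;
	}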
2740 3214
@@ -2742,10 +3216,14 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
2742{ 3216{
2743 int ret; 3217 int ret;
2744 3218
2745 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 3219 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
2746 ret = ath10k_wmi_10x_cmd_init(ar); 3220 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
2747 else 3221 ret = ath10k_wmi_10_2_cmd_init(ar);
3222 else
3223 ret = ath10k_wmi_10x_cmd_init(ar);
3224 } else {
2748 ret = ath10k_wmi_main_cmd_init(ar); 3225 ret = ath10k_wmi_main_cmd_init(ar);
3226 }
2749 3227
2750 return ret; 3228 return ret;
2751} 3229}
@@ -2822,7 +3300,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
2822 if (len < 0) 3300 if (len < 0)
2823 return len; /* len contains error code here */ 3301 return len; /* len contains error code here */
2824 3302
2825 skb = ath10k_wmi_alloc_skb(len); 3303 skb = ath10k_wmi_alloc_skb(ar, len);
2826 if (!skb) 3304 if (!skb)
2827 return -ENOMEM; 3305 return -ENOMEM;
2828 3306
@@ -2865,8 +3343,8 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
2865 channels->num_chan = __cpu_to_le32(arg->n_channels); 3343 channels->num_chan = __cpu_to_le32(arg->n_channels);
2866 3344
2867 for (i = 0; i < arg->n_channels; i++) 3345 for (i = 0; i < arg->n_channels; i++)
2868 channels->channel_list[i] = 3346 channels->channel_list[i].freq =
2869 __cpu_to_le32(arg->channels[i]); 3347 __cpu_to_le16(arg->channels[i]);
2870 3348
2871 off += sizeof(*channels); 3349 off += sizeof(*channels);
2872 off += sizeof(__le32) * arg->n_channels; 3350 off += sizeof(__le32) * arg->n_channels;
@@ -2918,7 +3396,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
2918 return -EINVAL; 3396 return -EINVAL;
2919 } 3397 }
2920 3398
2921 ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); 3399 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
2922 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); 3400 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
2923} 3401}
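The start-scan change above stores each channel as a 16-bit little-endian frequency (channel_list[i].freq) instead of a full __le32 value. A small standalone sketch of that per-entry conversion, with invented struct names:

	#include <stdint.h>
	#include <stdio.h>
	#include <endian.h>

	/* Assumed per-entry layout: a bare 16-bit frequency in MHz. */
	struct chan_entry {
		uint16_t freq;		/* little-endian on the wire */
	} __attribute__((packed));

	static void fill_chan_list(struct chan_entry *list, const int *freqs, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			list[i].freq = htole16((uint16_t)freqs[i]);
	}

	int main(void)
	{
		const int freqs[3] = { 2412, 2437, 5180 };
		struct chan_entry list[3];

		fill_chan_list(list, freqs, 3);
		printf("first channel: %u MHz\n", (unsigned)le16toh(list[0].freq));
		return 0;
	}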
2924 3402
@@ -2960,7 +3438,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
2960 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) 3438 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
2961 return -EINVAL; 3439 return -EINVAL;
2962 3440
2963 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3441 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2964 if (!skb) 3442 if (!skb)
2965 return -ENOMEM; 3443 return -ENOMEM;
2966 3444
@@ -2976,7 +3454,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
2976 cmd->scan_id = __cpu_to_le32(scan_id); 3454 cmd->scan_id = __cpu_to_le32(scan_id);
2977 cmd->scan_req_id = __cpu_to_le32(req_id); 3455 cmd->scan_req_id = __cpu_to_le32(req_id);
2978 3456
2979 ath10k_dbg(ATH10K_DBG_WMI, 3457 ath10k_dbg(ar, ATH10K_DBG_WMI,
2980 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", 3458 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
2981 arg->req_id, arg->req_type, arg->u.scan_id); 3459 arg->req_id, arg->req_type, arg->u.scan_id);
2982 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); 3460 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
@@ -2990,7 +3468,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
2990 struct wmi_vdev_create_cmd *cmd; 3468 struct wmi_vdev_create_cmd *cmd;
2991 struct sk_buff *skb; 3469 struct sk_buff *skb;
2992 3470
2993 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3471 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2994 if (!skb) 3472 if (!skb)
2995 return -ENOMEM; 3473 return -ENOMEM;
2996 3474
@@ -3000,7 +3478,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
3000 cmd->vdev_subtype = __cpu_to_le32(subtype); 3478 cmd->vdev_subtype = __cpu_to_le32(subtype);
3001 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); 3479 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
3002 3480
3003 ath10k_dbg(ATH10K_DBG_WMI, 3481 ath10k_dbg(ar, ATH10K_DBG_WMI,
3004 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", 3482 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
3005 vdev_id, type, subtype, macaddr); 3483 vdev_id, type, subtype, macaddr);
3006 3484
@@ -3012,14 +3490,14 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
3012 struct wmi_vdev_delete_cmd *cmd; 3490 struct wmi_vdev_delete_cmd *cmd;
3013 struct sk_buff *skb; 3491 struct sk_buff *skb;
3014 3492
3015 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3493 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3016 if (!skb) 3494 if (!skb)
3017 return -ENOMEM; 3495 return -ENOMEM;
3018 3496
3019 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 3497 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
3020 cmd->vdev_id = __cpu_to_le32(vdev_id); 3498 cmd->vdev_id = __cpu_to_le32(vdev_id);
3021 3499
3022 ath10k_dbg(ATH10K_DBG_WMI, 3500 ath10k_dbg(ar, ATH10K_DBG_WMI,
3023 "WMI vdev delete id %d\n", vdev_id); 3501 "WMI vdev delete id %d\n", vdev_id);
3024 3502
3025 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); 3503 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
@@ -3052,7 +3530,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
3052 else 3530 else
3053 return -EINVAL; /* should not happen, we already check cmd_id */ 3531 return -EINVAL; /* should not happen, we already check cmd_id */
3054 3532
3055 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3533 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3056 if (!skb) 3534 if (!skb)
3057 return -ENOMEM; 3535 return -ENOMEM;
3058 3536
@@ -3090,7 +3568,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
3090 cmd->chan.reg_classid = arg->channel.reg_class_id; 3568 cmd->chan.reg_classid = arg->channel.reg_class_id;
3091 cmd->chan.antenna_max = arg->channel.max_antenna_gain; 3569 cmd->chan.antenna_max = arg->channel.max_antenna_gain;
3092 3570
3093 ath10k_dbg(ATH10K_DBG_WMI, 3571 ath10k_dbg(ar, ATH10K_DBG_WMI,
3094 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " 3572 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
3095 "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, 3573 "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
3096 flags, arg->channel.freq, arg->channel.mode, 3574 flags, arg->channel.freq, arg->channel.mode,
@@ -3120,14 +3598,14 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
3120 struct wmi_vdev_stop_cmd *cmd; 3598 struct wmi_vdev_stop_cmd *cmd;
3121 struct sk_buff *skb; 3599 struct sk_buff *skb;
3122 3600
3123 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3601 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3124 if (!skb) 3602 if (!skb)
3125 return -ENOMEM; 3603 return -ENOMEM;
3126 3604
3127 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 3605 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
3128 cmd->vdev_id = __cpu_to_le32(vdev_id); 3606 cmd->vdev_id = __cpu_to_le32(vdev_id);
3129 3607
3130 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); 3608 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
3131 3609
3132 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); 3610 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
3133} 3611}
@@ -3137,7 +3615,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
3137 struct wmi_vdev_up_cmd *cmd; 3615 struct wmi_vdev_up_cmd *cmd;
3138 struct sk_buff *skb; 3616 struct sk_buff *skb;
3139 3617
3140 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3618 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3141 if (!skb) 3619 if (!skb)
3142 return -ENOMEM; 3620 return -ENOMEM;
3143 3621
@@ -3146,7 +3624,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
3146 cmd->vdev_assoc_id = __cpu_to_le32(aid); 3624 cmd->vdev_assoc_id = __cpu_to_le32(aid);
3147 memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN); 3625 memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
3148 3626
3149 ath10k_dbg(ATH10K_DBG_WMI, 3627 ath10k_dbg(ar, ATH10K_DBG_WMI,
3150 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 3628 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
3151 vdev_id, aid, bssid); 3629 vdev_id, aid, bssid);
3152 3630
@@ -3158,14 +3636,14 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
3158 struct wmi_vdev_down_cmd *cmd; 3636 struct wmi_vdev_down_cmd *cmd;
3159 struct sk_buff *skb; 3637 struct sk_buff *skb;
3160 3638
3161 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3639 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3162 if (!skb) 3640 if (!skb)
3163 return -ENOMEM; 3641 return -ENOMEM;
3164 3642
3165 cmd = (struct wmi_vdev_down_cmd *)skb->data; 3643 cmd = (struct wmi_vdev_down_cmd *)skb->data;
3166 cmd->vdev_id = __cpu_to_le32(vdev_id); 3644 cmd->vdev_id = __cpu_to_le32(vdev_id);
3167 3645
3168 ath10k_dbg(ATH10K_DBG_WMI, 3646 ath10k_dbg(ar, ATH10K_DBG_WMI,
3169 "wmi mgmt vdev down id 0x%x\n", vdev_id); 3647 "wmi mgmt vdev down id 0x%x\n", vdev_id);
3170 3648
3171 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); 3649 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
@@ -3178,13 +3656,13 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3178 struct sk_buff *skb; 3656 struct sk_buff *skb;
3179 3657
3180 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { 3658 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
3181 ath10k_dbg(ATH10K_DBG_WMI, 3659 ath10k_dbg(ar, ATH10K_DBG_WMI,
3182 "vdev param %d not supported by firmware\n", 3660 "vdev param %d not supported by firmware\n",
3183 param_id); 3661 param_id);
3184 return -EOPNOTSUPP; 3662 return -EOPNOTSUPP;
3185 } 3663 }
3186 3664
3187 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3665 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3188 if (!skb) 3666 if (!skb)
3189 return -ENOMEM; 3667 return -ENOMEM;
3190 3668
@@ -3193,7 +3671,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3193 cmd->param_id = __cpu_to_le32(param_id); 3671 cmd->param_id = __cpu_to_le32(param_id);
3194 cmd->param_value = __cpu_to_le32(param_value); 3672 cmd->param_value = __cpu_to_le32(param_value);
3195 3673
3196 ath10k_dbg(ATH10K_DBG_WMI, 3674 ath10k_dbg(ar, ATH10K_DBG_WMI,
3197 "wmi vdev id 0x%x set param %d value %d\n", 3675 "wmi vdev id 0x%x set param %d value %d\n",
3198 vdev_id, param_id, param_value); 3676 vdev_id, param_id, param_value);
3199 3677
@@ -3211,7 +3689,7 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
3211 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) 3689 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
3212 return -EINVAL; 3690 return -EINVAL;
3213 3691
3214 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); 3692 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
3215 if (!skb) 3693 if (!skb)
3216 return -ENOMEM; 3694 return -ENOMEM;
3217 3695
@@ -3229,20 +3707,76 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
3229 if (arg->key_data) 3707 if (arg->key_data)
3230 memcpy(cmd->key_data, arg->key_data, arg->key_len); 3708 memcpy(cmd->key_data, arg->key_data, arg->key_len);
3231 3709
3232 ath10k_dbg(ATH10K_DBG_WMI, 3710 ath10k_dbg(ar, ATH10K_DBG_WMI,
3233 "wmi vdev install key idx %d cipher %d len %d\n", 3711 "wmi vdev install key idx %d cipher %d len %d\n",
3234 arg->key_idx, arg->key_cipher, arg->key_len); 3712 arg->key_idx, arg->key_cipher, arg->key_len);
3235 return ath10k_wmi_cmd_send(ar, skb, 3713 return ath10k_wmi_cmd_send(ar, skb,
3236 ar->wmi.cmd->vdev_install_key_cmdid); 3714 ar->wmi.cmd->vdev_install_key_cmdid);
3237} 3715}
3238 3716
3717int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
3718 const struct wmi_vdev_spectral_conf_arg *arg)
3719{
3720 struct wmi_vdev_spectral_conf_cmd *cmd;
3721 struct sk_buff *skb;
3722 u32 cmdid;
3723
3724 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3725 if (!skb)
3726 return -ENOMEM;
3727
3728 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
3729 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
3730 cmd->scan_count = __cpu_to_le32(arg->scan_count);
3731 cmd->scan_period = __cpu_to_le32(arg->scan_period);
3732 cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
3733 cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
3734 cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
3735 cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
3736 cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
3737 cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
3738 cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
3739 cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
3740 cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
3741 cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
3742 cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
3743 cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
3744 cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
3745 cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
3746 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
3747 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
3748
3749 cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
3750 return ath10k_wmi_cmd_send(ar, skb, cmdid);
3751}
3752
3753int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
3754 u32 enable)
3755{
3756 struct wmi_vdev_spectral_enable_cmd *cmd;
3757 struct sk_buff *skb;
3758 u32 cmdid;
3759
3760 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3761 if (!skb)
3762 return -ENOMEM;
3763
3764 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
3765 cmd->vdev_id = __cpu_to_le32(vdev_id);
3766 cmd->trigger_cmd = __cpu_to_le32(trigger);
3767 cmd->enable_cmd = __cpu_to_le32(enable);
3768
3769 cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
3770 return ath10k_wmi_cmd_send(ar, skb, cmdid);
3771}
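For context, a rough sketch of how a caller inside the driver might use the two new spectral helpers. It is not standalone (it assumes the ath10k context and the structures from wmi.h), the parameter values are placeholders rather than recommended settings, and the meaning of the trigger/enable arguments is firmware-defined rather than spelled out in this patch.

	/* Non-buildable illustration: assumes ath10k driver context. */
	static int example_spectral_start(struct ath10k *ar, u32 vdev_id)
	{
		struct wmi_vdev_spectral_conf_arg arg = {
			.vdev_id	= vdev_id,
			.scan_count	= 0,	/* placeholder values */
			.scan_period	= 1,
			.scan_priority	= 1,
			.scan_fft_size	= 7,
		};
		int ret;

		ret = ath10k_wmi_vdev_spectral_conf(ar, &arg);
		if (ret)
			return ret;

		/* trigger/enable values are placeholders; semantics are firmware-defined */
		return ath10k_wmi_vdev_spectral_enable(ar, vdev_id, 1, 1);
	}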
3772
3239int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 3773int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
3240 const u8 peer_addr[ETH_ALEN]) 3774 const u8 peer_addr[ETH_ALEN])
3241{ 3775{
3242 struct wmi_peer_create_cmd *cmd; 3776 struct wmi_peer_create_cmd *cmd;
3243 struct sk_buff *skb; 3777 struct sk_buff *skb;
3244 3778
3245 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3779 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3246 if (!skb) 3780 if (!skb)
3247 return -ENOMEM; 3781 return -ENOMEM;
3248 3782
@@ -3250,7 +3784,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
3250 cmd->vdev_id = __cpu_to_le32(vdev_id); 3784 cmd->vdev_id = __cpu_to_le32(vdev_id);
3251 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3785 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3252 3786
3253 ath10k_dbg(ATH10K_DBG_WMI, 3787 ath10k_dbg(ar, ATH10K_DBG_WMI,
3254 "wmi peer create vdev_id %d peer_addr %pM\n", 3788 "wmi peer create vdev_id %d peer_addr %pM\n",
3255 vdev_id, peer_addr); 3789 vdev_id, peer_addr);
3256 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); 3790 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
@@ -3262,7 +3796,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
3262 struct wmi_peer_delete_cmd *cmd; 3796 struct wmi_peer_delete_cmd *cmd;
3263 struct sk_buff *skb; 3797 struct sk_buff *skb;
3264 3798
3265 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3799 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3266 if (!skb) 3800 if (!skb)
3267 return -ENOMEM; 3801 return -ENOMEM;
3268 3802
@@ -3270,7 +3804,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
3270 cmd->vdev_id = __cpu_to_le32(vdev_id); 3804 cmd->vdev_id = __cpu_to_le32(vdev_id);
3271 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3805 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3272 3806
3273 ath10k_dbg(ATH10K_DBG_WMI, 3807 ath10k_dbg(ar, ATH10K_DBG_WMI,
3274 "wmi peer delete vdev_id %d peer_addr %pM\n", 3808 "wmi peer delete vdev_id %d peer_addr %pM\n",
3275 vdev_id, peer_addr); 3809 vdev_id, peer_addr);
3276 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); 3810 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
@@ -3282,7 +3816,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
3282 struct wmi_peer_flush_tids_cmd *cmd; 3816 struct wmi_peer_flush_tids_cmd *cmd;
3283 struct sk_buff *skb; 3817 struct sk_buff *skb;
3284 3818
3285 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3819 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3286 if (!skb) 3820 if (!skb)
3287 return -ENOMEM; 3821 return -ENOMEM;
3288 3822
@@ -3291,7 +3825,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
3291 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); 3825 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
3292 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3826 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3293 3827
3294 ath10k_dbg(ATH10K_DBG_WMI, 3828 ath10k_dbg(ar, ATH10K_DBG_WMI,
3295 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 3829 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
3296 vdev_id, peer_addr, tid_bitmap); 3830 vdev_id, peer_addr, tid_bitmap);
3297 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); 3831 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
@@ -3304,7 +3838,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
3304 struct wmi_peer_set_param_cmd *cmd; 3838 struct wmi_peer_set_param_cmd *cmd;
3305 struct sk_buff *skb; 3839 struct sk_buff *skb;
3306 3840
3307 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3841 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3308 if (!skb) 3842 if (!skb)
3309 return -ENOMEM; 3843 return -ENOMEM;
3310 3844
@@ -3314,7 +3848,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
3314 cmd->param_value = __cpu_to_le32(param_value); 3848 cmd->param_value = __cpu_to_le32(param_value);
3315 memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3849 memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3316 3850
3317 ath10k_dbg(ATH10K_DBG_WMI, 3851 ath10k_dbg(ar, ATH10K_DBG_WMI,
3318 "wmi vdev %d peer 0x%pM set param %d value %d\n", 3852 "wmi vdev %d peer 0x%pM set param %d value %d\n",
3319 vdev_id, peer_addr, param_id, param_value); 3853 vdev_id, peer_addr, param_id, param_value);
3320 3854
@@ -3327,7 +3861,7 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
3327 struct wmi_sta_powersave_mode_cmd *cmd; 3861 struct wmi_sta_powersave_mode_cmd *cmd;
3328 struct sk_buff *skb; 3862 struct sk_buff *skb;
3329 3863
3330 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3864 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3331 if (!skb) 3865 if (!skb)
3332 return -ENOMEM; 3866 return -ENOMEM;
3333 3867
@@ -3335,7 +3869,7 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
3335 cmd->vdev_id = __cpu_to_le32(vdev_id); 3869 cmd->vdev_id = __cpu_to_le32(vdev_id);
3336 cmd->sta_ps_mode = __cpu_to_le32(psmode); 3870 cmd->sta_ps_mode = __cpu_to_le32(psmode);
3337 3871
3338 ath10k_dbg(ATH10K_DBG_WMI, 3872 ath10k_dbg(ar, ATH10K_DBG_WMI,
3339 "wmi set powersave id 0x%x mode %d\n", 3873 "wmi set powersave id 0x%x mode %d\n",
3340 vdev_id, psmode); 3874 vdev_id, psmode);
3341 3875
@@ -3350,7 +3884,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
3350 struct wmi_sta_powersave_param_cmd *cmd; 3884 struct wmi_sta_powersave_param_cmd *cmd;
3351 struct sk_buff *skb; 3885 struct sk_buff *skb;
3352 3886
3353 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3887 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3354 if (!skb) 3888 if (!skb)
3355 return -ENOMEM; 3889 return -ENOMEM;
3356 3890
@@ -3359,7 +3893,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
3359 cmd->param_id = __cpu_to_le32(param_id); 3893 cmd->param_id = __cpu_to_le32(param_id);
3360 cmd->param_value = __cpu_to_le32(value); 3894 cmd->param_value = __cpu_to_le32(value);
3361 3895
3362 ath10k_dbg(ATH10K_DBG_WMI, 3896 ath10k_dbg(ar, ATH10K_DBG_WMI,
3363 "wmi sta ps param vdev_id 0x%x param %d value %d\n", 3897 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
3364 vdev_id, param_id, value); 3898 vdev_id, param_id, value);
3365 return ath10k_wmi_cmd_send(ar, skb, 3899 return ath10k_wmi_cmd_send(ar, skb,
@@ -3375,7 +3909,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
3375 if (!mac) 3909 if (!mac)
3376 return -EINVAL; 3910 return -EINVAL;
3377 3911
3378 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3912 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3379 if (!skb) 3913 if (!skb)
3380 return -ENOMEM; 3914 return -ENOMEM;
3381 3915
@@ -3385,7 +3919,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
3385 cmd->param_value = __cpu_to_le32(value); 3919 cmd->param_value = __cpu_to_le32(value);
3386 memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); 3920 memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
3387 3921
3388 ath10k_dbg(ATH10K_DBG_WMI, 3922 ath10k_dbg(ar, ATH10K_DBG_WMI,
3389 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", 3923 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
3390 vdev_id, param_id, value, mac); 3924 vdev_id, param_id, value, mac);
3391 3925
@@ -3405,7 +3939,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
3405 3939
3406 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); 3940 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
3407 3941
3408 skb = ath10k_wmi_alloc_skb(len); 3942 skb = ath10k_wmi_alloc_skb(ar, len);
3409 if (!skb) 3943 if (!skb)
3410 return -EINVAL; 3944 return -EINVAL;
3411 3945
@@ -3447,24 +3981,12 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
3447 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); 3981 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
3448} 3982}
3449 3983
3450int ath10k_wmi_peer_assoc(struct ath10k *ar, 3984static void
3451 const struct wmi_peer_assoc_complete_arg *arg) 3985ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
3986 const struct wmi_peer_assoc_complete_arg *arg)
3452{ 3987{
3453 struct wmi_peer_assoc_complete_cmd *cmd; 3988 struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
3454 struct sk_buff *skb;
3455 3989
3456 if (arg->peer_mpdu_density > 16)
3457 return -EINVAL;
3458 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
3459 return -EINVAL;
3460 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
3461 return -EINVAL;
3462
3463 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
3464 if (!skb)
3465 return -ENOMEM;
3466
3467 cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
3468 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 3990 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
3469 cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); 3991 cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
3470 cmd->peer_associd = __cpu_to_le32(arg->peer_aid); 3992 cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
@@ -3499,8 +4021,80 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
3499 __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); 4021 __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
3500 cmd->peer_vht_rates.tx_mcs_set = 4022 cmd->peer_vht_rates.tx_mcs_set =
3501 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 4023 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
4024}
4025
4026static void
4027ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
4028 const struct wmi_peer_assoc_complete_arg *arg)
4029{
4030 struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
4031
4032 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
4033 memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
4034}
4035
4036static void
4037ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
4038 const struct wmi_peer_assoc_complete_arg *arg)
4039{
4040 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
4041}
4042
4043static void
4044ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
4045 const struct wmi_peer_assoc_complete_arg *arg)
4046{
4047 struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
4048 int max_mcs, max_nss;
4049 u32 info0;
4050
4051 /* TODO: Is using max values okay with firmware? */
4052 max_mcs = 0xf;
4053 max_nss = 0xf;
4054
4055 info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
4056 SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
4057
4058 ath10k_wmi_peer_assoc_fill(ar, buf, arg);
4059 cmd->info0 = __cpu_to_le32(info0);
4060}
4061
4062int ath10k_wmi_peer_assoc(struct ath10k *ar,
4063 const struct wmi_peer_assoc_complete_arg *arg)
4064{
4065 struct sk_buff *skb;
4066 int len;
4067
4068 if (arg->peer_mpdu_density > 16)
4069 return -EINVAL;
4070 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
4071 return -EINVAL;
4072 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
4073 return -EINVAL;
4074
4075 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
4076 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
4077 len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
4078 else
4079 len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
4080 } else {
4081 len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
4082 }
4083
4084 skb = ath10k_wmi_alloc_skb(ar, len);
4085 if (!skb)
4086 return -ENOMEM;
4087
4088 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
4089 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
 4090 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
 4091 else
 4092 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
4093 } else {
4094 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
4095 }
3502 4096
3503 ath10k_dbg(ATH10K_DBG_WMI, 4097 ath10k_dbg(ar, ATH10K_DBG_WMI,
3504 "wmi peer assoc vdev %d addr %pM (%s)\n", 4098 "wmi peer assoc vdev %d addr %pM (%s)\n",
3505 arg->vdev_id, arg->addr, 4099 arg->vdev_id, arg->addr,
3506 arg->peer_reassoc ? "reassociate" : "new"); 4100 arg->peer_reassoc ? "reassociate" : "new");
@@ -3518,7 +4112,7 @@ int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
3518 int ret; 4112 int ret;
3519 u16 fc; 4113 u16 fc;
3520 4114
3521 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4115 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3522 if (!skb) 4116 if (!skb)
3523 return -ENOMEM; 4117 return -ENOMEM;
3524 4118
@@ -3532,6 +4126,7 @@ int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
3532 cmd->msdu_id = 0; 4126 cmd->msdu_id = 0;
3533 cmd->frame_control = __cpu_to_le32(fc); 4127 cmd->frame_control = __cpu_to_le32(fc);
3534 cmd->flags = 0; 4128 cmd->flags = 0;
4129 cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
3535 4130
3536 if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) 4131 if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
3537 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); 4132 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
@@ -3565,7 +4160,7 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
3565 struct wmi_pdev_set_wmm_params *cmd; 4160 struct wmi_pdev_set_wmm_params *cmd;
3566 struct sk_buff *skb; 4161 struct sk_buff *skb;
3567 4162
3568 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4163 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3569 if (!skb) 4164 if (!skb)
3570 return -ENOMEM; 4165 return -ENOMEM;
3571 4166
@@ -3575,7 +4170,7 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
3575 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); 4170 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
3576 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); 4171 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
3577 4172
3578 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); 4173 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
3579 return ath10k_wmi_cmd_send(ar, skb, 4174 return ath10k_wmi_cmd_send(ar, skb,
3580 ar->wmi.cmd->pdev_set_wmm_params_cmdid); 4175 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
3581} 4176}
@@ -3585,14 +4180,14 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
3585 struct wmi_request_stats_cmd *cmd; 4180 struct wmi_request_stats_cmd *cmd;
3586 struct sk_buff *skb; 4181 struct sk_buff *skb;
3587 4182
3588 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4183 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3589 if (!skb) 4184 if (!skb)
3590 return -ENOMEM; 4185 return -ENOMEM;
3591 4186
3592 cmd = (struct wmi_request_stats_cmd *)skb->data; 4187 cmd = (struct wmi_request_stats_cmd *)skb->data;
3593 cmd->stats_id = __cpu_to_le32(stats_id); 4188 cmd->stats_id = __cpu_to_le32(stats_id);
3594 4189
3595 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); 4190 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
3596 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); 4191 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
3597} 4192}
3598 4193
@@ -3602,7 +4197,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
3602 struct wmi_force_fw_hang_cmd *cmd; 4197 struct wmi_force_fw_hang_cmd *cmd;
3603 struct sk_buff *skb; 4198 struct sk_buff *skb;
3604 4199
3605 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4200 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3606 if (!skb) 4201 if (!skb)
3607 return -ENOMEM; 4202 return -ENOMEM;
3608 4203
@@ -3610,7 +4205,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
3610 cmd->type = __cpu_to_le32(type); 4205 cmd->type = __cpu_to_le32(type);
3611 cmd->delay_ms = __cpu_to_le32(delay_ms); 4206 cmd->delay_ms = __cpu_to_le32(delay_ms);
3612 4207
3613 ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", 4208 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
3614 type, delay_ms); 4209 type, delay_ms);
3615 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); 4210 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
3616} 4211}
@@ -3621,7 +4216,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
3621 struct sk_buff *skb; 4216 struct sk_buff *skb;
3622 u32 cfg; 4217 u32 cfg;
3623 4218
3624 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4219 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3625 if (!skb) 4220 if (!skb)
3626 return -ENOMEM; 4221 return -ENOMEM;
3627 4222
@@ -3642,7 +4237,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
3642 cmd->config_enable = __cpu_to_le32(cfg); 4237 cmd->config_enable = __cpu_to_le32(cfg);
3643 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); 4238 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
3644 4239
3645 ath10k_dbg(ATH10K_DBG_WMI, 4240 ath10k_dbg(ar, ATH10K_DBG_WMI,
3646 "wmi dbglog cfg modules %08x %08x config %08x %08x\n", 4241 "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
3647 __le32_to_cpu(cmd->module_enable), 4242 __le32_to_cpu(cmd->module_enable),
3648 __le32_to_cpu(cmd->module_valid), 4243 __le32_to_cpu(cmd->module_valid),