author     Michal Kazior <michal.kazior@tieto.com>    2014-08-25 06:09:38 -0400
committer  Kalle Valo <kvalo@qca.qualcomm.com>        2014-08-26 12:04:48 -0400
commit     7aa7a72a23679abf1cea9b3b65a8921244e769a7 (patch)
tree       1fae909aa0300aff481db6fe53e5bd09c696d57b /drivers/net/wireless/ath/ath10k/wmi.c
parent     61e9aab7a1930ae031d1b9d948837b5ffd8e3f5e (diff)
ath10k: improve logging to include dev id
This makes it a lot easier to log and debug messages if there's more
than 1 ath10k device on a system.

Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/wmi.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c  355
1 file changed, 180 insertions(+), 175 deletions(-)
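To see why every wmi.c call site now takes an extra struct ath10k *ar argument, it helps to picture what the logging helpers do with it. The sketch below is an assumption-only illustration, not the driver's actual ath10k_warn()/ath10k_dbg() (those live elsewhere in the driver and are not shown on this page); the struct and function names are placeholders. The idea is simply that, given the ath10k instance, a message can be routed through dev_warn(), which prefixes it with the owning device's name.

/*
 * Illustrative sketch only -- not part of this patch. Placeholder names;
 * the real helpers are implemented elsewhere in ath10k.
 */
#include <linux/device.h>
#include <linux/kernel.h>

struct ath10k_sketch {			/* stand-in for struct ath10k */
	struct device *dev;		/* underlying PCI/platform device */
};

static __printf(2, 3)
void ath10k_sketch_warn(struct ath10k_sketch *ar, const char *fmt, ...)
{
	struct va_format vaf = { .fmt = fmt };
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	dev_warn(ar->dev, "%pV", &vaf);	/* per-device prefix added here */
	va_end(args);
}

Under that assumption, a call visible in this diff such as ath10k_warn(ar, "swba has corrupted vdev map\n") would come out prefixed with the specific device (e.g. "ath10k_pci 0000:02:00.0: ..."), which is presumably what "include dev id" in the subject line refers to.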
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ec3bf4e00b4d..e500a3cc905e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -624,18 +624,18 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
624 return ret; 624 return ret;
625} 625}
626 626
627static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) 627static struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
628{ 628{
629 struct sk_buff *skb; 629 struct sk_buff *skb;
630 u32 round_len = roundup(len, 4); 630 u32 round_len = roundup(len, 4);
631 631
632 skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len); 632 skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
633 if (!skb) 633 if (!skb)
634 return NULL; 634 return NULL;
635 635
636 skb_reserve(skb, WMI_SKB_HEADROOM); 636 skb_reserve(skb, WMI_SKB_HEADROOM);
637 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 637 if (!IS_ALIGNED((unsigned long)skb->data, 4))
638 ath10k_warn("Unaligned WMI skb\n"); 638 ath10k_warn(ar, "Unaligned WMI skb\n");
639 639
640 skb_put(skb, round_len); 640 skb_put(skb, round_len);
641 memset(skb->data, 0, round_len); 641 memset(skb->data, 0, round_len);
@@ -733,7 +733,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
733 might_sleep(); 733 might_sleep();
734 734
735 if (cmd_id == WMI_CMD_UNSUPPORTED) { 735 if (cmd_id == WMI_CMD_UNSUPPORTED) {
736 ath10k_warn("wmi command %d is not supported by firmware\n", 736 ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
737 cmd_id); 737 cmd_id);
738 return ret; 738 return ret;
739 } 739 }
@@ -781,7 +781,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
781 781
782 len = round_up(len, 4); 782 len = round_up(len, 4);
783 783
784 wmi_skb = ath10k_wmi_alloc_skb(len); 784 wmi_skb = ath10k_wmi_alloc_skb(ar, len);
785 if (!wmi_skb) 785 if (!wmi_skb)
786 return -ENOMEM; 786 return -ENOMEM;
787 787
@@ -795,7 +795,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
795 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); 795 memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
796 memcpy(cmd->buf, skb->data, skb->len); 796 memcpy(cmd->buf, skb->data, skb->len);
797 797
798 ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", 798 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
799 wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, 799 wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
800 fc & IEEE80211_FCTL_STYPE); 800 fc & IEEE80211_FCTL_STYPE);
801 801
@@ -819,7 +819,7 @@ static void ath10k_wmi_event_scan_started(struct ath10k *ar)
819 case ATH10K_SCAN_IDLE: 819 case ATH10K_SCAN_IDLE:
820 case ATH10K_SCAN_RUNNING: 820 case ATH10K_SCAN_RUNNING:
821 case ATH10K_SCAN_ABORTING: 821 case ATH10K_SCAN_ABORTING:
822 ath10k_warn("received scan started event in an invalid scan state: %s (%d)\n", 822 ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
823 ath10k_scan_state_str(ar->scan.state), 823 ath10k_scan_state_str(ar->scan.state),
824 ar->scan.state); 824 ar->scan.state);
825 break; 825 break;
@@ -849,7 +849,7 @@ static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
849 * is) ignored by the host as it may be just firmware's scan 849 * is) ignored by the host as it may be just firmware's scan
850 * state machine recovering. 850 * state machine recovering.
851 */ 851 */
852 ath10k_warn("received scan completed event in an invalid scan state: %s (%d)\n", 852 ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
853 ath10k_scan_state_str(ar->scan.state), 853 ath10k_scan_state_str(ar->scan.state),
854 ar->scan.state); 854 ar->scan.state);
855 break; 855 break;
@@ -867,7 +867,7 @@ static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
867 switch (ar->scan.state) { 867 switch (ar->scan.state) {
868 case ATH10K_SCAN_IDLE: 868 case ATH10K_SCAN_IDLE:
869 case ATH10K_SCAN_STARTING: 869 case ATH10K_SCAN_STARTING:
870 ath10k_warn("received scan bss chan event in an invalid scan state: %s (%d)\n", 870 ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
871 ath10k_scan_state_str(ar->scan.state), 871 ath10k_scan_state_str(ar->scan.state),
872 ar->scan.state); 872 ar->scan.state);
873 break; 873 break;
@@ -885,7 +885,7 @@ static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
885 switch (ar->scan.state) { 885 switch (ar->scan.state) {
886 case ATH10K_SCAN_IDLE: 886 case ATH10K_SCAN_IDLE:
887 case ATH10K_SCAN_STARTING: 887 case ATH10K_SCAN_STARTING:
888 ath10k_warn("received scan foreign chan event in an invalid scan state: %s (%d)\n", 888 ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
889 ath10k_scan_state_str(ar->scan.state), 889 ath10k_scan_state_str(ar->scan.state),
890 ar->scan.state); 890 ar->scan.state);
891 break; 891 break;
@@ -954,7 +954,7 @@ static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
954 954
955 spin_lock_bh(&ar->data_lock); 955 spin_lock_bh(&ar->data_lock);
956 956
957 ath10k_dbg(ATH10K_DBG_WMI, 957 ath10k_dbg(ar, ATH10K_DBG_WMI,
958 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", 958 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
959 ath10k_wmi_event_scan_type_str(event_type, reason), 959 ath10k_wmi_event_scan_type_str(event_type, reason),
960 event_type, reason, freq, req_id, scan_id, vdev_id, 960 event_type, reason, freq, req_id, scan_id, vdev_id,
@@ -974,7 +974,7 @@ static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
974 ath10k_wmi_event_scan_foreign_chan(ar, freq); 974 ath10k_wmi_event_scan_foreign_chan(ar, freq);
975 break; 975 break;
976 case WMI_SCAN_EVENT_START_FAILED: 976 case WMI_SCAN_EVENT_START_FAILED:
977 ath10k_warn("received scan start failure event\n"); 977 ath10k_warn(ar, "received scan start failure event\n");
978 break; 978 break;
979 case WMI_SCAN_EVENT_DEQUEUED: 979 case WMI_SCAN_EVENT_DEQUEUED:
980 case WMI_SCAN_EVENT_PREEMPTED: 980 case WMI_SCAN_EVENT_PREEMPTED:
@@ -1107,7 +1107,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
1107 1107
1108 memset(status, 0, sizeof(*status)); 1108 memset(status, 0, sizeof(*status));
1109 1109
1110 ath10k_dbg(ATH10K_DBG_MGMT, 1110 ath10k_dbg(ar, ATH10K_DBG_MGMT,
1111 "event mgmt rx status %08x\n", rx_status); 1111 "event mgmt rx status %08x\n", rx_status);
1112 1112
1113 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 1113 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
@@ -1143,9 +1143,9 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
1143 1143
1144 if (phy_mode == MODE_11B && 1144 if (phy_mode == MODE_11B &&
1145 status->band == IEEE80211_BAND_5GHZ) 1145 status->band == IEEE80211_BAND_5GHZ)
1146 ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); 1146 ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
1147 } else { 1147 } else {
1148 ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n"); 1148 ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n");
1149 status->band = phy_mode_to_band(phy_mode); 1149 status->band = phy_mode_to_band(phy_mode);
1150 } 1150 }
1151 1151
@@ -1175,12 +1175,12 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
1175 } 1175 }
1176 } 1176 }
1177 1177
1178 ath10k_dbg(ATH10K_DBG_MGMT, 1178 ath10k_dbg(ar, ATH10K_DBG_MGMT,
1179 "event mgmt rx skb %p len %d ftype %02x stype %02x\n", 1179 "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
1180 skb, skb->len, 1180 skb, skb->len,
1181 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 1181 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
1182 1182
1183 ath10k_dbg(ATH10K_DBG_MGMT, 1183 ath10k_dbg(ar, ATH10K_DBG_MGMT,
1184 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 1184 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
1185 status->freq, status->band, status->signal, 1185 status->freq, status->band, status->signal,
1186 status->rate_idx); 1186 status->rate_idx);
@@ -1230,7 +1230,7 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
1230 rx_clear_count = __le32_to_cpu(ev->rx_clear_count); 1230 rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
1231 cycle_count = __le32_to_cpu(ev->cycle_count); 1231 cycle_count = __le32_to_cpu(ev->cycle_count);
1232 1232
1233 ath10k_dbg(ATH10K_DBG_WMI, 1233 ath10k_dbg(ar, ATH10K_DBG_WMI,
1234 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", 1234 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
1235 err_code, freq, cmd_flags, noise_floor, rx_clear_count, 1235 err_code, freq, cmd_flags, noise_floor, rx_clear_count,
1236 cycle_count); 1236 cycle_count);
@@ -1240,7 +1240,7 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
1240 switch (ar->scan.state) { 1240 switch (ar->scan.state) {
1241 case ATH10K_SCAN_IDLE: 1241 case ATH10K_SCAN_IDLE:
1242 case ATH10K_SCAN_STARTING: 1242 case ATH10K_SCAN_STARTING:
1243 ath10k_warn("received chan info event without a scan request, ignoring\n"); 1243 ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
1244 goto exit; 1244 goto exit;
1245 case ATH10K_SCAN_RUNNING: 1245 case ATH10K_SCAN_RUNNING:
1246 case ATH10K_SCAN_ABORTING: 1246 case ATH10K_SCAN_ABORTING:
@@ -1249,7 +1249,7 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
1249 1249
1250 idx = freq_to_idx(ar, freq); 1250 idx = freq_to_idx(ar, freq);
1251 if (idx >= ARRAY_SIZE(ar->survey)) { 1251 if (idx >= ARRAY_SIZE(ar->survey)) {
1252 ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n", 1252 ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
1253 freq, idx); 1253 freq, idx);
1254 goto exit; 1254 goto exit;
1255 } 1255 }
@@ -1280,12 +1280,12 @@ exit:
1280 1280
1281static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) 1281static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
1282{ 1282{
1283 ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); 1283 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
1284} 1284}
1285 1285
1286static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) 1286static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
1287{ 1287{
1288 ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", 1288 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
1289 skb->len); 1289 skb->len);
1290 1290
1291 trace_ath10k_wmi_dbglog(skb->data, skb->len); 1291 trace_ath10k_wmi_dbglog(skb->data, skb->len);
@@ -1298,7 +1298,7 @@ static void ath10k_wmi_event_update_stats(struct ath10k *ar,
1298{ 1298{
1299 struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; 1299 struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
1300 1300
1301 ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); 1301 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
1302 1302
1303 ath10k_debug_read_target_stats(ar, ev); 1303 ath10k_debug_read_target_stats(ar, ev);
1304} 1304}
@@ -1308,7 +1308,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
1308{ 1308{
1309 struct wmi_vdev_start_response_event *ev; 1309 struct wmi_vdev_start_response_event *ev;
1310 1310
1311 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); 1311 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
1312 1312
1313 ev = (struct wmi_vdev_start_response_event *)skb->data; 1313 ev = (struct wmi_vdev_start_response_event *)skb->data;
1314 1314
@@ -1321,7 +1321,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
1321static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, 1321static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
1322 struct sk_buff *skb) 1322 struct sk_buff *skb)
1323{ 1323{
1324 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); 1324 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
1325 complete(&ar->vdev_setup_done); 1325 complete(&ar->vdev_setup_done);
1326} 1326}
1327 1327
@@ -1333,14 +1333,14 @@ static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
1333 1333
1334 ev = (struct wmi_peer_sta_kickout_event *)skb->data; 1334 ev = (struct wmi_peer_sta_kickout_event *)skb->data;
1335 1335
1336 ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", 1336 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
1337 ev->peer_macaddr.addr); 1337 ev->peer_macaddr.addr);
1338 1338
1339 rcu_read_lock(); 1339 rcu_read_lock();
1340 1340
1341 sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); 1341 sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
1342 if (!sta) { 1342 if (!sta) {
1343 ath10k_warn("Spurious quick kickout for STA %pM\n", 1343 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
1344 ev->peer_macaddr.addr); 1344 ev->peer_macaddr.addr);
1345 goto exit; 1345 goto exit;
1346 } 1346 }
@@ -1417,7 +1417,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1417 (u8 *)skb_tail_pointer(bcn) - ies); 1417 (u8 *)skb_tail_pointer(bcn) - ies);
1418 if (!ie) { 1418 if (!ie) {
1419 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1419 if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1420 ath10k_warn("no tim ie found;\n"); 1420 ath10k_warn(ar, "no tim ie found;\n");
1421 return; 1421 return;
1422 } 1422 }
1423 1423
@@ -1437,12 +1437,12 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1437 ie_len += expand_size; 1437 ie_len += expand_size;
1438 pvm_len += expand_size; 1438 pvm_len += expand_size;
1439 } else { 1439 } else {
1440 ath10k_warn("tim expansion failed\n"); 1440 ath10k_warn(ar, "tim expansion failed\n");
1441 } 1441 }
1442 } 1442 }
1443 1443
1444 if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { 1444 if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
1445 ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); 1445 ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
1446 return; 1446 return;
1447 } 1447 }
1448 1448
@@ -1456,7 +1456,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1456 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; 1456 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
1457 } 1457 }
1458 1458
1459 ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", 1459 ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
1460 tim->dtim_count, tim->dtim_period, 1460 tim->dtim_count, tim->dtim_period,
1461 tim->bitmap_ctrl, pvm_len); 1461 tim->bitmap_ctrl, pvm_len);
1462} 1462}
@@ -1534,7 +1534,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
1534 if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 1534 if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
1535 return; 1535 return;
1536 1536
1537 ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); 1537 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
1538 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { 1538 if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
1539 new_len = ath10k_p2p_calc_noa_ie_len(noa); 1539 new_len = ath10k_p2p_calc_noa_ie_len(noa);
1540 if (!new_len) 1540 if (!new_len)
@@ -1582,7 +1582,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1582 ev = (struct wmi_host_swba_event *)skb->data; 1582 ev = (struct wmi_host_swba_event *)skb->data;
1583 map = __le32_to_cpu(ev->vdev_map); 1583 map = __le32_to_cpu(ev->vdev_map);
1584 1584
1585 ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", 1585 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
1586 ev->vdev_map); 1586 ev->vdev_map);
1587 1587
1588 for (; map; map >>= 1, vdev_id++) { 1588 for (; map; map >>= 1, vdev_id++) {
@@ -1592,13 +1592,13 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1592 i++; 1592 i++;
1593 1593
1594 if (i >= WMI_MAX_AP_VDEV) { 1594 if (i >= WMI_MAX_AP_VDEV) {
1595 ath10k_warn("swba has corrupted vdev map\n"); 1595 ath10k_warn(ar, "swba has corrupted vdev map\n");
1596 break; 1596 break;
1597 } 1597 }
1598 1598
1599 bcn_info = &ev->bcn_info[i]; 1599 bcn_info = &ev->bcn_info[i];
1600 1600
1601 ath10k_dbg(ATH10K_DBG_MGMT, 1601 ath10k_dbg(ar, ATH10K_DBG_MGMT,
1602 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n", 1602 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
1603 i, 1603 i,
1604 __le32_to_cpu(bcn_info->tim_info.tim_len), 1604 __le32_to_cpu(bcn_info->tim_info.tim_len),
@@ -1612,7 +1612,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1612 1612
1613 arvif = ath10k_get_arvif(ar, vdev_id); 1613 arvif = ath10k_get_arvif(ar, vdev_id);
1614 if (arvif == NULL) { 1614 if (arvif == NULL) {
1615 ath10k_warn("no vif for vdev_id %d found\n", vdev_id); 1615 ath10k_warn(ar, "no vif for vdev_id %d found\n",
1616 vdev_id);
1616 continue; 1617 continue;
1617 } 1618 }
1618 1619
@@ -1629,7 +1630,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1629 1630
1630 bcn = ieee80211_beacon_get(ar->hw, arvif->vif); 1631 bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
1631 if (!bcn) { 1632 if (!bcn) {
1632 ath10k_warn("could not get mac80211 beacon\n"); 1633 ath10k_warn(ar, "could not get mac80211 beacon\n");
1633 continue; 1634 continue;
1634 } 1635 }
1635 1636
@@ -1641,7 +1642,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1641 1642
1642 if (arvif->beacon) { 1643 if (arvif->beacon) {
1643 if (!arvif->beacon_sent) 1644 if (!arvif->beacon_sent)
1644 ath10k_warn("SWBA overrun on vdev %d\n", 1645 ath10k_warn(ar, "SWBA overrun on vdev %d\n",
1645 arvif->vdev_id); 1646 arvif->vdev_id);
1646 1647
1647 dma_unmap_single(arvif->ar->dev, 1648 dma_unmap_single(arvif->ar->dev,
@@ -1657,7 +1658,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1657 ret = dma_mapping_error(arvif->ar->dev, 1658 ret = dma_mapping_error(arvif->ar->dev,
1658 ATH10K_SKB_CB(bcn)->paddr); 1659 ATH10K_SKB_CB(bcn)->paddr);
1659 if (ret) { 1660 if (ret) {
1660 ath10k_warn("failed to map beacon: %d\n", ret); 1661 ath10k_warn(ar, "failed to map beacon: %d\n", ret);
1661 dev_kfree_skb_any(bcn); 1662 dev_kfree_skb_any(bcn);
1662 goto skip; 1663 goto skip;
1663 } 1664 }
@@ -1674,7 +1675,7 @@ skip:
1674static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, 1675static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
1675 struct sk_buff *skb) 1676 struct sk_buff *skb)
1676{ 1677{
1677 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); 1678 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
1678} 1679}
1679 1680
1680static void ath10k_dfs_radar_report(struct ath10k *ar, 1681static void ath10k_dfs_radar_report(struct ath10k *ar,
@@ -1690,20 +1691,20 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
1690 reg0 = __le32_to_cpu(rr->reg0); 1691 reg0 = __le32_to_cpu(rr->reg0);
1691 reg1 = __le32_to_cpu(rr->reg1); 1692 reg1 = __le32_to_cpu(rr->reg1);
1692 1693
1693 ath10k_dbg(ATH10K_DBG_REGULATORY, 1694 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1694 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", 1695 "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
1695 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), 1696 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
1696 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), 1697 MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
1697 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), 1698 MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
1698 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); 1699 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
1699 ath10k_dbg(ATH10K_DBG_REGULATORY, 1700 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1700 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", 1701 "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
1701 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), 1702 MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
1702 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), 1703 MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
1703 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), 1704 MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
1704 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), 1705 MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
1705 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); 1706 MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
1706 ath10k_dbg(ATH10K_DBG_REGULATORY, 1707 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1707 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n", 1708 "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
1708 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), 1709 MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
1709 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); 1710 MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
@@ -1730,25 +1731,25 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
1730 pe.width = width; 1731 pe.width = width;
1731 pe.rssi = rssi; 1732 pe.rssi = rssi;
1732 1733
1733 ath10k_dbg(ATH10K_DBG_REGULATORY, 1734 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1734 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", 1735 "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
1735 pe.freq, pe.width, pe.rssi, pe.ts); 1736 pe.freq, pe.width, pe.rssi, pe.ts);
1736 1737
1737 ATH10K_DFS_STAT_INC(ar, pulses_detected); 1738 ATH10K_DFS_STAT_INC(ar, pulses_detected);
1738 1739
1739 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { 1740 if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
1740 ath10k_dbg(ATH10K_DBG_REGULATORY, 1741 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1741 "dfs no pulse pattern detected, yet\n"); 1742 "dfs no pulse pattern detected, yet\n");
1742 return; 1743 return;
1743 } 1744 }
1744 1745
1745 ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n"); 1746 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
1746 ATH10K_DFS_STAT_INC(ar, radar_detected); 1747 ATH10K_DFS_STAT_INC(ar, radar_detected);
1747 1748
1748 /* Control radar events reporting in debugfs file 1749 /* Control radar events reporting in debugfs file
1749 dfs_block_radar_events */ 1750 dfs_block_radar_events */
1750 if (ar->dfs_block_radar_events) { 1751 if (ar->dfs_block_radar_events) {
1751 ath10k_info("DFS Radar detected, but ignored as requested\n"); 1752 ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
1752 return; 1753 return;
1753 } 1754 }
1754 1755
@@ -1767,13 +1768,13 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
1767 reg1 = __le32_to_cpu(fftr->reg1); 1768 reg1 = __le32_to_cpu(fftr->reg1);
1768 rssi = event->hdr.rssi_combined; 1769 rssi = event->hdr.rssi_combined;
1769 1770
1770 ath10k_dbg(ATH10K_DBG_REGULATORY, 1771 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1771 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", 1772 "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
1772 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), 1773 MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
1773 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), 1774 MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
1774 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), 1775 MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
1775 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); 1776 MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
1776 ath10k_dbg(ATH10K_DBG_REGULATORY, 1777 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1777 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", 1778 "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
1778 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), 1779 MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
1779 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), 1780 MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
@@ -1785,7 +1786,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
1785 /* false event detection */ 1786 /* false event detection */
1786 if (rssi == DFS_RSSI_POSSIBLY_FALSE && 1787 if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
1787 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { 1788 peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
1788 ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); 1789 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
1789 ATH10K_DFS_STAT_INC(ar, pulses_discarded); 1790 ATH10K_DFS_STAT_INC(ar, pulses_discarded);
1790 return -EINVAL; 1791 return -EINVAL;
1791 } 1792 }
@@ -1804,7 +1805,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
1804 u8 *tlv_buf; 1805 u8 *tlv_buf;
1805 1806
1806 buf_len = __le32_to_cpu(event->hdr.buf_len); 1807 buf_len = __le32_to_cpu(event->hdr.buf_len);
1807 ath10k_dbg(ATH10K_DBG_REGULATORY, 1808 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1808 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", 1809 "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
1809 event->hdr.phy_err_code, event->hdr.rssi_combined, 1810 event->hdr.phy_err_code, event->hdr.rssi_combined,
1810 __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); 1811 __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
@@ -1817,21 +1818,22 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
1817 1818
1818 while (i < buf_len) { 1819 while (i < buf_len) {
1819 if (i + sizeof(*tlv) > buf_len) { 1820 if (i + sizeof(*tlv) > buf_len) {
1820 ath10k_warn("too short buf for tlv header (%d)\n", i); 1821 ath10k_warn(ar, "too short buf for tlv header (%d)\n",
1822 i);
1821 return; 1823 return;
1822 } 1824 }
1823 1825
1824 tlv = (struct phyerr_tlv *)&event->bufp[i]; 1826 tlv = (struct phyerr_tlv *)&event->bufp[i];
1825 tlv_len = __le16_to_cpu(tlv->len); 1827 tlv_len = __le16_to_cpu(tlv->len);
1826 tlv_buf = &event->bufp[i + sizeof(*tlv)]; 1828 tlv_buf = &event->bufp[i + sizeof(*tlv)];
1827 ath10k_dbg(ATH10K_DBG_REGULATORY, 1829 ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1828 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", 1830 "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
1829 tlv_len, tlv->tag, tlv->sig); 1831 tlv_len, tlv->tag, tlv->sig);
1830 1832
1831 switch (tlv->tag) { 1833 switch (tlv->tag) {
1832 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: 1834 case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
1833 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { 1835 if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
1834 ath10k_warn("too short radar pulse summary (%d)\n", 1836 ath10k_warn(ar, "too short radar pulse summary (%d)\n",
1835 i); 1837 i);
1836 return; 1838 return;
1837 } 1839 }
@@ -1841,7 +1843,8 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
1841 break; 1843 break;
1842 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: 1844 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
1843 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { 1845 if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
1844 ath10k_warn("too short fft report (%d)\n", i); 1846 ath10k_warn(ar, "too short fft report (%d)\n",
1847 i);
1845 return; 1848 return;
1846 } 1849 }
1847 1850
@@ -1870,7 +1873,7 @@ static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
1870 1873
1871 while (i < buf_len) { 1874 while (i < buf_len) {
1872 if (i + sizeof(*tlv) > buf_len) { 1875 if (i + sizeof(*tlv) > buf_len) {
1873 ath10k_warn("failed to parse phyerr tlv header at byte %d\n", 1876 ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
1874 i); 1877 i);
1875 return; 1878 return;
1876 } 1879 }
@@ -1880,7 +1883,7 @@ static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
1880 tlv_buf = &event->bufp[i + sizeof(*tlv)]; 1883 tlv_buf = &event->bufp[i + sizeof(*tlv)];
1881 1884
1882 if (i + sizeof(*tlv) + tlv_len > buf_len) { 1885 if (i + sizeof(*tlv) + tlv_len > buf_len) {
1883 ath10k_warn("failed to parse phyerr tlv payload at byte %d\n", 1886 ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
1884 i); 1887 i);
1885 return; 1888 return;
1886 } 1889 }
@@ -1888,7 +1891,7 @@ static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
1888 switch (tlv->tag) { 1891 switch (tlv->tag) {
1889 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: 1892 case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
1890 if (sizeof(*fftr) > tlv_len) { 1893 if (sizeof(*fftr) > tlv_len) {
1891 ath10k_warn("failed to parse fft report at byte %d\n", 1894 ath10k_warn(ar, "failed to parse fft report at byte %d\n",
1892 i); 1895 i);
1893 return; 1896 return;
1894 } 1897 }
@@ -1899,7 +1902,7 @@ static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
1899 fftr, fftr_len, 1902 fftr, fftr_len,
1900 tsf); 1903 tsf);
1901 if (res < 0) { 1904 if (res < 0) {
1902 ath10k_warn("failed to process fft report: %d\n", 1905 ath10k_warn(ar, "failed to process fft report: %d\n",
1903 res); 1906 res);
1904 return; 1907 return;
1905 } 1908 }
@@ -1922,7 +1925,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1922 1925
1923 /* Check if combined event available */ 1926 /* Check if combined event available */
1924 if (left_len < sizeof(*comb_event)) { 1927 if (left_len < sizeof(*comb_event)) {
1925 ath10k_warn("wmi phyerr combined event wrong len\n"); 1928 ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
1926 return; 1929 return;
1927 } 1930 }
1928 1931
@@ -1936,7 +1939,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1936 tsf <<= 32; 1939 tsf <<= 32;
1937 tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); 1940 tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
1938 1941
1939 ath10k_dbg(ATH10K_DBG_WMI, 1942 ath10k_dbg(ar, ATH10K_DBG_WMI,
1940 "wmi event phyerr count %d tsf64 0x%llX\n", 1943 "wmi event phyerr count %d tsf64 0x%llX\n",
1941 count, tsf); 1944 count, tsf);
1942 1945
@@ -1944,7 +1947,8 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1944 for (i = 0; i < count; i++) { 1947 for (i = 0; i < count; i++) {
1945 /* Check if we can read event header */ 1948 /* Check if we can read event header */
1946 if (left_len < sizeof(*event)) { 1949 if (left_len < sizeof(*event)) {
1947 ath10k_warn("single event (%d) wrong head len\n", i); 1950 ath10k_warn(ar, "single event (%d) wrong head len\n",
1951 i);
1948 return; 1952 return;
1949 } 1953 }
1950 1954
@@ -1954,7 +1958,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1954 phy_err_code = event->hdr.phy_err_code; 1958 phy_err_code = event->hdr.phy_err_code;
1955 1959
1956 if (left_len < buf_len) { 1960 if (left_len < buf_len) {
1957 ath10k_warn("single event (%d) wrong buf len\n", i); 1961 ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
1958 return; 1962 return;
1959 } 1963 }
1960 1964
@@ -1981,13 +1985,13 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
1981 1985
1982static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) 1986static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
1983{ 1987{
1984 ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); 1988 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
1985} 1989}
1986 1990
1987static void ath10k_wmi_event_profile_match(struct ath10k *ar, 1991static void ath10k_wmi_event_profile_match(struct ath10k *ar,
1988 struct sk_buff *skb) 1992 struct sk_buff *skb)
1989{ 1993{
1990 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); 1994 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
1991} 1995}
1992 1996
1993static void ath10k_wmi_event_debug_print(struct ath10k *ar, 1997static void ath10k_wmi_event_debug_print(struct ath10k *ar,
@@ -2012,7 +2016,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
2012 } 2016 }
2013 2017
2014 if (i == sizeof(buf) - 1) 2018 if (i == sizeof(buf) - 1)
2015 ath10k_warn("wmi debug print truncated: %d\n", skb->len); 2019 ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
2016 2020
2017 /* for some reason the debug prints end with \n, remove that */ 2021 /* for some reason the debug prints end with \n, remove that */
2018 if (skb->data[i - 1] == '\n') 2022 if (skb->data[i - 1] == '\n')
@@ -2021,108 +2025,108 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
2021 /* the last byte is always reserved for the null character */ 2025 /* the last byte is always reserved for the null character */
2022 buf[i] = '\0'; 2026 buf[i] = '\0';
2023 2027
2024 ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); 2028 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
2025} 2029}
2026 2030
2027static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) 2031static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
2028{ 2032{
2029 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); 2033 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
2030} 2034}
2031 2035
2032static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, 2036static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
2033 struct sk_buff *skb) 2037 struct sk_buff *skb)
2034{ 2038{
2035 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); 2039 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
2036} 2040}
2037 2041
2038static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, 2042static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
2039 struct sk_buff *skb) 2043 struct sk_buff *skb)
2040{ 2044{
2041 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); 2045 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
2042} 2046}
2043 2047
2044static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, 2048static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
2045 struct sk_buff *skb) 2049 struct sk_buff *skb)
2046{ 2050{
2047 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); 2051 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
2048} 2052}
2049 2053
2050static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, 2054static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
2051 struct sk_buff *skb) 2055 struct sk_buff *skb)
2052{ 2056{
2053 ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); 2057 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
2054} 2058}
2055 2059
2056static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, 2060static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
2057 struct sk_buff *skb) 2061 struct sk_buff *skb)
2058{ 2062{
2059 ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); 2063 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
2060} 2064}
2061 2065
2062static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, 2066static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
2063 struct sk_buff *skb) 2067 struct sk_buff *skb)
2064{ 2068{
2065 ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); 2069 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
2066} 2070}
2067 2071
2068static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, 2072static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
2069 struct sk_buff *skb) 2073 struct sk_buff *skb)
2070{ 2074{
2071 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); 2075 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
2072} 2076}
2073 2077
2074static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, 2078static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
2075 struct sk_buff *skb) 2079 struct sk_buff *skb)
2076{ 2080{
2077 ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); 2081 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
2078} 2082}
2079 2083
2080static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, 2084static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
2081 struct sk_buff *skb) 2085 struct sk_buff *skb)
2082{ 2086{
2083 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); 2087 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
2084} 2088}
2085 2089
2086static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, 2090static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
2087 struct sk_buff *skb) 2091 struct sk_buff *skb)
2088{ 2092{
2089 ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); 2093 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
2090} 2094}
2091 2095
2092static void ath10k_wmi_event_delba_complete(struct ath10k *ar, 2096static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
2093 struct sk_buff *skb) 2097 struct sk_buff *skb)
2094{ 2098{
2095 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); 2099 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
2096} 2100}
2097 2101
2098static void ath10k_wmi_event_addba_complete(struct ath10k *ar, 2102static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
2099 struct sk_buff *skb) 2103 struct sk_buff *skb)
2100{ 2104{
2101 ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); 2105 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
2102} 2106}
2103 2107
2104static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, 2108static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
2105 struct sk_buff *skb) 2109 struct sk_buff *skb)
2106{ 2110{
2107 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 2111 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
2108} 2112}
2109 2113
2110static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, 2114static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
2111 struct sk_buff *skb) 2115 struct sk_buff *skb)
2112{ 2116{
2113 ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); 2117 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
2114} 2118}
2115 2119
2116static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, 2120static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
2117 struct sk_buff *skb) 2121 struct sk_buff *skb)
2118{ 2122{
2119 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); 2123 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
2120} 2124}
2121 2125
2122static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, 2126static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
2123 struct sk_buff *skb) 2127 struct sk_buff *skb)
2124{ 2128{
2125 ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); 2129 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
2126} 2130}
2127 2131
2128static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, 2132static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
@@ -2142,7 +2146,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
2142 &paddr, 2146 &paddr,
2143 GFP_ATOMIC); 2147 GFP_ATOMIC);
2144 if (!ar->wmi.mem_chunks[idx].vaddr) { 2148 if (!ar->wmi.mem_chunks[idx].vaddr) {
2145 ath10k_warn("failed to allocate memory chunk\n"); 2149 ath10k_warn(ar, "failed to allocate memory chunk\n");
2146 return -ENOMEM; 2150 return -ENOMEM;
2147 } 2151 }
2148 2152
@@ -2163,7 +2167,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
2163 DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {}; 2167 DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {};
2164 2168
2165 if (skb->len < sizeof(*ev)) { 2169 if (skb->len < sizeof(*ev)) {
2166 ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", 2170 ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
2167 skb->len, sizeof(*ev)); 2171 skb->len, sizeof(*ev));
2168 return; 2172 return;
2169 } 2173 }
@@ -2186,7 +2190,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
2186 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); 2190 set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
2187 2191
2188 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 2192 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
2189 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 2193 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
2190 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 2194 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
2191 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 2195 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
2192 } 2196 }
@@ -2196,7 +2200,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
2196 2200
2197 wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap); 2201 wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap);
2198 ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); 2202 ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
2199 ath10k_dbg_dump(ATH10K_DBG_WMI, NULL, "ath10k: wmi svc: ", 2203 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
2200 ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); 2204 ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
2201 2205
2202 if (strlen(ar->hw->wiphy->fw_version) == 0) { 2206 if (strlen(ar->hw->wiphy->fw_version) == 0) {
@@ -2211,11 +2215,11 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
2211 2215
2212 /* FIXME: it probably should be better to support this */ 2216 /* FIXME: it probably should be better to support this */
2213 if (__le32_to_cpu(ev->num_mem_reqs) > 0) { 2217 if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
2214 ath10k_warn("target requested %d memory chunks; ignoring\n", 2218 ath10k_warn(ar, "target requested %d memory chunks; ignoring\n",
2215 __le32_to_cpu(ev->num_mem_reqs)); 2219 __le32_to_cpu(ev->num_mem_reqs));
2216 } 2220 }
2217 2221
2218 ath10k_dbg(ATH10K_DBG_WMI, 2222 ath10k_dbg(ar, ATH10K_DBG_WMI,
2219 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 2223 "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
2220 __le32_to_cpu(ev->sw_version), 2224 __le32_to_cpu(ev->sw_version),
2221 __le32_to_cpu(ev->sw_version_1), 2225 __le32_to_cpu(ev->sw_version_1),
@@ -2240,7 +2244,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2240 DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {}; 2244 DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {};
2241 2245
2242 if (skb->len < sizeof(*ev)) { 2246 if (skb->len < sizeof(*ev)) {
2243 ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", 2247 ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
2244 skb->len, sizeof(*ev)); 2248 skb->len, sizeof(*ev));
2245 return; 2249 return;
2246 } 2250 }
@@ -2256,7 +2260,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2256 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); 2260 ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
2257 2261
2258 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { 2262 if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
2259 ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", 2263 ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
2260 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); 2264 ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
2261 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; 2265 ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
2262 } 2266 }
@@ -2266,7 +2270,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2266 2270
2267 wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap); 2271 wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap);
2268 ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); 2272 ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
2269 ath10k_dbg_dump(ATH10K_DBG_WMI, NULL, "ath10k: wmi svc: ", 2273 ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
2270 ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); 2274 ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
2271 2275
2272 if (strlen(ar->hw->wiphy->fw_version) == 0) { 2276 if (strlen(ar->hw->wiphy->fw_version) == 0) {
@@ -2280,7 +2284,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2280 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); 2284 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
2281 2285
2282 if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { 2286 if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
2283 ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", 2287 ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
2284 num_mem_reqs); 2288 num_mem_reqs);
2285 return; 2289 return;
2286 } 2290 }
@@ -2288,7 +2292,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2288 if (!num_mem_reqs) 2292 if (!num_mem_reqs)
2289 goto exit; 2293 goto exit;
2290 2294
2291 ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", 2295 ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
2292 num_mem_reqs); 2296 num_mem_reqs);
2293 2297
2294 for (i = 0; i < num_mem_reqs; ++i) { 2298 for (i = 0; i < num_mem_reqs; ++i) {
@@ -2306,7 +2310,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2306 else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) 2310 else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
2307 num_units = TARGET_10X_NUM_VDEVS + 1; 2311 num_units = TARGET_10X_NUM_VDEVS + 1;
2308 2312
2309 ath10k_dbg(ATH10K_DBG_WMI, 2313 ath10k_dbg(ar, ATH10K_DBG_WMI,
2310 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", 2314 "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
2311 req_id, 2315 req_id,
2312 __le32_to_cpu(ev->mem_reqs[i].num_units), 2316 __le32_to_cpu(ev->mem_reqs[i].num_units),
@@ -2321,7 +2325,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
2321 } 2325 }
2322 2326
2323exit: 2327exit:
2324 ath10k_dbg(ATH10K_DBG_WMI, 2328 ath10k_dbg(ar, ATH10K_DBG_WMI,
2325 "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", 2329 "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
2326 __le32_to_cpu(ev->sw_version), 2330 __le32_to_cpu(ev->sw_version),
2327 __le32_to_cpu(ev->abi_version), 2331 __le32_to_cpu(ev->abi_version),
@@ -2345,7 +2349,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
2345 2349
2346 memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); 2350 memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
2347 2351
2348 ath10k_dbg(ATH10K_DBG_WMI, 2352 ath10k_dbg(ar, ATH10K_DBG_WMI,
2349 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n", 2353 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
2350 __le32_to_cpu(ev->sw_version), 2354 __le32_to_cpu(ev->sw_version),
2351 __le32_to_cpu(ev->abi_version), 2355 __le32_to_cpu(ev->abi_version),
@@ -2465,7 +2469,7 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
2465 ath10k_wmi_ready_event_rx(ar, skb); 2469 ath10k_wmi_ready_event_rx(ar, skb);
2466 break; 2470 break;
2467 default: 2471 default:
2468 ath10k_warn("Unknown eventid: %d\n", id); 2472 ath10k_warn(ar, "Unknown eventid: %d\n", id);
2469 break; 2473 break;
2470 } 2474 }
2471 2475
@@ -2572,7 +2576,7 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
2572 ath10k_wmi_ready_event_rx(ar, skb); 2576 ath10k_wmi_ready_event_rx(ar, skb);
2573 break; 2577 break;
2574 default: 2578 default:
2575 ath10k_warn("Unknown eventid: %d\n", id); 2579 ath10k_warn(ar, "Unknown eventid: %d\n", id);
2576 break; 2580 break;
2577 } 2581 }
2578 2582
@@ -2685,11 +2689,11 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
2685 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID: 2689 case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
2686 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID: 2690 case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
2687 case WMI_10_2_WDS_PEER_EVENTID: 2691 case WMI_10_2_WDS_PEER_EVENTID:
2688 ath10k_dbg(ATH10K_DBG_WMI, 2692 ath10k_dbg(ar, ATH10K_DBG_WMI,
2689 "received event id %d not implemented\n", id); 2693 "received event id %d not implemented\n", id);
2690 break; 2694 break;
2691 default: 2695 default:
2692 ath10k_warn("Unknown eventid: %d\n", id); 2696 ath10k_warn(ar, "Unknown eventid: %d\n", id);
2693 break; 2697 break;
2694 } 2698 }
2695 2699
@@ -2766,7 +2770,7 @@ int ath10k_wmi_connect(struct ath10k *ar)
2766 2770
2767 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); 2771 status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
2768 if (status) { 2772 if (status) {
2769 ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", 2773 ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
2770 status); 2774 status);
2771 return status; 2775 return status;
2772 } 2776 }
@@ -2782,7 +2786,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2782 struct wmi_pdev_set_regdomain_cmd *cmd; 2786 struct wmi_pdev_set_regdomain_cmd *cmd;
2783 struct sk_buff *skb; 2787 struct sk_buff *skb;
2784 2788
2785 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2789 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2786 if (!skb) 2790 if (!skb)
2787 return -ENOMEM; 2791 return -ENOMEM;
2788 2792
@@ -2793,7 +2797,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2793 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); 2797 cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
2794 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 2798 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
2795 2799
2796 ath10k_dbg(ATH10K_DBG_WMI, 2800 ath10k_dbg(ar, ATH10K_DBG_WMI,
2797 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", 2801 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
2798 rd, rd2g, rd5g, ctl2g, ctl5g); 2802 rd, rd2g, rd5g, ctl2g, ctl5g);
2799 2803
@@ -2809,7 +2813,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2809 struct wmi_pdev_set_regdomain_cmd_10x *cmd; 2813 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
2810 struct sk_buff *skb; 2814 struct sk_buff *skb;
2811 2815
2812 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2816 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2813 if (!skb) 2817 if (!skb)
2814 return -ENOMEM; 2818 return -ENOMEM;
2815 2819
@@ -2821,7 +2825,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
2821 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); 2825 cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
2822 cmd->dfs_domain = __cpu_to_le32(dfs_reg); 2826 cmd->dfs_domain = __cpu_to_le32(dfs_reg);
2823 2827
2824 ath10k_dbg(ATH10K_DBG_WMI, 2828 ath10k_dbg(ar, ATH10K_DBG_WMI,
2825 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", 2829 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
2826 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); 2830 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
2827 2831
@@ -2851,7 +2855,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2851 if (arg->passive) 2855 if (arg->passive)
2852 return -EINVAL; 2856 return -EINVAL;
2853 2857
2854 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2858 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2855 if (!skb) 2859 if (!skb)
2856 return -ENOMEM; 2860 return -ENOMEM;
2857 2861
@@ -2869,7 +2873,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
2869 cmd->chan.reg_classid = arg->reg_class_id; 2873 cmd->chan.reg_classid = arg->reg_class_id;
2870 cmd->chan.antenna_max = arg->max_antenna_gain; 2874 cmd->chan.antenna_max = arg->max_antenna_gain;
2871 2875
2872 ath10k_dbg(ATH10K_DBG_WMI, 2876 ath10k_dbg(ar, ATH10K_DBG_WMI,
2873 "wmi set channel mode %d freq %d\n", 2877 "wmi set channel mode %d freq %d\n",
2874 arg->mode, arg->freq); 2878 arg->mode, arg->freq);
2875 2879
@@ -2882,7 +2886,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
2882 struct wmi_pdev_suspend_cmd *cmd; 2886 struct wmi_pdev_suspend_cmd *cmd;
2883 struct sk_buff *skb; 2887 struct sk_buff *skb;
2884 2888
2885 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2889 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2886 if (!skb) 2890 if (!skb)
2887 return -ENOMEM; 2891 return -ENOMEM;
2888 2892
@@ -2896,7 +2900,7 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
2896{ 2900{
2897 struct sk_buff *skb; 2901 struct sk_buff *skb;
2898 2902
2899 skb = ath10k_wmi_alloc_skb(0); 2903 skb = ath10k_wmi_alloc_skb(ar, 0);
2900 if (skb == NULL) 2904 if (skb == NULL)
2901 return -ENOMEM; 2905 return -ENOMEM;
2902 2906
@@ -2909,11 +2913,12 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
2909 struct sk_buff *skb; 2913 struct sk_buff *skb;
2910 2914
2911 if (id == WMI_PDEV_PARAM_UNSUPPORTED) { 2915 if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
2912 ath10k_warn("pdev param %d not supported by firmware\n", id); 2916 ath10k_warn(ar, "pdev param %d not supported by firmware\n",
2917 id);
2913 return -EOPNOTSUPP; 2918 return -EOPNOTSUPP;
2914 } 2919 }
2915 2920
2916 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 2921 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
2917 if (!skb) 2922 if (!skb)
2918 return -ENOMEM; 2923 return -ENOMEM;
2919 2924
@@ -2921,7 +2926,7 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
2921 cmd->param_id = __cpu_to_le32(id); 2926 cmd->param_id = __cpu_to_le32(id);
2922 cmd->param_value = __cpu_to_le32(value); 2927 cmd->param_value = __cpu_to_le32(value);
2923 2928
2924 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", 2929 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
2925 id, value); 2930 id, value);
2926 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); 2931 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
2927} 2932}
@@ -2988,7 +2993,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
2988 len = sizeof(*cmd) + 2993 len = sizeof(*cmd) +
2989 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 2994 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
2990 2995
2991 buf = ath10k_wmi_alloc_skb(len); 2996 buf = ath10k_wmi_alloc_skb(ar, len);
2992 if (!buf) 2997 if (!buf)
2993 return -ENOMEM; 2998 return -ENOMEM;
2994 2999
@@ -2999,7 +3004,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
2999 goto out; 3004 goto out;
3000 } 3005 }
3001 3006
3002 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 3007 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
3003 ar->wmi.num_mem_chunks); 3008 ar->wmi.num_mem_chunks);
3004 3009
3005 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 3010 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -3012,7 +3017,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
3012 cmd->host_mem_chunks[i].req_id = 3017 cmd->host_mem_chunks[i].req_id =
3013 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 3018 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
3014 3019
3015 ath10k_dbg(ATH10K_DBG_WMI, 3020 ath10k_dbg(ar, ATH10K_DBG_WMI,
3016 "wmi chunk %d len %d requested, addr 0x%llx\n", 3021 "wmi chunk %d len %d requested, addr 0x%llx\n",
3017 i, 3022 i,
3018 ar->wmi.mem_chunks[i].len, 3023 ar->wmi.mem_chunks[i].len,
@@ -3021,7 +3026,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
3021out: 3026out:
3022 memcpy(&cmd->resource_config, &config, sizeof(config)); 3027 memcpy(&cmd->resource_config, &config, sizeof(config));
3023 3028
3024 ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); 3029 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
3025 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3030 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
3026} 3031}
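
The same host-memory-chunk loop is touched identically in the three firmware init variants (main above, 10.x and 10.2 below); in each, only the per-chunk debug print changes. A condensed sketch of that shared loop follows. Only the req_id assignment and the debug print appear in the hunks; the loop header, the elided per-chunk assignments and the paddr field used for the address argument are assumptions made for illustration.

/* Shared by the main/10.x/10.2 init paths: advertise the host memory
 * chunks to firmware and log each one against the owning device.
 * Only the req_id assignment and the debug print are shown in the
 * hunks; the loop header and the paddr field name are assumptions.
 */
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
	/* ... chunk address and length assignments elided ... */
	cmd->host_mem_chunks[i].req_id =
		__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi chunk %d len %d requested, addr 0x%llx\n",
		   i,
		   ar->wmi.mem_chunks[i].len,
		   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
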
3027 3032
@@ -3079,7 +3084,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
3079 len = sizeof(*cmd) + 3084 len = sizeof(*cmd) +
3080 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 3085 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
3081 3086
3082 buf = ath10k_wmi_alloc_skb(len); 3087 buf = ath10k_wmi_alloc_skb(ar, len);
3083 if (!buf) 3088 if (!buf)
3084 return -ENOMEM; 3089 return -ENOMEM;
3085 3090
@@ -3090,7 +3095,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
3090 goto out; 3095 goto out;
3091 } 3096 }
3092 3097
3093 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 3098 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
3094 ar->wmi.num_mem_chunks); 3099 ar->wmi.num_mem_chunks);
3095 3100
3096 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 3101 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -3103,7 +3108,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
3103 cmd->host_mem_chunks[i].req_id = 3108 cmd->host_mem_chunks[i].req_id =
3104 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 3109 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
3105 3110
3106 ath10k_dbg(ATH10K_DBG_WMI, 3111 ath10k_dbg(ar, ATH10K_DBG_WMI,
3107 "wmi chunk %d len %d requested, addr 0x%llx\n", 3112 "wmi chunk %d len %d requested, addr 0x%llx\n",
3108 i, 3113 i,
3109 ar->wmi.mem_chunks[i].len, 3114 ar->wmi.mem_chunks[i].len,
@@ -3112,7 +3117,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
3112out: 3117out:
3113 memcpy(&cmd->resource_config, &config, sizeof(config)); 3118 memcpy(&cmd->resource_config, &config, sizeof(config));
3114 3119
3115 ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n"); 3120 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
3116 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3121 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
3117} 3122}
3118 3123
@@ -3170,7 +3175,7 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
3170 len = sizeof(*cmd) + 3175 len = sizeof(*cmd) +
3171 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); 3176 (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
3172 3177
3173 buf = ath10k_wmi_alloc_skb(len); 3178 buf = ath10k_wmi_alloc_skb(ar, len);
3174 if (!buf) 3179 if (!buf)
3175 return -ENOMEM; 3180 return -ENOMEM;
3176 3181
@@ -3181,7 +3186,7 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
3181 goto out; 3186 goto out;
3182 } 3187 }
3183 3188
3184 ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", 3189 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
3185 ar->wmi.num_mem_chunks); 3190 ar->wmi.num_mem_chunks);
3186 3191
3187 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); 3192 cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
@@ -3194,7 +3199,7 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
3194 cmd->host_mem_chunks[i].req_id = 3199 cmd->host_mem_chunks[i].req_id =
3195 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 3200 __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
3196 3201
3197 ath10k_dbg(ATH10K_DBG_WMI, 3202 ath10k_dbg(ar, ATH10K_DBG_WMI,
3198 "wmi chunk %d len %d requested, addr 0x%llx\n", 3203 "wmi chunk %d len %d requested, addr 0x%llx\n",
3199 i, 3204 i,
3200 ar->wmi.mem_chunks[i].len, 3205 ar->wmi.mem_chunks[i].len,
@@ -3203,7 +3208,7 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
3203out: 3208out:
3204 memcpy(&cmd->resource_config.common, &config, sizeof(config)); 3209 memcpy(&cmd->resource_config.common, &config, sizeof(config));
3205 3210
3206 ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10.2\n"); 3211 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
3207 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3212 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
3208} 3213}
3209 3214
@@ -3295,7 +3300,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
3295 if (len < 0) 3300 if (len < 0)
3296 return len; /* len contains error code here */ 3301 return len; /* len contains error code here */
3297 3302
3298 skb = ath10k_wmi_alloc_skb(len); 3303 skb = ath10k_wmi_alloc_skb(ar, len);
3299 if (!skb) 3304 if (!skb)
3300 return -ENOMEM; 3305 return -ENOMEM;
3301 3306
@@ -3391,7 +3396,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
3391 return -EINVAL; 3396 return -EINVAL;
3392 } 3397 }
3393 3398
3394 ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); 3399 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
3395 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); 3400 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
3396} 3401}
3397 3402
@@ -3433,7 +3438,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
3433 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) 3438 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
3434 return -EINVAL; 3439 return -EINVAL;
3435 3440
3436 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3441 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3437 if (!skb) 3442 if (!skb)
3438 return -ENOMEM; 3443 return -ENOMEM;
3439 3444
@@ -3449,7 +3454,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
3449 cmd->scan_id = __cpu_to_le32(scan_id); 3454 cmd->scan_id = __cpu_to_le32(scan_id);
3450 cmd->scan_req_id = __cpu_to_le32(req_id); 3455 cmd->scan_req_id = __cpu_to_le32(req_id);
3451 3456
3452 ath10k_dbg(ATH10K_DBG_WMI, 3457 ath10k_dbg(ar, ATH10K_DBG_WMI,
3453 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", 3458 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
3454 arg->req_id, arg->req_type, arg->u.scan_id); 3459 arg->req_id, arg->req_type, arg->u.scan_id);
3455 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); 3460 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
@@ -3463,7 +3468,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
3463 struct wmi_vdev_create_cmd *cmd; 3468 struct wmi_vdev_create_cmd *cmd;
3464 struct sk_buff *skb; 3469 struct sk_buff *skb;
3465 3470
3466 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3471 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3467 if (!skb) 3472 if (!skb)
3468 return -ENOMEM; 3473 return -ENOMEM;
3469 3474
@@ -3473,7 +3478,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
3473 cmd->vdev_subtype = __cpu_to_le32(subtype); 3478 cmd->vdev_subtype = __cpu_to_le32(subtype);
3474 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); 3479 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
3475 3480
3476 ath10k_dbg(ATH10K_DBG_WMI, 3481 ath10k_dbg(ar, ATH10K_DBG_WMI,
3477 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", 3482 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
3478 vdev_id, type, subtype, macaddr); 3483 vdev_id, type, subtype, macaddr);
3479 3484
@@ -3485,14 +3490,14 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
3485 struct wmi_vdev_delete_cmd *cmd; 3490 struct wmi_vdev_delete_cmd *cmd;
3486 struct sk_buff *skb; 3491 struct sk_buff *skb;
3487 3492
3488 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3493 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3489 if (!skb) 3494 if (!skb)
3490 return -ENOMEM; 3495 return -ENOMEM;
3491 3496
3492 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 3497 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
3493 cmd->vdev_id = __cpu_to_le32(vdev_id); 3498 cmd->vdev_id = __cpu_to_le32(vdev_id);
3494 3499
3495 ath10k_dbg(ATH10K_DBG_WMI, 3500 ath10k_dbg(ar, ATH10K_DBG_WMI,
3496 "WMI vdev delete id %d\n", vdev_id); 3501 "WMI vdev delete id %d\n", vdev_id);
3497 3502
3498 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); 3503 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
@@ -3525,7 +3530,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
3525 else 3530 else
3526 return -EINVAL; /* should not happen, we already check cmd_id */ 3531 return -EINVAL; /* should not happen, we already check cmd_id */
3527 3532
3528 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3533 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3529 if (!skb) 3534 if (!skb)
3530 return -ENOMEM; 3535 return -ENOMEM;
3531 3536
@@ -3563,7 +3568,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
3563 cmd->chan.reg_classid = arg->channel.reg_class_id; 3568 cmd->chan.reg_classid = arg->channel.reg_class_id;
3564 cmd->chan.antenna_max = arg->channel.max_antenna_gain; 3569 cmd->chan.antenna_max = arg->channel.max_antenna_gain;
3565 3570
3566 ath10k_dbg(ATH10K_DBG_WMI, 3571 ath10k_dbg(ar, ATH10K_DBG_WMI,
3567 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " 3572 "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
3568 "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, 3573 "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
3569 flags, arg->channel.freq, arg->channel.mode, 3574 flags, arg->channel.freq, arg->channel.mode,
@@ -3593,14 +3598,14 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
3593 struct wmi_vdev_stop_cmd *cmd; 3598 struct wmi_vdev_stop_cmd *cmd;
3594 struct sk_buff *skb; 3599 struct sk_buff *skb;
3595 3600
3596 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3601 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3597 if (!skb) 3602 if (!skb)
3598 return -ENOMEM; 3603 return -ENOMEM;
3599 3604
3600 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 3605 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
3601 cmd->vdev_id = __cpu_to_le32(vdev_id); 3606 cmd->vdev_id = __cpu_to_le32(vdev_id);
3602 3607
3603 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); 3608 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
3604 3609
3605 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); 3610 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
3606} 3611}
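
From the caller's side the helper signatures above are unchanged by this patch; only the skb allocation and logging inside them gain the device argument. A hypothetical caller sketch, assuming an ath10k_vif with a vdev_id field as used elsewhere in the driver (neither the helper nor the field is part of this patch):

/* Hypothetical caller, for illustration only: stop a vdev and attribute
 * any failure to the right device. The arvif->vdev_id field is an
 * assumption; it is not introduced by this patch.
 */
static void example_vdev_teardown(struct ath10k *ar, struct ath10k_vif *arvif)
{
	int ret;

	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
			    arvif->vdev_id, ret);
}
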
@@ -3610,7 +3615,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
3610 struct wmi_vdev_up_cmd *cmd; 3615 struct wmi_vdev_up_cmd *cmd;
3611 struct sk_buff *skb; 3616 struct sk_buff *skb;
3612 3617
3613 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3618 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3614 if (!skb) 3619 if (!skb)
3615 return -ENOMEM; 3620 return -ENOMEM;
3616 3621
@@ -3619,7 +3624,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
3619 cmd->vdev_assoc_id = __cpu_to_le32(aid); 3624 cmd->vdev_assoc_id = __cpu_to_le32(aid);
3620 memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN); 3625 memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
3621 3626
3622 ath10k_dbg(ATH10K_DBG_WMI, 3627 ath10k_dbg(ar, ATH10K_DBG_WMI,
3623 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 3628 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
3624 vdev_id, aid, bssid); 3629 vdev_id, aid, bssid);
3625 3630
@@ -3631,14 +3636,14 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
3631 struct wmi_vdev_down_cmd *cmd; 3636 struct wmi_vdev_down_cmd *cmd;
3632 struct sk_buff *skb; 3637 struct sk_buff *skb;
3633 3638
3634 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3639 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3635 if (!skb) 3640 if (!skb)
3636 return -ENOMEM; 3641 return -ENOMEM;
3637 3642
3638 cmd = (struct wmi_vdev_down_cmd *)skb->data; 3643 cmd = (struct wmi_vdev_down_cmd *)skb->data;
3639 cmd->vdev_id = __cpu_to_le32(vdev_id); 3644 cmd->vdev_id = __cpu_to_le32(vdev_id);
3640 3645
3641 ath10k_dbg(ATH10K_DBG_WMI, 3646 ath10k_dbg(ar, ATH10K_DBG_WMI,
3642 "wmi mgmt vdev down id 0x%x\n", vdev_id); 3647 "wmi mgmt vdev down id 0x%x\n", vdev_id);
3643 3648
3644 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); 3649 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
@@ -3651,13 +3656,13 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3651 struct sk_buff *skb; 3656 struct sk_buff *skb;
3652 3657
3653 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { 3658 if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
3654 ath10k_dbg(ATH10K_DBG_WMI, 3659 ath10k_dbg(ar, ATH10K_DBG_WMI,
3655 "vdev param %d not supported by firmware\n", 3660 "vdev param %d not supported by firmware\n",
3656 param_id); 3661 param_id);
3657 return -EOPNOTSUPP; 3662 return -EOPNOTSUPP;
3658 } 3663 }
3659 3664
3660 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3665 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3661 if (!skb) 3666 if (!skb)
3662 return -ENOMEM; 3667 return -ENOMEM;
3663 3668
@@ -3666,7 +3671,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3666 cmd->param_id = __cpu_to_le32(param_id); 3671 cmd->param_id = __cpu_to_le32(param_id);
3667 cmd->param_value = __cpu_to_le32(param_value); 3672 cmd->param_value = __cpu_to_le32(param_value);
3668 3673
3669 ath10k_dbg(ATH10K_DBG_WMI, 3674 ath10k_dbg(ar, ATH10K_DBG_WMI,
3670 "wmi vdev id 0x%x set param %d value %d\n", 3675 "wmi vdev id 0x%x set param %d value %d\n",
3671 vdev_id, param_id, param_value); 3676 vdev_id, param_id, param_value);
3672 3677
@@ -3684,7 +3689,7 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
3684 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) 3689 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
3685 return -EINVAL; 3690 return -EINVAL;
3686 3691
3687 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); 3692 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
3688 if (!skb) 3693 if (!skb)
3689 return -ENOMEM; 3694 return -ENOMEM;
3690 3695
@@ -3702,7 +3707,7 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
3702 if (arg->key_data) 3707 if (arg->key_data)
3703 memcpy(cmd->key_data, arg->key_data, arg->key_len); 3708 memcpy(cmd->key_data, arg->key_data, arg->key_len);
3704 3709
3705 ath10k_dbg(ATH10K_DBG_WMI, 3710 ath10k_dbg(ar, ATH10K_DBG_WMI,
3706 "wmi vdev install key idx %d cipher %d len %d\n", 3711 "wmi vdev install key idx %d cipher %d len %d\n",
3707 arg->key_idx, arg->key_cipher, arg->key_len); 3712 arg->key_idx, arg->key_cipher, arg->key_len);
3708 return ath10k_wmi_cmd_send(ar, skb, 3713 return ath10k_wmi_cmd_send(ar, skb,
@@ -3716,7 +3721,7 @@ int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
3716 struct sk_buff *skb; 3721 struct sk_buff *skb;
3717 u32 cmdid; 3722 u32 cmdid;
3718 3723
3719 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3724 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3720 if (!skb) 3725 if (!skb)
3721 return -ENOMEM; 3726 return -ENOMEM;
3722 3727
@@ -3752,7 +3757,7 @@ int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
3752 struct sk_buff *skb; 3757 struct sk_buff *skb;
3753 u32 cmdid; 3758 u32 cmdid;
3754 3759
3755 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3760 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3756 if (!skb) 3761 if (!skb)
3757 return -ENOMEM; 3762 return -ENOMEM;
3758 3763
@@ -3771,7 +3776,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
3771 struct wmi_peer_create_cmd *cmd; 3776 struct wmi_peer_create_cmd *cmd;
3772 struct sk_buff *skb; 3777 struct sk_buff *skb;
3773 3778
3774 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3779 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3775 if (!skb) 3780 if (!skb)
3776 return -ENOMEM; 3781 return -ENOMEM;
3777 3782
@@ -3779,7 +3784,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
3779 cmd->vdev_id = __cpu_to_le32(vdev_id); 3784 cmd->vdev_id = __cpu_to_le32(vdev_id);
3780 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3785 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3781 3786
3782 ath10k_dbg(ATH10K_DBG_WMI, 3787 ath10k_dbg(ar, ATH10K_DBG_WMI,
3783 "wmi peer create vdev_id %d peer_addr %pM\n", 3788 "wmi peer create vdev_id %d peer_addr %pM\n",
3784 vdev_id, peer_addr); 3789 vdev_id, peer_addr);
3785 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); 3790 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
@@ -3791,7 +3796,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
3791 struct wmi_peer_delete_cmd *cmd; 3796 struct wmi_peer_delete_cmd *cmd;
3792 struct sk_buff *skb; 3797 struct sk_buff *skb;
3793 3798
3794 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3799 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3795 if (!skb) 3800 if (!skb)
3796 return -ENOMEM; 3801 return -ENOMEM;
3797 3802
@@ -3799,7 +3804,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
3799 cmd->vdev_id = __cpu_to_le32(vdev_id); 3804 cmd->vdev_id = __cpu_to_le32(vdev_id);
3800 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3805 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3801 3806
3802 ath10k_dbg(ATH10K_DBG_WMI, 3807 ath10k_dbg(ar, ATH10K_DBG_WMI,
3803 "wmi peer delete vdev_id %d peer_addr %pM\n", 3808 "wmi peer delete vdev_id %d peer_addr %pM\n",
3804 vdev_id, peer_addr); 3809 vdev_id, peer_addr);
3805 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); 3810 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
@@ -3811,7 +3816,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
3811 struct wmi_peer_flush_tids_cmd *cmd; 3816 struct wmi_peer_flush_tids_cmd *cmd;
3812 struct sk_buff *skb; 3817 struct sk_buff *skb;
3813 3818
3814 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3819 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3815 if (!skb) 3820 if (!skb)
3816 return -ENOMEM; 3821 return -ENOMEM;
3817 3822
@@ -3820,7 +3825,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
3820 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); 3825 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
3821 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3826 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3822 3827
3823 ath10k_dbg(ATH10K_DBG_WMI, 3828 ath10k_dbg(ar, ATH10K_DBG_WMI,
3824 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 3829 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
3825 vdev_id, peer_addr, tid_bitmap); 3830 vdev_id, peer_addr, tid_bitmap);
3826 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); 3831 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
@@ -3833,7 +3838,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
3833 struct wmi_peer_set_param_cmd *cmd; 3838 struct wmi_peer_set_param_cmd *cmd;
3834 struct sk_buff *skb; 3839 struct sk_buff *skb;
3835 3840
3836 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3841 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3837 if (!skb) 3842 if (!skb)
3838 return -ENOMEM; 3843 return -ENOMEM;
3839 3844
@@ -3843,7 +3848,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
3843 cmd->param_value = __cpu_to_le32(param_value); 3848 cmd->param_value = __cpu_to_le32(param_value);
3844 memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); 3849 memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
3845 3850
3846 ath10k_dbg(ATH10K_DBG_WMI, 3851 ath10k_dbg(ar, ATH10K_DBG_WMI,
3847 "wmi vdev %d peer 0x%pM set param %d value %d\n", 3852 "wmi vdev %d peer 0x%pM set param %d value %d\n",
3848 vdev_id, peer_addr, param_id, param_value); 3853 vdev_id, peer_addr, param_id, param_value);
3849 3854
@@ -3856,7 +3861,7 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
3856 struct wmi_sta_powersave_mode_cmd *cmd; 3861 struct wmi_sta_powersave_mode_cmd *cmd;
3857 struct sk_buff *skb; 3862 struct sk_buff *skb;
3858 3863
3859 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3864 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3860 if (!skb) 3865 if (!skb)
3861 return -ENOMEM; 3866 return -ENOMEM;
3862 3867
@@ -3864,7 +3869,7 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
3864 cmd->vdev_id = __cpu_to_le32(vdev_id); 3869 cmd->vdev_id = __cpu_to_le32(vdev_id);
3865 cmd->sta_ps_mode = __cpu_to_le32(psmode); 3870 cmd->sta_ps_mode = __cpu_to_le32(psmode);
3866 3871
3867 ath10k_dbg(ATH10K_DBG_WMI, 3872 ath10k_dbg(ar, ATH10K_DBG_WMI,
3868 "wmi set powersave id 0x%x mode %d\n", 3873 "wmi set powersave id 0x%x mode %d\n",
3869 vdev_id, psmode); 3874 vdev_id, psmode);
3870 3875
@@ -3879,7 +3884,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
3879 struct wmi_sta_powersave_param_cmd *cmd; 3884 struct wmi_sta_powersave_param_cmd *cmd;
3880 struct sk_buff *skb; 3885 struct sk_buff *skb;
3881 3886
3882 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3887 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3883 if (!skb) 3888 if (!skb)
3884 return -ENOMEM; 3889 return -ENOMEM;
3885 3890
@@ -3888,7 +3893,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
3888 cmd->param_id = __cpu_to_le32(param_id); 3893 cmd->param_id = __cpu_to_le32(param_id);
3889 cmd->param_value = __cpu_to_le32(value); 3894 cmd->param_value = __cpu_to_le32(value);
3890 3895
3891 ath10k_dbg(ATH10K_DBG_WMI, 3896 ath10k_dbg(ar, ATH10K_DBG_WMI,
3892 "wmi sta ps param vdev_id 0x%x param %d value %d\n", 3897 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
3893 vdev_id, param_id, value); 3898 vdev_id, param_id, value);
3894 return ath10k_wmi_cmd_send(ar, skb, 3899 return ath10k_wmi_cmd_send(ar, skb,
@@ -3904,7 +3909,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
3904 if (!mac) 3909 if (!mac)
3905 return -EINVAL; 3910 return -EINVAL;
3906 3911
3907 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 3912 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3908 if (!skb) 3913 if (!skb)
3909 return -ENOMEM; 3914 return -ENOMEM;
3910 3915
@@ -3914,7 +3919,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
3914 cmd->param_value = __cpu_to_le32(value); 3919 cmd->param_value = __cpu_to_le32(value);
3915 memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); 3920 memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
3916 3921
3917 ath10k_dbg(ATH10K_DBG_WMI, 3922 ath10k_dbg(ar, ATH10K_DBG_WMI,
3918 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", 3923 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
3919 vdev_id, param_id, value, mac); 3924 vdev_id, param_id, value, mac);
3920 3925
@@ -3934,7 +3939,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
3934 3939
3935 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); 3940 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
3936 3941
3937 skb = ath10k_wmi_alloc_skb(len); 3942 skb = ath10k_wmi_alloc_skb(ar, len);
3938 if (!skb) 3943 if (!skb)
3939 return -EINVAL; 3944 return -EINVAL;
3940 3945
@@ -4076,7 +4081,7 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
4076 len = sizeof(struct wmi_main_peer_assoc_complete_cmd); 4081 len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
4077 } 4082 }
4078 4083
4079 skb = ath10k_wmi_alloc_skb(len); 4084 skb = ath10k_wmi_alloc_skb(ar, len);
4080 if (!skb) 4085 if (!skb)
4081 return -ENOMEM; 4086 return -ENOMEM;
4082 4087
@@ -4089,7 +4094,7 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
4089 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); 4094 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
4090 } 4095 }
4091 4096
4092 ath10k_dbg(ATH10K_DBG_WMI, 4097 ath10k_dbg(ar, ATH10K_DBG_WMI,
4093 "wmi peer assoc vdev %d addr %pM (%s)\n", 4098 "wmi peer assoc vdev %d addr %pM (%s)\n",
4094 arg->vdev_id, arg->addr, 4099 arg->vdev_id, arg->addr,
4095 arg->peer_reassoc ? "reassociate" : "new"); 4100 arg->peer_reassoc ? "reassociate" : "new");
@@ -4107,7 +4112,7 @@ int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
4107 int ret; 4112 int ret;
4108 u16 fc; 4113 u16 fc;
4109 4114
4110 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4115 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4111 if (!skb) 4116 if (!skb)
4112 return -ENOMEM; 4117 return -ENOMEM;
4113 4118
@@ -4155,7 +4160,7 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
4155 struct wmi_pdev_set_wmm_params *cmd; 4160 struct wmi_pdev_set_wmm_params *cmd;
4156 struct sk_buff *skb; 4161 struct sk_buff *skb;
4157 4162
4158 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4163 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4159 if (!skb) 4164 if (!skb)
4160 return -ENOMEM; 4165 return -ENOMEM;
4161 4166
@@ -4165,7 +4170,7 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
4165 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); 4170 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
4166 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); 4171 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
4167 4172
4168 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); 4173 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
4169 return ath10k_wmi_cmd_send(ar, skb, 4174 return ath10k_wmi_cmd_send(ar, skb,
4170 ar->wmi.cmd->pdev_set_wmm_params_cmdid); 4175 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
4171} 4176}
@@ -4175,14 +4180,14 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
4175 struct wmi_request_stats_cmd *cmd; 4180 struct wmi_request_stats_cmd *cmd;
4176 struct sk_buff *skb; 4181 struct sk_buff *skb;
4177 4182
4178 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4183 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4179 if (!skb) 4184 if (!skb)
4180 return -ENOMEM; 4185 return -ENOMEM;
4181 4186
4182 cmd = (struct wmi_request_stats_cmd *)skb->data; 4187 cmd = (struct wmi_request_stats_cmd *)skb->data;
4183 cmd->stats_id = __cpu_to_le32(stats_id); 4188 cmd->stats_id = __cpu_to_le32(stats_id);
4184 4189
4185 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); 4190 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
4186 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); 4191 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
4187} 4192}
4188 4193
@@ -4192,7 +4197,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
4192 struct wmi_force_fw_hang_cmd *cmd; 4197 struct wmi_force_fw_hang_cmd *cmd;
4193 struct sk_buff *skb; 4198 struct sk_buff *skb;
4194 4199
4195 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4200 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4196 if (!skb) 4201 if (!skb)
4197 return -ENOMEM; 4202 return -ENOMEM;
4198 4203
@@ -4200,7 +4205,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
4200 cmd->type = __cpu_to_le32(type); 4205 cmd->type = __cpu_to_le32(type);
4201 cmd->delay_ms = __cpu_to_le32(delay_ms); 4206 cmd->delay_ms = __cpu_to_le32(delay_ms);
4202 4207
4203 ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", 4208 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
4204 type, delay_ms); 4209 type, delay_ms);
4205 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); 4210 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
4206} 4211}
@@ -4211,7 +4216,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
4211 struct sk_buff *skb; 4216 struct sk_buff *skb;
4212 u32 cfg; 4217 u32 cfg;
4213 4218
4214 skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); 4219 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4215 if (!skb) 4220 if (!skb)
4216 return -ENOMEM; 4221 return -ENOMEM;
4217 4222
@@ -4232,7 +4237,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
4232 cmd->config_enable = __cpu_to_le32(cfg); 4237 cmd->config_enable = __cpu_to_le32(cfg);
4233 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); 4238 cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
4234 4239
4235 ath10k_dbg(ATH10K_DBG_WMI, 4240 ath10k_dbg(ar, ATH10K_DBG_WMI,
4236 "wmi dbglog cfg modules %08x %08x config %08x %08x\n", 4241 "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
4237 __le32_to_cpu(cmd->module_enable), 4242 __le32_to_cpu(cmd->module_enable),
4238 __le32_to_cpu(cmd->module_valid), 4243 __le32_to_cpu(cmd->module_valid),