diff options
Diffstat (limited to 'drivers/net/wireless/ath/ath10k')
28 files changed, 2953 insertions, 435 deletions
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index ffa3b1a8745f..f4dbb3e93bf8 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile | |||
@@ -9,12 +9,14 @@ ath10k_core-y += mac.o \ | |||
9 | txrx.o \ | 9 | txrx.o \ |
10 | wmi.o \ | 10 | wmi.o \ |
11 | wmi-tlv.o \ | 11 | wmi-tlv.o \ |
12 | bmi.o | 12 | bmi.o \ |
13 | hw.o | ||
13 | 14 | ||
14 | ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o | 15 | ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o |
15 | ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o | 16 | ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o |
16 | ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o | 17 | ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o |
17 | ath10k_core-$(CONFIG_THERMAL) += thermal.o | 18 | ath10k_core-$(CONFIG_THERMAL) += thermal.o |
19 | ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o | ||
18 | 20 | ||
19 | obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o | 21 | obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o |
20 | ath10k_pci-y += pci.o \ | 22 | ath10k_pci-y += pci.o \ |
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 42ec79327943..e508c65b6ba8 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c | |||
@@ -803,7 +803,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) | |||
803 | int ce_id; | 803 | int ce_id; |
804 | 804 | ||
805 | for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { | 805 | for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { |
806 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | 806 | u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
807 | 807 | ||
808 | ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); | 808 | ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); |
809 | ath10k_ce_error_intr_disable(ar, ctrl_addr); | 809 | ath10k_ce_error_intr_disable(ar, ctrl_addr); |
@@ -832,7 +832,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, | |||
832 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 832 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
833 | struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; | 833 | struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; |
834 | struct ath10k_ce_ring *src_ring = ce_state->src_ring; | 834 | struct ath10k_ce_ring *src_ring = ce_state->src_ring; |
835 | u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); | 835 | u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
836 | 836 | ||
837 | nentries = roundup_pow_of_two(attr->src_nentries); | 837 | nentries = roundup_pow_of_two(attr->src_nentries); |
838 | 838 | ||
@@ -869,7 +869,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, | |||
869 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 869 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
870 | struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; | 870 | struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; |
871 | struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; | 871 | struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; |
872 | u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); | 872 | u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
873 | 873 | ||
874 | nentries = roundup_pow_of_two(attr->dest_nentries); | 874 | nentries = roundup_pow_of_two(attr->dest_nentries); |
875 | 875 | ||
@@ -1051,7 +1051,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, | |||
1051 | 1051 | ||
1052 | static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) | 1052 | static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) |
1053 | { | 1053 | { |
1054 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | 1054 | u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
1055 | 1055 | ||
1056 | ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); | 1056 | ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); |
1057 | ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); | 1057 | ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); |
@@ -1061,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) | |||
1061 | 1061 | ||
1062 | static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) | 1062 | static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) |
1063 | { | 1063 | { |
1064 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | 1064 | u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
1065 | 1065 | ||
1066 | ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); | 1066 | ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); |
1067 | ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); | 1067 | ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); |
@@ -1098,7 +1098,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, | |||
1098 | 1098 | ||
1099 | ce_state->ar = ar; | 1099 | ce_state->ar = ar; |
1100 | ce_state->id = ce_id; | 1100 | ce_state->id = ce_id; |
1101 | ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); | 1101 | ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id); |
1102 | ce_state->attr_flags = attr->flags; | 1102 | ce_state->attr_flags = attr->flags; |
1103 | ce_state->src_sz_max = attr->src_sz_max; | 1103 | ce_state->src_sz_max = attr->src_sz_max; |
1104 | 1104 | ||
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 617a151e8ce4..c18647b87f71 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h | |||
@@ -394,7 +394,7 @@ struct ce_attr { | |||
394 | #define DST_WATERMARK_HIGH_RESET 0 | 394 | #define DST_WATERMARK_HIGH_RESET 0 |
395 | #define DST_WATERMARK_ADDRESS 0x0050 | 395 | #define DST_WATERMARK_ADDRESS 0x0050 |
396 | 396 | ||
397 | static inline u32 ath10k_ce_base_address(unsigned int ce_id) | 397 | static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) |
398 | { | 398 | { |
399 | return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; | 399 | return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; |
400 | } | 400 | } |
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 2d0671ebcf2b..310e12bc078a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c | |||
@@ -57,6 +57,49 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
57 | .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, | 57 | .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, |
58 | }, | 58 | }, |
59 | }, | 59 | }, |
60 | { | ||
61 | .id = QCA6174_HW_2_1_VERSION, | ||
62 | .name = "qca6174 hw2.1", | ||
63 | .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, | ||
64 | .uart_pin = 6, | ||
65 | .fw = { | ||
66 | .dir = QCA6174_HW_2_1_FW_DIR, | ||
67 | .fw = QCA6174_HW_2_1_FW_FILE, | ||
68 | .otp = QCA6174_HW_2_1_OTP_FILE, | ||
69 | .board = QCA6174_HW_2_1_BOARD_DATA_FILE, | ||
70 | .board_size = QCA6174_BOARD_DATA_SZ, | ||
71 | .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, | ||
72 | }, | ||
73 | }, | ||
74 | { | ||
75 | .id = QCA6174_HW_3_0_VERSION, | ||
76 | .name = "qca6174 hw3.0", | ||
77 | .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, | ||
78 | .uart_pin = 6, | ||
79 | .fw = { | ||
80 | .dir = QCA6174_HW_3_0_FW_DIR, | ||
81 | .fw = QCA6174_HW_3_0_FW_FILE, | ||
82 | .otp = QCA6174_HW_3_0_OTP_FILE, | ||
83 | .board = QCA6174_HW_3_0_BOARD_DATA_FILE, | ||
84 | .board_size = QCA6174_BOARD_DATA_SZ, | ||
85 | .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, | ||
86 | }, | ||
87 | }, | ||
88 | { | ||
89 | .id = QCA6174_HW_3_2_VERSION, | ||
90 | .name = "qca6174 hw3.2", | ||
91 | .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, | ||
92 | .uart_pin = 6, | ||
93 | .fw = { | ||
94 | /* uses same binaries as hw3.0 */ | ||
95 | .dir = QCA6174_HW_3_0_FW_DIR, | ||
96 | .fw = QCA6174_HW_3_0_FW_FILE, | ||
97 | .otp = QCA6174_HW_3_0_OTP_FILE, | ||
98 | .board = QCA6174_HW_3_0_BOARD_DATA_FILE, | ||
99 | .board_size = QCA6174_BOARD_DATA_SZ, | ||
100 | .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, | ||
101 | }, | ||
102 | }, | ||
60 | }; | 103 | }; |
61 | 104 | ||
62 | static void ath10k_send_suspend_complete(struct ath10k *ar) | 105 | static void ath10k_send_suspend_complete(struct ath10k *ar) |
@@ -927,6 +970,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) | |||
927 | case ATH10K_FW_WMI_OP_VERSION_TLV: | 970 | case ATH10K_FW_WMI_OP_VERSION_TLV: |
928 | ar->max_num_peers = TARGET_TLV_NUM_PEERS; | 971 | ar->max_num_peers = TARGET_TLV_NUM_PEERS; |
929 | ar->max_num_stations = TARGET_TLV_NUM_STATIONS; | 972 | ar->max_num_stations = TARGET_TLV_NUM_STATIONS; |
973 | ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; | ||
930 | ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; | 974 | ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; |
931 | break; | 975 | break; |
932 | case ATH10K_FW_WMI_OP_VERSION_UNSET: | 976 | case ATH10K_FW_WMI_OP_VERSION_UNSET: |
@@ -1060,6 +1104,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) | |||
1060 | goto err_hif_stop; | 1104 | goto err_hif_stop; |
1061 | } | 1105 | } |
1062 | 1106 | ||
1107 | /* If firmware indicates Full Rx Reorder support it must be used in a | ||
1108 | * slightly different manner. Let HTT code know. | ||
1109 | */ | ||
1110 | ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, | ||
1111 | ar->wmi.svc_map)); | ||
1112 | |||
1113 | status = ath10k_htt_rx_ring_refill(ar); | ||
1114 | if (status) { | ||
1115 | ath10k_err(ar, "failed to refill htt rx ring: %d\n", status); | ||
1116 | goto err_hif_stop; | ||
1117 | } | ||
1118 | |||
1063 | /* we don't care about HTT in UTF mode */ | 1119 | /* we don't care about HTT in UTF mode */ |
1064 | if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { | 1120 | if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { |
1065 | status = ath10k_htt_setup(&ar->htt); | 1121 | status = ath10k_htt_setup(&ar->htt); |
@@ -1295,6 +1351,7 @@ EXPORT_SYMBOL(ath10k_core_unregister); | |||
1295 | 1351 | ||
1296 | struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, | 1352 | struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, |
1297 | enum ath10k_bus bus, | 1353 | enum ath10k_bus bus, |
1354 | enum ath10k_hw_rev hw_rev, | ||
1298 | const struct ath10k_hif_ops *hif_ops) | 1355 | const struct ath10k_hif_ops *hif_ops) |
1299 | { | 1356 | { |
1300 | struct ath10k *ar; | 1357 | struct ath10k *ar; |
@@ -1307,9 +1364,24 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, | |||
1307 | ar->ath_common.priv = ar; | 1364 | ar->ath_common.priv = ar; |
1308 | ar->ath_common.hw = ar->hw; | 1365 | ar->ath_common.hw = ar->hw; |
1309 | ar->dev = dev; | 1366 | ar->dev = dev; |
1367 | ar->hw_rev = hw_rev; | ||
1310 | ar->hif.ops = hif_ops; | 1368 | ar->hif.ops = hif_ops; |
1311 | ar->hif.bus = bus; | 1369 | ar->hif.bus = bus; |
1312 | 1370 | ||
1371 | switch (hw_rev) { | ||
1372 | case ATH10K_HW_QCA988X: | ||
1373 | ar->regs = &qca988x_regs; | ||
1374 | break; | ||
1375 | case ATH10K_HW_QCA6174: | ||
1376 | ar->regs = &qca6174_regs; | ||
1377 | break; | ||
1378 | default: | ||
1379 | ath10k_err(ar, "unsupported core hardware revision %d\n", | ||
1380 | hw_rev); | ||
1381 | ret = -ENOTSUPP; | ||
1382 | goto err_free_mac; | ||
1383 | } | ||
1384 | |||
1313 | init_completion(&ar->scan.started); | 1385 | init_completion(&ar->scan.started); |
1314 | init_completion(&ar->scan.completed); | 1386 | init_completion(&ar->scan.completed); |
1315 | init_completion(&ar->scan.on_channel); | 1387 | init_completion(&ar->scan.on_channel); |
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 7b6d9e4567a3..d60e46fe6d19 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h | |||
@@ -97,6 +97,11 @@ struct ath10k_skb_cb { | |||
97 | } bcn; | 97 | } bcn; |
98 | } __packed; | 98 | } __packed; |
99 | 99 | ||
100 | struct ath10k_skb_rxcb { | ||
101 | dma_addr_t paddr; | ||
102 | struct hlist_node hlist; | ||
103 | }; | ||
104 | |||
100 | static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) | 105 | static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) |
101 | { | 106 | { |
102 | BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) > | 107 | BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) > |
@@ -104,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) | |||
104 | return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; | 109 | return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; |
105 | } | 110 | } |
106 | 111 | ||
112 | static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb) | ||
113 | { | ||
114 | BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb)); | ||
115 | return (struct ath10k_skb_rxcb *)skb->cb; | ||
116 | } | ||
117 | |||
118 | #define ATH10K_RXCB_SKB(rxcb) \ | ||
119 | container_of((void *)rxcb, struct sk_buff, cb) | ||
120 | |||
107 | static inline u32 host_interest_item_address(u32 item_offset) | 121 | static inline u32 host_interest_item_address(u32 item_offset) |
108 | { | 122 | { |
109 | return QCA988X_HOST_INTEREST_ADDRESS + item_offset; | 123 | return QCA988X_HOST_INTEREST_ADDRESS + item_offset; |
@@ -239,10 +253,21 @@ struct ath10k_sta { | |||
239 | u32 smps; | 253 | u32 smps; |
240 | 254 | ||
241 | struct work_struct update_wk; | 255 | struct work_struct update_wk; |
256 | |||
257 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
258 | /* protected by conf_mutex */ | ||
259 | bool aggr_mode; | ||
260 | #endif | ||
242 | }; | 261 | }; |
243 | 262 | ||
244 | #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) | 263 | #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) |
245 | 264 | ||
265 | enum ath10k_beacon_state { | ||
266 | ATH10K_BEACON_SCHEDULED = 0, | ||
267 | ATH10K_BEACON_SENDING, | ||
268 | ATH10K_BEACON_SENT, | ||
269 | }; | ||
270 | |||
246 | struct ath10k_vif { | 271 | struct ath10k_vif { |
247 | struct list_head list; | 272 | struct list_head list; |
248 | 273 | ||
@@ -253,7 +278,7 @@ struct ath10k_vif { | |||
253 | u32 dtim_period; | 278 | u32 dtim_period; |
254 | struct sk_buff *beacon; | 279 | struct sk_buff *beacon; |
255 | /* protected by data_lock */ | 280 | /* protected by data_lock */ |
256 | bool beacon_sent; | 281 | enum ath10k_beacon_state beacon_state; |
257 | void *beacon_buf; | 282 | void *beacon_buf; |
258 | dma_addr_t beacon_paddr; | 283 | dma_addr_t beacon_paddr; |
259 | 284 | ||
@@ -266,10 +291,8 @@ struct ath10k_vif { | |||
266 | u32 aid; | 291 | u32 aid; |
267 | u8 bssid[ETH_ALEN]; | 292 | u8 bssid[ETH_ALEN]; |
268 | 293 | ||
269 | struct work_struct wep_key_work; | ||
270 | struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; | 294 | struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; |
271 | u8 def_wep_key_idx; | 295 | s8 def_wep_key_idx; |
272 | u8 def_wep_key_newidx; | ||
273 | 296 | ||
274 | u16 tx_seq_no; | 297 | u16 tx_seq_no; |
275 | 298 | ||
@@ -296,6 +319,7 @@ struct ath10k_vif { | |||
296 | bool use_cts_prot; | 319 | bool use_cts_prot; |
297 | int num_legacy_stations; | 320 | int num_legacy_stations; |
298 | int txpower; | 321 | int txpower; |
322 | struct wmi_wmm_params_all_arg wmm_params; | ||
299 | }; | 323 | }; |
300 | 324 | ||
301 | struct ath10k_vif_iter { | 325 | struct ath10k_vif_iter { |
@@ -326,6 +350,7 @@ struct ath10k_debug { | |||
326 | 350 | ||
327 | /* protected by conf_mutex */ | 351 | /* protected by conf_mutex */ |
328 | u32 fw_dbglog_mask; | 352 | u32 fw_dbglog_mask; |
353 | u32 fw_dbglog_level; | ||
329 | u32 pktlog_filter; | 354 | u32 pktlog_filter; |
330 | u32 reg_addr; | 355 | u32 reg_addr; |
331 | u32 nf_cal_period; | 356 | u32 nf_cal_period; |
@@ -452,6 +477,7 @@ struct ath10k { | |||
452 | struct device *dev; | 477 | struct device *dev; |
453 | u8 mac_addr[ETH_ALEN]; | 478 | u8 mac_addr[ETH_ALEN]; |
454 | 479 | ||
480 | enum ath10k_hw_rev hw_rev; | ||
455 | u32 chip_id; | 481 | u32 chip_id; |
456 | u32 target_version; | 482 | u32 target_version; |
457 | u8 fw_version_major; | 483 | u8 fw_version_major; |
@@ -467,9 +493,6 @@ struct ath10k { | |||
467 | 493 | ||
468 | DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); | 494 | DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); |
469 | 495 | ||
470 | struct targetdef *targetdef; | ||
471 | struct hostdef *hostdef; | ||
472 | |||
473 | bool p2p; | 496 | bool p2p; |
474 | 497 | ||
475 | struct { | 498 | struct { |
@@ -479,6 +502,7 @@ struct ath10k { | |||
479 | 502 | ||
480 | struct completion target_suspend; | 503 | struct completion target_suspend; |
481 | 504 | ||
505 | const struct ath10k_hw_regs *regs; | ||
482 | struct ath10k_bmi bmi; | 506 | struct ath10k_bmi bmi; |
483 | struct ath10k_wmi wmi; | 507 | struct ath10k_wmi wmi; |
484 | struct ath10k_htc htc; | 508 | struct ath10k_htc htc; |
@@ -559,7 +583,6 @@ struct ath10k { | |||
559 | u8 cfg_tx_chainmask; | 583 | u8 cfg_tx_chainmask; |
560 | u8 cfg_rx_chainmask; | 584 | u8 cfg_rx_chainmask; |
561 | 585 | ||
562 | struct wmi_pdev_set_wmm_params_arg wmm_params; | ||
563 | struct completion install_key_done; | 586 | struct completion install_key_done; |
564 | 587 | ||
565 | struct completion vdev_setup_done; | 588 | struct completion vdev_setup_done; |
@@ -643,6 +666,7 @@ struct ath10k { | |||
643 | 666 | ||
644 | struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, | 667 | struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, |
645 | enum ath10k_bus bus, | 668 | enum ath10k_bus bus, |
669 | enum ath10k_hw_rev hw_rev, | ||
646 | const struct ath10k_hif_ops *hif_ops); | 670 | const struct ath10k_hif_ops *hif_ops); |
647 | void ath10k_core_destroy(struct ath10k *ar); | 671 | void ath10k_core_destroy(struct ath10k *ar); |
648 | 672 | ||
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 6ca24427e184..d2281e5c2ffe 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c | |||
@@ -371,7 +371,7 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar) | |||
371 | 371 | ||
372 | ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, | 372 | ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, |
373 | 1*HZ); | 373 | 1*HZ); |
374 | if (ret <= 0) | 374 | if (ret == 0) |
375 | return -ETIMEDOUT; | 375 | return -ETIMEDOUT; |
376 | 376 | ||
377 | spin_lock_bh(&ar->data_lock); | 377 | spin_lock_bh(&ar->data_lock); |
@@ -1318,10 +1318,10 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file, | |||
1318 | { | 1318 | { |
1319 | struct ath10k *ar = file->private_data; | 1319 | struct ath10k *ar = file->private_data; |
1320 | unsigned int len; | 1320 | unsigned int len; |
1321 | char buf[32]; | 1321 | char buf[64]; |
1322 | 1322 | ||
1323 | len = scnprintf(buf, sizeof(buf), "0x%08x\n", | 1323 | len = scnprintf(buf, sizeof(buf), "0x%08x %u\n", |
1324 | ar->debug.fw_dbglog_mask); | 1324 | ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); |
1325 | 1325 | ||
1326 | return simple_read_from_buffer(user_buf, count, ppos, buf, len); | 1326 | return simple_read_from_buffer(user_buf, count, ppos, buf, len); |
1327 | } | 1327 | } |
@@ -1331,19 +1331,32 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file, | |||
1331 | size_t count, loff_t *ppos) | 1331 | size_t count, loff_t *ppos) |
1332 | { | 1332 | { |
1333 | struct ath10k *ar = file->private_data; | 1333 | struct ath10k *ar = file->private_data; |
1334 | unsigned long mask; | ||
1335 | int ret; | 1334 | int ret; |
1335 | char buf[64]; | ||
1336 | unsigned int log_level, mask; | ||
1336 | 1337 | ||
1337 | ret = kstrtoul_from_user(user_buf, count, 0, &mask); | 1338 | simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); |
1338 | if (ret) | 1339 | |
1339 | return ret; | 1340 | /* make sure that buf is null terminated */ |
1341 | buf[sizeof(buf) - 1] = 0; | ||
1342 | |||
1343 | ret = sscanf(buf, "%x %u", &mask, &log_level); | ||
1344 | |||
1345 | if (!ret) | ||
1346 | return -EINVAL; | ||
1347 | |||
1348 | if (ret == 1) | ||
1349 | /* default if user did not specify */ | ||
1350 | log_level = ATH10K_DBGLOG_LEVEL_WARN; | ||
1340 | 1351 | ||
1341 | mutex_lock(&ar->conf_mutex); | 1352 | mutex_lock(&ar->conf_mutex); |
1342 | 1353 | ||
1343 | ar->debug.fw_dbglog_mask = mask; | 1354 | ar->debug.fw_dbglog_mask = mask; |
1355 | ar->debug.fw_dbglog_level = log_level; | ||
1344 | 1356 | ||
1345 | if (ar->state == ATH10K_STATE_ON) { | 1357 | if (ar->state == ATH10K_STATE_ON) { |
1346 | ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); | 1358 | ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, |
1359 | ar->debug.fw_dbglog_level); | ||
1347 | if (ret) { | 1360 | if (ret) { |
1348 | ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", | 1361 | ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", |
1349 | ret); | 1362 | ret); |
@@ -1685,7 +1698,8 @@ int ath10k_debug_start(struct ath10k *ar) | |||
1685 | ret); | 1698 | ret); |
1686 | 1699 | ||
1687 | if (ar->debug.fw_dbglog_mask) { | 1700 | if (ar->debug.fw_dbglog_mask) { |
1688 | ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); | 1701 | ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, |
1702 | ATH10K_DBGLOG_LEVEL_WARN); | ||
1689 | if (ret) | 1703 | if (ret) |
1690 | /* not serious */ | 1704 | /* not serious */ |
1691 | ath10k_warn(ar, "failed to enable dbglog during start: %d", | 1705 | ath10k_warn(ar, "failed to enable dbglog during start: %d", |
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 1b87a5dbec53..a12b8323f9f1 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h | |||
@@ -48,6 +48,12 @@ enum ath10k_pktlog_filter { | |||
48 | ATH10K_PKTLOG_ANY = 0x00000001f, | 48 | ATH10K_PKTLOG_ANY = 0x00000001f, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | enum ath10k_dbg_aggr_mode { | ||
52 | ATH10K_DBG_AGGR_MODE_AUTO, | ||
53 | ATH10K_DBG_AGGR_MODE_MANUAL, | ||
54 | ATH10K_DBG_AGGR_MODE_MAX, | ||
55 | }; | ||
56 | |||
51 | extern unsigned int ath10k_debug_mask; | 57 | extern unsigned int ath10k_debug_mask; |
52 | 58 | ||
53 | __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); | 59 | __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); |
@@ -77,7 +83,6 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, | |||
77 | void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, | 83 | void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, |
78 | struct ieee80211_vif *vif, | 84 | struct ieee80211_vif *vif, |
79 | struct ethtool_stats *stats, u64 *data); | 85 | struct ethtool_stats *stats, u64 *data); |
80 | |||
81 | #else | 86 | #else |
82 | static inline int ath10k_debug_start(struct ath10k *ar) | 87 | static inline int ath10k_debug_start(struct ath10k *ar) |
83 | { | 88 | { |
@@ -129,6 +134,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) | |||
129 | #define ath10k_debug_get_et_stats NULL | 134 | #define ath10k_debug_get_et_stats NULL |
130 | 135 | ||
131 | #endif /* CONFIG_ATH10K_DEBUGFS */ | 136 | #endif /* CONFIG_ATH10K_DEBUGFS */ |
137 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
138 | void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | ||
139 | struct ieee80211_sta *sta, struct dentry *dir); | ||
140 | #endif /* CONFIG_MAC80211_DEBUGFS */ | ||
132 | 141 | ||
133 | #ifdef CONFIG_ATH10K_DEBUG | 142 | #ifdef CONFIG_ATH10K_DEBUG |
134 | __printf(3, 4) void ath10k_dbg(struct ath10k *ar, | 143 | __printf(3, 4) void ath10k_dbg(struct ath10k *ar, |
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c new file mode 100644 index 000000000000..95b5c49374e0 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Qualcomm Atheros, Inc. | ||
3 | * | ||
4 | * Permission to use, copy, modify, and/or distribute this software for any | ||
5 | * purpose with or without fee is hereby granted, provided that the above | ||
6 | * copyright notice and this permission notice appear in all copies. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
15 | */ | ||
16 | |||
17 | #include "core.h" | ||
18 | #include "wmi-ops.h" | ||
19 | #include "debug.h" | ||
20 | |||
21 | static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file, | ||
22 | char __user *user_buf, | ||
23 | size_t count, loff_t *ppos) | ||
24 | { | ||
25 | struct ieee80211_sta *sta = file->private_data; | ||
26 | struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; | ||
27 | struct ath10k *ar = arsta->arvif->ar; | ||
28 | char buf[32]; | ||
29 | int len = 0; | ||
30 | |||
31 | mutex_lock(&ar->conf_mutex); | ||
32 | len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n", | ||
33 | (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ? | ||
34 | "auto" : "manual"); | ||
35 | mutex_unlock(&ar->conf_mutex); | ||
36 | |||
37 | return simple_read_from_buffer(user_buf, count, ppos, buf, len); | ||
38 | } | ||
39 | |||
40 | static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file, | ||
41 | const char __user *user_buf, | ||
42 | size_t count, loff_t *ppos) | ||
43 | { | ||
44 | struct ieee80211_sta *sta = file->private_data; | ||
45 | struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; | ||
46 | struct ath10k *ar = arsta->arvif->ar; | ||
47 | u32 aggr_mode; | ||
48 | int ret; | ||
49 | |||
50 | if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode)) | ||
51 | return -EINVAL; | ||
52 | |||
53 | if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX) | ||
54 | return -EINVAL; | ||
55 | |||
56 | mutex_lock(&ar->conf_mutex); | ||
57 | if ((ar->state != ATH10K_STATE_ON) || | ||
58 | (aggr_mode == arsta->aggr_mode)) { | ||
59 | ret = count; | ||
60 | goto out; | ||
61 | } | ||
62 | |||
63 | ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr); | ||
64 | if (ret) { | ||
65 | ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret); | ||
66 | goto out; | ||
67 | } | ||
68 | |||
69 | arsta->aggr_mode = aggr_mode; | ||
70 | out: | ||
71 | mutex_unlock(&ar->conf_mutex); | ||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | static const struct file_operations fops_aggr_mode = { | ||
76 | .read = ath10k_dbg_sta_read_aggr_mode, | ||
77 | .write = ath10k_dbg_sta_write_aggr_mode, | ||
78 | .open = simple_open, | ||
79 | .owner = THIS_MODULE, | ||
80 | .llseek = default_llseek, | ||
81 | }; | ||
82 | |||
83 | static ssize_t ath10k_dbg_sta_write_addba(struct file *file, | ||
84 | const char __user *user_buf, | ||
85 | size_t count, loff_t *ppos) | ||
86 | { | ||
87 | struct ieee80211_sta *sta = file->private_data; | ||
88 | struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; | ||
89 | struct ath10k *ar = arsta->arvif->ar; | ||
90 | u32 tid, buf_size; | ||
91 | int ret; | ||
92 | char buf[64]; | ||
93 | |||
94 | simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); | ||
95 | |||
96 | /* make sure that buf is null terminated */ | ||
97 | buf[sizeof(buf) - 1] = '\0'; | ||
98 | |||
99 | ret = sscanf(buf, "%u %u", &tid, &buf_size); | ||
100 | if (ret != 2) | ||
101 | return -EINVAL; | ||
102 | |||
103 | /* Valid TID values are 0 through 15 */ | ||
104 | if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2) | ||
105 | return -EINVAL; | ||
106 | |||
107 | mutex_lock(&ar->conf_mutex); | ||
108 | if ((ar->state != ATH10K_STATE_ON) || | ||
109 | (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) { | ||
110 | ret = count; | ||
111 | goto out; | ||
112 | } | ||
113 | |||
114 | ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr, | ||
115 | tid, buf_size); | ||
116 | if (ret) { | ||
117 | ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n", | ||
118 | arsta->arvif->vdev_id, sta->addr, tid, buf_size); | ||
119 | } | ||
120 | |||
121 | ret = count; | ||
122 | out: | ||
123 | mutex_unlock(&ar->conf_mutex); | ||
124 | return ret; | ||
125 | } | ||
126 | |||
127 | static const struct file_operations fops_addba = { | ||
128 | .write = ath10k_dbg_sta_write_addba, | ||
129 | .open = simple_open, | ||
130 | .owner = THIS_MODULE, | ||
131 | .llseek = default_llseek, | ||
132 | }; | ||
133 | |||
134 | static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file, | ||
135 | const char __user *user_buf, | ||
136 | size_t count, loff_t *ppos) | ||
137 | { | ||
138 | struct ieee80211_sta *sta = file->private_data; | ||
139 | struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; | ||
140 | struct ath10k *ar = arsta->arvif->ar; | ||
141 | u32 tid, status; | ||
142 | int ret; | ||
143 | char buf[64]; | ||
144 | |||
145 | simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); | ||
146 | |||
147 | /* make sure that buf is null terminated */ | ||
148 | buf[sizeof(buf) - 1] = '\0'; | ||
149 | |||
150 | ret = sscanf(buf, "%u %u", &tid, &status); | ||
151 | if (ret != 2) | ||
152 | return -EINVAL; | ||
153 | |||
154 | /* Valid TID values are 0 through 15 */ | ||
155 | if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2) | ||
156 | return -EINVAL; | ||
157 | |||
158 | mutex_lock(&ar->conf_mutex); | ||
159 | if ((ar->state != ATH10K_STATE_ON) || | ||
160 | (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) { | ||
161 | ret = count; | ||
162 | goto out; | ||
163 | } | ||
164 | |||
165 | ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr, | ||
166 | tid, status); | ||
167 | if (ret) { | ||
168 | ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n", | ||
169 | arsta->arvif->vdev_id, sta->addr, tid, status); | ||
170 | } | ||
171 | ret = count; | ||
172 | out: | ||
173 | mutex_unlock(&ar->conf_mutex); | ||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | static const struct file_operations fops_addba_resp = { | ||
178 | .write = ath10k_dbg_sta_write_addba_resp, | ||
179 | .open = simple_open, | ||
180 | .owner = THIS_MODULE, | ||
181 | .llseek = default_llseek, | ||
182 | }; | ||
183 | |||
184 | static ssize_t ath10k_dbg_sta_write_delba(struct file *file, | ||
185 | const char __user *user_buf, | ||
186 | size_t count, loff_t *ppos) | ||
187 | { | ||
188 | struct ieee80211_sta *sta = file->private_data; | ||
189 | struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; | ||
190 | struct ath10k *ar = arsta->arvif->ar; | ||
191 | u32 tid, initiator, reason; | ||
192 | int ret; | ||
193 | char buf[64]; | ||
194 | |||
195 | simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); | ||
196 | |||
197 | /* make sure that buf is null terminated */ | ||
198 | buf[sizeof(buf) - 1] = '\0'; | ||
199 | |||
200 | ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason); | ||
201 | if (ret != 3) | ||
202 | return -EINVAL; | ||
203 | |||
204 | /* Valid TID values are 0 through 15 */ | ||
205 | if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2) | ||
206 | return -EINVAL; | ||
207 | |||
208 | mutex_lock(&ar->conf_mutex); | ||
209 | if ((ar->state != ATH10K_STATE_ON) || | ||
210 | (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) { | ||
211 | ret = count; | ||
212 | goto out; | ||
213 | } | ||
214 | |||
215 | ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr, | ||
216 | tid, initiator, reason); | ||
217 | if (ret) { | ||
218 | ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n", | ||
219 | arsta->arvif->vdev_id, sta->addr, tid, initiator, | ||
220 | reason); | ||
221 | } | ||
222 | ret = count; | ||
223 | out: | ||
224 | mutex_unlock(&ar->conf_mutex); | ||
225 | return ret; | ||
226 | } | ||
227 | |||
228 | static const struct file_operations fops_delba = { | ||
229 | .write = ath10k_dbg_sta_write_delba, | ||
230 | .open = simple_open, | ||
231 | .owner = THIS_MODULE, | ||
232 | .llseek = default_llseek, | ||
233 | }; | ||
234 | |||
235 | void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | ||
236 | struct ieee80211_sta *sta, struct dentry *dir) | ||
237 | { | ||
238 | debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta, | ||
239 | &fops_aggr_mode); | ||
240 | debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba); | ||
241 | debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp); | ||
242 | debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba); | ||
243 | } | ||
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index f1946a6be442..2fd9e180272b 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c | |||
@@ -703,11 +703,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, | |||
703 | /* wait for response */ | 703 | /* wait for response */ |
704 | status = wait_for_completion_timeout(&htc->ctl_resp, | 704 | status = wait_for_completion_timeout(&htc->ctl_resp, |
705 | ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); | 705 | ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); |
706 | if (status <= 0) { | 706 | if (status == 0) { |
707 | if (status == 0) | ||
708 | status = -ETIMEDOUT; | ||
709 | ath10k_err(ar, "Service connect timeout: %d\n", status); | 707 | ath10k_err(ar, "Service connect timeout: %d\n", status); |
710 | return status; | 708 | return -ETIMEDOUT; |
711 | } | 709 | } |
712 | 710 | ||
713 | /* we controlled the buffer creation, it's aligned */ | 711 | /* we controlled the buffer creation, it's aligned */ |
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 56cb4aceb383..4f59ab923e48 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c | |||
@@ -53,7 +53,6 @@ int ath10k_htt_init(struct ath10k *ar) | |||
53 | struct ath10k_htt *htt = &ar->htt; | 53 | struct ath10k_htt *htt = &ar->htt; |
54 | 54 | ||
55 | htt->ar = ar; | 55 | htt->ar = ar; |
56 | htt->max_throughput_mbps = 800; | ||
57 | 56 | ||
58 | /* | 57 | /* |
59 | * Prefetch enough data to satisfy target | 58 | * Prefetch enough data to satisfy target |
@@ -102,7 +101,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt) | |||
102 | 101 | ||
103 | status = wait_for_completion_timeout(&htt->target_version_received, | 102 | status = wait_for_completion_timeout(&htt->target_version_received, |
104 | HTT_TARGET_VERSION_TIMEOUT_HZ); | 103 | HTT_TARGET_VERSION_TIMEOUT_HZ); |
105 | if (status <= 0) { | 104 | if (status == 0) { |
106 | ath10k_warn(ar, "htt version request timed out\n"); | 105 | ath10k_warn(ar, "htt version request timed out\n"); |
107 | return -ETIMEDOUT; | 106 | return -ETIMEDOUT; |
108 | } | 107 | } |
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 1bd5545af903..874bf44ff7a2 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/bug.h> | 21 | #include <linux/bug.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/dmapool.h> | 23 | #include <linux/dmapool.h> |
24 | #include <linux/hashtable.h> | ||
24 | #include <net/mac80211.h> | 25 | #include <net/mac80211.h> |
25 | 26 | ||
26 | #include "htc.h" | 27 | #include "htc.h" |
@@ -286,7 +287,19 @@ enum htt_t2h_msg_type { | |||
286 | HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, | 287 | HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, |
287 | HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, | 288 | HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, |
288 | HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, | 289 | HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, |
290 | HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf, | ||
291 | HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10, | ||
292 | HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11, | ||
293 | HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12, | ||
294 | /* 0x13 reservd */ | ||
295 | HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14, | ||
296 | |||
297 | /* FIXME: Do not depend on this event id. Numbering of this event id is | ||
298 | * broken across different firmware revisions and HTT version fails to | ||
299 | * indicate this. | ||
300 | */ | ||
289 | HTT_T2H_MSG_TYPE_TEST, | 301 | HTT_T2H_MSG_TYPE_TEST, |
302 | |||
290 | /* keep this last */ | 303 | /* keep this last */ |
291 | HTT_T2H_NUM_MSGS | 304 | HTT_T2H_NUM_MSGS |
292 | }; | 305 | }; |
@@ -655,6 +668,53 @@ struct htt_rx_fragment_indication { | |||
655 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 | 668 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 |
656 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 | 669 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 |
657 | 670 | ||
671 | struct htt_rx_pn_ind { | ||
672 | __le16 peer_id; | ||
673 | u8 tid; | ||
674 | u8 seqno_start; | ||
675 | u8 seqno_end; | ||
676 | u8 pn_ie_count; | ||
677 | u8 reserved; | ||
678 | u8 pn_ies[0]; | ||
679 | } __packed; | ||
680 | |||
681 | struct htt_rx_offload_msdu { | ||
682 | __le16 msdu_len; | ||
683 | __le16 peer_id; | ||
684 | u8 vdev_id; | ||
685 | u8 tid; | ||
686 | u8 fw_desc; | ||
687 | u8 payload[0]; | ||
688 | } __packed; | ||
689 | |||
690 | struct htt_rx_offload_ind { | ||
691 | u8 reserved; | ||
692 | __le16 msdu_count; | ||
693 | } __packed; | ||
694 | |||
695 | struct htt_rx_in_ord_msdu_desc { | ||
696 | __le32 msdu_paddr; | ||
697 | __le16 msdu_len; | ||
698 | u8 fw_desc; | ||
699 | u8 reserved; | ||
700 | } __packed; | ||
701 | |||
702 | struct htt_rx_in_ord_ind { | ||
703 | u8 info; | ||
704 | __le16 peer_id; | ||
705 | u8 vdev_id; | ||
706 | u8 reserved; | ||
707 | __le16 msdu_count; | ||
708 | struct htt_rx_in_ord_msdu_desc msdu_descs[0]; | ||
709 | } __packed; | ||
710 | |||
711 | #define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f | ||
712 | #define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0 | ||
713 | #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020 | ||
714 | #define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5 | ||
715 | #define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040 | ||
716 | #define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6 | ||
717 | |||
658 | /* | 718 | /* |
659 | * target -> host test message definition | 719 | * target -> host test message definition |
660 | * | 720 | * |
@@ -1150,6 +1210,9 @@ struct htt_resp { | |||
1150 | struct htt_rx_test rx_test; | 1210 | struct htt_rx_test rx_test; |
1151 | struct htt_pktlog_msg pktlog_msg; | 1211 | struct htt_pktlog_msg pktlog_msg; |
1152 | struct htt_stats_conf stats_conf; | 1212 | struct htt_stats_conf stats_conf; |
1213 | struct htt_rx_pn_ind rx_pn_ind; | ||
1214 | struct htt_rx_offload_ind rx_offload_ind; | ||
1215 | struct htt_rx_in_ord_ind rx_in_ord_ind; | ||
1153 | }; | 1216 | }; |
1154 | } __packed; | 1217 | } __packed; |
1155 | 1218 | ||
@@ -1182,7 +1245,6 @@ struct ath10k_htt { | |||
1182 | struct ath10k *ar; | 1245 | struct ath10k *ar; |
1183 | enum ath10k_htc_ep_id eid; | 1246 | enum ath10k_htc_ep_id eid; |
1184 | 1247 | ||
1185 | int max_throughput_mbps; | ||
1186 | u8 target_version_major; | 1248 | u8 target_version_major; |
1187 | u8 target_version_minor; | 1249 | u8 target_version_minor; |
1188 | struct completion target_version_received; | 1250 | struct completion target_version_received; |
@@ -1198,6 +1260,20 @@ struct ath10k_htt { | |||
1198 | * filled. | 1260 | * filled. |
1199 | */ | 1261 | */ |
1200 | struct sk_buff **netbufs_ring; | 1262 | struct sk_buff **netbufs_ring; |
1263 | |||
1264 | /* This is used only with firmware supporting IN_ORD_IND. | ||
1265 | * | ||
1266 | * With Full Rx Reorder the HTT Rx Ring is more of a temporary | ||
1267 | * buffer ring from which buffer addresses are copied by the | ||
1268 | * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND | ||
1269 | * pointing to specific (re-ordered) buffers. | ||
1270 | * | ||
1271 | * FIXME: With kernel generic hashing functions there's a lot | ||
1272 | * of hash collisions for sk_buffs. | ||
1273 | */ | ||
1274 | bool in_ord_rx; | ||
1275 | DECLARE_HASHTABLE(skb_table, 4); | ||
1276 | |||
1201 | /* | 1277 | /* |
1202 | * Ring of buffer addresses - | 1278 | * Ring of buffer addresses - |
1203 | * This ring holds the "physical" device address of the | 1279 | * This ring holds the "physical" device address of the |
@@ -1252,12 +1328,11 @@ struct ath10k_htt { | |||
1252 | 1328 | ||
1253 | unsigned int prefetch_len; | 1329 | unsigned int prefetch_len; |
1254 | 1330 | ||
1255 | /* Protects access to %pending_tx, %used_msdu_ids */ | 1331 | /* Protects access to pending_tx, num_pending_tx */ |
1256 | spinlock_t tx_lock; | 1332 | spinlock_t tx_lock; |
1257 | int max_num_pending_tx; | 1333 | int max_num_pending_tx; |
1258 | int num_pending_tx; | 1334 | int num_pending_tx; |
1259 | struct sk_buff **pending_tx; | 1335 | struct idr pending_tx; |
1260 | unsigned long *used_msdu_ids; /* bitmap */ | ||
1261 | wait_queue_head_t empty_tx_wq; | 1336 | wait_queue_head_t empty_tx_wq; |
1262 | struct dma_pool *tx_pool; | 1337 | struct dma_pool *tx_pool; |
1263 | 1338 | ||
@@ -1271,6 +1346,7 @@ struct ath10k_htt { | |||
1271 | struct tasklet_struct txrx_compl_task; | 1346 | struct tasklet_struct txrx_compl_task; |
1272 | struct sk_buff_head tx_compl_q; | 1347 | struct sk_buff_head tx_compl_q; |
1273 | struct sk_buff_head rx_compl_q; | 1348 | struct sk_buff_head rx_compl_q; |
1349 | struct sk_buff_head rx_in_ord_compl_q; | ||
1274 | 1350 | ||
1275 | /* rx_status template */ | 1351 | /* rx_status template */ |
1276 | struct ieee80211_rx_status rx_status; | 1352 | struct ieee80211_rx_status rx_status; |
@@ -1334,6 +1410,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt); | |||
1334 | void ath10k_htt_tx_free(struct ath10k_htt *htt); | 1410 | void ath10k_htt_tx_free(struct ath10k_htt *htt); |
1335 | 1411 | ||
1336 | int ath10k_htt_rx_alloc(struct ath10k_htt *htt); | 1412 | int ath10k_htt_rx_alloc(struct ath10k_htt *htt); |
1413 | int ath10k_htt_rx_ring_refill(struct ath10k *ar); | ||
1337 | void ath10k_htt_rx_free(struct ath10k_htt *htt); | 1414 | void ath10k_htt_rx_free(struct ath10k_htt *htt); |
1338 | 1415 | ||
1339 | void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); | 1416 | void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); |
@@ -1346,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, | |||
1346 | u8 max_subfrms_amsdu); | 1423 | u8 max_subfrms_amsdu); |
1347 | 1424 | ||
1348 | void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); | 1425 | void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); |
1349 | int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt); | 1426 | int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); |
1350 | void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); | 1427 | void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); |
1351 | int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); | 1428 | int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); |
1352 | int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); | 1429 | int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); |
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 9c782a42665e..c1da44f65a4d 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c | |||
@@ -25,8 +25,8 @@ | |||
25 | 25 | ||
26 | #include <linux/log2.h> | 26 | #include <linux/log2.h> |
27 | 27 | ||
28 | #define HTT_RX_RING_SIZE 1024 | 28 | #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX |
29 | #define HTT_RX_RING_FILL_LEVEL 1000 | 29 | #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1) |
30 | 30 | ||
31 | /* when under memory pressure rx ring refill may fail and needs a retry */ | 31 | /* when under memory pressure rx ring refill may fail and needs a retry */ |
32 | #define HTT_RX_RING_REFILL_RETRY_MS 50 | 32 | #define HTT_RX_RING_REFILL_RETRY_MS 50 |
@@ -34,31 +34,70 @@ | |||
34 | static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); | 34 | static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); |
35 | static void ath10k_htt_txrx_compl_task(unsigned long ptr); | 35 | static void ath10k_htt_txrx_compl_task(unsigned long ptr); |
36 | 36 | ||
37 | static struct sk_buff * | ||
38 | ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr) | ||
39 | { | ||
40 | struct ath10k_skb_rxcb *rxcb; | ||
41 | |||
42 | hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) | ||
43 | if (rxcb->paddr == paddr) | ||
44 | return ATH10K_RXCB_SKB(rxcb); | ||
45 | |||
46 | WARN_ON_ONCE(1); | ||
47 | return NULL; | ||
48 | } | ||
49 | |||
37 | static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) | 50 | static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) |
38 | { | 51 | { |
39 | struct sk_buff *skb; | 52 | struct sk_buff *skb; |
40 | struct ath10k_skb_cb *cb; | 53 | struct ath10k_skb_rxcb *rxcb; |
54 | struct hlist_node *n; | ||
41 | int i; | 55 | int i; |
42 | 56 | ||
43 | for (i = 0; i < htt->rx_ring.fill_cnt; i++) { | 57 | if (htt->rx_ring.in_ord_rx) { |
44 | skb = htt->rx_ring.netbufs_ring[i]; | 58 | hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { |
45 | cb = ATH10K_SKB_CB(skb); | 59 | skb = ATH10K_RXCB_SKB(rxcb); |
46 | dma_unmap_single(htt->ar->dev, cb->paddr, | 60 | dma_unmap_single(htt->ar->dev, rxcb->paddr, |
47 | skb->len + skb_tailroom(skb), | 61 | skb->len + skb_tailroom(skb), |
48 | DMA_FROM_DEVICE); | 62 | DMA_FROM_DEVICE); |
49 | dev_kfree_skb_any(skb); | 63 | hash_del(&rxcb->hlist); |
64 | dev_kfree_skb_any(skb); | ||
65 | } | ||
66 | } else { | ||
67 | for (i = 0; i < htt->rx_ring.size; i++) { | ||
68 | skb = htt->rx_ring.netbufs_ring[i]; | ||
69 | if (!skb) | ||
70 | continue; | ||
71 | |||
72 | rxcb = ATH10K_SKB_RXCB(skb); | ||
73 | dma_unmap_single(htt->ar->dev, rxcb->paddr, | ||
74 | skb->len + skb_tailroom(skb), | ||
75 | DMA_FROM_DEVICE); | ||
76 | dev_kfree_skb_any(skb); | ||
77 | } | ||
50 | } | 78 | } |
51 | 79 | ||
52 | htt->rx_ring.fill_cnt = 0; | 80 | htt->rx_ring.fill_cnt = 0; |
81 | hash_init(htt->rx_ring.skb_table); | ||
82 | memset(htt->rx_ring.netbufs_ring, 0, | ||
83 | htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); | ||
53 | } | 84 | } |
54 | 85 | ||
55 | static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) | 86 | static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) |
56 | { | 87 | { |
57 | struct htt_rx_desc *rx_desc; | 88 | struct htt_rx_desc *rx_desc; |
89 | struct ath10k_skb_rxcb *rxcb; | ||
58 | struct sk_buff *skb; | 90 | struct sk_buff *skb; |
59 | dma_addr_t paddr; | 91 | dma_addr_t paddr; |
60 | int ret = 0, idx; | 92 | int ret = 0, idx; |
61 | 93 | ||
94 | /* The Full Rx Reorder firmware has no way of telling the host | ||
95 | * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring. | ||
96 | * To keep things simple make sure ring is always half empty. This | ||
97 | * guarantees there'll be no replenishment overruns possible. | ||
98 | */ | ||
99 | BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2); | ||
100 | |||
62 | idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); | 101 | idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); |
63 | while (num > 0) { | 102 | while (num > 0) { |
64 | skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); | 103 | skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); |
@@ -86,17 +125,29 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) | |||
86 | goto fail; | 125 | goto fail; |
87 | } | 126 | } |
88 | 127 | ||
89 | ATH10K_SKB_CB(skb)->paddr = paddr; | 128 | rxcb = ATH10K_SKB_RXCB(skb); |
129 | rxcb->paddr = paddr; | ||
90 | htt->rx_ring.netbufs_ring[idx] = skb; | 130 | htt->rx_ring.netbufs_ring[idx] = skb; |
91 | htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); | 131 | htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); |
92 | htt->rx_ring.fill_cnt++; | 132 | htt->rx_ring.fill_cnt++; |
93 | 133 | ||
134 | if (htt->rx_ring.in_ord_rx) { | ||
135 | hash_add(htt->rx_ring.skb_table, | ||
136 | &ATH10K_SKB_RXCB(skb)->hlist, | ||
137 | (u32)paddr); | ||
138 | } | ||
139 | |||
94 | num--; | 140 | num--; |
95 | idx++; | 141 | idx++; |
96 | idx &= htt->rx_ring.size_mask; | 142 | idx &= htt->rx_ring.size_mask; |
97 | } | 143 | } |
98 | 144 | ||
99 | fail: | 145 | fail: |
146 | /* | ||
147 | * Make sure the rx buffer is updated before available buffer | ||
148 | * index to avoid any potential rx ring corruption. | ||
149 | */ | ||
150 | mb(); | ||
100 | *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); | 151 | *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); |
101 | return ret; | 152 | return ret; |
102 | } | 153 | } |
@@ -153,22 +204,20 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg) | |||
153 | ath10k_htt_rx_msdu_buff_replenish(htt); | 204 | ath10k_htt_rx_msdu_buff_replenish(htt); |
154 | } | 205 | } |
155 | 206 | ||
156 | static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt) | 207 | int ath10k_htt_rx_ring_refill(struct ath10k *ar) |
157 | { | 208 | { |
158 | struct sk_buff *skb; | 209 | struct ath10k_htt *htt = &ar->htt; |
159 | int i; | 210 | int ret; |
160 | 211 | ||
161 | for (i = 0; i < htt->rx_ring.size; i++) { | 212 | spin_lock_bh(&htt->rx_ring.lock); |
162 | skb = htt->rx_ring.netbufs_ring[i]; | 213 | ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - |
163 | if (!skb) | 214 | htt->rx_ring.fill_cnt)); |
164 | continue; | 215 | spin_unlock_bh(&htt->rx_ring.lock); |
165 | 216 | ||
166 | dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr, | 217 | if (ret) |
167 | skb->len + skb_tailroom(skb), | 218 | ath10k_htt_rx_ring_free(htt); |
168 | DMA_FROM_DEVICE); | 219 | |
169 | dev_kfree_skb_any(skb); | 220 | return ret; |
170 | htt->rx_ring.netbufs_ring[i] = NULL; | ||
171 | } | ||
172 | } | 221 | } |
173 | 222 | ||
174 | void ath10k_htt_rx_free(struct ath10k_htt *htt) | 223 | void ath10k_htt_rx_free(struct ath10k_htt *htt) |
@@ -179,8 +228,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) | |||
179 | 228 | ||
180 | skb_queue_purge(&htt->tx_compl_q); | 229 | skb_queue_purge(&htt->tx_compl_q); |
181 | skb_queue_purge(&htt->rx_compl_q); | 230 | skb_queue_purge(&htt->rx_compl_q); |
231 | skb_queue_purge(&htt->rx_in_ord_compl_q); | ||
182 | 232 | ||
183 | ath10k_htt_rx_ring_clean_up(htt); | 233 | ath10k_htt_rx_ring_free(htt); |
184 | 234 | ||
185 | dma_free_coherent(htt->ar->dev, | 235 | dma_free_coherent(htt->ar->dev, |
186 | (htt->rx_ring.size * | 236 | (htt->rx_ring.size * |
@@ -212,6 +262,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) | |||
212 | idx = htt->rx_ring.sw_rd_idx.msdu_payld; | 262 | idx = htt->rx_ring.sw_rd_idx.msdu_payld; |
213 | msdu = htt->rx_ring.netbufs_ring[idx]; | 263 | msdu = htt->rx_ring.netbufs_ring[idx]; |
214 | htt->rx_ring.netbufs_ring[idx] = NULL; | 264 | htt->rx_ring.netbufs_ring[idx] = NULL; |
265 | htt->rx_ring.paddrs_ring[idx] = 0; | ||
215 | 266 | ||
216 | idx++; | 267 | idx++; |
217 | idx &= htt->rx_ring.size_mask; | 268 | idx &= htt->rx_ring.size_mask; |
@@ -219,7 +270,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) | |||
219 | htt->rx_ring.fill_cnt--; | 270 | htt->rx_ring.fill_cnt--; |
220 | 271 | ||
221 | dma_unmap_single(htt->ar->dev, | 272 | dma_unmap_single(htt->ar->dev, |
222 | ATH10K_SKB_CB(msdu)->paddr, | 273 | ATH10K_SKB_RXCB(msdu)->paddr, |
223 | msdu->len + skb_tailroom(msdu), | 274 | msdu->len + skb_tailroom(msdu), |
224 | DMA_FROM_DEVICE); | 275 | DMA_FROM_DEVICE); |
225 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", | 276 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", |
@@ -379,6 +430,82 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr) | |||
379 | ath10k_htt_rx_msdu_buff_replenish(htt); | 430 | ath10k_htt_rx_msdu_buff_replenish(htt); |
380 | } | 431 | } |
381 | 432 | ||
433 | static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, | ||
434 | u32 paddr) | ||
435 | { | ||
436 | struct ath10k *ar = htt->ar; | ||
437 | struct ath10k_skb_rxcb *rxcb; | ||
438 | struct sk_buff *msdu; | ||
439 | |||
440 | lockdep_assert_held(&htt->rx_ring.lock); | ||
441 | |||
442 | msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); | ||
443 | if (!msdu) | ||
444 | return NULL; | ||
445 | |||
446 | rxcb = ATH10K_SKB_RXCB(msdu); | ||
447 | hash_del(&rxcb->hlist); | ||
448 | htt->rx_ring.fill_cnt--; | ||
449 | |||
450 | dma_unmap_single(htt->ar->dev, rxcb->paddr, | ||
451 | msdu->len + skb_tailroom(msdu), | ||
452 | DMA_FROM_DEVICE); | ||
453 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", | ||
454 | msdu->data, msdu->len + skb_tailroom(msdu)); | ||
455 | |||
456 | return msdu; | ||
457 | } | ||
458 | |||
459 | static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt, | ||
460 | struct htt_rx_in_ord_ind *ev, | ||
461 | struct sk_buff_head *list) | ||
462 | { | ||
463 | struct ath10k *ar = htt->ar; | ||
464 | struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs; | ||
465 | struct htt_rx_desc *rxd; | ||
466 | struct sk_buff *msdu; | ||
467 | int msdu_count; | ||
468 | bool is_offload; | ||
469 | u32 paddr; | ||
470 | |||
471 | lockdep_assert_held(&htt->rx_ring.lock); | ||
472 | |||
473 | msdu_count = __le16_to_cpu(ev->msdu_count); | ||
474 | is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); | ||
475 | |||
476 | while (msdu_count--) { | ||
477 | paddr = __le32_to_cpu(msdu_desc->msdu_paddr); | ||
478 | |||
479 | msdu = ath10k_htt_rx_pop_paddr(htt, paddr); | ||
480 | if (!msdu) { | ||
481 | __skb_queue_purge(list); | ||
482 | return -ENOENT; | ||
483 | } | ||
484 | |||
485 | __skb_queue_tail(list, msdu); | ||
486 | |||
487 | if (!is_offload) { | ||
488 | rxd = (void *)msdu->data; | ||
489 | |||
490 | trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); | ||
491 | |||
492 | skb_put(msdu, sizeof(*rxd)); | ||
493 | skb_pull(msdu, sizeof(*rxd)); | ||
494 | skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); | ||
495 | |||
496 | if (!(__le32_to_cpu(rxd->attention.flags) & | ||
497 | RX_ATTENTION_FLAGS_MSDU_DONE)) { | ||
498 | ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); | ||
499 | return -EIO; | ||
500 | } | ||
501 | } | ||
502 | |||
503 | msdu_desc++; | ||
504 | } | ||
505 | |||
506 | return 0; | ||
507 | } | ||
508 | |||
382 | int ath10k_htt_rx_alloc(struct ath10k_htt *htt) | 509 | int ath10k_htt_rx_alloc(struct ath10k_htt *htt) |
383 | { | 510 | { |
384 | struct ath10k *ar = htt->ar; | 511 | struct ath10k *ar = htt->ar; |
@@ -424,7 +551,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) | |||
424 | 551 | ||
425 | htt->rx_ring.alloc_idx.vaddr = vaddr; | 552 | htt->rx_ring.alloc_idx.vaddr = vaddr; |
426 | htt->rx_ring.alloc_idx.paddr = paddr; | 553 | htt->rx_ring.alloc_idx.paddr = paddr; |
427 | htt->rx_ring.sw_rd_idx.msdu_payld = 0; | 554 | htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; |
428 | *htt->rx_ring.alloc_idx.vaddr = 0; | 555 | *htt->rx_ring.alloc_idx.vaddr = 0; |
429 | 556 | ||
430 | /* Initialize the Rx refill retry timer */ | 557 | /* Initialize the Rx refill retry timer */ |
@@ -433,14 +560,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) | |||
433 | spin_lock_init(&htt->rx_ring.lock); | 560 | spin_lock_init(&htt->rx_ring.lock); |
434 | 561 | ||
435 | htt->rx_ring.fill_cnt = 0; | 562 | htt->rx_ring.fill_cnt = 0; |
436 | if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) | 563 | htt->rx_ring.sw_rd_idx.msdu_payld = 0; |
437 | goto err_fill_ring; | 564 | hash_init(htt->rx_ring.skb_table); |
438 | 565 | ||
439 | tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, | 566 | tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, |
440 | (unsigned long)htt); | 567 | (unsigned long)htt); |
441 | 568 | ||
442 | skb_queue_head_init(&htt->tx_compl_q); | 569 | skb_queue_head_init(&htt->tx_compl_q); |
443 | skb_queue_head_init(&htt->rx_compl_q); | 570 | skb_queue_head_init(&htt->rx_compl_q); |
571 | skb_queue_head_init(&htt->rx_in_ord_compl_q); | ||
444 | 572 | ||
445 | tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, | 573 | tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, |
446 | (unsigned long)htt); | 574 | (unsigned long)htt); |
@@ -449,12 +577,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) | |||
449 | htt->rx_ring.size, htt->rx_ring.fill_level); | 577 | htt->rx_ring.size, htt->rx_ring.fill_level); |
450 | return 0; | 578 | return 0; |
451 | 579 | ||
452 | err_fill_ring: | ||
453 | ath10k_htt_rx_ring_free(htt); | ||
454 | dma_free_coherent(htt->ar->dev, | ||
455 | sizeof(*htt->rx_ring.alloc_idx.vaddr), | ||
456 | htt->rx_ring.alloc_idx.vaddr, | ||
457 | htt->rx_ring.alloc_idx.paddr); | ||
458 | err_dma_idx: | 580 | err_dma_idx: |
459 | dma_free_coherent(htt->ar->dev, | 581 | dma_free_coherent(htt->ar->dev, |
460 | (htt->rx_ring.size * | 582 | (htt->rx_ring.size * |
@@ -691,7 +813,7 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar, | |||
691 | * | 813 | * |
692 | * FIXME: Can we get/compute 64bit TSF? | 814 | * FIXME: Can we get/compute 64bit TSF? |
693 | */ | 815 | */ |
694 | status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp); | 816 | status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp); |
695 | status->flag |= RX_FLAG_MACTIME_END; | 817 | status->flag |= RX_FLAG_MACTIME_END; |
696 | } | 818 | } |
697 | 819 | ||
@@ -1578,6 +1700,194 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) | |||
1578 | spin_unlock_bh(&ar->data_lock); | 1700 | spin_unlock_bh(&ar->data_lock); |
1579 | } | 1701 | } |
1580 | 1702 | ||
1703 | static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, | ||
1704 | struct sk_buff_head *amsdu) | ||
1705 | { | ||
1706 | struct sk_buff *msdu; | ||
1707 | struct htt_rx_desc *rxd; | ||
1708 | |||
1709 | if (skb_queue_empty(list)) | ||
1710 | return -ENOBUFS; | ||
1711 | |||
1712 | if (WARN_ON(!skb_queue_empty(amsdu))) | ||
1713 | return -EINVAL; | ||
1714 | |||
1715 | while ((msdu = __skb_dequeue(list))) { | ||
1716 | __skb_queue_tail(amsdu, msdu); | ||
1717 | |||
1718 | rxd = (void *)msdu->data - sizeof(*rxd); | ||
1719 | if (rxd->msdu_end.info0 & | ||
1720 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) | ||
1721 | break; | ||
1722 | } | ||
1723 | |||
1724 | msdu = skb_peek_tail(amsdu); | ||
1725 | rxd = (void *)msdu->data - sizeof(*rxd); | ||
1726 | if (!(rxd->msdu_end.info0 & | ||
1727 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { | ||
1728 | skb_queue_splice_init(amsdu, list); | ||
1729 | return -EAGAIN; | ||
1730 | } | ||
1731 | |||
1732 | return 0; | ||
1733 | } | ||
1734 | |||
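The rxd pointer arithmetic in ath10k_htt_rx_extract_amsdu() above relies on the buffer layout set up in the pop path: each posted buffer starts with the HTT rx descriptor, and the skb_put()/skb_pull() pair moves msdu->data past it while leaving the descriptor in the skb head. A tiny helper expressing that assumption (sketch only, reusing the driver's struct htt_rx_desc):

/* Assumed buffer layout after the pulls in ath10k_htt_rx_pop_paddr_list():
 *
 *   [ struct htt_rx_desc ][ payload ... ]
 *                          ^ msdu->data
 *
 * so the descriptor remains reachable by stepping back over it.
 */
static inline struct htt_rx_desc *msdu_rxd(struct sk_buff *msdu)
{
	return (void *)msdu->data - sizeof(struct htt_rx_desc);
}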
1735 | static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, | ||
1736 | struct sk_buff *skb) | ||
1737 | { | ||
1738 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1739 | |||
1740 | if (!ieee80211_has_protected(hdr->frame_control)) | ||
1741 | return; | ||
1742 | |||
1743 | /* Offloaded frames are already decrypted but firmware insists they are | ||
1744 | * protected in the 802.11 header. Strip the flag. Otherwise mac80211 | ||
1745 | * will drop the frame. | ||
1746 | */ | ||
1747 | |||
1748 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); | ||
1749 | status->flag |= RX_FLAG_DECRYPTED | | ||
1750 | RX_FLAG_IV_STRIPPED | | ||
1751 | RX_FLAG_MMIC_STRIPPED; | ||
1752 | } | ||
1753 | |||
1754 | static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, | ||
1755 | struct sk_buff_head *list) | ||
1756 | { | ||
1757 | struct ath10k_htt *htt = &ar->htt; | ||
1758 | struct ieee80211_rx_status *status = &htt->rx_status; | ||
1759 | struct htt_rx_offload_msdu *rx; | ||
1760 | struct sk_buff *msdu; | ||
1761 | size_t offset; | ||
1762 | |||
1763 | while ((msdu = __skb_dequeue(list))) { | ||
1764 | /* Offloaded frames don't have an Rx descriptor. Instead they have | ||
1765 | * a short meta information header. | ||
1766 | */ | ||
1767 | |||
1768 | rx = (void *)msdu->data; | ||
1769 | |||
1770 | skb_put(msdu, sizeof(*rx)); | ||
1771 | skb_pull(msdu, sizeof(*rx)); | ||
1772 | |||
1773 | if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { | ||
1774 | ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); | ||
1775 | dev_kfree_skb_any(msdu); | ||
1776 | continue; | ||
1777 | } | ||
1778 | |||
1779 | skb_put(msdu, __le16_to_cpu(rx->msdu_len)); | ||
1780 | |||
1781 | /* Offloaded rx header length isn't a multiple of 2 or 4 so the | ||
1782 | * actual payload is unaligned. Align the frame. Otherwise | ||
1783 | * mac80211 complains. This shouldn't reduce performance much | ||
1784 | * because these offloaded frames are rare. | ||
1785 | */ | ||
1786 | offset = 4 - ((unsigned long)msdu->data & 3); | ||
1787 | skb_put(msdu, offset); | ||
1788 | memmove(msdu->data + offset, msdu->data, msdu->len); | ||
1789 | skb_pull(msdu, offset); | ||
1790 | |||
1791 | /* FIXME: The frame is NWifi. Re-construct QoS Control | ||
1792 | * if possible later. | ||
1793 | */ | ||
1794 | |||
1795 | memset(status, 0, sizeof(*status)); | ||
1796 | status->flag |= RX_FLAG_NO_SIGNAL_VAL; | ||
1797 | |||
1798 | ath10k_htt_rx_h_rx_offload_prot(status, msdu); | ||
1799 | ath10k_htt_rx_h_channel(ar, status); | ||
1800 | ath10k_process_rx(ar, status, msdu); | ||
1801 | } | ||
1802 | } | ||
1803 | |||
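The realignment above is needed because the short offload header leaves msdu->data at an arbitrary offset once it is stripped; shifting the payload forward by 4 - (addr & 3) bytes lands it on the next 4-byte boundary (a full 4 bytes when it was already aligned, which wastes a small copy but keeps the arithmetic branch-free). A standalone sketch of the same arithmetic, outside any skb:

/* Sketch: move 'len' bytes at 'data' forward so the payload starts on a
 * 4-byte boundary. The caller must provide up to 4 bytes of tailroom,
 * mirroring the skb_put(msdu, offset) above.
 */
#include <linux/string.h>

static void *align_payload(void *data, size_t len)
{
	size_t pad = 4 - ((unsigned long)data & 3);	/* 1..4 bytes */

	memmove((char *)data + pad, data, len);
	return (char *)data + pad;
}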
1804 | static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) | ||
1805 | { | ||
1806 | struct ath10k_htt *htt = &ar->htt; | ||
1807 | struct htt_resp *resp = (void *)skb->data; | ||
1808 | struct ieee80211_rx_status *status = &htt->rx_status; | ||
1809 | struct sk_buff_head list; | ||
1810 | struct sk_buff_head amsdu; | ||
1811 | u16 peer_id; | ||
1812 | u16 msdu_count; | ||
1813 | u8 vdev_id; | ||
1814 | u8 tid; | ||
1815 | bool offload; | ||
1816 | bool frag; | ||
1817 | int ret; | ||
1818 | |||
1819 | lockdep_assert_held(&htt->rx_ring.lock); | ||
1820 | |||
1821 | if (htt->rx_confused) | ||
1822 | return; | ||
1823 | |||
1824 | skb_pull(skb, sizeof(resp->hdr)); | ||
1825 | skb_pull(skb, sizeof(resp->rx_in_ord_ind)); | ||
1826 | |||
1827 | peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); | ||
1828 | msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); | ||
1829 | vdev_id = resp->rx_in_ord_ind.vdev_id; | ||
1830 | tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); | ||
1831 | offload = !!(resp->rx_in_ord_ind.info & | ||
1832 | HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); | ||
1833 | frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); | ||
1834 | |||
1835 | ath10k_dbg(ar, ATH10K_DBG_HTT, | ||
1836 | "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", | ||
1837 | vdev_id, peer_id, tid, offload, frag, msdu_count); | ||
1838 | |||
1839 | if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) { | ||
1840 | ath10k_warn(ar, "dropping invalid in order rx indication\n"); | ||
1841 | return; | ||
1842 | } | ||
1843 | |||
1844 | /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later | ||
1845 | * extracted and processed. | ||
1846 | */ | ||
1847 | __skb_queue_head_init(&list); | ||
1848 | ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list); | ||
1849 | if (ret < 0) { | ||
1850 | ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); | ||
1851 | htt->rx_confused = true; | ||
1852 | return; | ||
1853 | } | ||
1854 | |||
1855 | /* Offloaded frames are very different and need to be handled | ||
1856 | * separately. | ||
1857 | */ | ||
1858 | if (offload) | ||
1859 | ath10k_htt_rx_h_rx_offload(ar, &list); | ||
1860 | |||
1861 | while (!skb_queue_empty(&list)) { | ||
1862 | __skb_queue_head_init(&amsdu); | ||
1863 | ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); | ||
1864 | switch (ret) { | ||
1865 | case 0: | ||
1866 | /* Note: The in-order indication may report interleaved | ||
1867 | * frames from different PPDUs, meaning the rx rate reported | ||
1868 | * to mac80211 isn't accurate/reliable. It's still | ||
1869 | * better to report something than nothing though. This | ||
1870 | * should still give an idea about rx rate to the user. | ||
1871 | */ | ||
1872 | ath10k_htt_rx_h_ppdu(ar, &amsdu, status); | ||
1873 | ath10k_htt_rx_h_filter(ar, &amsdu, status); | ||
1874 | ath10k_htt_rx_h_mpdu(ar, &amsdu, status); | ||
1875 | ath10k_htt_rx_h_deliver(ar, &amsdu, status); | ||
1876 | break; | ||
1877 | case -EAGAIN: | ||
1878 | /* fall through */ | ||
1879 | default: | ||
1880 | /* Should not happen. */ | ||
1881 | ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); | ||
1882 | htt->rx_confused = true; | ||
1883 | __skb_queue_purge(&list); | ||
1884 | return; | ||
1885 | } | ||
1886 | } | ||
1887 | |||
1888 | tasklet_schedule(&htt->rx_replenish_task); | ||
1889 | } | ||
1890 | |||
1581 | void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) | 1891 | void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) |
1582 | { | 1892 | { |
1583 | struct ath10k_htt *htt = &ar->htt; | 1893 | struct ath10k_htt *htt = &ar->htt; |
@@ -1700,6 +2010,20 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) | |||
1700 | */ | 2010 | */ |
1701 | break; | 2011 | break; |
1702 | } | 2012 | } |
2013 | case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { | ||
2014 | spin_lock_bh(&htt->rx_ring.lock); | ||
2015 | __skb_queue_tail(&htt->rx_in_ord_compl_q, skb); | ||
2016 | spin_unlock_bh(&htt->rx_ring.lock); | ||
2017 | tasklet_schedule(&htt->txrx_compl_task); | ||
2018 | return; | ||
2019 | } | ||
2020 | case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: | ||
2021 | /* FIXME: This WMI-TLV event overlaps with the 10.2 | ||
2022 | * CHAN_CHANGE event - both are 0xF. Neither is used in | ||
2023 | * practice so no immediate action is necessary. Nevertheless, | ||
2024 | * HTT may need an abstraction layer like WMI's one day. | ||
2025 | */ | ||
2026 | break; | ||
1703 | default: | 2027 | default: |
1704 | ath10k_warn(ar, "htt event (%d) not handled\n", | 2028 | ath10k_warn(ar, "htt event (%d) not handled\n", |
1705 | resp->hdr.msg_type); | 2029 | resp->hdr.msg_type); |
@@ -1715,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) | |||
1715 | static void ath10k_htt_txrx_compl_task(unsigned long ptr) | 2039 | static void ath10k_htt_txrx_compl_task(unsigned long ptr) |
1716 | { | 2040 | { |
1717 | struct ath10k_htt *htt = (struct ath10k_htt *)ptr; | 2041 | struct ath10k_htt *htt = (struct ath10k_htt *)ptr; |
2042 | struct ath10k *ar = htt->ar; | ||
1718 | struct htt_resp *resp; | 2043 | struct htt_resp *resp; |
1719 | struct sk_buff *skb; | 2044 | struct sk_buff *skb; |
1720 | 2045 | ||
@@ -1731,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) | |||
1731 | ath10k_htt_rx_handler(htt, &resp->rx_ind); | 2056 | ath10k_htt_rx_handler(htt, &resp->rx_ind); |
1732 | dev_kfree_skb_any(skb); | 2057 | dev_kfree_skb_any(skb); |
1733 | } | 2058 | } |
2059 | |||
2060 | while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { | ||
2061 | ath10k_htt_rx_in_ord_ind(ar, skb); | ||
2062 | dev_kfree_skb_any(skb); | ||
2063 | } | ||
1734 | spin_unlock_bh(&htt->rx_ring.lock); | 2064 | spin_unlock_bh(&htt->rx_ring.lock); |
1735 | } | 2065 | } |
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index a1bda41fb543..cbd2bc9e6202 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c | |||
@@ -56,21 +56,18 @@ exit: | |||
56 | return ret; | 56 | return ret; |
57 | } | 57 | } |
58 | 58 | ||
59 | int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) | 59 | int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) |
60 | { | 60 | { |
61 | struct ath10k *ar = htt->ar; | 61 | struct ath10k *ar = htt->ar; |
62 | int msdu_id; | 62 | int ret; |
63 | 63 | ||
64 | lockdep_assert_held(&htt->tx_lock); | 64 | lockdep_assert_held(&htt->tx_lock); |
65 | 65 | ||
66 | msdu_id = find_first_zero_bit(htt->used_msdu_ids, | 66 | ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC); |
67 | htt->max_num_pending_tx); | 67 | |
68 | if (msdu_id == htt->max_num_pending_tx) | 68 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); |
69 | return -ENOBUFS; | ||
70 | 69 | ||
71 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); | 70 | return ret; |
72 | __set_bit(msdu_id, htt->used_msdu_ids); | ||
73 | return msdu_id; | ||
74 | } | 71 | } |
75 | 72 | ||
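The change above replaces the old bitmap-plus-array id allocator with a single IDR: idr_alloc() both reserves a unique id and stores the skb pointer, and the completion path can later recover the skb from that id. A minimal sketch of the lifecycle, using a bare struct idr rather than htt->pending_tx:

/* Sketch only - illustrates the idr calls used by the patch, not driver code. */
#include <linux/idr.h>
#include <linux/skbuff.h>

static struct idr pending_tx;

static void tx_ids_init(void)
{
	idr_init(&pending_tx);
}

static int tx_id_alloc(struct sk_buff *skb)
{
	/* end = 0x10000 keeps ids within the 16-bit HTT msdu_id field;
	 * GFP_ATOMIC because the driver allocates under a spinlock.
	 */
	return idr_alloc(&pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
}

static struct sk_buff *tx_id_complete(int msdu_id)
{
	struct sk_buff *skb = idr_find(&pending_tx, msdu_id);

	if (skb)
		idr_remove(&pending_tx, msdu_id);

	return skb;
}

On teardown any leftover entries can be walked with idr_for_each() and the map released with idr_destroy(), which is exactly what the reworked ath10k_htt_tx_free() below does.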
76 | void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) | 73 | void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) |
@@ -79,74 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) | |||
79 | 76 | ||
80 | lockdep_assert_held(&htt->tx_lock); | 77 | lockdep_assert_held(&htt->tx_lock); |
81 | 78 | ||
82 | if (!test_bit(msdu_id, htt->used_msdu_ids)) | ||
83 | ath10k_warn(ar, "trying to free unallocated msdu_id %d\n", | ||
84 | msdu_id); | ||
85 | |||
86 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); | 79 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); |
87 | __clear_bit(msdu_id, htt->used_msdu_ids); | 80 | |
81 | idr_remove(&htt->pending_tx, msdu_id); | ||
88 | } | 82 | } |
89 | 83 | ||
90 | int ath10k_htt_tx_alloc(struct ath10k_htt *htt) | 84 | int ath10k_htt_tx_alloc(struct ath10k_htt *htt) |
91 | { | 85 | { |
92 | struct ath10k *ar = htt->ar; | 86 | struct ath10k *ar = htt->ar; |
93 | 87 | ||
94 | spin_lock_init(&htt->tx_lock); | ||
95 | |||
96 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", | 88 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", |
97 | htt->max_num_pending_tx); | 89 | htt->max_num_pending_tx); |
98 | 90 | ||
99 | htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * | 91 | spin_lock_init(&htt->tx_lock); |
100 | htt->max_num_pending_tx, GFP_KERNEL); | 92 | idr_init(&htt->pending_tx); |
101 | if (!htt->pending_tx) | ||
102 | return -ENOMEM; | ||
103 | |||
104 | htt->used_msdu_ids = kzalloc(sizeof(unsigned long) * | ||
105 | BITS_TO_LONGS(htt->max_num_pending_tx), | ||
106 | GFP_KERNEL); | ||
107 | if (!htt->used_msdu_ids) { | ||
108 | kfree(htt->pending_tx); | ||
109 | return -ENOMEM; | ||
110 | } | ||
111 | 93 | ||
112 | htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, | 94 | htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, |
113 | sizeof(struct ath10k_htt_txbuf), 4, 0); | 95 | sizeof(struct ath10k_htt_txbuf), 4, 0); |
114 | if (!htt->tx_pool) { | 96 | if (!htt->tx_pool) { |
115 | kfree(htt->used_msdu_ids); | 97 | idr_destroy(&htt->pending_tx); |
116 | kfree(htt->pending_tx); | ||
117 | return -ENOMEM; | 98 | return -ENOMEM; |
118 | } | 99 | } |
119 | 100 | ||
120 | return 0; | 101 | return 0; |
121 | } | 102 | } |
122 | 103 | ||
123 | static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt) | 104 | static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) |
124 | { | 105 | { |
125 | struct ath10k *ar = htt->ar; | 106 | struct ath10k *ar = ctx; |
107 | struct ath10k_htt *htt = &ar->htt; | ||
126 | struct htt_tx_done tx_done = {0}; | 108 | struct htt_tx_done tx_done = {0}; |
127 | int msdu_id; | ||
128 | |||
129 | spin_lock_bh(&htt->tx_lock); | ||
130 | for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) { | ||
131 | if (!test_bit(msdu_id, htt->used_msdu_ids)) | ||
132 | continue; | ||
133 | 109 | ||
134 | ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", | 110 | ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); |
135 | msdu_id); | ||
136 | 111 | ||
137 | tx_done.discard = 1; | 112 | tx_done.discard = 1; |
138 | tx_done.msdu_id = msdu_id; | 113 | tx_done.msdu_id = msdu_id; |
139 | 114 | ||
140 | ath10k_txrx_tx_unref(htt, &tx_done); | 115 | spin_lock_bh(&htt->tx_lock); |
141 | } | 116 | ath10k_txrx_tx_unref(htt, &tx_done); |
142 | spin_unlock_bh(&htt->tx_lock); | 117 | spin_unlock_bh(&htt->tx_lock); |
118 | |||
119 | return 0; | ||
143 | } | 120 | } |
144 | 121 | ||
145 | void ath10k_htt_tx_free(struct ath10k_htt *htt) | 122 | void ath10k_htt_tx_free(struct ath10k_htt *htt) |
146 | { | 123 | { |
147 | ath10k_htt_tx_free_pending(htt); | 124 | idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); |
148 | kfree(htt->pending_tx); | 125 | idr_destroy(&htt->pending_tx); |
149 | kfree(htt->used_msdu_ids); | ||
150 | dma_pool_destroy(htt->tx_pool); | 126 | dma_pool_destroy(htt->tx_pool); |
151 | } | 127 | } |
152 | 128 | ||
@@ -378,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) | |||
378 | len += sizeof(cmd->mgmt_tx); | 354 | len += sizeof(cmd->mgmt_tx); |
379 | 355 | ||
380 | spin_lock_bh(&htt->tx_lock); | 356 | spin_lock_bh(&htt->tx_lock); |
381 | res = ath10k_htt_tx_alloc_msdu_id(htt); | 357 | res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); |
382 | if (res < 0) { | 358 | if (res < 0) { |
383 | spin_unlock_bh(&htt->tx_lock); | 359 | spin_unlock_bh(&htt->tx_lock); |
384 | goto err_tx_dec; | 360 | goto err_tx_dec; |
385 | } | 361 | } |
386 | msdu_id = res; | 362 | msdu_id = res; |
387 | htt->pending_tx[msdu_id] = msdu; | ||
388 | spin_unlock_bh(&htt->tx_lock); | 363 | spin_unlock_bh(&htt->tx_lock); |
389 | 364 | ||
390 | txdesc = ath10k_htc_alloc_skb(ar, len); | 365 | txdesc = ath10k_htc_alloc_skb(ar, len); |
@@ -423,7 +398,6 @@ err_free_txdesc: | |||
423 | dev_kfree_skb_any(txdesc); | 398 | dev_kfree_skb_any(txdesc); |
424 | err_free_msdu_id: | 399 | err_free_msdu_id: |
425 | spin_lock_bh(&htt->tx_lock); | 400 | spin_lock_bh(&htt->tx_lock); |
426 | htt->pending_tx[msdu_id] = NULL; | ||
427 | ath10k_htt_tx_free_msdu_id(htt, msdu_id); | 401 | ath10k_htt_tx_free_msdu_id(htt, msdu_id); |
428 | spin_unlock_bh(&htt->tx_lock); | 402 | spin_unlock_bh(&htt->tx_lock); |
429 | err_tx_dec: | 403 | err_tx_dec: |
@@ -455,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) | |||
455 | goto err; | 429 | goto err; |
456 | 430 | ||
457 | spin_lock_bh(&htt->tx_lock); | 431 | spin_lock_bh(&htt->tx_lock); |
458 | res = ath10k_htt_tx_alloc_msdu_id(htt); | 432 | res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); |
459 | if (res < 0) { | 433 | if (res < 0) { |
460 | spin_unlock_bh(&htt->tx_lock); | 434 | spin_unlock_bh(&htt->tx_lock); |
461 | goto err_tx_dec; | 435 | goto err_tx_dec; |
462 | } | 436 | } |
463 | msdu_id = res; | 437 | msdu_id = res; |
464 | htt->pending_tx[msdu_id] = msdu; | ||
465 | spin_unlock_bh(&htt->tx_lock); | 438 | spin_unlock_bh(&htt->tx_lock); |
466 | 439 | ||
467 | prefetch_len = min(htt->prefetch_len, msdu->len); | 440 | prefetch_len = min(htt->prefetch_len, msdu->len); |
@@ -475,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) | |||
475 | 448 | ||
476 | skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, | 449 | skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, |
477 | &paddr); | 450 | &paddr); |
478 | if (!skb_cb->htt.txbuf) | 451 | if (!skb_cb->htt.txbuf) { |
452 | res = -ENOMEM; | ||
479 | goto err_free_msdu_id; | 453 | goto err_free_msdu_id; |
454 | } | ||
480 | skb_cb->htt.txbuf_paddr = paddr; | 455 | skb_cb->htt.txbuf_paddr = paddr; |
481 | 456 | ||
457 | if ((ieee80211_is_action(hdr->frame_control) || | ||
458 | ieee80211_is_deauth(hdr->frame_control) || | ||
459 | ieee80211_is_disassoc(hdr->frame_control)) && | ||
460 | ieee80211_has_protected(hdr->frame_control)) | ||
461 | skb_put(msdu, IEEE80211_CCMP_MIC_LEN); | ||
462 | |||
482 | skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, | 463 | skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, |
483 | DMA_TO_DEVICE); | 464 | DMA_TO_DEVICE); |
484 | res = dma_mapping_error(dev, skb_cb->paddr); | 465 | res = dma_mapping_error(dev, skb_cb->paddr); |
@@ -534,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) | |||
534 | 515 | ||
535 | flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); | 516 | flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); |
536 | flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); | 517 | flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); |
537 | flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; | 518 | if (msdu->ip_summed == CHECKSUM_PARTIAL) { |
538 | flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; | 519 | flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; |
520 | flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; | ||
521 | } | ||
539 | 522 | ||
540 | /* Prevent firmware from sending up tx inspection requests. There's | 523 | /* Prevent firmware from sending up tx inspection requests. There's |
541 | * nothing ath10k can do with frames requested for inspection so force | 524 | * nothing ath10k can do with frames requested for inspection so force |
@@ -593,7 +576,6 @@ err_free_txbuf: | |||
593 | skb_cb->htt.txbuf_paddr); | 576 | skb_cb->htt.txbuf_paddr); |
594 | err_free_msdu_id: | 577 | err_free_msdu_id: |
595 | spin_lock_bh(&htt->tx_lock); | 578 | spin_lock_bh(&htt->tx_lock); |
596 | htt->pending_tx[msdu_id] = NULL; | ||
597 | ath10k_htt_tx_free_msdu_id(htt, msdu_id); | 579 | ath10k_htt_tx_free_msdu_id(htt, msdu_id); |
598 | spin_unlock_bh(&htt->tx_lock); | 580 | spin_unlock_bh(&htt->tx_lock); |
599 | err_tx_dec: | 581 | err_tx_dec: |
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c new file mode 100644 index 000000000000..839a8791fb9e --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/hw.c | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. | ||
3 | * | ||
4 | * Permission to use, copy, modify, and/or distribute this software for any | ||
5 | * purpose with or without fee is hereby granted, provided that the above | ||
6 | * copyright notice and this permission notice appear in all copies. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
15 | */ | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include "hw.h" | ||
19 | |||
20 | const struct ath10k_hw_regs qca988x_regs = { | ||
21 | .rtc_state_cold_reset_mask = 0x00000400, | ||
22 | .rtc_soc_base_address = 0x00004000, | ||
23 | .rtc_wmac_base_address = 0x00005000, | ||
24 | .soc_core_base_address = 0x00009000, | ||
25 | .ce_wrapper_base_address = 0x00057000, | ||
26 | .ce0_base_address = 0x00057400, | ||
27 | .ce1_base_address = 0x00057800, | ||
28 | .ce2_base_address = 0x00057c00, | ||
29 | .ce3_base_address = 0x00058000, | ||
30 | .ce4_base_address = 0x00058400, | ||
31 | .ce5_base_address = 0x00058800, | ||
32 | .ce6_base_address = 0x00058c00, | ||
33 | .ce7_base_address = 0x00059000, | ||
34 | .soc_reset_control_si0_rst_mask = 0x00000001, | ||
35 | .soc_reset_control_ce_rst_mask = 0x00040000, | ||
36 | .soc_chip_id_address = 0x00ec, | ||
37 | .scratch_3_address = 0x0030, | ||
38 | }; | ||
39 | |||
40 | const struct ath10k_hw_regs qca6174_regs = { | ||
41 | .rtc_state_cold_reset_mask = 0x00002000, | ||
42 | .rtc_soc_base_address = 0x00000800, | ||
43 | .rtc_wmac_base_address = 0x00001000, | ||
44 | .soc_core_base_address = 0x0003a000, | ||
45 | .ce_wrapper_base_address = 0x00034000, | ||
46 | .ce0_base_address = 0x00034400, | ||
47 | .ce1_base_address = 0x00034800, | ||
48 | .ce2_base_address = 0x00034c00, | ||
49 | .ce3_base_address = 0x00035000, | ||
50 | .ce4_base_address = 0x00035400, | ||
51 | .ce5_base_address = 0x00035800, | ||
52 | .ce6_base_address = 0x00035c00, | ||
53 | .ce7_base_address = 0x00036000, | ||
54 | .soc_reset_control_si0_rst_mask = 0x00000000, | ||
55 | .soc_reset_control_ce_rst_mask = 0x00000001, | ||
56 | .soc_chip_id_address = 0x000f0, | ||
57 | .scratch_3_address = 0x0028, | ||
58 | }; | ||
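These two tables back the register macros converted in hw.h below: instead of hard-coded QCA988X offsets, each macro now dereferences ar->regs, so QCA6174 gets its own addresses without touching any call site (every user simply needs an `ar` in scope). The table is selected in the core probe path, which is not part of the hunks shown here; a hedged sketch of what that assignment presumably looks like (ath10k_hw_params_init is a made-up name, and ar->hw_rev / ar->regs are fields added elsewhere in this patchset):

/* Sketch: pick the per-chip register table from the hardware revision. */
static int ath10k_hw_params_init(struct ath10k *ar)
{
	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
		ar->regs = &qca988x_regs;
		break;
	case ATH10K_HW_QCA6174:
		ar->regs = &qca6174_regs;
		break;
	default:
		return -EINVAL;		/* unknown chip, refuse to probe */
	}

	return 0;
}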
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 5729901923ac..460771fcfe9e 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h | |||
@@ -34,6 +34,44 @@ | |||
34 | #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" | 34 | #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" |
35 | #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 | 35 | #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 |
36 | 36 | ||
37 | /* QCA6174 target BMI version signatures */ | ||
38 | #define QCA6174_HW_1_0_VERSION 0x05000000 | ||
39 | #define QCA6174_HW_1_1_VERSION 0x05000001 | ||
40 | #define QCA6174_HW_1_3_VERSION 0x05000003 | ||
41 | #define QCA6174_HW_2_1_VERSION 0x05010000 | ||
42 | #define QCA6174_HW_3_0_VERSION 0x05020000 | ||
43 | #define QCA6174_HW_3_2_VERSION 0x05030000 | ||
44 | |||
45 | enum qca6174_pci_rev { | ||
46 | QCA6174_PCI_REV_1_1 = 0x11, | ||
47 | QCA6174_PCI_REV_1_3 = 0x13, | ||
48 | QCA6174_PCI_REV_2_0 = 0x20, | ||
49 | QCA6174_PCI_REV_3_0 = 0x30, | ||
50 | }; | ||
51 | |||
52 | enum qca6174_chip_id_rev { | ||
53 | QCA6174_HW_1_0_CHIP_ID_REV = 0, | ||
54 | QCA6174_HW_1_1_CHIP_ID_REV = 1, | ||
55 | QCA6174_HW_1_3_CHIP_ID_REV = 2, | ||
56 | QCA6174_HW_2_1_CHIP_ID_REV = 4, | ||
57 | QCA6174_HW_2_2_CHIP_ID_REV = 5, | ||
58 | QCA6174_HW_3_0_CHIP_ID_REV = 8, | ||
59 | QCA6174_HW_3_1_CHIP_ID_REV = 9, | ||
60 | QCA6174_HW_3_2_CHIP_ID_REV = 10, | ||
61 | }; | ||
62 | |||
63 | #define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" | ||
64 | #define QCA6174_HW_2_1_FW_FILE "firmware.bin" | ||
65 | #define QCA6174_HW_2_1_OTP_FILE "otp.bin" | ||
66 | #define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin" | ||
67 | #define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234 | ||
68 | |||
69 | #define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0" | ||
70 | #define QCA6174_HW_3_0_FW_FILE "firmware.bin" | ||
71 | #define QCA6174_HW_3_0_OTP_FILE "otp.bin" | ||
72 | #define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin" | ||
73 | #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 | ||
74 | |||
37 | #define ATH10K_FW_API2_FILE "firmware-2.bin" | 75 | #define ATH10K_FW_API2_FILE "firmware-2.bin" |
38 | #define ATH10K_FW_API3_FILE "firmware-3.bin" | 76 | #define ATH10K_FW_API3_FILE "firmware-3.bin" |
39 | 77 | ||
@@ -81,6 +119,37 @@ enum ath10k_fw_wmi_op_version { | |||
81 | ATH10K_FW_WMI_OP_VERSION_MAX, | 119 | ATH10K_FW_WMI_OP_VERSION_MAX, |
82 | }; | 120 | }; |
83 | 121 | ||
122 | enum ath10k_hw_rev { | ||
123 | ATH10K_HW_QCA988X, | ||
124 | ATH10K_HW_QCA6174, | ||
125 | }; | ||
126 | |||
127 | struct ath10k_hw_regs { | ||
128 | u32 rtc_state_cold_reset_mask; | ||
129 | u32 rtc_soc_base_address; | ||
130 | u32 rtc_wmac_base_address; | ||
131 | u32 soc_core_base_address; | ||
132 | u32 ce_wrapper_base_address; | ||
133 | u32 ce0_base_address; | ||
134 | u32 ce1_base_address; | ||
135 | u32 ce2_base_address; | ||
136 | u32 ce3_base_address; | ||
137 | u32 ce4_base_address; | ||
138 | u32 ce5_base_address; | ||
139 | u32 ce6_base_address; | ||
140 | u32 ce7_base_address; | ||
141 | u32 soc_reset_control_si0_rst_mask; | ||
142 | u32 soc_reset_control_ce_rst_mask; | ||
143 | u32 soc_chip_id_address; | ||
144 | u32 scratch_3_address; | ||
145 | }; | ||
146 | |||
147 | extern const struct ath10k_hw_regs qca988x_regs; | ||
148 | extern const struct ath10k_hw_regs qca6174_regs; | ||
149 | |||
150 | #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) | ||
151 | #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) | ||
152 | |||
84 | /* Known peculiarities: | 153 | /* Known peculiarities: |
85 | * - current FW doesn't support raw rx mode (last tested v599) | 154 | * - current FW doesn't support raw rx mode (last tested v599) |
86 | * - current FW dumps upon raw tx mode (last tested v599) | 155 | * - current FW dumps upon raw tx mode (last tested v599) |
@@ -183,6 +252,9 @@ struct ath10k_pktlog_hdr { | |||
183 | #define TARGET_10X_NUM_MSDU_DESC (1024 + 400) | 252 | #define TARGET_10X_NUM_MSDU_DESC (1024 + 400) |
184 | #define TARGET_10X_MAX_FRAG_ENTRIES 0 | 253 | #define TARGET_10X_MAX_FRAG_ENTRIES 0 |
185 | 254 | ||
255 | /* 10.2 parameters */ | ||
256 | #define TARGET_10_2_DMA_BURST_SIZE 1 | ||
257 | |||
186 | /* Target specific defines for WMI-TLV firmware */ | 258 | /* Target specific defines for WMI-TLV firmware */ |
187 | #define TARGET_TLV_NUM_VDEVS 3 | 259 | #define TARGET_TLV_NUM_VDEVS 3 |
188 | #define TARGET_TLV_NUM_STATIONS 32 | 260 | #define TARGET_TLV_NUM_STATIONS 32 |
@@ -222,7 +294,7 @@ struct ath10k_pktlog_hdr { | |||
222 | /* as of IP3.7.1 */ | 294 | /* as of IP3.7.1 */ |
223 | #define RTC_STATE_V_ON 3 | 295 | #define RTC_STATE_V_ON 3 |
224 | 296 | ||
225 | #define RTC_STATE_COLD_RESET_MASK 0x00000400 | 297 | #define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask |
226 | #define RTC_STATE_V_LSB 0 | 298 | #define RTC_STATE_V_LSB 0 |
227 | #define RTC_STATE_V_MASK 0x00000007 | 299 | #define RTC_STATE_V_MASK 0x00000007 |
228 | #define RTC_STATE_ADDRESS 0x0000 | 300 | #define RTC_STATE_ADDRESS 0x0000 |
@@ -231,12 +303,12 @@ struct ath10k_pktlog_hdr { | |||
231 | #define PCIE_SOC_WAKE_RESET 0x00000000 | 303 | #define PCIE_SOC_WAKE_RESET 0x00000000 |
232 | #define SOC_GLOBAL_RESET_ADDRESS 0x0008 | 304 | #define SOC_GLOBAL_RESET_ADDRESS 0x0008 |
233 | 305 | ||
234 | #define RTC_SOC_BASE_ADDRESS 0x00004000 | 306 | #define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address |
235 | #define RTC_WMAC_BASE_ADDRESS 0x00005000 | 307 | #define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address |
236 | #define MAC_COEX_BASE_ADDRESS 0x00006000 | 308 | #define MAC_COEX_BASE_ADDRESS 0x00006000 |
237 | #define BT_COEX_BASE_ADDRESS 0x00007000 | 309 | #define BT_COEX_BASE_ADDRESS 0x00007000 |
238 | #define SOC_PCIE_BASE_ADDRESS 0x00008000 | 310 | #define SOC_PCIE_BASE_ADDRESS 0x00008000 |
239 | #define SOC_CORE_BASE_ADDRESS 0x00009000 | 311 | #define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address |
240 | #define WLAN_UART_BASE_ADDRESS 0x0000c000 | 312 | #define WLAN_UART_BASE_ADDRESS 0x0000c000 |
241 | #define WLAN_SI_BASE_ADDRESS 0x00010000 | 313 | #define WLAN_SI_BASE_ADDRESS 0x00010000 |
242 | #define WLAN_GPIO_BASE_ADDRESS 0x00014000 | 314 | #define WLAN_GPIO_BASE_ADDRESS 0x00014000 |
@@ -245,23 +317,23 @@ struct ath10k_pktlog_hdr { | |||
245 | #define EFUSE_BASE_ADDRESS 0x00030000 | 317 | #define EFUSE_BASE_ADDRESS 0x00030000 |
246 | #define FPGA_REG_BASE_ADDRESS 0x00039000 | 318 | #define FPGA_REG_BASE_ADDRESS 0x00039000 |
247 | #define WLAN_UART2_BASE_ADDRESS 0x00054c00 | 319 | #define WLAN_UART2_BASE_ADDRESS 0x00054c00 |
248 | #define CE_WRAPPER_BASE_ADDRESS 0x00057000 | 320 | #define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address |
249 | #define CE0_BASE_ADDRESS 0x00057400 | 321 | #define CE0_BASE_ADDRESS ar->regs->ce0_base_address |
250 | #define CE1_BASE_ADDRESS 0x00057800 | 322 | #define CE1_BASE_ADDRESS ar->regs->ce1_base_address |
251 | #define CE2_BASE_ADDRESS 0x00057c00 | 323 | #define CE2_BASE_ADDRESS ar->regs->ce2_base_address |
252 | #define CE3_BASE_ADDRESS 0x00058000 | 324 | #define CE3_BASE_ADDRESS ar->regs->ce3_base_address |
253 | #define CE4_BASE_ADDRESS 0x00058400 | 325 | #define CE4_BASE_ADDRESS ar->regs->ce4_base_address |
254 | #define CE5_BASE_ADDRESS 0x00058800 | 326 | #define CE5_BASE_ADDRESS ar->regs->ce5_base_address |
255 | #define CE6_BASE_ADDRESS 0x00058c00 | 327 | #define CE6_BASE_ADDRESS ar->regs->ce6_base_address |
256 | #define CE7_BASE_ADDRESS 0x00059000 | 328 | #define CE7_BASE_ADDRESS ar->regs->ce7_base_address |
257 | #define DBI_BASE_ADDRESS 0x00060000 | 329 | #define DBI_BASE_ADDRESS 0x00060000 |
258 | #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 | 330 | #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 |
259 | #define PCIE_LOCAL_BASE_ADDRESS 0x00080000 | 331 | #define PCIE_LOCAL_BASE_ADDRESS 0x00080000 |
260 | 332 | ||
261 | #define SOC_RESET_CONTROL_ADDRESS 0x00000000 | 333 | #define SOC_RESET_CONTROL_ADDRESS 0x00000000 |
262 | #define SOC_RESET_CONTROL_OFFSET 0x00000000 | 334 | #define SOC_RESET_CONTROL_OFFSET 0x00000000 |
263 | #define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 | 335 | #define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask |
264 | #define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000 | 336 | #define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask |
265 | #define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 | 337 | #define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 |
266 | #define SOC_CPU_CLOCK_OFFSET 0x00000020 | 338 | #define SOC_CPU_CLOCK_OFFSET 0x00000020 |
267 | #define SOC_CPU_CLOCK_STANDARD_LSB 0 | 339 | #define SOC_CPU_CLOCK_STANDARD_LSB 0 |
@@ -275,7 +347,7 @@ struct ath10k_pktlog_hdr { | |||
275 | #define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 | 347 | #define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 |
276 | #define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 | 348 | #define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 |
277 | 349 | ||
278 | #define SOC_CHIP_ID_ADDRESS 0x000000ec | 350 | #define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address |
279 | #define SOC_CHIP_ID_REV_LSB 8 | 351 | #define SOC_CHIP_ID_REV_LSB 8 |
280 | #define SOC_CHIP_ID_REV_MASK 0x00000f00 | 352 | #define SOC_CHIP_ID_REV_MASK 0x00000f00 |
281 | 353 | ||
@@ -331,7 +403,7 @@ struct ath10k_pktlog_hdr { | |||
331 | #define PCIE_INTR_ENABLE_ADDRESS 0x0008 | 403 | #define PCIE_INTR_ENABLE_ADDRESS 0x0008 |
332 | #define PCIE_INTR_CAUSE_ADDRESS 0x000c | 404 | #define PCIE_INTR_CAUSE_ADDRESS 0x000c |
333 | #define PCIE_INTR_CLR_ADDRESS 0x0014 | 405 | #define PCIE_INTR_CLR_ADDRESS 0x0014 |
334 | #define SCRATCH_3_ADDRESS 0x0030 | 406 | #define SCRATCH_3_ADDRESS ar->regs->scratch_3_address |
335 | #define CPU_INTR_ADDRESS 0x0010 | 407 | #define CPU_INTR_ADDRESS 0x0010 |
336 | 408 | ||
337 | /* Firmware indications to the Host via SCRATCH_3 register. */ | 409 | /* Firmware indications to the Host via SCRATCH_3 register. */ |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 60a524b73207..d6d2f0f00caa 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
@@ -37,7 +37,7 @@ | |||
37 | static int ath10k_send_key(struct ath10k_vif *arvif, | 37 | static int ath10k_send_key(struct ath10k_vif *arvif, |
38 | struct ieee80211_key_conf *key, | 38 | struct ieee80211_key_conf *key, |
39 | enum set_key_cmd cmd, | 39 | enum set_key_cmd cmd, |
40 | const u8 *macaddr) | 40 | const u8 *macaddr, bool def_idx) |
41 | { | 41 | { |
42 | struct ath10k *ar = arvif->ar; | 42 | struct ath10k *ar = arvif->ar; |
43 | struct wmi_vdev_install_key_arg arg = { | 43 | struct wmi_vdev_install_key_arg arg = { |
@@ -58,10 +58,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif, | |||
58 | switch (key->cipher) { | 58 | switch (key->cipher) { |
59 | case WLAN_CIPHER_SUITE_CCMP: | 59 | case WLAN_CIPHER_SUITE_CCMP: |
60 | arg.key_cipher = WMI_CIPHER_AES_CCM; | 60 | arg.key_cipher = WMI_CIPHER_AES_CCM; |
61 | if (arvif->vdev_type == WMI_VDEV_TYPE_AP) | 61 | key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; |
62 | key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; | ||
63 | else | ||
64 | key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; | ||
65 | break; | 62 | break; |
66 | case WLAN_CIPHER_SUITE_TKIP: | 63 | case WLAN_CIPHER_SUITE_TKIP: |
67 | arg.key_cipher = WMI_CIPHER_TKIP; | 64 | arg.key_cipher = WMI_CIPHER_TKIP; |
@@ -75,6 +72,9 @@ static int ath10k_send_key(struct ath10k_vif *arvif, | |||
75 | * Otherwise pairwise key must be set */ | 72 | * Otherwise pairwise key must be set */ |
76 | if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) | 73 | if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) |
77 | arg.key_flags = WMI_KEY_PAIRWISE; | 74 | arg.key_flags = WMI_KEY_PAIRWISE; |
75 | |||
76 | if (def_idx) | ||
77 | arg.key_flags |= WMI_KEY_TX_USAGE; | ||
78 | break; | 78 | break; |
79 | case WLAN_CIPHER_SUITE_AES_CMAC: | 79 | case WLAN_CIPHER_SUITE_AES_CMAC: |
80 | /* this one needs to be done in software */ | 80 | /* this one needs to be done in software */ |
@@ -95,7 +95,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif, | |||
95 | static int ath10k_install_key(struct ath10k_vif *arvif, | 95 | static int ath10k_install_key(struct ath10k_vif *arvif, |
96 | struct ieee80211_key_conf *key, | 96 | struct ieee80211_key_conf *key, |
97 | enum set_key_cmd cmd, | 97 | enum set_key_cmd cmd, |
98 | const u8 *macaddr) | 98 | const u8 *macaddr, bool def_idx) |
99 | { | 99 | { |
100 | struct ath10k *ar = arvif->ar; | 100 | struct ath10k *ar = arvif->ar; |
101 | int ret; | 101 | int ret; |
@@ -104,7 +104,7 @@ static int ath10k_install_key(struct ath10k_vif *arvif, | |||
104 | 104 | ||
105 | reinit_completion(&ar->install_key_done); | 105 | reinit_completion(&ar->install_key_done); |
106 | 106 | ||
107 | ret = ath10k_send_key(arvif, key, cmd, macaddr); | 107 | ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx); |
108 | if (ret) | 108 | if (ret) |
109 | return ret; | 109 | return ret; |
110 | 110 | ||
@@ -122,6 +122,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, | |||
122 | struct ath10k_peer *peer; | 122 | struct ath10k_peer *peer; |
123 | int ret; | 123 | int ret; |
124 | int i; | 124 | int i; |
125 | bool def_idx; | ||
125 | 126 | ||
126 | lockdep_assert_held(&ar->conf_mutex); | 127 | lockdep_assert_held(&ar->conf_mutex); |
127 | 128 | ||
@@ -135,9 +136,14 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, | |||
135 | for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { | 136 | for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { |
136 | if (arvif->wep_keys[i] == NULL) | 137 | if (arvif->wep_keys[i] == NULL) |
137 | continue; | 138 | continue; |
139 | /* set TX_USAGE flag for default key id */ | ||
140 | if (arvif->def_wep_key_idx == i) | ||
141 | def_idx = true; | ||
142 | else | ||
143 | def_idx = false; | ||
138 | 144 | ||
139 | ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, | 145 | ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, |
140 | addr); | 146 | addr, def_idx); |
141 | if (ret) | 147 | if (ret) |
142 | return ret; | 148 | return ret; |
143 | 149 | ||
@@ -171,8 +177,9 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, | |||
171 | if (peer->keys[i] == NULL) | 177 | if (peer->keys[i] == NULL) |
172 | continue; | 178 | continue; |
173 | 179 | ||
180 | /* key flags are not required to delete the key */ | ||
174 | ret = ath10k_install_key(arvif, peer->keys[i], | 181 | ret = ath10k_install_key(arvif, peer->keys[i], |
175 | DISABLE_KEY, addr); | 182 | DISABLE_KEY, addr, false); |
176 | if (ret && first_errno == 0) | 183 | if (ret && first_errno == 0) |
177 | first_errno = ret; | 184 | first_errno = ret; |
178 | 185 | ||
@@ -246,8 +253,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, | |||
246 | 253 | ||
247 | if (i == ARRAY_SIZE(peer->keys)) | 254 | if (i == ARRAY_SIZE(peer->keys)) |
248 | break; | 255 | break; |
249 | 256 | /* key flags are not required to delete the key */ | |
250 | ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr); | 257 | ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false); |
251 | if (ret && first_errno == 0) | 258 | if (ret && first_errno == 0) |
252 | first_errno = ret; | 259 | first_errno = ret; |
253 | 260 | ||
@@ -527,10 +534,14 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) | |||
527 | dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, | 534 | dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, |
528 | arvif->beacon->len, DMA_TO_DEVICE); | 535 | arvif->beacon->len, DMA_TO_DEVICE); |
529 | 536 | ||
537 | if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && | ||
538 | arvif->beacon_state != ATH10K_BEACON_SENT)) | ||
539 | return; | ||
540 | |||
530 | dev_kfree_skb_any(arvif->beacon); | 541 | dev_kfree_skb_any(arvif->beacon); |
531 | 542 | ||
532 | arvif->beacon = NULL; | 543 | arvif->beacon = NULL; |
533 | arvif->beacon_sent = false; | 544 | arvif->beacon_state = ATH10K_BEACON_SCHEDULED; |
534 | } | 545 | } |
535 | 546 | ||
536 | static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) | 547 | static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) |
@@ -970,6 +981,143 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif) | |||
970 | return ret; | 981 | return ret; |
971 | } | 982 | } |
972 | 983 | ||
984 | static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, | ||
985 | struct sk_buff *bcn) | ||
986 | { | ||
987 | struct ath10k *ar = arvif->ar; | ||
988 | struct ieee80211_mgmt *mgmt; | ||
989 | const u8 *p2p_ie; | ||
990 | int ret; | ||
991 | |||
992 | if (arvif->vdev_type != WMI_VDEV_TYPE_AP) | ||
993 | return 0; | ||
994 | |||
995 | if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) | ||
996 | return 0; | ||
997 | |||
998 | mgmt = (void *)bcn->data; | ||
999 | p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, | ||
1000 | mgmt->u.beacon.variable, | ||
1001 | bcn->len - (mgmt->u.beacon.variable - | ||
1002 | bcn->data)); | ||
1003 | if (!p2p_ie) | ||
1004 | return -ENOENT; | ||
1005 | |||
1006 | ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); | ||
1007 | if (ret) { | ||
1008 | ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", | ||
1009 | arvif->vdev_id, ret); | ||
1010 | return ret; | ||
1011 | } | ||
1012 | |||
1013 | return 0; | ||
1014 | } | ||
1015 | |||
1016 | static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, | ||
1017 | u8 oui_type, size_t ie_offset) | ||
1018 | { | ||
1019 | size_t len; | ||
1020 | const u8 *next; | ||
1021 | const u8 *end; | ||
1022 | u8 *ie; | ||
1023 | |||
1024 | if (WARN_ON(skb->len < ie_offset)) | ||
1025 | return -EINVAL; | ||
1026 | |||
1027 | ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, | ||
1028 | skb->data + ie_offset, | ||
1029 | skb->len - ie_offset); | ||
1030 | if (!ie) | ||
1031 | return -ENOENT; | ||
1032 | |||
1033 | len = ie[1] + 2; | ||
1034 | end = skb->data + skb->len; | ||
1035 | next = ie + len; | ||
1036 | |||
1037 | if (WARN_ON(next > end)) | ||
1038 | return -EINVAL; | ||
1039 | |||
1040 | memmove(ie, next, end - next); | ||
1041 | skb_trim(skb, skb->len - len); | ||
1042 | |||
1043 | return 0; | ||
1044 | } | ||
1045 | |||
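The removal above leans on the standard 802.11 information-element encoding: one octet of element id, one octet of payload length, then the payload, so the whole element occupies ie[1] + 2 bytes and the rest of the buffer is shifted down over it. A small sketch of that length arithmetic (illustrative helpers, not driver code):

/* IE layout: ie[0] = element id, ie[1] = payload length, payload follows. */
static size_t ie_total_len(const u8 *ie)
{
	return (size_t)ie[1] + 2;	/* header octets + payload */
}

static const u8 *ie_next(const u8 *ie, const u8 *end)
{
	const u8 *next = ie + ie_total_len(ie);

	return next <= end ? next : NULL;	/* NULL if the IE overruns */
}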
1046 | static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) | ||
1047 | { | ||
1048 | struct ath10k *ar = arvif->ar; | ||
1049 | struct ieee80211_hw *hw = ar->hw; | ||
1050 | struct ieee80211_vif *vif = arvif->vif; | ||
1051 | struct ieee80211_mutable_offsets offs = {}; | ||
1052 | struct sk_buff *bcn; | ||
1053 | int ret; | ||
1054 | |||
1055 | if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) | ||
1056 | return 0; | ||
1057 | |||
1058 | bcn = ieee80211_beacon_get_template(hw, vif, &offs); | ||
1059 | if (!bcn) { | ||
1060 | ath10k_warn(ar, "failed to get beacon template from mac80211\n"); | ||
1061 | return -EPERM; | ||
1062 | } | ||
1063 | |||
1064 | ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); | ||
1065 | if (ret) { | ||
1066 | ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); | ||
1067 | kfree_skb(bcn); | ||
1068 | return ret; | ||
1069 | } | ||
1070 | |||
1071 | /* P2P IE is inserted by firmware automatically (as configured above) | ||
1072 | * so remove it from the base beacon template to avoid duplicate P2P | ||
1073 | * IEs in beacon frames. | ||
1074 | */ | ||
1075 | ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, | ||
1076 | offsetof(struct ieee80211_mgmt, | ||
1077 | u.beacon.variable)); | ||
1078 | |||
1079 | ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, | ||
1080 | 0, NULL, 0); | ||
1081 | kfree_skb(bcn); | ||
1082 | |||
1083 | if (ret) { | ||
1084 | ath10k_warn(ar, "failed to submit beacon template command: %d\n", | ||
1085 | ret); | ||
1086 | return ret; | ||
1087 | } | ||
1088 | |||
1089 | return 0; | ||
1090 | } | ||
1091 | |||
1092 | static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) | ||
1093 | { | ||
1094 | struct ath10k *ar = arvif->ar; | ||
1095 | struct ieee80211_hw *hw = ar->hw; | ||
1096 | struct ieee80211_vif *vif = arvif->vif; | ||
1097 | struct sk_buff *prb; | ||
1098 | int ret; | ||
1099 | |||
1100 | if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) | ||
1101 | return 0; | ||
1102 | |||
1103 | prb = ieee80211_proberesp_get(hw, vif); | ||
1104 | if (!prb) { | ||
1105 | ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); | ||
1106 | return -EPERM; | ||
1107 | } | ||
1108 | |||
1109 | ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); | ||
1110 | kfree_skb(prb); | ||
1111 | |||
1112 | if (ret) { | ||
1113 | ath10k_warn(ar, "failed to submit probe resp template command: %d\n", | ||
1114 | ret); | ||
1115 | return ret; | ||
1116 | } | ||
1117 | |||
1118 | return 0; | ||
1119 | } | ||
1120 | |||
973 | static void ath10k_control_beaconing(struct ath10k_vif *arvif, | 1121 | static void ath10k_control_beaconing(struct ath10k_vif *arvif, |
974 | struct ieee80211_bss_conf *info) | 1122 | struct ieee80211_bss_conf *info) |
975 | { | 1123 | { |
@@ -1155,6 +1303,38 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) | |||
1155 | return 0; | 1303 | return 0; |
1156 | } | 1304 | } |
1157 | 1305 | ||
1306 | static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) | ||
1307 | { | ||
1308 | struct ath10k *ar = arvif->ar; | ||
1309 | struct wmi_sta_keepalive_arg arg = {}; | ||
1310 | int ret; | ||
1311 | |||
1312 | lockdep_assert_held(&arvif->ar->conf_mutex); | ||
1313 | |||
1314 | if (arvif->vdev_type != WMI_VDEV_TYPE_STA) | ||
1315 | return 0; | ||
1316 | |||
1317 | if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) | ||
1318 | return 0; | ||
1319 | |||
1320 | /* Some firmware revisions have a bug and ignore the `enabled` field. | ||
1321 | * Instead use the interval to disable the keepalive. | ||
1322 | */ | ||
1323 | arg.vdev_id = arvif->vdev_id; | ||
1324 | arg.enabled = 1; | ||
1325 | arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; | ||
1326 | arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; | ||
1327 | |||
1328 | ret = ath10k_wmi_sta_keepalive(ar, &arg); | ||
1329 | if (ret) { | ||
1330 | ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", | ||
1331 | arvif->vdev_id, ret); | ||
1332 | return ret; | ||
1333 | } | ||
1334 | |||
1335 | return 0; | ||
1336 | } | ||
1337 | |||
1158 | /**********************/ | 1338 | /**********************/ |
1159 | /* Station management */ | 1339 | /* Station management */ |
1160 | /**********************/ | 1340 | /**********************/ |
@@ -1423,6 +1603,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, | |||
1423 | return; | 1603 | return; |
1424 | 1604 | ||
1425 | arg->peer_flags |= WMI_PEER_VHT; | 1605 | arg->peer_flags |= WMI_PEER_VHT; |
1606 | |||
1607 | if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) | ||
1608 | arg->peer_flags |= WMI_PEER_VHT_2G; | ||
1609 | |||
1426 | arg->peer_vht_caps = vht_cap->cap; | 1610 | arg->peer_vht_caps = vht_cap->cap; |
1427 | 1611 | ||
1428 | ampdu_factor = (vht_cap->cap & | 1612 | ampdu_factor = (vht_cap->cap & |
@@ -1501,7 +1685,12 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, | |||
1501 | 1685 | ||
1502 | switch (ar->hw->conf.chandef.chan->band) { | 1686 | switch (ar->hw->conf.chandef.chan->band) { |
1503 | case IEEE80211_BAND_2GHZ: | 1687 | case IEEE80211_BAND_2GHZ: |
1504 | if (sta->ht_cap.ht_supported) { | 1688 | if (sta->vht_cap.vht_supported) { |
1689 | if (sta->bandwidth == IEEE80211_STA_RX_BW_40) | ||
1690 | phymode = MODE_11AC_VHT40; | ||
1691 | else | ||
1692 | phymode = MODE_11AC_VHT20; | ||
1693 | } else if (sta->ht_cap.ht_supported) { | ||
1505 | if (sta->bandwidth == IEEE80211_STA_RX_BW_40) | 1694 | if (sta->bandwidth == IEEE80211_STA_RX_BW_40) |
1506 | phymode = MODE_11NG_HT40; | 1695 | phymode = MODE_11NG_HT40; |
1507 | else | 1696 | else |
@@ -1683,7 +1872,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, | |||
1683 | ath10k_warn(ar, "failed to down vdev %i: %d\n", | 1872 | ath10k_warn(ar, "failed to down vdev %i: %d\n", |
1684 | arvif->vdev_id, ret); | 1873 | arvif->vdev_id, ret); |
1685 | 1874 | ||
1686 | arvif->def_wep_key_idx = 0; | 1875 | arvif->def_wep_key_idx = -1; |
1876 | |||
1687 | arvif->is_up = false; | 1877 | arvif->is_up = false; |
1688 | } | 1878 | } |
1689 | 1879 | ||
@@ -1742,11 +1932,14 @@ static int ath10k_station_assoc(struct ath10k *ar, | |||
1742 | } | 1932 | } |
1743 | } | 1933 | } |
1744 | 1934 | ||
1745 | ret = ath10k_install_peer_wep_keys(arvif, sta->addr); | 1935 | /* Plumb cached keys only for static WEP */ |
1746 | if (ret) { | 1936 | if (arvif->def_wep_key_idx != -1) { |
1747 | ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", | 1937 | ret = ath10k_install_peer_wep_keys(arvif, sta->addr); |
1748 | arvif->vdev_id, ret); | 1938 | if (ret) { |
1749 | return ret; | 1939 | ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", |
1940 | arvif->vdev_id, ret); | ||
1941 | return ret; | ||
1942 | } | ||
1750 | } | 1943 | } |
1751 | } | 1944 | } |
1752 | 1945 | ||
@@ -2011,75 +2204,13 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2011 | * used only for CQM purposes (e.g. hostapd station keepalive ping) so | 2204 | * used only for CQM purposes (e.g. hostapd station keepalive ping) so |
2012 | * it is safe to downgrade to NullFunc. | 2205 | * it is safe to downgrade to NullFunc. |
2013 | */ | 2206 | */ |
2207 | hdr = (void *)skb->data; | ||
2014 | if (ieee80211_is_qos_nullfunc(hdr->frame_control)) { | 2208 | if (ieee80211_is_qos_nullfunc(hdr->frame_control)) { |
2015 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); | 2209 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); |
2016 | cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; | 2210 | cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; |
2017 | } | 2211 | } |
2018 | } | 2212 | } |
2019 | 2213 | ||
2020 | static void ath10k_tx_wep_key_work(struct work_struct *work) | ||
2021 | { | ||
2022 | struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, | ||
2023 | wep_key_work); | ||
2024 | struct ath10k *ar = arvif->ar; | ||
2025 | int ret, keyidx = arvif->def_wep_key_newidx; | ||
2026 | |||
2027 | mutex_lock(&arvif->ar->conf_mutex); | ||
2028 | |||
2029 | if (arvif->ar->state != ATH10K_STATE_ON) | ||
2030 | goto unlock; | ||
2031 | |||
2032 | if (arvif->def_wep_key_idx == keyidx) | ||
2033 | goto unlock; | ||
2034 | |||
2035 | ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", | ||
2036 | arvif->vdev_id, keyidx); | ||
2037 | |||
2038 | ret = ath10k_wmi_vdev_set_param(arvif->ar, | ||
2039 | arvif->vdev_id, | ||
2040 | arvif->ar->wmi.vdev_param->def_keyid, | ||
2041 | keyidx); | ||
2042 | if (ret) { | ||
2043 | ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", | ||
2044 | arvif->vdev_id, | ||
2045 | ret); | ||
2046 | goto unlock; | ||
2047 | } | ||
2048 | |||
2049 | arvif->def_wep_key_idx = keyidx; | ||
2050 | |||
2051 | unlock: | ||
2052 | mutex_unlock(&arvif->ar->conf_mutex); | ||
2053 | } | ||
2054 | |||
2055 | static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif, | ||
2056 | struct ieee80211_key_conf *key, | ||
2057 | struct sk_buff *skb) | ||
2058 | { | ||
2059 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2060 | struct ath10k *ar = arvif->ar; | ||
2061 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
2062 | |||
2063 | if (!ieee80211_has_protected(hdr->frame_control)) | ||
2064 | return; | ||
2065 | |||
2066 | if (!key) | ||
2067 | return; | ||
2068 | |||
2069 | if (key->cipher != WLAN_CIPHER_SUITE_WEP40 && | ||
2070 | key->cipher != WLAN_CIPHER_SUITE_WEP104) | ||
2071 | return; | ||
2072 | |||
2073 | if (key->keyidx == arvif->def_wep_key_idx) | ||
2074 | return; | ||
2075 | |||
2076 | /* FIXME: Most likely a few frames will be TXed with an old key. Simply | ||
2077 | * queueing frames until key index is updated is not an option because | ||
2078 | * sk_buff may need more processing to be done, e.g. offchannel */ | ||
2079 | arvif->def_wep_key_newidx = key->keyidx; | ||
2080 | ieee80211_queue_work(ar->hw, &arvif->wep_key_work); | ||
2081 | } | ||
2082 | |||
2083 | static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, | 2214 | static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, |
2084 | struct ieee80211_vif *vif, | 2215 | struct ieee80211_vif *vif, |
2085 | struct sk_buff *skb) | 2216 | struct sk_buff *skb) |
@@ -2231,7 +2362,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) | |||
2231 | 2362 | ||
2232 | ret = wait_for_completion_timeout(&ar->offchan_tx_completed, | 2363 | ret = wait_for_completion_timeout(&ar->offchan_tx_completed, |
2233 | 3 * HZ); | 2364 | 3 * HZ); |
2234 | if (ret <= 0) | 2365 | if (ret == 0) |
2235 | ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", | 2366 | ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", |
2236 | skb); | 2367 | skb); |
2237 | 2368 | ||
@@ -2293,6 +2424,7 @@ void __ath10k_scan_finish(struct ath10k *ar) | |||
2293 | case ATH10K_SCAN_RUNNING: | 2424 | case ATH10K_SCAN_RUNNING: |
2294 | if (ar->scan.is_roc) | 2425 | if (ar->scan.is_roc) |
2295 | ieee80211_remain_on_channel_expired(ar->hw); | 2426 | ieee80211_remain_on_channel_expired(ar->hw); |
2427 | /* fall through */ | ||
2296 | case ATH10K_SCAN_ABORTING: | 2428 | case ATH10K_SCAN_ABORTING: |
2297 | if (!ar->scan.is_roc) | 2429 | if (!ar->scan.is_roc) |
2298 | ieee80211_scan_completed(ar->hw, | 2430 | ieee80211_scan_completed(ar->hw, |
@@ -2439,7 +2571,6 @@ static void ath10k_tx(struct ieee80211_hw *hw, | |||
2439 | struct ath10k *ar = hw->priv; | 2571 | struct ath10k *ar = hw->priv; |
2440 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2572 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
2441 | struct ieee80211_vif *vif = info->control.vif; | 2573 | struct ieee80211_vif *vif = info->control.vif; |
2442 | struct ieee80211_key_conf *key = info->control.hw_key; | ||
2443 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 2574 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
2444 | 2575 | ||
2445 | /* We should disable CCK RATE due to P2P */ | 2576 | /* We should disable CCK RATE due to P2P */ |
@@ -2453,7 +2584,6 @@ static void ath10k_tx(struct ieee80211_hw *hw, | |||
2453 | /* it makes no sense to process injected frames like that */ | 2584 | /* it makes no sense to process injected frames like that */ |
2454 | if (vif && vif->type != NL80211_IFTYPE_MONITOR) { | 2585 | if (vif && vif->type != NL80211_IFTYPE_MONITOR) { |
2455 | ath10k_tx_h_nwifi(hw, skb); | 2586 | ath10k_tx_h_nwifi(hw, skb); |
2456 | ath10k_tx_h_update_wep_key(vif, key, skb); | ||
2457 | ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); | 2587 | ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); |
2458 | ath10k_tx_h_seq_no(vif, skb); | 2588 | ath10k_tx_h_seq_no(vif, skb); |
2459 | } | 2589 | } |
@@ -2960,7 +3090,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, | |||
2960 | arvif->ar = ar; | 3090 | arvif->ar = ar; |
2961 | arvif->vif = vif; | 3091 | arvif->vif = vif; |
2962 | 3092 | ||
2963 | INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); | ||
2964 | INIT_LIST_HEAD(&arvif->list); | 3093 | INIT_LIST_HEAD(&arvif->list); |
2965 | 3094 | ||
2966 | if (ar->free_vdev_map == 0) { | 3095 | if (ar->free_vdev_map == 0) { |
@@ -3049,15 +3178,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, | |||
3049 | ar->free_vdev_map &= ~(1LL << arvif->vdev_id); | 3178 | ar->free_vdev_map &= ~(1LL << arvif->vdev_id); |
3050 | list_add(&arvif->list, &ar->arvifs); | 3179 | list_add(&arvif->list, &ar->arvifs); |
3051 | 3180 | ||
3052 | vdev_param = ar->wmi.vdev_param->def_keyid; | 3181 | /* It makes no sense to have firmware do keepalives. mac80211 already |
3053 | ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, | 3182 | * takes care of this with idle connection polling. |
3054 | arvif->def_wep_key_idx); | 3183 | */ |
3184 | ret = ath10k_mac_vif_disable_keepalive(arvif); | ||
3055 | if (ret) { | 3185 | if (ret) { |
3056 | ath10k_warn(ar, "failed to set vdev %i default key id: %d\n", | 3186 | ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", |
3057 | arvif->vdev_id, ret); | 3187 | arvif->vdev_id, ret); |
3058 | goto err_vdev_delete; | 3188 | goto err_vdev_delete; |
3059 | } | 3189 | } |
3060 | 3190 | ||
3191 | arvif->def_wep_key_idx = -1; | ||
3192 | |||
3061 | vdev_param = ar->wmi.vdev_param->tx_encap_type; | 3193 | vdev_param = ar->wmi.vdev_param->tx_encap_type; |
3062 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, | 3194 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, |
3063 | ATH10K_HW_TXRX_NATIVE_WIFI); | 3195 | ATH10K_HW_TXRX_NATIVE_WIFI); |
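ath10k_mac_vif_disable_keepalive() is introduced elsewhere in this series; only its call site is visible in this hunk. A minimal sketch of what such a helper would look like, built on the ath10k_wmi_sta_keepalive() wrapper added in wmi-ops.h further below — the wmi_sta_keepalive_arg field names and the WMI_STA_KEEPALIVE_* constants are assumptions here, not copied from the patch:

	static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
	{
		struct ath10k *ar = arvif->ar;
		struct wmi_sta_keepalive_arg arg = {};

		lockdep_assert_held(&ar->conf_mutex);

		/* keepalive is only meaningful for station vdevs and only
		 * if the firmware advertises the service at all
		 */
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
			return 0;
		if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
			return 0;

		arg.vdev_id = arvif->vdev_id;
		arg.enabled = 0;	/* mac80211 idle polling covers this */
		arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
		arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;

		return ath10k_wmi_sta_keepalive(ar, &arg);
	}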
@@ -3176,8 +3308,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, | |||
3176 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | 3308 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); |
3177 | int ret; | 3309 | int ret; |
3178 | 3310 | ||
3179 | cancel_work_sync(&arvif->wep_key_work); | ||
3180 | |||
3181 | mutex_lock(&ar->conf_mutex); | 3311 | mutex_lock(&ar->conf_mutex); |
3182 | 3312 | ||
3183 | spin_lock_bh(&ar->data_lock); | 3313 | spin_lock_bh(&ar->data_lock); |
@@ -3288,9 +3418,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, | |||
3288 | if (ret) | 3418 | if (ret) |
3289 | ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", | 3419 | ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", |
3290 | arvif->vdev_id, ret); | 3420 | arvif->vdev_id, ret); |
3421 | |||
3422 | ret = ath10k_mac_setup_bcn_tmpl(arvif); | ||
3423 | if (ret) | ||
3424 | ath10k_warn(ar, "failed to update beacon template: %d\n", | ||
3425 | ret); | ||
3426 | } | ||
3427 | |||
3428 | if (changed & BSS_CHANGED_AP_PROBE_RESP) { | ||
3429 | ret = ath10k_mac_setup_prb_tmpl(arvif); | ||
3430 | if (ret) | ||
3431 | ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", | ||
3432 | arvif->vdev_id, ret); | ||
3291 | } | 3433 | } |
3292 | 3434 | ||
3293 | if (changed & BSS_CHANGED_BEACON_INFO) { | 3435 | if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { |
3294 | arvif->dtim_period = info->dtim_period; | 3436 | arvif->dtim_period = info->dtim_period; |
3295 | 3437 | ||
3296 | ath10k_dbg(ar, ATH10K_DBG_MAC, | 3438 | ath10k_dbg(ar, ATH10K_DBG_MAC, |
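ath10k_mac_setup_bcn_tmpl() and ath10k_mac_setup_prb_tmpl() are likewise added outside this hunk. A rough sketch of the beacon-template path, wired to the new ath10k_wmi_bcn_tmpl() op shown later in wmi-ops.h; error handling and the P2P IE handling of the real helper are omitted and the details are assumptions:

	static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
	{
		struct ath10k *ar = arvif->ar;
		struct ieee80211_mutable_offsets offs = {};
		struct sk_buff *bcn;
		int ret;

		if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
			return 0;

		/* let mac80211 build the template and report the TIM IE offset */
		bcn = ieee80211_beacon_get_template(ar->hw, arvif->vif, &offs);
		if (!bcn)
			return -EPERM;

		ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset,
					  bcn, 0, 0, NULL, 0);
		kfree_skb(bcn);

		return ret;
	}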
@@ -3537,6 +3679,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
3537 | const u8 *peer_addr; | 3679 | const u8 *peer_addr; |
3538 | bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || | 3680 | bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || |
3539 | key->cipher == WLAN_CIPHER_SUITE_WEP104; | 3681 | key->cipher == WLAN_CIPHER_SUITE_WEP104; |
3682 | bool def_idx = false; | ||
3540 | int ret = 0; | 3683 | int ret = 0; |
3541 | 3684 | ||
3542 | if (key->keyidx > WMI_MAX_KEY_INDEX) | 3685 | if (key->keyidx > WMI_MAX_KEY_INDEX) |
@@ -3582,7 +3725,14 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
3582 | ath10k_clear_vdev_key(arvif, key); | 3725 | ath10k_clear_vdev_key(arvif, key); |
3583 | } | 3726 | } |
3584 | 3727 | ||
3585 | ret = ath10k_install_key(arvif, key, cmd, peer_addr); | 3728 | /* set TX_USAGE flag for all the keys in case of dot1x-WEP. For |
3729 | * static WEP, do not set this flag for the keys whose key id | ||
3730 | * is greater than default key id. | ||
3731 | */ | ||
3732 | if (arvif->def_wep_key_idx == -1) | ||
3733 | def_idx = true; | ||
3734 | |||
3735 | ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx); | ||
3586 | if (ret) { | 3736 | if (ret) { |
3587 | ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", | 3737 | ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", |
3588 | arvif->vdev_id, peer_addr, ret); | 3738 | arvif->vdev_id, peer_addr, ret); |
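The def_idx flag is consumed by ath10k_install_key(), which is not part of this hunk. In practice it would be folded into the WMI key flags roughly as below (WMI_KEY_TX_USAGE is the assumed flag name; only the relevant branch is sketched):

	/* inside ath10k_install_key()/ath10k_send_key(), sketch only */
	if (def_idx)
		arg.key_flags |= WMI_KEY_TX_USAGE;	/* key usable for tx */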
@@ -3607,6 +3757,39 @@ exit: | |||
3607 | return ret; | 3757 | return ret; |
3608 | } | 3758 | } |
3609 | 3759 | ||
3760 | static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, | ||
3761 | struct ieee80211_vif *vif, | ||
3762 | int keyidx) | ||
3763 | { | ||
3764 | struct ath10k *ar = hw->priv; | ||
3765 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
3766 | int ret; | ||
3767 | |||
3768 | mutex_lock(&arvif->ar->conf_mutex); | ||
3769 | |||
3770 | if (arvif->ar->state != ATH10K_STATE_ON) | ||
3771 | goto unlock; | ||
3772 | |||
3773 | ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", | ||
3774 | arvif->vdev_id, keyidx); | ||
3775 | |||
3776 | ret = ath10k_wmi_vdev_set_param(arvif->ar, | ||
3777 | arvif->vdev_id, | ||
3778 | arvif->ar->wmi.vdev_param->def_keyid, | ||
3779 | keyidx); | ||
3780 | |||
3781 | if (ret) { | ||
3782 | ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", | ||
3783 | arvif->vdev_id, | ||
3784 | ret); | ||
3785 | goto unlock; | ||
3786 | } | ||
3787 | |||
3788 | arvif->def_wep_key_idx = keyidx; | ||
3789 | unlock: | ||
3790 | mutex_unlock(&arvif->ar->conf_mutex); | ||
3791 | } | ||
3792 | |||
3610 | static void ath10k_sta_rc_update_wk(struct work_struct *wk) | 3793 | static void ath10k_sta_rc_update_wk(struct work_struct *wk) |
3611 | { | 3794 | { |
3612 | struct ath10k *ar; | 3795 | struct ath10k *ar; |
@@ -3842,6 +4025,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, | |||
3842 | u16 ac, bool enable) | 4025 | u16 ac, bool enable) |
3843 | { | 4026 | { |
3844 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | 4027 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); |
4028 | struct wmi_sta_uapsd_auto_trig_arg arg = {}; | ||
4029 | u32 prio = 0, acc = 0; | ||
3845 | u32 value = 0; | 4030 | u32 value = 0; |
3846 | int ret = 0; | 4031 | int ret = 0; |
3847 | 4032 | ||
@@ -3854,18 +4039,26 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, | |||
3854 | case IEEE80211_AC_VO: | 4039 | case IEEE80211_AC_VO: |
3855 | value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | | 4040 | value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | |
3856 | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; | 4041 | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; |
4042 | prio = 7; | ||
4043 | acc = 3; | ||
3857 | break; | 4044 | break; |
3858 | case IEEE80211_AC_VI: | 4045 | case IEEE80211_AC_VI: |
3859 | value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | | 4046 | value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | |
3860 | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; | 4047 | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; |
4048 | prio = 5; | ||
4049 | acc = 2; | ||
3861 | break; | 4050 | break; |
3862 | case IEEE80211_AC_BE: | 4051 | case IEEE80211_AC_BE: |
3863 | value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | | 4052 | value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | |
3864 | WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; | 4053 | WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; |
4054 | prio = 2; | ||
4055 | acc = 1; | ||
3865 | break; | 4056 | break; |
3866 | case IEEE80211_AC_BK: | 4057 | case IEEE80211_AC_BK: |
3867 | value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | | 4058 | value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | |
3868 | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; | 4059 | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; |
4060 | prio = 0; | ||
4061 | acc = 0; | ||
3869 | break; | 4062 | break; |
3870 | } | 4063 | } |
3871 | 4064 | ||
@@ -3907,6 +4100,29 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, | |||
3907 | return ret; | 4100 | return ret; |
3908 | } | 4101 | } |
3909 | 4102 | ||
4103 | if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || | ||
4104 | test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { | ||
4105 | /* Only userspace can make an educated decision when to send | ||
4106 | * trigger frame. The following effectively disables u-UAPSD | ||
4107 | * autotrigger in firmware (which is enabled by default | ||
4108 | * provided the autotrigger service is available). | ||
4109 | */ | ||
4110 | |||
4111 | arg.wmm_ac = acc; | ||
4112 | arg.user_priority = prio; | ||
4113 | arg.service_interval = 0; | ||
4114 | arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; | ||
4115 | arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; | ||
4116 | |||
4117 | ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, | ||
4118 | arvif->bssid, &arg, 1); | ||
4119 | if (ret) { | ||
4120 | ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", | ||
4121 | ret); | ||
4122 | return ret; | ||
4123 | } | ||
4124 | } | ||
4125 | |||
3910 | exit: | 4126 | exit: |
3911 | return ret; | 4127 | return ret; |
3912 | } | 4128 | } |
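The call above programs one access category per invocation (num_ac = 1), matching how conf_tx is driven per AC. The WMI op itself accepts an array, so a driver could just as well push all four ACs in a single command; a sketch using the same field values as above (service_interval stays 0 via the designated initializers):

	struct wmi_sta_uapsd_auto_trig_arg args[] = {
		{ .wmm_ac = 0, .user_priority = 0,	/* BK */
		  .suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC,
		  .delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC },
		{ .wmm_ac = 1, .user_priority = 2,	/* BE */
		  .suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC,
		  .delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC },
		{ .wmm_ac = 2, .user_priority = 5,	/* VI */
		  .suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC,
		  .delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC },
		{ .wmm_ac = 3, .user_priority = 7,	/* VO */
		  .suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC,
		  .delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC },
	};

	ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, arvif->bssid,
					args, ARRAY_SIZE(args));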
@@ -3916,6 +4132,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, | |||
3916 | const struct ieee80211_tx_queue_params *params) | 4132 | const struct ieee80211_tx_queue_params *params) |
3917 | { | 4133 | { |
3918 | struct ath10k *ar = hw->priv; | 4134 | struct ath10k *ar = hw->priv; |
4135 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
3919 | struct wmi_wmm_params_arg *p = NULL; | 4136 | struct wmi_wmm_params_arg *p = NULL; |
3920 | int ret; | 4137 | int ret; |
3921 | 4138 | ||
@@ -3923,16 +4140,16 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, | |||
3923 | 4140 | ||
3924 | switch (ac) { | 4141 | switch (ac) { |
3925 | case IEEE80211_AC_VO: | 4142 | case IEEE80211_AC_VO: |
3926 | p = &ar->wmm_params.ac_vo; | 4143 | p = &arvif->wmm_params.ac_vo; |
3927 | break; | 4144 | break; |
3928 | case IEEE80211_AC_VI: | 4145 | case IEEE80211_AC_VI: |
3929 | p = &ar->wmm_params.ac_vi; | 4146 | p = &arvif->wmm_params.ac_vi; |
3930 | break; | 4147 | break; |
3931 | case IEEE80211_AC_BE: | 4148 | case IEEE80211_AC_BE: |
3932 | p = &ar->wmm_params.ac_be; | 4149 | p = &arvif->wmm_params.ac_be; |
3933 | break; | 4150 | break; |
3934 | case IEEE80211_AC_BK: | 4151 | case IEEE80211_AC_BK: |
3935 | p = &ar->wmm_params.ac_bk; | 4152 | p = &arvif->wmm_params.ac_bk; |
3936 | break; | 4153 | break; |
3937 | } | 4154 | } |
3938 | 4155 | ||
@@ -3952,11 +4169,23 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, | |||
3952 | */ | 4169 | */ |
3953 | p->txop = params->txop * 32; | 4170 | p->txop = params->txop * 32; |
3954 | 4171 | ||
3955 | /* FIXME: FW accepts wmm params per hw, not per vif */ | 4172 | if (ar->wmi.ops->gen_vdev_wmm_conf) { |
3956 | ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); | 4173 | ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, |
3957 | if (ret) { | 4174 | &arvif->wmm_params); |
3958 | ath10k_warn(ar, "failed to set wmm params: %d\n", ret); | 4175 | if (ret) { |
3959 | goto exit; | 4176 | ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", |
4177 | arvif->vdev_id, ret); | ||
4178 | goto exit; | ||
4179 | } | ||
4180 | } else { | ||
4181 | /* This won't work well with multi-interface cases but it's | ||
4182 | * better than nothing. | ||
4183 | */ | ||
4184 | ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); | ||
4185 | if (ret) { | ||
4186 | ath10k_warn(ar, "failed to set wmm params: %d\n", ret); | ||
4187 | goto exit; | ||
4188 | } | ||
3960 | } | 4189 | } |
3961 | 4190 | ||
3962 | ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); | 4191 | ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); |
@@ -4726,6 +4955,7 @@ static const struct ieee80211_ops ath10k_ops = { | |||
4726 | .hw_scan = ath10k_hw_scan, | 4955 | .hw_scan = ath10k_hw_scan, |
4727 | .cancel_hw_scan = ath10k_cancel_hw_scan, | 4956 | .cancel_hw_scan = ath10k_cancel_hw_scan, |
4728 | .set_key = ath10k_set_key, | 4957 | .set_key = ath10k_set_key, |
4958 | .set_default_unicast_key = ath10k_set_default_unicast_key, | ||
4729 | .sta_state = ath10k_sta_state, | 4959 | .sta_state = ath10k_sta_state, |
4730 | .conf_tx = ath10k_conf_tx, | 4960 | .conf_tx = ath10k_conf_tx, |
4731 | .remain_on_channel = ath10k_remain_on_channel, | 4961 | .remain_on_channel = ath10k_remain_on_channel, |
@@ -4751,6 +4981,9 @@ static const struct ieee80211_ops ath10k_ops = { | |||
4751 | .suspend = ath10k_suspend, | 4981 | .suspend = ath10k_suspend, |
4752 | .resume = ath10k_resume, | 4982 | .resume = ath10k_resume, |
4753 | #endif | 4983 | #endif |
4984 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
4985 | .sta_add_debugfs = ath10k_sta_add_debugfs, | ||
4986 | #endif | ||
4754 | }; | 4987 | }; |
4755 | 4988 | ||
4756 | #define RATETAB_ENT(_rate, _rateid, _flags) { \ | 4989 | #define RATETAB_ENT(_rate, _rateid, _flags) { \ |
@@ -5074,7 +5307,8 @@ int ath10k_mac_register(struct ath10k *ar) | |||
5074 | band->bitrates = ath10k_g_rates; | 5307 | band->bitrates = ath10k_g_rates; |
5075 | band->ht_cap = ht_cap; | 5308 | band->ht_cap = ht_cap; |
5076 | 5309 | ||
5077 | /* vht is not supported in 2.4 GHz */ | 5310 | /* Enable the VHT support at 2.4 GHz */ |
5311 | band->vht_cap = vht_cap; | ||
5078 | 5312 | ||
5079 | ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; | 5313 | ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; |
5080 | } | 5314 | } |
@@ -5139,6 +5373,19 @@ int ath10k_mac_register(struct ath10k *ar) | |||
5139 | 5373 | ||
5140 | ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; | 5374 | ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; |
5141 | 5375 | ||
5376 | if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) { | ||
5377 | ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; | ||
5378 | |||
5379 | /* Firmware delivers WPS/P2P Probe Requests frames to driver so | ||
5380 | * that userspace (e.g. wpa_supplicant/hostapd) can generate | ||
5381 | * correct Probe Responses. This is more of a hack advert.. | ||
5382 | */ | ||
5383 | ar->hw->wiphy->probe_resp_offload |= | ||
5384 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | | ||
5385 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | | ||
5386 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; | ||
5387 | } | ||
5388 | |||
5142 | ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; | 5389 | ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; |
5143 | ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; | 5390 | ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; |
5144 | ar->hw->wiphy->max_remain_on_channel_duration = 5000; | 5391 | ar->hw->wiphy->max_remain_on_channel_duration = 5000; |
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 5e50214246f8..e6972b09333e 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
@@ -58,9 +58,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); | |||
58 | #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 | 58 | #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 |
59 | 59 | ||
60 | #define QCA988X_2_0_DEVICE_ID (0x003c) | 60 | #define QCA988X_2_0_DEVICE_ID (0x003c) |
61 | #define QCA6174_2_1_DEVICE_ID (0x003e) | ||
61 | 62 | ||
62 | static const struct pci_device_id ath10k_pci_id_table[] = { | 63 | static const struct pci_device_id ath10k_pci_id_table[] = { |
63 | { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ | 64 | { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ |
65 | { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ | ||
64 | {0} | 66 | {0} |
65 | }; | 67 | }; |
66 | 68 | ||
@@ -70,6 +72,11 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { | |||
70 | * because of that. | 72 | * because of that. |
71 | */ | 73 | */ |
72 | { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, | 74 | { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, |
75 | { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, | ||
76 | { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, | ||
77 | { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, | ||
78 | { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, | ||
79 | { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, | ||
73 | }; | 80 | }; |
74 | 81 | ||
75 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar); | 82 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar); |
@@ -403,7 +410,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) | |||
403 | return -EIO; | 410 | return -EIO; |
404 | } | 411 | } |
405 | 412 | ||
406 | ATH10K_SKB_CB(skb)->paddr = paddr; | 413 | ATH10K_SKB_RXCB(skb)->paddr = paddr; |
407 | 414 | ||
408 | ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); | 415 | ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); |
409 | if (ret) { | 416 | if (ret) { |
@@ -872,7 +879,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) | |||
872 | &flags) == 0) { | 879 | &flags) == 0) { |
873 | skb = transfer_context; | 880 | skb = transfer_context; |
874 | max_nbytes = skb->len + skb_tailroom(skb); | 881 | max_nbytes = skb->len + skb_tailroom(skb); |
875 | dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, | 882 | dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, |
876 | max_nbytes, DMA_FROM_DEVICE); | 883 | max_nbytes, DMA_FROM_DEVICE); |
877 | 884 | ||
878 | if (unlikely(max_nbytes < nbytes)) { | 885 | if (unlikely(max_nbytes < nbytes)) { |
@@ -1238,7 +1245,7 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) | |||
1238 | 1245 | ||
1239 | ce_ring->per_transfer_context[i] = NULL; | 1246 | ce_ring->per_transfer_context[i] = NULL; |
1240 | 1247 | ||
1241 | dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, | 1248 | dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, |
1242 | skb->len + skb_tailroom(skb), | 1249 | skb->len + skb_tailroom(skb), |
1243 | DMA_FROM_DEVICE); | 1250 | DMA_FROM_DEVICE); |
1244 | dev_kfree_skb_any(skb); | 1251 | dev_kfree_skb_any(skb); |
@@ -1506,6 +1513,35 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) | |||
1506 | return 0; | 1513 | return 0; |
1507 | } | 1514 | } |
1508 | 1515 | ||
1516 | static int ath10k_pci_get_num_banks(struct ath10k *ar) | ||
1517 | { | ||
1518 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1519 | |||
1520 | switch (ar_pci->pdev->device) { | ||
1521 | case QCA988X_2_0_DEVICE_ID: | ||
1522 | return 1; | ||
1523 | case QCA6174_2_1_DEVICE_ID: | ||
1524 | switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { | ||
1525 | case QCA6174_HW_1_0_CHIP_ID_REV: | ||
1526 | case QCA6174_HW_1_1_CHIP_ID_REV: | ||
1527 | return 3; | ||
1528 | case QCA6174_HW_1_3_CHIP_ID_REV: | ||
1529 | return 2; | ||
1530 | case QCA6174_HW_2_1_CHIP_ID_REV: | ||
1531 | case QCA6174_HW_2_2_CHIP_ID_REV: | ||
1532 | return 6; | ||
1533 | case QCA6174_HW_3_0_CHIP_ID_REV: | ||
1534 | case QCA6174_HW_3_1_CHIP_ID_REV: | ||
1535 | case QCA6174_HW_3_2_CHIP_ID_REV: | ||
1536 | return 9; | ||
1537 | } | ||
1538 | break; | ||
1539 | } | ||
1540 | |||
1541 | ath10k_warn(ar, "unknown number of banks, assuming 1\n"); | ||
1542 | return 1; | ||
1543 | } | ||
1544 | |||
1509 | static int ath10k_pci_init_config(struct ath10k *ar) | 1545 | static int ath10k_pci_init_config(struct ath10k *ar) |
1510 | { | 1546 | { |
1511 | u32 interconnect_targ_addr; | 1547 | u32 interconnect_targ_addr; |
@@ -1616,7 +1652,8 @@ static int ath10k_pci_init_config(struct ath10k *ar) | |||
1616 | /* first bank is switched to IRAM */ | 1652 | /* first bank is switched to IRAM */ |
1617 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & | 1653 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & |
1618 | HI_EARLY_ALLOC_MAGIC_MASK); | 1654 | HI_EARLY_ALLOC_MAGIC_MASK); |
1619 | ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & | 1655 | ealloc_value |= ((ath10k_pci_get_num_banks(ar) << |
1656 | HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & | ||
1620 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); | 1657 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); |
1621 | 1658 | ||
1622 | ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); | 1659 | ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); |
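ath10k_pci_get_num_banks() keys off MS(ar->chip_id, SOC_CHIP_ID_REV) to tell the QCA6174 silicon revisions apart. MS() is the usual ath10k field-extraction helper from hw.h; for reference it expands as below (the SOC_CHIP_ID_REV mask/shift values live in hw.h and are not repeated here):

	/* generic mask/shift extraction helper (hw.h) */
	#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)

	/* MS(ar->chip_id, SOC_CHIP_ID_REV) ==
	 *	(ar->chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB
	 */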
@@ -1812,12 +1849,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) | |||
1812 | return 0; | 1849 | return 0; |
1813 | } | 1850 | } |
1814 | 1851 | ||
1815 | static int ath10k_pci_chip_reset(struct ath10k *ar) | 1852 | static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) |
1816 | { | 1853 | { |
1817 | int i, ret; | 1854 | int i, ret; |
1818 | u32 val; | 1855 | u32 val; |
1819 | 1856 | ||
1820 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n"); | 1857 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n"); |
1821 | 1858 | ||
1822 | /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset. | 1859 | /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset. |
1823 | * It is thus preferred to use warm reset which is safer but may not be | 1860 | * It is thus preferred to use warm reset which is safer but may not be |
@@ -1881,11 +1918,53 @@ static int ath10k_pci_chip_reset(struct ath10k *ar) | |||
1881 | return ret; | 1918 | return ret; |
1882 | } | 1919 | } |
1883 | 1920 | ||
1884 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n"); | 1921 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n"); |
1885 | 1922 | ||
1886 | return 0; | 1923 | return 0; |
1887 | } | 1924 | } |
1888 | 1925 | ||
1926 | static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) | ||
1927 | { | ||
1928 | int ret; | ||
1929 | |||
1930 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n"); | ||
1931 | |||
1932 | /* FIXME: QCA6174 requires cold + warm reset to work. */ | ||
1933 | |||
1934 | ret = ath10k_pci_cold_reset(ar); | ||
1935 | if (ret) { | ||
1936 | ath10k_warn(ar, "failed to cold reset: %d\n", ret); | ||
1937 | return ret; | ||
1938 | } | ||
1939 | |||
1940 | ret = ath10k_pci_wait_for_target_init(ar); | ||
1941 | if (ret) { | ||
1942 | ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", | ||
1943 | ret); | ||
1944 | return ret; | ||
1945 | } | ||
1946 | |||
1947 | ret = ath10k_pci_warm_reset(ar); | ||
1948 | if (ret) { | ||
1949 | ath10k_warn(ar, "failed to warm reset: %d\n", ret); | ||
1950 | return ret; | ||
1951 | } | ||
1952 | |||
1953 | ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); | ||
1954 | |||
1955 | return 0; | ||
1956 | } | ||
1957 | |||
1958 | static int ath10k_pci_chip_reset(struct ath10k *ar) | ||
1959 | { | ||
1960 | if (QCA_REV_988X(ar)) | ||
1961 | return ath10k_pci_qca988x_chip_reset(ar); | ||
1962 | else if (QCA_REV_6174(ar)) | ||
1963 | return ath10k_pci_qca6174_chip_reset(ar); | ||
1964 | else | ||
1965 | return -ENOTSUPP; | ||
1966 | } | ||
1967 | |||
1889 | static int ath10k_pci_hif_power_up(struct ath10k *ar) | 1968 | static int ath10k_pci_hif_power_up(struct ath10k *ar) |
1890 | { | 1969 | { |
1891 | int ret; | 1970 | int ret; |
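ath10k_pci_chip_reset() now dispatches on the hardware revision recorded at probe time (see the ath10k_core_create() change below). The QCA_REV_988X()/QCA_REV_6174() helpers come from hw.h in this same series and are effectively plain hw_rev comparisons:

	/* hw.h, sketch of the revision helpers used above */
	#define QCA_REV_988X(ar)	((ar)->hw_rev == ATH10K_HW_QCA988X)
	#define QCA_REV_6174(ar)	((ar)->hw_rev == ATH10K_HW_QCA6174)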
@@ -1910,6 +1989,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) | |||
1910 | */ | 1989 | */ |
1911 | ret = ath10k_pci_chip_reset(ar); | 1990 | ret = ath10k_pci_chip_reset(ar); |
1912 | if (ret) { | 1991 | if (ret) { |
1992 | if (ath10k_pci_has_fw_crashed(ar)) { | ||
1993 | ath10k_warn(ar, "firmware crashed during chip reset\n"); | ||
1994 | ath10k_pci_fw_crashed_clear(ar); | ||
1995 | ath10k_pci_fw_crashed_dump(ar); | ||
1996 | } | ||
1997 | |||
1913 | ath10k_err(ar, "failed to reset chip: %d\n", ret); | 1998 | ath10k_err(ar, "failed to reset chip: %d\n", ret); |
1914 | goto err_sleep; | 1999 | goto err_sleep; |
1915 | } | 2000 | } |
@@ -2041,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data) | |||
2041 | return; | 2126 | return; |
2042 | } | 2127 | } |
2043 | 2128 | ||
2129 | ath10k_pci_irq_disable(ar); | ||
2044 | ath10k_pci_fw_crashed_clear(ar); | 2130 | ath10k_pci_fw_crashed_clear(ar); |
2045 | ath10k_pci_fw_crashed_dump(ar); | 2131 | ath10k_pci_fw_crashed_dump(ar); |
2046 | } | 2132 | } |
@@ -2110,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data) | |||
2110 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2196 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
2111 | 2197 | ||
2112 | if (ath10k_pci_has_fw_crashed(ar)) { | 2198 | if (ath10k_pci_has_fw_crashed(ar)) { |
2199 | ath10k_pci_irq_disable(ar); | ||
2113 | ath10k_pci_fw_crashed_clear(ar); | 2200 | ath10k_pci_fw_crashed_clear(ar); |
2114 | ath10k_pci_fw_crashed_dump(ar); | 2201 | ath10k_pci_fw_crashed_dump(ar); |
2115 | return; | 2202 | return; |
@@ -2352,8 +2439,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) | |||
2352 | 2439 | ||
2353 | if (val & FW_IND_EVENT_PENDING) { | 2440 | if (val & FW_IND_EVENT_PENDING) { |
2354 | ath10k_warn(ar, "device has crashed during init\n"); | 2441 | ath10k_warn(ar, "device has crashed during init\n"); |
2355 | ath10k_pci_fw_crashed_clear(ar); | ||
2356 | ath10k_pci_fw_crashed_dump(ar); | ||
2357 | return -ECOMM; | 2442 | return -ECOMM; |
2358 | } | 2443 | } |
2359 | 2444 | ||
@@ -2507,11 +2592,23 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2507 | int ret = 0; | 2592 | int ret = 0; |
2508 | struct ath10k *ar; | 2593 | struct ath10k *ar; |
2509 | struct ath10k_pci *ar_pci; | 2594 | struct ath10k_pci *ar_pci; |
2595 | enum ath10k_hw_rev hw_rev; | ||
2510 | u32 chip_id; | 2596 | u32 chip_id; |
2511 | 2597 | ||
2512 | ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, | 2598 | switch (pci_dev->device) { |
2513 | ATH10K_BUS_PCI, | 2599 | case QCA988X_2_0_DEVICE_ID: |
2514 | &ath10k_pci_hif_ops); | 2600 | hw_rev = ATH10K_HW_QCA988X; |
2601 | break; | ||
2602 | case QCA6174_2_1_DEVICE_ID: | ||
2603 | hw_rev = ATH10K_HW_QCA6174; | ||
2604 | break; | ||
2605 | default: | ||
2606 | WARN_ON(1); | ||
2607 | return -ENOTSUPP; | ||
2608 | } | ||
2609 | |||
2610 | ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, | ||
2611 | hw_rev, &ath10k_pci_hif_ops); | ||
2515 | if (!ar) { | 2612 | if (!ar) { |
2516 | dev_err(&pdev->dev, "failed to allocate core\n"); | 2613 | dev_err(&pdev->dev, "failed to allocate core\n"); |
2517 | return -ENOMEM; | 2614 | return -ENOMEM; |
@@ -2540,18 +2637,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2540 | goto err_release; | 2637 | goto err_release; |
2541 | } | 2638 | } |
2542 | 2639 | ||
2543 | chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); | ||
2544 | if (chip_id == 0xffffffff) { | ||
2545 | ath10k_err(ar, "failed to get chip id\n"); | ||
2546 | goto err_sleep; | ||
2547 | } | ||
2548 | |||
2549 | if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { | ||
2550 | ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", | ||
2551 | pdev->device, chip_id); | ||
2552 | goto err_sleep; | ||
2553 | } | ||
2554 | |||
2555 | ret = ath10k_pci_alloc_pipes(ar); | 2640 | ret = ath10k_pci_alloc_pipes(ar); |
2556 | if (ret) { | 2641 | if (ret) { |
2557 | ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", | 2642 | ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", |
@@ -2578,6 +2663,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
2578 | goto err_deinit_irq; | 2663 | goto err_deinit_irq; |
2579 | } | 2664 | } |
2580 | 2665 | ||
2666 | ret = ath10k_pci_chip_reset(ar); | ||
2667 | if (ret) { | ||
2668 | ath10k_err(ar, "failed to reset chip: %d\n", ret); | ||
2669 | goto err_free_irq; | ||
2670 | } | ||
2671 | |||
2672 | chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); | ||
2673 | if (chip_id == 0xffffffff) { | ||
2674 | ath10k_err(ar, "failed to get chip id\n"); | ||
2675 | goto err_free_irq; | ||
2676 | } | ||
2677 | |||
2678 | if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { | ||
2679 | ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", | ||
2680 | pdev->device, chip_id); | ||
2681 | goto err_sleep; | ||
2682 | } | ||
2683 | |||
2581 | ath10k_pci_sleep(ar); | 2684 | ath10k_pci_sleep(ar); |
2582 | 2685 | ||
2583 | ret = ath10k_core_register(ar, chip_id); | 2686 | ret = ath10k_core_register(ar, chip_id); |
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index ce4a1ef89961..bddf54320160 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h | |||
@@ -194,7 +194,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) | |||
194 | 194 | ||
195 | #define ATH10K_PCI_RX_POST_RETRY_MS 50 | 195 | #define ATH10K_PCI_RX_POST_RETRY_MS 50 |
196 | #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ | 196 | #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ |
197 | #define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ | 197 | #define PCIE_WAKE_TIMEOUT 10000 /* 10ms */ |
198 | 198 | ||
199 | #define BAR_NUM 0 | 199 | #define BAR_NUM 0 |
200 | 200 | ||
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index e1ffdd57a18c..e9cc7787bf5f 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h | |||
@@ -850,7 +850,7 @@ struct rx_ppdu_start { | |||
850 | 850 | ||
851 | #define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) | 851 | #define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) |
852 | 852 | ||
853 | struct rx_ppdu_end { | 853 | struct rx_ppdu_end_common { |
854 | __le32 evm_p0; | 854 | __le32 evm_p0; |
855 | __le32 evm_p1; | 855 | __le32 evm_p1; |
856 | __le32 evm_p2; | 856 | __le32 evm_p2; |
@@ -873,10 +873,33 @@ struct rx_ppdu_end { | |||
873 | u8 phy_err_code; | 873 | u8 phy_err_code; |
874 | __le16 flags; /* %RX_PPDU_END_FLAGS_ */ | 874 | __le16 flags; /* %RX_PPDU_END_FLAGS_ */ |
875 | __le32 info0; /* %RX_PPDU_END_INFO0_ */ | 875 | __le32 info0; /* %RX_PPDU_END_INFO0_ */ |
876 | } __packed; | ||
877 | |||
878 | struct rx_ppdu_end_qca988x { | ||
876 | __le16 bb_length; | 879 | __le16 bb_length; |
877 | __le16 info1; /* %RX_PPDU_END_INFO1_ */ | 880 | __le16 info1; /* %RX_PPDU_END_INFO1_ */ |
878 | } __packed; | 881 | } __packed; |
879 | 882 | ||
883 | #define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff | ||
884 | #define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0 | ||
885 | #define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000 | ||
886 | #define RX_PPDU_END_RTT_UNUSED_LSB 24 | ||
887 | #define RX_PPDU_END_RTT_NORMAL_MODE BIT(31) | ||
888 | |||
889 | struct rx_ppdu_end_qca6174 { | ||
890 | __le32 rtt; /* %RX_PPDU_END_RTT_ */ | ||
891 | __le16 bb_length; | ||
892 | __le16 info1; /* %RX_PPDU_END_INFO1_ */ | ||
893 | } __packed; | ||
894 | |||
895 | struct rx_ppdu_end { | ||
896 | struct rx_ppdu_end_common common; | ||
897 | union { | ||
898 | struct rx_ppdu_end_qca988x qca988x; | ||
899 | struct rx_ppdu_end_qca6174 qca6174; | ||
900 | } __packed; | ||
901 | } __packed; | ||
902 | |||
880 | /* | 903 | /* |
881 | * evm_p0 | 904 | * evm_p0 |
882 | * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3. | 905 | * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3. |
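Splitting rx_ppdu_end into a common part plus a per-chip union keeps the existing QCA988X field offsets intact while making room for the extra QCA6174 rtt word. Consumers that previously read rx_ppdu_end members directly now go through .common or the chip-specific member; a hedged illustration (the helper itself is not part of the patch):

	static u16 ath10k_rxd_ppdu_end_info1(struct ath10k *ar,
					     struct htt_rx_desc *rxd)
	{
		/* common fields keep their old offsets, the trailer differs */
		if (QCA_REV_6174(ar))
			return __le16_to_cpu(rxd->ppdu_end.qca6174.info1);

		return __le16_to_cpu(rxd->ppdu_end.qca988x.info1);
	}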
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index 9d0ae30f9ff1..a417aae52623 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h | |||
@@ -18,6 +18,8 @@ | |||
18 | #ifndef __TARGADDRS_H__ | 18 | #ifndef __TARGADDRS_H__ |
19 | #define __TARGADDRS_H__ | 19 | #define __TARGADDRS_H__ |
20 | 20 | ||
21 | #include "hw.h" | ||
22 | |||
21 | /* | 23 | /* |
22 | * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the | 24 | * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the |
23 | * host_interest structure. It must match the address of the _host_interest | 25 | * host_interest structure. It must match the address of the _host_interest |
@@ -445,4 +447,7 @@ Fw Mode/SubMode Mask | |||
445 | #define QCA988X_BOARD_DATA_SZ 7168 | 447 | #define QCA988X_BOARD_DATA_SZ 7168 |
446 | #define QCA988X_BOARD_EXT_DATA_SZ 0 | 448 | #define QCA988X_BOARD_EXT_DATA_SZ 0 |
447 | 449 | ||
450 | #define QCA6174_BOARD_DATA_SZ 8192 | ||
451 | #define QCA6174_BOARD_EXT_DATA_SZ 0 | ||
452 | |||
448 | #endif /* __TARGADDRS_H__ */ | 453 | #endif /* __TARGADDRS_H__ */ |
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index b14ae8d135f6..aede750809fe 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c | |||
@@ -98,7 +98,7 @@ static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev, | |||
98 | } | 98 | } |
99 | period = max(ATH10K_QUIET_PERIOD_MIN, | 99 | period = max(ATH10K_QUIET_PERIOD_MIN, |
100 | (ATH10K_QUIET_PERIOD_DEFAULT / num_bss)); | 100 | (ATH10K_QUIET_PERIOD_DEFAULT / num_bss)); |
101 | duration = period * (duty_cycle / 100); | 101 | duration = (period * duty_cycle) / 100; |
102 | enabled = duration ? 1 : 0; | 102 | enabled = duration ? 1 : 0; |
103 | 103 | ||
104 | ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration, | 104 | ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration, |
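The reordering in the duty cycle calculation matters because this is integer arithmetic: dividing first truncates, so any duty cycle below 100% used to collapse to a zero duration. A quick worked example with period = 100 ms and duty_cycle = 35:

	duration = period * (duty_cycle / 100);	/* old: 100 * 0    = 0  */
	duration = (period * duty_cycle) / 100;	/* new: 3500 / 100 = 35 */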
@@ -160,7 +160,8 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev, | |||
160 | temperature = ar->thermal.temperature; | 160 | temperature = ar->thermal.temperature; |
161 | spin_unlock_bh(&ar->data_lock); | 161 | spin_unlock_bh(&ar->data_lock); |
162 | 162 | ||
163 | ret = snprintf(buf, PAGE_SIZE, "%d", temperature); | 163 | /* display in millidegree celsius */ |
164 | ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000); | ||
164 | out: | 165 | out: |
165 | mutex_unlock(&ar->conf_mutex); | 166 | mutex_unlock(&ar->conf_mutex); |
166 | return ret; | 167 | return ret; |
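The hwmon sysfs convention is that temp*_input values are reported in millidegrees Celsius and are newline terminated, while the firmware reports whole degrees — hence the * 1000 and the added "\n". For example, with a firmware reading of 42 degC:

	/* temperature = 42 (degC from firmware) */
	snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);	/* "42000\n" */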
@@ -215,7 +216,7 @@ int ath10k_thermal_register(struct ath10k *ar) | |||
215 | 216 | ||
216 | /* Avoid linking error on devm_hwmon_device_register_with_groups, I | 217 | /* Avoid linking error on devm_hwmon_device_register_with_groups, I |
217 | * guess linux/hwmon.h is missing proper stubs. */ | 218 | * guess linux/hwmon.h is missing proper stubs. */ |
218 | if (!config_enabled(HWMON)) | 219 | if (!config_enabled(CONFIG_HWMON)) |
219 | return 0; | 220 | return 0; |
220 | 221 | ||
221 | hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev, | 222 | hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev, |
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index b289378b6e3e..5407887380ab 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h | |||
@@ -453,6 +453,74 @@ TRACE_EVENT(ath10k_htt_rx_desc, | |||
453 | ) | 453 | ) |
454 | ); | 454 | ); |
455 | 455 | ||
456 | TRACE_EVENT(ath10k_wmi_diag_container, | ||
457 | TP_PROTO(struct ath10k *ar, | ||
458 | u8 type, | ||
459 | u32 timestamp, | ||
460 | u32 code, | ||
461 | u16 len, | ||
462 | const void *data), | ||
463 | |||
464 | TP_ARGS(ar, type, timestamp, code, len, data), | ||
465 | |||
466 | TP_STRUCT__entry( | ||
467 | __string(device, dev_name(ar->dev)) | ||
468 | __string(driver, dev_driver_string(ar->dev)) | ||
469 | __field(u8, type) | ||
470 | __field(u32, timestamp) | ||
471 | __field(u32, code) | ||
472 | __field(u16, len) | ||
473 | __dynamic_array(u8, data, len) | ||
474 | ), | ||
475 | |||
476 | TP_fast_assign( | ||
477 | __assign_str(device, dev_name(ar->dev)); | ||
478 | __assign_str(driver, dev_driver_string(ar->dev)); | ||
479 | __entry->type = type; | ||
480 | __entry->timestamp = timestamp; | ||
481 | __entry->code = code; | ||
482 | __entry->len = len; | ||
483 | memcpy(__get_dynamic_array(data), data, len); | ||
484 | ), | ||
485 | |||
486 | TP_printk( | ||
487 | "%s %s diag container type %hhu timestamp %u code %u len %d", | ||
488 | __get_str(driver), | ||
489 | __get_str(device), | ||
490 | __entry->type, | ||
491 | __entry->timestamp, | ||
492 | __entry->code, | ||
493 | __entry->len | ||
494 | ) | ||
495 | ); | ||
496 | |||
497 | TRACE_EVENT(ath10k_wmi_diag, | ||
498 | TP_PROTO(struct ath10k *ar, const void *data, size_t len), | ||
499 | |||
500 | TP_ARGS(ar, data, len), | ||
501 | |||
502 | TP_STRUCT__entry( | ||
503 | __string(device, dev_name(ar->dev)) | ||
504 | __string(driver, dev_driver_string(ar->dev)) | ||
505 | __field(u16, len) | ||
506 | __dynamic_array(u8, data, len) | ||
507 | ), | ||
508 | |||
509 | TP_fast_assign( | ||
510 | __assign_str(device, dev_name(ar->dev)); | ||
511 | __assign_str(driver, dev_driver_string(ar->dev)); | ||
512 | __entry->len = len; | ||
513 | memcpy(__get_dynamic_array(data), data, len); | ||
514 | ), | ||
515 | |||
516 | TP_printk( | ||
517 | "%s %s tlv diag len %d", | ||
518 | __get_str(driver), | ||
519 | __get_str(device), | ||
520 | __entry->len | ||
521 | ) | ||
522 | ); | ||
523 | |||
456 | #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ | 524 | #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ |
457 | 525 | ||
458 | /* we don't want to use include/trace/events */ | 526 | /* we don't want to use include/trace/events */ |
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 7579de8e7a8c..3f00cec8aef5 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c | |||
@@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, | |||
64 | return; | 64 | return; |
65 | } | 65 | } |
66 | 66 | ||
67 | msdu = htt->pending_tx[tx_done->msdu_id]; | 67 | msdu = idr_find(&htt->pending_tx, tx_done->msdu_id); |
68 | if (!msdu) { | ||
69 | ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", | ||
70 | tx_done->msdu_id); | ||
71 | return; | ||
72 | } | ||
73 | |||
68 | skb_cb = ATH10K_SKB_CB(msdu); | 74 | skb_cb = ATH10K_SKB_CB(msdu); |
69 | 75 | ||
70 | dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); | 76 | dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); |
@@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, | |||
95 | /* we do not own the msdu anymore */ | 101 | /* we do not own the msdu anymore */ |
96 | 102 | ||
97 | exit: | 103 | exit: |
98 | htt->pending_tx[tx_done->msdu_id] = NULL; | ||
99 | ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); | 104 | ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); |
100 | __ath10k_htt_tx_dec_pending(htt); | 105 | __ath10k_htt_tx_dec_pending(htt); |
101 | if (htt->num_pending_tx == 0) | 106 | if (htt->num_pending_tx == 0) |
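pending_tx moves from a fixed array indexed by msdu_id to an idr, so a stale or bogus msdu_id coming back from firmware is now rejected by idr_find() instead of dereferencing an arbitrary slot. The allocation side lives in htt_tx.c and is not part of this hunk; a sketch of how the two sides pair up (the exact bounds and locking follow existing htt fields but are assumptions here):

	/* htt_tx.c side, sketch: reserve an id for an outgoing msdu */
	spin_lock_bh(&htt->tx_lock);
	msdu_id = idr_alloc(&htt->pending_tx, msdu, 0,
			    htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);
	if (msdu_id < 0)
		return msdu_id;

	/* completion path (above): look the skb up again, then
	 * ath10k_htt_tx_free_msdu_id() boils down to idr_remove()
	 */
	msdu = idr_find(&htt->pending_tx, msdu_id);
	idr_remove(&htt->pending_tx, msdu_id);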
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 20e2c3002bb5..04dc4b9db04e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h | |||
@@ -78,6 +78,8 @@ struct wmi_ops { | |||
78 | const struct wmi_vdev_spectral_conf_arg *arg); | 78 | const struct wmi_vdev_spectral_conf_arg *arg); |
79 | struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id, | 79 | struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id, |
80 | u32 trigger, u32 enable); | 80 | u32 trigger, u32 enable); |
81 | struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id, | ||
82 | const struct wmi_wmm_params_all_arg *arg); | ||
81 | struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id, | 83 | struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id, |
82 | const u8 peer_addr[ETH_ALEN]); | 84 | const u8 peer_addr[ETH_ALEN]); |
83 | struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id, | 85 | struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id, |
@@ -102,16 +104,20 @@ struct wmi_ops { | |||
102 | u32 value); | 104 | u32 value); |
103 | struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar, | 105 | struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar, |
104 | const struct wmi_scan_chan_list_arg *arg); | 106 | const struct wmi_scan_chan_list_arg *arg); |
105 | struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif); | 107 | struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id, |
108 | const void *bcn, size_t bcn_len, | ||
109 | u32 bcn_paddr, bool dtim_zero, | ||
110 | bool deliver_cab); | ||
106 | struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar, | 111 | struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar, |
107 | const struct wmi_pdev_set_wmm_params_arg *arg); | 112 | const struct wmi_wmm_params_all_arg *arg); |
108 | struct sk_buff *(*gen_request_stats)(struct ath10k *ar, | 113 | struct sk_buff *(*gen_request_stats)(struct ath10k *ar, |
109 | enum wmi_stats_id stats_id); | 114 | enum wmi_stats_id stats_id); |
110 | struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar, | 115 | struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar, |
111 | enum wmi_force_fw_hang_type type, | 116 | enum wmi_force_fw_hang_type type, |
112 | u32 delay_ms); | 117 | u32 delay_ms); |
113 | struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb); | 118 | struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb); |
114 | struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable); | 119 | struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable, |
120 | u32 log_level); | ||
115 | struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); | 121 | struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); |
116 | struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar); | 122 | struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar); |
117 | struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar, | 123 | struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar, |
@@ -119,6 +125,30 @@ struct wmi_ops { | |||
119 | u32 next_offset, | 125 | u32 next_offset, |
120 | u32 enabled); | 126 | u32 enabled); |
121 | struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar); | 127 | struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar); |
128 | struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id, | ||
129 | const u8 *mac); | ||
130 | struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id, | ||
131 | const u8 *mac, u32 tid, u32 buf_size); | ||
132 | struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id, | ||
133 | const u8 *mac, u32 tid, | ||
134 | u32 status); | ||
135 | struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id, | ||
136 | const u8 *mac, u32 tid, u32 initiator, | ||
137 | u32 reason); | ||
138 | struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id, | ||
139 | u32 tim_ie_offset, struct sk_buff *bcn, | ||
140 | u32 prb_caps, u32 prb_erp, | ||
141 | void *prb_ies, size_t prb_ies_len); | ||
142 | struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id, | ||
143 | struct sk_buff *bcn); | ||
144 | struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id, | ||
145 | const u8 *p2p_ie); | ||
146 | struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id, | ||
147 | const u8 peer_addr[ETH_ALEN], | ||
148 | const struct wmi_sta_uapsd_auto_trig_arg *args, | ||
149 | u32 num_ac); | ||
150 | struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar, | ||
151 | const struct wmi_sta_keepalive_arg *arg); | ||
122 | }; | 152 | }; |
123 | 153 | ||
124 | int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); | 154 | int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); |
@@ -558,6 +588,42 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, | |||
558 | } | 588 | } |
559 | 589 | ||
560 | static inline int | 590 | static inline int |
591 | ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, | ||
592 | const u8 peer_addr[ETH_ALEN], | ||
593 | const struct wmi_sta_uapsd_auto_trig_arg *args, | ||
594 | u32 num_ac) | ||
595 | { | ||
596 | struct sk_buff *skb; | ||
597 | u32 cmd_id; | ||
598 | |||
599 | if (!ar->wmi.ops->gen_vdev_sta_uapsd) | ||
600 | return -EOPNOTSUPP; | ||
601 | |||
602 | skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args, | ||
603 | num_ac); | ||
604 | if (IS_ERR(skb)) | ||
605 | return PTR_ERR(skb); | ||
606 | |||
607 | cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid; | ||
608 | return ath10k_wmi_cmd_send(ar, skb, cmd_id); | ||
609 | } | ||
610 | |||
611 | static inline int | ||
612 | ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, | ||
613 | const struct wmi_wmm_params_all_arg *arg) | ||
614 | { | ||
615 | struct sk_buff *skb; | ||
616 | u32 cmd_id; | ||
617 | |||
618 | skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg); | ||
619 | if (IS_ERR(skb)) | ||
620 | return PTR_ERR(skb); | ||
621 | |||
622 | cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid; | ||
623 | return ath10k_wmi_cmd_send(ar, skb, cmd_id); | ||
624 | } | ||
625 | |||
626 | static inline int | ||
561 | ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, | 627 | ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, |
562 | const u8 peer_addr[ETH_ALEN]) | 628 | const u8 peer_addr[ETH_ALEN]) |
563 | { | 629 | { |
@@ -706,16 +772,19 @@ ath10k_wmi_peer_assoc(struct ath10k *ar, | |||
706 | } | 772 | } |
707 | 773 | ||
708 | static inline int | 774 | static inline int |
709 | ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) | 775 | ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id, |
776 | const void *bcn, size_t bcn_len, | ||
777 | u32 bcn_paddr, bool dtim_zero, | ||
778 | bool deliver_cab) | ||
710 | { | 779 | { |
711 | struct ath10k *ar = arvif->ar; | ||
712 | struct sk_buff *skb; | 780 | struct sk_buff *skb; |
713 | int ret; | 781 | int ret; |
714 | 782 | ||
715 | if (!ar->wmi.ops->gen_beacon_dma) | 783 | if (!ar->wmi.ops->gen_beacon_dma) |
716 | return -EOPNOTSUPP; | 784 | return -EOPNOTSUPP; |
717 | 785 | ||
718 | skb = ar->wmi.ops->gen_beacon_dma(arvif); | 786 | skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr, |
787 | dtim_zero, deliver_cab); | ||
719 | if (IS_ERR(skb)) | 788 | if (IS_ERR(skb)) |
720 | return PTR_ERR(skb); | 789 | return PTR_ERR(skb); |
721 | 790 | ||
@@ -731,7 +800,7 @@ ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) | |||
731 | 800 | ||
732 | static inline int | 801 | static inline int |
733 | ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, | 802 | ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, |
734 | const struct wmi_pdev_set_wmm_params_arg *arg) | 803 | const struct wmi_wmm_params_all_arg *arg) |
735 | { | 804 | { |
736 | struct sk_buff *skb; | 805 | struct sk_buff *skb; |
737 | 806 | ||
@@ -778,14 +847,14 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar, | |||
778 | } | 847 | } |
779 | 848 | ||
780 | static inline int | 849 | static inline int |
781 | ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) | 850 | ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level) |
782 | { | 851 | { |
783 | struct sk_buff *skb; | 852 | struct sk_buff *skb; |
784 | 853 | ||
785 | if (!ar->wmi.ops->gen_dbglog_cfg) | 854 | if (!ar->wmi.ops->gen_dbglog_cfg) |
786 | return -EOPNOTSUPP; | 855 | return -EOPNOTSUPP; |
787 | 856 | ||
788 | skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable); | 857 | skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level); |
789 | if (IS_ERR(skb)) | 858 | if (IS_ERR(skb)) |
790 | return PTR_ERR(skb); | 859 | return PTR_ERR(skb); |
791 | 860 | ||
@@ -857,4 +926,139 @@ ath10k_wmi_pdev_get_temperature(struct ath10k *ar) | |||
857 | ar->wmi.cmd->pdev_get_temperature_cmdid); | 926 | ar->wmi.cmd->pdev_get_temperature_cmdid); |
858 | } | 927 | } |
859 | 928 | ||
929 | static inline int | ||
930 | ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac) | ||
931 | { | ||
932 | struct sk_buff *skb; | ||
933 | |||
934 | if (!ar->wmi.ops->gen_addba_clear_resp) | ||
935 | return -EOPNOTSUPP; | ||
936 | |||
937 | skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac); | ||
938 | if (IS_ERR(skb)) | ||
939 | return PTR_ERR(skb); | ||
940 | |||
941 | return ath10k_wmi_cmd_send(ar, skb, | ||
942 | ar->wmi.cmd->addba_clear_resp_cmdid); | ||
943 | } | ||
944 | |||
945 | static inline int | ||
946 | ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
947 | u32 tid, u32 buf_size) | ||
948 | { | ||
949 | struct sk_buff *skb; | ||
950 | |||
951 | if (!ar->wmi.ops->gen_addba_send) | ||
952 | return -EOPNOTSUPP; | ||
953 | |||
954 | skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size); | ||
955 | if (IS_ERR(skb)) | ||
956 | return PTR_ERR(skb); | ||
957 | |||
958 | return ath10k_wmi_cmd_send(ar, skb, | ||
959 | ar->wmi.cmd->addba_send_cmdid); | ||
960 | } | ||
961 | |||
962 | static inline int | ||
963 | ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
964 | u32 tid, u32 status) | ||
965 | { | ||
966 | struct sk_buff *skb; | ||
967 | |||
968 | if (!ar->wmi.ops->gen_addba_set_resp) | ||
969 | return -EOPNOTSUPP; | ||
970 | |||
971 | skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status); | ||
972 | if (IS_ERR(skb)) | ||
973 | return PTR_ERR(skb); | ||
974 | |||
975 | return ath10k_wmi_cmd_send(ar, skb, | ||
976 | ar->wmi.cmd->addba_set_resp_cmdid); | ||
977 | } | ||
978 | |||
979 | static inline int | ||
980 | ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
981 | u32 tid, u32 initiator, u32 reason) | ||
982 | { | ||
983 | struct sk_buff *skb; | ||
984 | |||
985 | if (!ar->wmi.ops->gen_delba_send) | ||
986 | return -EOPNOTSUPP; | ||
987 | |||
988 | skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator, | ||
989 | reason); | ||
990 | if (IS_ERR(skb)) | ||
991 | return PTR_ERR(skb); | ||
992 | |||
993 | return ath10k_wmi_cmd_send(ar, skb, | ||
994 | ar->wmi.cmd->delba_send_cmdid); | ||
995 | } | ||
996 | |||
997 | static inline int | ||
998 | ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset, | ||
999 | struct sk_buff *bcn, u32 prb_caps, u32 prb_erp, | ||
1000 | void *prb_ies, size_t prb_ies_len) | ||
1001 | { | ||
1002 | struct sk_buff *skb; | ||
1003 | |||
1004 | if (!ar->wmi.ops->gen_bcn_tmpl) | ||
1005 | return -EOPNOTSUPP; | ||
1006 | |||
1007 | skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn, | ||
1008 | prb_caps, prb_erp, prb_ies, | ||
1009 | prb_ies_len); | ||
1010 | if (IS_ERR(skb)) | ||
1011 | return PTR_ERR(skb); | ||
1012 | |||
1013 | return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid); | ||
1014 | } | ||
1015 | |||
1016 | static inline int | ||
1017 | ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb) | ||
1018 | { | ||
1019 | struct sk_buff *skb; | ||
1020 | |||
1021 | if (!ar->wmi.ops->gen_prb_tmpl) | ||
1022 | return -EOPNOTSUPP; | ||
1023 | |||
1024 | skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb); | ||
1025 | if (IS_ERR(skb)) | ||
1026 | return PTR_ERR(skb); | ||
1027 | |||
1028 | return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid); | ||
1029 | } | ||
1030 | |||
1031 | static inline int | ||
1032 | ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie) | ||
1033 | { | ||
1034 | struct sk_buff *skb; | ||
1035 | |||
1036 | if (!ar->wmi.ops->gen_p2p_go_bcn_ie) | ||
1037 | return -EOPNOTSUPP; | ||
1038 | |||
1039 | skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie); | ||
1040 | if (IS_ERR(skb)) | ||
1041 | return PTR_ERR(skb); | ||
1042 | |||
1043 | return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie); | ||
1044 | } | ||
1045 | |||
1046 | static inline int | ||
1047 | ath10k_wmi_sta_keepalive(struct ath10k *ar, | ||
1048 | const struct wmi_sta_keepalive_arg *arg) | ||
1049 | { | ||
1050 | struct sk_buff *skb; | ||
1051 | u32 cmd_id; | ||
1052 | |||
1053 | if (!ar->wmi.ops->gen_sta_keepalive) | ||
1054 | return -EOPNOTSUPP; | ||
1055 | |||
1056 | skb = ar->wmi.ops->gen_sta_keepalive(ar, arg); | ||
1057 | if (IS_ERR(skb)) | ||
1058 | return PTR_ERR(skb); | ||
1059 | |||
1060 | cmd_id = ar->wmi.cmd->sta_keepalive_cmd; | ||
1061 | return ath10k_wmi_cmd_send(ar, skb, cmd_id); | ||
1062 | } | ||
1063 | |||
860 | #endif | 1064 | #endif |
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 4c050cec3966..71614ba1b145 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c | |||
@@ -58,6 +58,10 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { | |||
58 | = { .min_len = sizeof(struct wlan_host_mem_req) }, | 58 | = { .min_len = sizeof(struct wlan_host_mem_req) }, |
59 | [WMI_TLV_TAG_STRUCT_READY_EVENT] | 59 | [WMI_TLV_TAG_STRUCT_READY_EVENT] |
60 | = { .min_len = sizeof(struct wmi_tlv_rdy_ev) }, | 60 | = { .min_len = sizeof(struct wmi_tlv_rdy_ev) }, |
61 | [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT] | ||
62 | = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) }, | ||
63 | [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT] | ||
64 | = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) }, | ||
61 | }; | 65 | }; |
62 | 66 | ||
63 | static int | 67 | static int |
@@ -156,6 +160,142 @@ static u16 ath10k_wmi_tlv_len(const void *ptr) | |||
156 | return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len); | 160 | return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len); |
157 | } | 161 | } |
158 | 162 | ||
163 | /**************/ | ||
164 | /* TLV events */ | ||
165 | /**************/ | ||
166 | static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar, | ||
167 | struct sk_buff *skb) | ||
168 | { | ||
169 | const void **tb; | ||
170 | const struct wmi_tlv_bcn_tx_status_ev *ev; | ||
171 | u32 vdev_id, tx_status; | ||
172 | int ret; | ||
173 | |||
174 | tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); | ||
175 | if (IS_ERR(tb)) { | ||
176 | ret = PTR_ERR(tb); | ||
177 | ath10k_warn(ar, "failed to parse tlv: %d\n", ret); | ||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]; | ||
182 | if (!ev) { | ||
183 | kfree(tb); | ||
184 | return -EPROTO; | ||
185 | } | ||
186 | |||
187 | tx_status = __le32_to_cpu(ev->tx_status); | ||
188 | vdev_id = __le32_to_cpu(ev->vdev_id); | ||
189 | |||
190 | switch (tx_status) { | ||
191 | case WMI_TLV_BCN_TX_STATUS_OK: | ||
192 | break; | ||
193 | case WMI_TLV_BCN_TX_STATUS_XRETRY: | ||
194 | case WMI_TLV_BCN_TX_STATUS_DROP: | ||
195 | case WMI_TLV_BCN_TX_STATUS_FILTERED: | ||
196 | /* FIXME: It's probably worth telling mac80211 to stop the | ||
197 | * interface as it is crippled. | ||
198 | */ | ||
199 | ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d", | ||
200 | vdev_id, tx_status); | ||
201 | break; | ||
202 | } | ||
203 | |||
204 | kfree(tb); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar, | ||
209 | struct sk_buff *skb) | ||
210 | { | ||
211 | const void **tb; | ||
212 | const struct wmi_tlv_diag_data_ev *ev; | ||
213 | const struct wmi_tlv_diag_item *item; | ||
214 | const void *data; | ||
215 | int ret, num_items, len; | ||
216 | |||
217 | tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); | ||
218 | if (IS_ERR(tb)) { | ||
219 | ret = PTR_ERR(tb); | ||
220 | ath10k_warn(ar, "failed to parse tlv: %d\n", ret); | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]; | ||
225 | data = tb[WMI_TLV_TAG_ARRAY_BYTE]; | ||
226 | if (!ev || !data) { | ||
227 | kfree(tb); | ||
228 | return -EPROTO; | ||
229 | } | ||
230 | |||
231 | num_items = __le32_to_cpu(ev->num_items); | ||
232 | len = ath10k_wmi_tlv_len(data); | ||
233 | |||
234 | while (num_items--) { | ||
235 | if (len == 0) | ||
236 | break; | ||
237 | if (len < sizeof(*item)) { | ||
238 | ath10k_warn(ar, "failed to parse diag data: can't fit item header\n"); | ||
239 | break; | ||
240 | } | ||
241 | |||
242 | item = data; | ||
243 | |||
244 | if (len < sizeof(*item) + __le16_to_cpu(item->len)) { | ||
245 | ath10k_warn(ar, "failed to parse diag data: item is too long\n"); | ||
246 | break; | ||
247 | } | ||
248 | |||
249 | trace_ath10k_wmi_diag_container(ar, | ||
250 | item->type, | ||
251 | __le32_to_cpu(item->timestamp), | ||
252 | __le32_to_cpu(item->code), | ||
253 | __le16_to_cpu(item->len), | ||
254 | item->payload); | ||
255 | |||
256 | len -= sizeof(*item); | ||
257 | len -= roundup(__le16_to_cpu(item->len), 4); | ||
258 | |||
259 | data += sizeof(*item); | ||
260 | data += roundup(__le16_to_cpu(item->len), 4); | ||
261 | } | ||
262 | |||
263 | if (num_items != -1 || len != 0) | ||
264 | ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n", | ||
265 | num_items, len); | ||
266 | |||
267 | kfree(tb); | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | static int ath10k_wmi_tlv_event_diag(struct ath10k *ar, | ||
272 | struct sk_buff *skb) | ||
273 | { | ||
274 | const void **tb; | ||
275 | const void *data; | ||
276 | int ret, len; | ||
277 | |||
278 | tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); | ||
279 | if (IS_ERR(tb)) { | ||
280 | ret = PTR_ERR(tb); | ||
281 | ath10k_warn(ar, "failed to parse tlv: %d\n", ret); | ||
282 | return ret; | ||
283 | } | ||
284 | |||
285 | data = tb[WMI_TLV_TAG_ARRAY_BYTE]; | ||
286 | if (!data) { | ||
287 | kfree(tb); | ||
288 | return -EPROTO; | ||
289 | } | ||
290 | len = ath10k_wmi_tlv_len(data); | ||
291 | |||
292 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len); | ||
293 | trace_ath10k_wmi_diag(ar, data, len); | ||
294 | |||
295 | kfree(tb); | ||
296 | return 0; | ||
297 | } | ||
298 | |||
159 | /***********/ | 299 | /***********/ |
160 | /* TLV ops */ | 300 | /* TLV ops */ |
161 | /***********/ | 301 | /***********/ |
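ath10k_wmi_tlv_event_diag_data() walks a packed array of variable-length items, each padded out to a 4-byte boundary. The bookkeeping for one iteration, for an item carrying item->len = 6 payload bytes (sizeof(*item) is the fixed header implied by the accesses above):

	/*   consumed  = sizeof(*item) + roundup(6, 4)
	 *             = sizeof(*item) + 8
	 *
	 *   len  -= consumed;	remaining byte budget
	 *   data += consumed;	next item header
	 *
	 * a clean parse ends with num_items == -1 (post-decrement loop)
	 * and len == 0 at the same time; any other combination triggers
	 * the "failed to parse diag data event" warning.
	 */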
@@ -268,6 +408,15 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) | |||
268 | case WMI_TLV_READY_EVENTID: | 408 | case WMI_TLV_READY_EVENTID: |
269 | ath10k_wmi_event_ready(ar, skb); | 409 | ath10k_wmi_event_ready(ar, skb); |
270 | break; | 410 | break; |
411 | case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID: | ||
412 | ath10k_wmi_tlv_event_bcn_tx_status(ar, skb); | ||
413 | break; | ||
414 | case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID: | ||
415 | ath10k_wmi_tlv_event_diag_data(ar, skb); | ||
416 | break; | ||
417 | case WMI_TLV_DIAG_EVENTID: | ||
418 | ath10k_wmi_tlv_event_diag(ar, skb); | ||
419 | break; | ||
271 | default: | 420 | default: |
272 | ath10k_warn(ar, "Unknown eventid: %d\n", id); | 421 | ath10k_warn(ar, "Unknown eventid: %d\n", id); |
273 | break; | 422 | break; |
@@ -903,8 +1052,15 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) | |||
903 | 1052 | ||
904 | cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); | 1053 | cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); |
905 | cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); | 1054 | cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS); |
906 | cfg->num_offload_peers = __cpu_to_le32(0); | 1055 | |
907 | cfg->num_offload_reorder_bufs = __cpu_to_le32(0); | 1056 | if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) { |
1057 | cfg->num_offload_peers = __cpu_to_le32(3); | ||
1058 | cfg->num_offload_reorder_bufs = __cpu_to_le32(3); | ||
1059 | } else { | ||
1060 | cfg->num_offload_peers = __cpu_to_le32(0); | ||
1061 | cfg->num_offload_reorder_bufs = __cpu_to_le32(0); | ||
1062 | } | ||
1063 | |||
908 | cfg->num_peer_keys = __cpu_to_le32(2); | 1064 | cfg->num_peer_keys = __cpu_to_le32(2); |
909 | cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); | 1065 | cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS); |
910 | cfg->ast_skid_limit = __cpu_to_le32(0x10); | 1066 | cfg->ast_skid_limit = __cpu_to_le32(0x10); |
@@ -1356,6 +1512,173 @@ ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar, | |||
1356 | return skb; | 1512 | return skb; |
1357 | } | 1513 | } |
1358 | 1514 | ||
1515 | static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr, | ||
1516 | const struct wmi_sta_uapsd_auto_trig_arg *arg) | ||
1517 | { | ||
1518 | struct wmi_sta_uapsd_auto_trig_param *ac; | ||
1519 | struct wmi_tlv *tlv; | ||
1520 | |||
1521 | tlv = ptr; | ||
1522 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM); | ||
1523 | tlv->len = __cpu_to_le16(sizeof(*ac)); | ||
1524 | ac = (void *)tlv->value; | ||
1525 | |||
1526 | ac->wmm_ac = __cpu_to_le32(arg->wmm_ac); | ||
1527 | ac->user_priority = __cpu_to_le32(arg->user_priority); | ||
1528 | ac->service_interval = __cpu_to_le32(arg->service_interval); | ||
1529 | ac->suspend_interval = __cpu_to_le32(arg->suspend_interval); | ||
1530 | ac->delay_interval = __cpu_to_le32(arg->delay_interval); | ||
1531 | |||
1532 | ath10k_dbg(ar, ATH10K_DBG_WMI, | ||
1533 | "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n", | ||
1534 | ac->wmm_ac, ac->user_priority, ac->service_interval, | ||
1535 | ac->suspend_interval, ac->delay_interval); | ||
1536 | |||
1537 | return ptr + sizeof(*tlv) + sizeof(*ac); | ||
1538 | } | ||
1539 | |||
1540 | static struct sk_buff * | ||
1541 | ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, | ||
1542 | const u8 peer_addr[ETH_ALEN], | ||
1543 | const struct wmi_sta_uapsd_auto_trig_arg *args, | ||
1544 | u32 num_ac) | ||
1545 | { | ||
1546 | struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd; | ||
1547 | struct wmi_sta_uapsd_auto_trig_param *ac; | ||
1548 | struct wmi_tlv *tlv; | ||
1549 | struct sk_buff *skb; | ||
1550 | size_t len; | ||
1551 | size_t ac_tlv_len; | ||
1552 | void *ptr; | ||
1553 | int i; | ||
1554 | |||
1555 | ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac)); | ||
1556 | len = sizeof(*tlv) + sizeof(*cmd) + | ||
1557 | sizeof(*tlv) + ac_tlv_len; | ||
1558 | skb = ath10k_wmi_alloc_skb(ar, len); | ||
1559 | if (!skb) | ||
1560 | return ERR_PTR(-ENOMEM); | ||
1561 | |||
1562 | ptr = (void *)skb->data; | ||
1563 | tlv = ptr; | ||
1564 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD); | ||
1565 | tlv->len = __cpu_to_le16(sizeof(*cmd)); | ||
1566 | cmd = (void *)tlv->value; | ||
1567 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1568 | cmd->num_ac = __cpu_to_le32(num_ac); | ||
1569 | ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); | ||
1570 | |||
1571 | ptr += sizeof(*tlv); | ||
1572 | ptr += sizeof(*cmd); | ||
1573 | |||
1574 | tlv = ptr; | ||
1575 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); | ||
1576 | tlv->len = __cpu_to_le16(ac_tlv_len); | ||
1577 | ac = (void *)tlv->value; | ||
1578 | |||
1579 | ptr += sizeof(*tlv); | ||
1580 | for (i = 0; i < num_ac; i++) | ||
1581 | ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]); | ||
1582 | |||
1583 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n"); | ||
1584 | return skb; | ||
1585 | } | ||
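[Editor's note, illustrative only: every ath10k_wmi_tlv_op_gen_*() helper added in this patch builds its command the same way, appending a 4-byte wmi_tlv header (tag + len) followed by the payload and advancing a write cursor. A hypothetical helper capturing that pattern (wmi_tlv_append does not exist in the driver):

	static void *wmi_tlv_append(void *ptr, u16 tag, const void *payload,
				    u16 payload_len)
	{
		struct wmi_tlv *tlv = ptr;

		tlv->tag = __cpu_to_le16(tag);
		tlv->len = __cpu_to_le16(payload_len);
		if (payload)
			memcpy(tlv->value, payload, payload_len);

		/* the caller keeps writing right after the payload */
		return ptr + sizeof(*tlv) + payload_len;
	}

For container TLVs such as the ARRAY_STRUCT above, only the 4-byte header is emitted up front and the nested per-AC TLVs are then written into its value area, which is why the code advances ptr by sizeof(*tlv) alone after the array header.]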
1586 | |||
1587 | static void *ath10k_wmi_tlv_put_wmm(void *ptr, | ||
1588 | const struct wmi_wmm_params_arg *arg) | ||
1589 | { | ||
1590 | struct wmi_wmm_params *wmm; | ||
1591 | struct wmi_tlv *tlv; | ||
1592 | |||
1593 | tlv = ptr; | ||
1594 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS); | ||
1595 | tlv->len = __cpu_to_le16(sizeof(*wmm)); | ||
1596 | wmm = (void *)tlv->value; | ||
1597 | ath10k_wmi_set_wmm_param(wmm, arg); | ||
1598 | |||
1599 | return ptr + sizeof(*tlv) + sizeof(*wmm); | ||
1600 | } | ||
1601 | |||
1602 | static struct sk_buff * | ||
1603 | ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, | ||
1604 | const struct wmi_wmm_params_all_arg *arg) | ||
1605 | { | ||
1606 | struct wmi_tlv_vdev_set_wmm_cmd *cmd; | ||
1607 | struct wmi_wmm_params *wmm; | ||
1608 | struct wmi_tlv *tlv; | ||
1609 | struct sk_buff *skb; | ||
1610 | size_t len; | ||
1611 | void *ptr; | ||
1612 | |||
1613 | len = (sizeof(*tlv) + sizeof(*cmd)) + | ||
1614 | (4 * (sizeof(*tlv) + sizeof(*wmm))); | ||
1615 | skb = ath10k_wmi_alloc_skb(ar, len); | ||
1616 | if (!skb) | ||
1617 | return ERR_PTR(-ENOMEM); | ||
1618 | |||
1619 | ptr = (void *)skb->data; | ||
1620 | tlv = ptr; | ||
1621 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD); | ||
1622 | tlv->len = __cpu_to_le16(sizeof(*cmd)); | ||
1623 | cmd = (void *)tlv->value; | ||
1624 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1625 | |||
1626 | ptr += sizeof(*tlv); | ||
1627 | ptr += sizeof(*cmd); | ||
1628 | |||
1629 | ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be); | ||
1630 | ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk); | ||
1631 | ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi); | ||
1632 | ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo); | ||
1633 | |||
1634 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n"); | ||
1635 | return skb; | ||
1636 | } | ||
1637 | |||
1638 | static struct sk_buff * | ||
1639 | ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar, | ||
1640 | const struct wmi_sta_keepalive_arg *arg) | ||
1641 | { | ||
1642 | struct wmi_tlv_sta_keepalive_cmd *cmd; | ||
1643 | struct wmi_sta_keepalive_arp_resp *arp; | ||
1644 | struct sk_buff *skb; | ||
1645 | struct wmi_tlv *tlv; | ||
1646 | void *ptr; | ||
1647 | size_t len; | ||
1648 | |||
1649 | len = sizeof(*tlv) + sizeof(*cmd) + | ||
1650 | sizeof(*tlv) + sizeof(*arp); | ||
1651 | skb = ath10k_wmi_alloc_skb(ar, len); | ||
1652 | if (!skb) | ||
1653 | return ERR_PTR(-ENOMEM); | ||
1654 | |||
1655 | ptr = (void *)skb->data; | ||
1656 | tlv = ptr; | ||
1657 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD); | ||
1658 | tlv->len = __cpu_to_le16(sizeof(*cmd)); | ||
1659 | cmd = (void *)tlv->value; | ||
1660 | cmd->vdev_id = __cpu_to_le32(arg->vdev_id); | ||
1661 | cmd->enabled = __cpu_to_le32(arg->enabled); | ||
1662 | cmd->method = __cpu_to_le32(arg->method); | ||
1663 | cmd->interval = __cpu_to_le32(arg->interval); | ||
1664 | |||
1665 | ptr += sizeof(*tlv); | ||
1666 | ptr += sizeof(*cmd); | ||
1667 | |||
1668 | tlv = ptr; | ||
1669 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE); | ||
1670 | tlv->len = __cpu_to_le16(sizeof(*arp)); | ||
1671 | arp = (void *)tlv->value; | ||
1672 | |||
1673 | arp->src_ip4_addr = arg->src_ip4_addr; | ||
1674 | arp->dest_ip4_addr = arg->dest_ip4_addr; | ||
1675 | ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); | ||
1676 | |||
1677 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n", | ||
1678 | arg->vdev_id, arg->enabled, arg->method, arg->interval); | ||
1679 | return skb; | ||
1680 | } | ||
1681 | |||
1359 | static struct sk_buff * | 1682 | static struct sk_buff * |
1360 | ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, | 1683 | ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, |
1361 | const u8 peer_addr[ETH_ALEN]) | 1684 | const u8 peer_addr[ETH_ALEN]) |
@@ -1678,13 +2001,15 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar, | |||
1678 | } | 2001 | } |
1679 | 2002 | ||
1680 | static struct sk_buff * | 2003 | static struct sk_buff * |
1681 | ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k_vif *arvif) | 2004 | ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, |
2005 | const void *bcn, size_t bcn_len, | ||
2006 | u32 bcn_paddr, bool dtim_zero, | ||
2007 | bool deliver_cab) | ||
2008 | |||
1682 | { | 2009 | { |
1683 | struct ath10k *ar = arvif->ar; | ||
1684 | struct wmi_bcn_tx_ref_cmd *cmd; | 2010 | struct wmi_bcn_tx_ref_cmd *cmd; |
1685 | struct wmi_tlv *tlv; | 2011 | struct wmi_tlv *tlv; |
1686 | struct sk_buff *skb; | 2012 | struct sk_buff *skb; |
1687 | struct sk_buff *beacon = arvif->beacon; | ||
1688 | struct ieee80211_hdr *hdr; | 2013 | struct ieee80211_hdr *hdr; |
1689 | u16 fc; | 2014 | u16 fc; |
1690 | 2015 | ||
@@ -1692,48 +2017,33 @@ ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k_vif *arvif) | |||
1692 | if (!skb) | 2017 | if (!skb) |
1693 | return ERR_PTR(-ENOMEM); | 2018 | return ERR_PTR(-ENOMEM); |
1694 | 2019 | ||
1695 | hdr = (struct ieee80211_hdr *)beacon->data; | 2020 | hdr = (struct ieee80211_hdr *)bcn; |
1696 | fc = le16_to_cpu(hdr->frame_control); | 2021 | fc = le16_to_cpu(hdr->frame_control); |
1697 | 2022 | ||
1698 | tlv = (void *)skb->data; | 2023 | tlv = (void *)skb->data; |
1699 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD); | 2024 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD); |
1700 | tlv->len = __cpu_to_le16(sizeof(*cmd)); | 2025 | tlv->len = __cpu_to_le16(sizeof(*cmd)); |
1701 | cmd = (void *)tlv->value; | 2026 | cmd = (void *)tlv->value; |
1702 | cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); | 2027 | cmd->vdev_id = __cpu_to_le32(vdev_id); |
1703 | cmd->data_len = __cpu_to_le32(beacon->len); | 2028 | cmd->data_len = __cpu_to_le32(bcn_len); |
1704 | cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr); | 2029 | cmd->data_ptr = __cpu_to_le32(bcn_paddr); |
1705 | cmd->msdu_id = 0; | 2030 | cmd->msdu_id = 0; |
1706 | cmd->frame_control = __cpu_to_le32(fc); | 2031 | cmd->frame_control = __cpu_to_le32(fc); |
1707 | cmd->flags = 0; | 2032 | cmd->flags = 0; |
1708 | 2033 | ||
1709 | if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) | 2034 | if (dtim_zero) |
1710 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); | 2035 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); |
1711 | 2036 | ||
1712 | if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab) | 2037 | if (deliver_cab) |
1713 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); | 2038 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); |
1714 | 2039 | ||
1715 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n"); | 2040 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n"); |
1716 | return skb; | 2041 | return skb; |
1717 | } | 2042 | } |
1718 | 2043 | ||
1719 | static void *ath10k_wmi_tlv_put_wmm(void *ptr, | ||
1720 | const struct wmi_wmm_params_arg *arg) | ||
1721 | { | ||
1722 | struct wmi_wmm_params *wmm; | ||
1723 | struct wmi_tlv *tlv; | ||
1724 | |||
1725 | tlv = ptr; | ||
1726 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS); | ||
1727 | tlv->len = __cpu_to_le16(sizeof(*wmm)); | ||
1728 | wmm = (void *)tlv->value; | ||
1729 | ath10k_wmi_pdev_set_wmm_param(wmm, arg); | ||
1730 | |||
1731 | return ptr + sizeof(*tlv) + sizeof(*wmm); | ||
1732 | } | ||
1733 | |||
1734 | static struct sk_buff * | 2044 | static struct sk_buff * |
1735 | ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, | 2045 | ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, |
1736 | const struct wmi_pdev_set_wmm_params_arg *arg) | 2046 | const struct wmi_wmm_params_all_arg *arg) |
1737 | { | 2047 | { |
1738 | struct wmi_tlv_pdev_set_wmm_cmd *cmd; | 2048 | struct wmi_tlv_pdev_set_wmm_cmd *cmd; |
1739 | struct wmi_wmm_params *wmm; | 2049 | struct wmi_wmm_params *wmm; |
@@ -1816,8 +2126,8 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, | |||
1816 | } | 2126 | } |
1817 | 2127 | ||
1818 | static struct sk_buff * | 2128 | static struct sk_buff * |
1819 | ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable) | 2129 | ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, |
1820 | { | 2130 | u32 log_level) { |
1821 | struct wmi_tlv_dbglog_cmd *cmd; | 2131 | struct wmi_tlv_dbglog_cmd *cmd; |
1822 | struct wmi_tlv *tlv; | 2132 | struct wmi_tlv *tlv; |
1823 | struct sk_buff *skb; | 2133 | struct sk_buff *skb; |
@@ -1922,6 +2232,159 @@ ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar) | |||
1922 | return skb; | 2232 | return skb; |
1923 | } | 2233 | } |
1924 | 2234 | ||
2235 | static struct sk_buff * | ||
2236 | ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id, | ||
2237 | u32 tim_ie_offset, struct sk_buff *bcn, | ||
2238 | u32 prb_caps, u32 prb_erp, void *prb_ies, | ||
2239 | size_t prb_ies_len) | ||
2240 | { | ||
2241 | struct wmi_tlv_bcn_tmpl_cmd *cmd; | ||
2242 | struct wmi_tlv_bcn_prb_info *info; | ||
2243 | struct wmi_tlv *tlv; | ||
2244 | struct sk_buff *skb; | ||
2245 | void *ptr; | ||
2246 | size_t len; | ||
2247 | |||
2248 | if (WARN_ON(prb_ies_len > 0 && !prb_ies)) | ||
2249 | return ERR_PTR(-EINVAL); | ||
2250 | |||
2251 | len = sizeof(*tlv) + sizeof(*cmd) + | ||
2252 | sizeof(*tlv) + sizeof(*info) + prb_ies_len + | ||
2253 | sizeof(*tlv) + roundup(bcn->len, 4); | ||
2254 | skb = ath10k_wmi_alloc_skb(ar, len); | ||
2255 | if (!skb) | ||
2256 | return ERR_PTR(-ENOMEM); | ||
2257 | |||
2258 | ptr = (void *)skb->data; | ||
2259 | tlv = ptr; | ||
2260 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD); | ||
2261 | tlv->len = __cpu_to_le16(sizeof(*cmd)); | ||
2262 | cmd = (void *)tlv->value; | ||
2263 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
2264 | cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset); | ||
2265 | cmd->buf_len = __cpu_to_le32(bcn->len); | ||
2266 | |||
2267 | ptr += sizeof(*tlv); | ||
2268 | ptr += sizeof(*cmd); | ||
2269 | |||
2270 | /* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary, but | ||
2271 | * then it is impossible to pass the original ie len. | ||
2272 | * This chunk is not used yet, so if setting the probe resp template yields | ||
2273 | * problems with beaconing or crashes the firmware, look here. | ||
2274 | */ | ||
2275 | tlv = ptr; | ||
2276 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO); | ||
2277 | tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len); | ||
2278 | info = (void *)tlv->value; | ||
2279 | info->caps = __cpu_to_le32(prb_caps); | ||
2280 | info->erp = __cpu_to_le32(prb_erp); | ||
2281 | memcpy(info->ies, prb_ies, prb_ies_len); | ||
2282 | |||
2283 | ptr += sizeof(*tlv); | ||
2284 | ptr += sizeof(*info); | ||
2285 | ptr += prb_ies_len; | ||
2286 | |||
2287 | tlv = ptr; | ||
2288 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); | ||
2289 | tlv->len = __cpu_to_le16(roundup(bcn->len, 4)); | ||
2290 | memcpy(tlv->value, bcn->data, bcn->len); | ||
2291 | |||
2292 | /* FIXME: Adjust TSF? */ | ||
2293 | |||
2294 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n", | ||
2295 | vdev_id); | ||
2296 | return skb; | ||
2297 | } | ||
2298 | |||
2299 | static struct sk_buff * | ||
2300 | ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id, | ||
2301 | struct sk_buff *prb) | ||
2302 | { | ||
2303 | struct wmi_tlv_prb_tmpl_cmd *cmd; | ||
2304 | struct wmi_tlv_bcn_prb_info *info; | ||
2305 | struct wmi_tlv *tlv; | ||
2306 | struct sk_buff *skb; | ||
2307 | void *ptr; | ||
2308 | size_t len; | ||
2309 | |||
2310 | len = sizeof(*tlv) + sizeof(*cmd) + | ||
2311 | sizeof(*tlv) + sizeof(*info) + | ||
2312 | sizeof(*tlv) + roundup(prb->len, 4); | ||
2313 | skb = ath10k_wmi_alloc_skb(ar, len); | ||
2314 | if (!skb) | ||
2315 | return ERR_PTR(-ENOMEM); | ||
2316 | |||
2317 | ptr = (void *)skb->data; | ||
2318 | tlv = ptr; | ||
2319 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD); | ||
2320 | tlv->len = __cpu_to_le16(sizeof(*cmd)); | ||
2321 | cmd = (void *)tlv->value; | ||
2322 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
2323 | cmd->buf_len = __cpu_to_le32(prb->len); | ||
2324 | |||
2325 | ptr += sizeof(*tlv); | ||
2326 | ptr += sizeof(*cmd); | ||
2327 | |||
2328 | tlv = ptr; | ||
2329 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO); | ||
2330 | tlv->len = __cpu_to_le16(sizeof(*info)); | ||
2331 | info = (void *)tlv->value; | ||
2332 | info->caps = 0; | ||
2333 | info->erp = 0; | ||
2334 | |||
2335 | ptr += sizeof(*tlv); | ||
2336 | ptr += sizeof(*info); | ||
2337 | |||
2338 | tlv = ptr; | ||
2339 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); | ||
2340 | tlv->len = __cpu_to_le16(roundup(prb->len, 4)); | ||
2341 | memcpy(tlv->value, prb->data, prb->len); | ||
2342 | |||
2343 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n", | ||
2344 | vdev_id); | ||
2345 | return skb; | ||
2346 | } | ||
2347 | |||
2348 | static struct sk_buff * | ||
2349 | ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, | ||
2350 | const u8 *p2p_ie) | ||
2351 | { | ||
2352 | struct wmi_tlv_p2p_go_bcn_ie *cmd; | ||
2353 | struct wmi_tlv *tlv; | ||
2354 | struct sk_buff *skb; | ||
2355 | void *ptr; | ||
2356 | size_t len; | ||
2357 | |||
2358 | len = sizeof(*tlv) + sizeof(*cmd) + | ||
2359 | sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4); | ||
2360 | skb = ath10k_wmi_alloc_skb(ar, len); | ||
2361 | if (!skb) | ||
2362 | return ERR_PTR(-ENOMEM); | ||
2363 | |||
2364 | ptr = (void *)skb->data; | ||
2365 | tlv = ptr; | ||
2366 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE); | ||
2367 | tlv->len = __cpu_to_le16(sizeof(*cmd)); | ||
2368 | cmd = (void *)tlv->value; | ||
2369 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
2370 | cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2); | ||
2371 | |||
2372 | ptr += sizeof(*tlv); | ||
2373 | ptr += sizeof(*cmd); | ||
2374 | |||
2375 | tlv = ptr; | ||
2376 | tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); | ||
2377 | tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4)); | ||
2378 | memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2); | ||
2379 | |||
2380 | ptr += sizeof(*tlv); | ||
2381 | ptr += roundup(p2p_ie[1] + 2, 4); | ||
2382 | |||
2383 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n", | ||
2384 | vdev_id); | ||
2385 | return skb; | ||
2386 | } | ||
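[Editor's note, illustrative only: p2p_ie is expected to point at a raw IEEE 802.11 information element, so p2p_ie[1] + 2 is the full element size including the 2-byte id/length header. The ARRAY_BYTE TLV carrying it is padded to a 4-byte boundary: for example, a P2P IE with length byte 7 occupies 2 + 7 = 9 bytes, is copied verbatim, and the TLV advertises roundup(9, 4) = 12 bytes while cmd->ie_len still carries the unpadded 9. The same rounding is used for the beacon and probe-response template payloads above.]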
2387 | |||
1925 | /****************/ | 2388 | /****************/ |
1926 | /* TLV mappings */ | 2389 | /* TLV mappings */ |
1927 | /****************/ | 2390 | /****************/ |
@@ -2045,6 +2508,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { | |||
2045 | .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID, | 2508 | .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID, |
2046 | .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID, | 2509 | .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID, |
2047 | .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED, | 2510 | .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED, |
2511 | .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID, | ||
2048 | }; | 2512 | }; |
2049 | 2513 | ||
2050 | static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { | 2514 | static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { |
@@ -2188,6 +2652,7 @@ static const struct wmi_ops wmi_tlv_ops = { | |||
2188 | .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down, | 2652 | .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down, |
2189 | .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param, | 2653 | .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param, |
2190 | .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key, | 2654 | .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key, |
2655 | .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf, | ||
2191 | .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create, | 2656 | .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create, |
2192 | .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete, | 2657 | .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete, |
2193 | .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush, | 2658 | .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush, |
@@ -2207,6 +2672,15 @@ static const struct wmi_ops wmi_tlv_ops = { | |||
2207 | .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, | 2672 | .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, |
2208 | /* .gen_pdev_set_quiet_mode not implemented */ | 2673 | /* .gen_pdev_set_quiet_mode not implemented */ |
2209 | /* .gen_pdev_get_temperature not implemented */ | 2674 | /* .gen_pdev_get_temperature not implemented */ |
2675 | /* .gen_addba_clear_resp not implemented */ | ||
2676 | /* .gen_addba_send not implemented */ | ||
2677 | /* .gen_addba_set_resp not implemented */ | ||
2678 | /* .gen_delba_send not implemented */ | ||
2679 | .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl, | ||
2680 | .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl, | ||
2681 | .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie, | ||
2682 | .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd, | ||
2683 | .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive, | ||
2210 | }; | 2684 | }; |
2211 | 2685 | ||
2212 | /************/ | 2686 | /************/ |
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index 54ffa120cd60..de68fe76eae6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h | |||
@@ -1302,6 +1302,10 @@ struct wmi_tlv_pdev_set_wmm_cmd { | |||
1302 | __le32 dg_type; /* no idea.. */ | 1302 | __le32 dg_type; /* no idea.. */ |
1303 | } __packed; | 1303 | } __packed; |
1304 | 1304 | ||
1305 | struct wmi_tlv_vdev_set_wmm_cmd { | ||
1306 | __le32 vdev_id; | ||
1307 | } __packed; | ||
1308 | |||
1305 | struct wmi_tlv_phyerr_ev { | 1309 | struct wmi_tlv_phyerr_ev { |
1306 | __le32 num_phyerrs; | 1310 | __le32 num_phyerrs; |
1307 | __le32 tsf_l32; | 1311 | __le32 tsf_l32; |
@@ -1375,6 +1379,66 @@ struct wmi_tlv_pktlog_disable { | |||
1375 | __le32 reserved; | 1379 | __le32 reserved; |
1376 | } __packed; | 1380 | } __packed; |
1377 | 1381 | ||
1382 | enum wmi_tlv_bcn_tx_status { | ||
1383 | WMI_TLV_BCN_TX_STATUS_OK, | ||
1384 | WMI_TLV_BCN_TX_STATUS_XRETRY, | ||
1385 | WMI_TLV_BCN_TX_STATUS_DROP, | ||
1386 | WMI_TLV_BCN_TX_STATUS_FILTERED, | ||
1387 | }; | ||
1388 | |||
1389 | struct wmi_tlv_bcn_tx_status_ev { | ||
1390 | __le32 vdev_id; | ||
1391 | __le32 tx_status; | ||
1392 | } __packed; | ||
1393 | |||
1394 | struct wmi_tlv_bcn_prb_info { | ||
1395 | __le32 caps; | ||
1396 | __le32 erp; | ||
1397 | u8 ies[0]; | ||
1398 | } __packed; | ||
1399 | |||
1400 | struct wmi_tlv_bcn_tmpl_cmd { | ||
1401 | __le32 vdev_id; | ||
1402 | __le32 tim_ie_offset; | ||
1403 | __le32 buf_len; | ||
1404 | } __packed; | ||
1405 | |||
1406 | struct wmi_tlv_prb_tmpl_cmd { | ||
1407 | __le32 vdev_id; | ||
1408 | __le32 buf_len; | ||
1409 | } __packed; | ||
1410 | |||
1411 | struct wmi_tlv_p2p_go_bcn_ie { | ||
1412 | __le32 vdev_id; | ||
1413 | __le32 ie_len; | ||
1414 | } __packed; | ||
1415 | |||
1416 | enum wmi_tlv_diag_item_type { | ||
1417 | WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT, | ||
1418 | WMI_TLV_DIAG_ITEM_TYPE_FW_LOG, | ||
1419 | WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG, | ||
1420 | }; | ||
1421 | |||
1422 | struct wmi_tlv_diag_item { | ||
1423 | u8 type; | ||
1424 | u8 reserved; | ||
1425 | __le16 len; | ||
1426 | __le32 timestamp; | ||
1427 | __le32 code; | ||
1428 | u8 payload[0]; | ||
1429 | } __packed; | ||
1430 | |||
1431 | struct wmi_tlv_diag_data_ev { | ||
1432 | __le32 num_items; | ||
1433 | } __packed; | ||
1434 | |||
1435 | struct wmi_tlv_sta_keepalive_cmd { | ||
1436 | __le32 vdev_id; | ||
1437 | __le32 enabled; | ||
1438 | __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */ | ||
1439 | __le32 interval; /* in seconds */ | ||
1440 | } __packed; | ||
1441 | |||
1378 | void ath10k_wmi_tlv_attach(struct ath10k *ar); | 1442 | void ath10k_wmi_tlv_attach(struct ath10k *ar); |
1379 | 1443 | ||
1380 | #endif | 1444 | #endif |
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 23eca8bc85d1..aeea1c793943 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c | |||
@@ -956,23 +956,45 @@ err_pull: | |||
956 | 956 | ||
957 | static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) | 957 | static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) |
958 | { | 958 | { |
959 | struct ath10k *ar = arvif->ar; | ||
960 | struct ath10k_skb_cb *cb; | ||
961 | struct sk_buff *bcn; | ||
959 | int ret; | 962 | int ret; |
960 | 963 | ||
961 | lockdep_assert_held(&arvif->ar->data_lock); | 964 | spin_lock_bh(&ar->data_lock); |
962 | 965 | ||
963 | if (arvif->beacon == NULL) | 966 | bcn = arvif->beacon; |
964 | return; | ||
965 | 967 | ||
966 | if (arvif->beacon_sent) | 968 | if (!bcn) |
967 | return; | 969 | goto unlock; |
968 | 970 | ||
969 | ret = ath10k_wmi_beacon_send_ref_nowait(arvif); | 971 | cb = ATH10K_SKB_CB(bcn); |
970 | if (ret) | 972 | |
971 | return; | 973 | switch (arvif->beacon_state) { |
974 | case ATH10K_BEACON_SENDING: | ||
975 | case ATH10K_BEACON_SENT: | ||
976 | break; | ||
977 | case ATH10K_BEACON_SCHEDULED: | ||
978 | arvif->beacon_state = ATH10K_BEACON_SENDING; | ||
979 | spin_unlock_bh(&ar->data_lock); | ||
980 | |||
981 | ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar, | ||
982 | arvif->vdev_id, | ||
983 | bcn->data, bcn->len, | ||
984 | cb->paddr, | ||
985 | cb->bcn.dtim_zero, | ||
986 | cb->bcn.deliver_cab); | ||
987 | |||
988 | spin_lock_bh(&ar->data_lock); | ||
989 | |||
990 | if (ret == 0) | ||
991 | arvif->beacon_state = ATH10K_BEACON_SENT; | ||
992 | else | ||
993 | arvif->beacon_state = ATH10K_BEACON_SCHEDULED; | ||
994 | } | ||
972 | 995 | ||
973 | /* We need to retain the arvif->beacon reference for DMA unmapping and | 996 | unlock: |
974 | * freeing the skbuff later. */ | 997 | spin_unlock_bh(&ar->data_lock); |
975 | arvif->beacon_sent = true; | ||
976 | } | 998 | } |
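[Editor's note, illustrative only: the beacon_sent flag is replaced by a small state machine so the WMI send-ref command can be issued without holding data_lock. Assuming the three ATH10K_BEACON_* values referenced above (the enum itself is added to core.h elsewhere in this patch), the transitions are roughly:

	/*
	 * SCHEDULED -- lock dropped, send-ref WMI cmd issued --> SENDING
	 * SENDING   -- command accepted (ret == 0)           --> SENT
	 * SENDING   -- command failed, retried later         --> SCHEDULED
	 *
	 * While a beacon is SENDING it must not be freed or replaced,
	 * which is why the SWBA handler further down drops the newly
	 * arrived beacon in that case instead.
	 */
]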
977 | 999 | ||
978 | static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, | 1000 | static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, |
@@ -985,12 +1007,10 @@ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, | |||
985 | 1007 | ||
986 | static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) | 1008 | static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) |
987 | { | 1009 | { |
988 | spin_lock_bh(&ar->data_lock); | ||
989 | ieee80211_iterate_active_interfaces_atomic(ar->hw, | 1010 | ieee80211_iterate_active_interfaces_atomic(ar->hw, |
990 | IEEE80211_IFACE_ITER_NORMAL, | 1011 | IEEE80211_IFACE_ITER_NORMAL, |
991 | ath10k_wmi_tx_beacons_iter, | 1012 | ath10k_wmi_tx_beacons_iter, |
992 | NULL); | 1013 | NULL); |
993 | spin_unlock_bh(&ar->data_lock); | ||
994 | } | 1014 | } |
995 | 1015 | ||
996 | static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) | 1016 | static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) |
@@ -1680,12 +1700,9 @@ int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) | |||
1680 | return 0; | 1700 | return 0; |
1681 | } | 1701 | } |
1682 | 1702 | ||
1683 | void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src, | 1703 | void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, |
1684 | struct ath10k_fw_stats_pdev *dst) | 1704 | struct ath10k_fw_stats_pdev *dst) |
1685 | { | 1705 | { |
1686 | const struct wal_dbg_tx_stats *tx = &src->wal.tx; | ||
1687 | const struct wal_dbg_rx_stats *rx = &src->wal.rx; | ||
1688 | |||
1689 | dst->ch_noise_floor = __le32_to_cpu(src->chan_nf); | 1706 | dst->ch_noise_floor = __le32_to_cpu(src->chan_nf); |
1690 | dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); | 1707 | dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); |
1691 | dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); | 1708 | dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); |
@@ -1693,44 +1710,63 @@ void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src, | |||
1693 | dst->cycle_count = __le32_to_cpu(src->cycle_count); | 1710 | dst->cycle_count = __le32_to_cpu(src->cycle_count); |
1694 | dst->phy_err_count = __le32_to_cpu(src->phy_err_count); | 1711 | dst->phy_err_count = __le32_to_cpu(src->phy_err_count); |
1695 | dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); | 1712 | dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); |
1713 | } | ||
1696 | 1714 | ||
1697 | dst->comp_queued = __le32_to_cpu(tx->comp_queued); | 1715 | void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, |
1698 | dst->comp_delivered = __le32_to_cpu(tx->comp_delivered); | 1716 | struct ath10k_fw_stats_pdev *dst) |
1699 | dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued); | 1717 | { |
1700 | dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued); | 1718 | dst->comp_queued = __le32_to_cpu(src->comp_queued); |
1701 | dst->wmm_drop = __le32_to_cpu(tx->wmm_drop); | 1719 | dst->comp_delivered = __le32_to_cpu(src->comp_delivered); |
1702 | dst->local_enqued = __le32_to_cpu(tx->local_enqued); | 1720 | dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued); |
1703 | dst->local_freed = __le32_to_cpu(tx->local_freed); | 1721 | dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued); |
1704 | dst->hw_queued = __le32_to_cpu(tx->hw_queued); | 1722 | dst->wmm_drop = __le32_to_cpu(src->wmm_drop); |
1705 | dst->hw_reaped = __le32_to_cpu(tx->hw_reaped); | 1723 | dst->local_enqued = __le32_to_cpu(src->local_enqued); |
1706 | dst->underrun = __le32_to_cpu(tx->underrun); | 1724 | dst->local_freed = __le32_to_cpu(src->local_freed); |
1707 | dst->tx_abort = __le32_to_cpu(tx->tx_abort); | 1725 | dst->hw_queued = __le32_to_cpu(src->hw_queued); |
1708 | dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed); | 1726 | dst->hw_reaped = __le32_to_cpu(src->hw_reaped); |
1709 | dst->tx_ko = __le32_to_cpu(tx->tx_ko); | 1727 | dst->underrun = __le32_to_cpu(src->underrun); |
1710 | dst->data_rc = __le32_to_cpu(tx->data_rc); | 1728 | dst->tx_abort = __le32_to_cpu(src->tx_abort); |
1711 | dst->self_triggers = __le32_to_cpu(tx->self_triggers); | 1729 | dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed); |
1712 | dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure); | 1730 | dst->tx_ko = __le32_to_cpu(src->tx_ko); |
1713 | dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err); | 1731 | dst->data_rc = __le32_to_cpu(src->data_rc); |
1714 | dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry); | 1732 | dst->self_triggers = __le32_to_cpu(src->self_triggers); |
1715 | dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout); | 1733 | dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); |
1716 | dst->pdev_resets = __le32_to_cpu(tx->pdev_resets); | 1734 | dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); |
1717 | dst->phy_underrun = __le32_to_cpu(tx->phy_underrun); | 1735 | dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); |
1718 | dst->txop_ovf = __le32_to_cpu(tx->txop_ovf); | 1736 | dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); |
1719 | 1737 | dst->pdev_resets = __le32_to_cpu(src->pdev_resets); | |
1720 | dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change); | 1738 | dst->phy_underrun = __le32_to_cpu(src->phy_underrun); |
1721 | dst->status_rcvd = __le32_to_cpu(rx->status_rcvd); | 1739 | dst->txop_ovf = __le32_to_cpu(src->txop_ovf); |
1722 | dst->r0_frags = __le32_to_cpu(rx->r0_frags); | 1740 | } |
1723 | dst->r1_frags = __le32_to_cpu(rx->r1_frags); | 1741 | |
1724 | dst->r2_frags = __le32_to_cpu(rx->r2_frags); | 1742 | void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, |
1725 | dst->r3_frags = __le32_to_cpu(rx->r3_frags); | 1743 | struct ath10k_fw_stats_pdev *dst) |
1726 | dst->htt_msdus = __le32_to_cpu(rx->htt_msdus); | 1744 | { |
1727 | dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus); | 1745 | dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change); |
1728 | dst->loc_msdus = __le32_to_cpu(rx->loc_msdus); | 1746 | dst->status_rcvd = __le32_to_cpu(src->status_rcvd); |
1729 | dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus); | 1747 | dst->r0_frags = __le32_to_cpu(src->r0_frags); |
1730 | dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu); | 1748 | dst->r1_frags = __le32_to_cpu(src->r1_frags); |
1731 | dst->phy_errs = __le32_to_cpu(rx->phy_errs); | 1749 | dst->r2_frags = __le32_to_cpu(src->r2_frags); |
1732 | dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop); | 1750 | dst->r3_frags = __le32_to_cpu(src->r3_frags); |
1733 | dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs); | 1751 | dst->htt_msdus = __le32_to_cpu(src->htt_msdus); |
1752 | dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus); | ||
1753 | dst->loc_msdus = __le32_to_cpu(src->loc_msdus); | ||
1754 | dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus); | ||
1755 | dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu); | ||
1756 | dst->phy_errs = __le32_to_cpu(src->phy_errs); | ||
1757 | dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop); | ||
1758 | dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs); | ||
1759 | } | ||
1760 | |||
1761 | void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src, | ||
1762 | struct ath10k_fw_stats_pdev *dst) | ||
1763 | { | ||
1764 | dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad); | ||
1765 | dst->rts_bad = __le32_to_cpu(src->rts_bad); | ||
1766 | dst->rts_good = __le32_to_cpu(src->rts_good); | ||
1767 | dst->fcs_bad = __le32_to_cpu(src->fcs_bad); | ||
1768 | dst->no_beacons = __le32_to_cpu(src->no_beacons); | ||
1769 | dst->mib_int_count = __le32_to_cpu(src->mib_int_count); | ||
1734 | } | 1770 | } |
1735 | 1771 | ||
1736 | void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, | 1772 | void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, |
@@ -1768,7 +1804,10 @@ static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, | |||
1768 | if (!dst) | 1804 | if (!dst) |
1769 | continue; | 1805 | continue; |
1770 | 1806 | ||
1771 | ath10k_wmi_pull_pdev_stats(src, dst); | 1807 | ath10k_wmi_pull_pdev_stats_base(&src->base, dst); |
1808 | ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); | ||
1809 | ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); | ||
1810 | |||
1772 | list_add_tail(&dst->list, &stats->pdevs); | 1811 | list_add_tail(&dst->list, &stats->pdevs); |
1773 | } | 1812 | } |
1774 | 1813 | ||
@@ -1820,14 +1859,10 @@ static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar, | |||
1820 | if (!dst) | 1859 | if (!dst) |
1821 | continue; | 1860 | continue; |
1822 | 1861 | ||
1823 | ath10k_wmi_pull_pdev_stats(&src->old, dst); | 1862 | ath10k_wmi_pull_pdev_stats_base(&src->base, dst); |
1824 | 1863 | ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); | |
1825 | dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad); | 1864 | ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); |
1826 | dst->rts_bad = __le32_to_cpu(src->rts_bad); | 1865 | ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); |
1827 | dst->rts_good = __le32_to_cpu(src->rts_good); | ||
1828 | dst->fcs_bad = __le32_to_cpu(src->fcs_bad); | ||
1829 | dst->no_beacons = __le32_to_cpu(src->no_beacons); | ||
1830 | dst->mib_int_count = __le32_to_cpu(src->mib_int_count); | ||
1831 | 1866 | ||
1832 | list_add_tail(&dst->list, &stats->pdevs); | 1867 | list_add_tail(&dst->list, &stats->pdevs); |
1833 | } | 1868 | } |
@@ -1856,6 +1891,164 @@ static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar, | |||
1856 | return 0; | 1891 | return 0; |
1857 | } | 1892 | } |
1858 | 1893 | ||
1894 | static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, | ||
1895 | struct sk_buff *skb, | ||
1896 | struct ath10k_fw_stats *stats) | ||
1897 | { | ||
1898 | const struct wmi_10_2_stats_event *ev = (void *)skb->data; | ||
1899 | u32 num_pdev_stats; | ||
1900 | u32 num_pdev_ext_stats; | ||
1901 | u32 num_vdev_stats; | ||
1902 | u32 num_peer_stats; | ||
1903 | int i; | ||
1904 | |||
1905 | if (!skb_pull(skb, sizeof(*ev))) | ||
1906 | return -EPROTO; | ||
1907 | |||
1908 | num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); | ||
1909 | num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); | ||
1910 | num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); | ||
1911 | num_peer_stats = __le32_to_cpu(ev->num_peer_stats); | ||
1912 | |||
1913 | for (i = 0; i < num_pdev_stats; i++) { | ||
1914 | const struct wmi_10_2_pdev_stats *src; | ||
1915 | struct ath10k_fw_stats_pdev *dst; | ||
1916 | |||
1917 | src = (void *)skb->data; | ||
1918 | if (!skb_pull(skb, sizeof(*src))) | ||
1919 | return -EPROTO; | ||
1920 | |||
1921 | dst = kzalloc(sizeof(*dst), GFP_ATOMIC); | ||
1922 | if (!dst) | ||
1923 | continue; | ||
1924 | |||
1925 | ath10k_wmi_pull_pdev_stats_base(&src->base, dst); | ||
1926 | ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); | ||
1927 | ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); | ||
1928 | ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); | ||
1929 | /* FIXME: expose 10.2 specific values */ | ||
1930 | |||
1931 | list_add_tail(&dst->list, &stats->pdevs); | ||
1932 | } | ||
1933 | |||
1934 | for (i = 0; i < num_pdev_ext_stats; i++) { | ||
1935 | const struct wmi_10_2_pdev_ext_stats *src; | ||
1936 | |||
1937 | src = (void *)skb->data; | ||
1938 | if (!skb_pull(skb, sizeof(*src))) | ||
1939 | return -EPROTO; | ||
1940 | |||
1941 | /* FIXME: expose values to userspace | ||
1942 | * | ||
1943 | * Note: Even though this loop seems to do nothing, it is | ||
1944 | * required to parse the following sub-structures properly. | ||
1945 | */ | ||
1946 | } | ||
1947 | |||
1948 | /* fw doesn't implement vdev stats */ | ||
1949 | |||
1950 | for (i = 0; i < num_peer_stats; i++) { | ||
1951 | const struct wmi_10_2_peer_stats *src; | ||
1952 | struct ath10k_fw_stats_peer *dst; | ||
1953 | |||
1954 | src = (void *)skb->data; | ||
1955 | if (!skb_pull(skb, sizeof(*src))) | ||
1956 | return -EPROTO; | ||
1957 | |||
1958 | dst = kzalloc(sizeof(*dst), GFP_ATOMIC); | ||
1959 | if (!dst) | ||
1960 | continue; | ||
1961 | |||
1962 | ath10k_wmi_pull_peer_stats(&src->old, dst); | ||
1963 | |||
1964 | dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); | ||
1965 | /* FIXME: expose 10.2 specific values */ | ||
1966 | |||
1967 | list_add_tail(&dst->list, &stats->peers); | ||
1968 | } | ||
1969 | |||
1970 | return 0; | ||
1971 | } | ||
1972 | |||
1973 | static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, | ||
1974 | struct sk_buff *skb, | ||
1975 | struct ath10k_fw_stats *stats) | ||
1976 | { | ||
1977 | const struct wmi_10_2_stats_event *ev = (void *)skb->data; | ||
1978 | u32 num_pdev_stats; | ||
1979 | u32 num_pdev_ext_stats; | ||
1980 | u32 num_vdev_stats; | ||
1981 | u32 num_peer_stats; | ||
1982 | int i; | ||
1983 | |||
1984 | if (!skb_pull(skb, sizeof(*ev))) | ||
1985 | return -EPROTO; | ||
1986 | |||
1987 | num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); | ||
1988 | num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); | ||
1989 | num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); | ||
1990 | num_peer_stats = __le32_to_cpu(ev->num_peer_stats); | ||
1991 | |||
1992 | for (i = 0; i < num_pdev_stats; i++) { | ||
1993 | const struct wmi_10_2_pdev_stats *src; | ||
1994 | struct ath10k_fw_stats_pdev *dst; | ||
1995 | |||
1996 | src = (void *)skb->data; | ||
1997 | if (!skb_pull(skb, sizeof(*src))) | ||
1998 | return -EPROTO; | ||
1999 | |||
2000 | dst = kzalloc(sizeof(*dst), GFP_ATOMIC); | ||
2001 | if (!dst) | ||
2002 | continue; | ||
2003 | |||
2004 | ath10k_wmi_pull_pdev_stats_base(&src->base, dst); | ||
2005 | ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst); | ||
2006 | ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst); | ||
2007 | ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst); | ||
2008 | /* FIXME: expose 10.2 specific values */ | ||
2009 | |||
2010 | list_add_tail(&dst->list, &stats->pdevs); | ||
2011 | } | ||
2012 | |||
2013 | for (i = 0; i < num_pdev_ext_stats; i++) { | ||
2014 | const struct wmi_10_2_pdev_ext_stats *src; | ||
2015 | |||
2016 | src = (void *)skb->data; | ||
2017 | if (!skb_pull(skb, sizeof(*src))) | ||
2018 | return -EPROTO; | ||
2019 | |||
2020 | /* FIXME: expose values to userspace | ||
2021 | * | ||
2022 | * Note: Even though this loop seems to do nothing, it is | ||
2023 | * required to parse the following sub-structures properly. | ||
2024 | */ | ||
2025 | } | ||
2026 | |||
2027 | /* fw doesn't implement vdev stats */ | ||
2028 | |||
2029 | for (i = 0; i < num_peer_stats; i++) { | ||
2030 | const struct wmi_10_2_4_peer_stats *src; | ||
2031 | struct ath10k_fw_stats_peer *dst; | ||
2032 | |||
2033 | src = (void *)skb->data; | ||
2034 | if (!skb_pull(skb, sizeof(*src))) | ||
2035 | return -EPROTO; | ||
2036 | |||
2037 | dst = kzalloc(sizeof(*dst), GFP_ATOMIC); | ||
2038 | if (!dst) | ||
2039 | continue; | ||
2040 | |||
2041 | ath10k_wmi_pull_peer_stats(&src->common.old, dst); | ||
2042 | |||
2043 | dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); | ||
2044 | /* FIXME: expose 10.2 specific values */ | ||
2045 | |||
2046 | list_add_tail(&dst->list, &stats->peers); | ||
2047 | } | ||
2048 | |||
2049 | return 0; | ||
2050 | } | ||
2051 | |||
1859 | void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb) | 2052 | void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb) |
1860 | { | 2053 | { |
1861 | ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); | 2054 | ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); |
@@ -2279,9 +2472,19 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) | |||
2279 | spin_lock_bh(&ar->data_lock); | 2472 | spin_lock_bh(&ar->data_lock); |
2280 | 2473 | ||
2281 | if (arvif->beacon) { | 2474 | if (arvif->beacon) { |
2282 | if (!arvif->beacon_sent) | 2475 | switch (arvif->beacon_state) { |
2283 | ath10k_warn(ar, "SWBA overrun on vdev %d\n", | 2476 | case ATH10K_BEACON_SENT: |
2477 | break; | ||
2478 | case ATH10K_BEACON_SCHEDULED: | ||
2479 | ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n", | ||
2480 | arvif->vdev_id); | ||
2481 | break; | ||
2482 | case ATH10K_BEACON_SENDING: | ||
2483 | ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n", | ||
2284 | arvif->vdev_id); | 2484 | arvif->vdev_id); |
2485 | dev_kfree_skb(bcn); | ||
2486 | goto skip; | ||
2487 | } | ||
2285 | 2488 | ||
2286 | ath10k_mac_vif_beacon_free(arvif); | 2489 | ath10k_mac_vif_beacon_free(arvif); |
2287 | } | 2490 | } |
@@ -2309,15 +2512,16 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) | |||
2309 | } | 2512 | } |
2310 | 2513 | ||
2311 | arvif->beacon = bcn; | 2514 | arvif->beacon = bcn; |
2312 | arvif->beacon_sent = false; | 2515 | arvif->beacon_state = ATH10K_BEACON_SCHEDULED; |
2313 | 2516 | ||
2314 | trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); | 2517 | trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); |
2315 | trace_ath10k_tx_payload(ar, bcn->data, bcn->len); | 2518 | trace_ath10k_tx_payload(ar, bcn->data, bcn->len); |
2316 | 2519 | ||
2317 | ath10k_wmi_tx_beacon_nowait(arvif); | ||
2318 | skip: | 2520 | skip: |
2319 | spin_unlock_bh(&ar->data_lock); | 2521 | spin_unlock_bh(&ar->data_lock); |
2320 | } | 2522 | } |
2523 | |||
2524 | ath10k_wmi_tx_beacons_nowait(ar); | ||
2321 | } | 2525 | } |
2322 | 2526 | ||
2323 | void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) | 2527 | void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) |
@@ -3710,7 +3914,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) | |||
3710 | struct wmi_init_cmd_10_2 *cmd; | 3914 | struct wmi_init_cmd_10_2 *cmd; |
3711 | struct sk_buff *buf; | 3915 | struct sk_buff *buf; |
3712 | struct wmi_resource_config_10x config = {}; | 3916 | struct wmi_resource_config_10x config = {}; |
3713 | u32 len, val; | 3917 | u32 len, val, features; |
3714 | 3918 | ||
3715 | config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); | 3919 | config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); |
3716 | config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); | 3920 | config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); |
@@ -3744,7 +3948,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) | |||
3744 | config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); | 3948 | config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); |
3745 | config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); | 3949 | config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); |
3746 | config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); | 3950 | config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); |
3747 | config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); | 3951 | config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE); |
3748 | config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); | 3952 | config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); |
3749 | 3953 | ||
3750 | val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; | 3954 | val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; |
@@ -3764,6 +3968,9 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) | |||
3764 | 3968 | ||
3765 | cmd = (struct wmi_init_cmd_10_2 *)buf->data; | 3969 | cmd = (struct wmi_init_cmd_10_2 *)buf->data; |
3766 | 3970 | ||
3971 | features = WMI_10_2_RX_BATCH_MODE; | ||
3972 | cmd->resource_config.feature_mask = __cpu_to_le32(features); | ||
3973 | |||
3767 | memcpy(&cmd->resource_config.common, &config, sizeof(config)); | 3974 | memcpy(&cmd->resource_config.common, &config, sizeof(config)); |
3768 | ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); | 3975 | ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); |
3769 | 3976 | ||
@@ -4680,12 +4887,12 @@ ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar) | |||
4680 | 4887 | ||
4681 | /* This function assumes the beacon is already DMA mapped */ | 4888 | /* This function assumes the beacon is already DMA mapped */ |
4682 | static struct sk_buff * | 4889 | static struct sk_buff * |
4683 | ath10k_wmi_op_gen_beacon_dma(struct ath10k_vif *arvif) | 4890 | ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn, |
4891 | size_t bcn_len, u32 bcn_paddr, bool dtim_zero, | ||
4892 | bool deliver_cab) | ||
4684 | { | 4893 | { |
4685 | struct ath10k *ar = arvif->ar; | ||
4686 | struct wmi_bcn_tx_ref_cmd *cmd; | 4894 | struct wmi_bcn_tx_ref_cmd *cmd; |
4687 | struct sk_buff *skb; | 4895 | struct sk_buff *skb; |
4688 | struct sk_buff *beacon = arvif->beacon; | ||
4689 | struct ieee80211_hdr *hdr; | 4896 | struct ieee80211_hdr *hdr; |
4690 | u16 fc; | 4897 | u16 fc; |
4691 | 4898 | ||
@@ -4693,29 +4900,29 @@ ath10k_wmi_op_gen_beacon_dma(struct ath10k_vif *arvif) | |||
4693 | if (!skb) | 4900 | if (!skb) |
4694 | return ERR_PTR(-ENOMEM); | 4901 | return ERR_PTR(-ENOMEM); |
4695 | 4902 | ||
4696 | hdr = (struct ieee80211_hdr *)beacon->data; | 4903 | hdr = (struct ieee80211_hdr *)bcn; |
4697 | fc = le16_to_cpu(hdr->frame_control); | 4904 | fc = le16_to_cpu(hdr->frame_control); |
4698 | 4905 | ||
4699 | cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data; | 4906 | cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data; |
4700 | cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); | 4907 | cmd->vdev_id = __cpu_to_le32(vdev_id); |
4701 | cmd->data_len = __cpu_to_le32(beacon->len); | 4908 | cmd->data_len = __cpu_to_le32(bcn_len); |
4702 | cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr); | 4909 | cmd->data_ptr = __cpu_to_le32(bcn_paddr); |
4703 | cmd->msdu_id = 0; | 4910 | cmd->msdu_id = 0; |
4704 | cmd->frame_control = __cpu_to_le32(fc); | 4911 | cmd->frame_control = __cpu_to_le32(fc); |
4705 | cmd->flags = 0; | 4912 | cmd->flags = 0; |
4706 | cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); | 4913 | cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); |
4707 | 4914 | ||
4708 | if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) | 4915 | if (dtim_zero) |
4709 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); | 4916 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); |
4710 | 4917 | ||
4711 | if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab) | 4918 | if (deliver_cab) |
4712 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); | 4919 | cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); |
4713 | 4920 | ||
4714 | return skb; | 4921 | return skb; |
4715 | } | 4922 | } |
4716 | 4923 | ||
4717 | void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, | 4924 | void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params, |
4718 | const struct wmi_wmm_params_arg *arg) | 4925 | const struct wmi_wmm_params_arg *arg) |
4719 | { | 4926 | { |
4720 | params->cwmin = __cpu_to_le32(arg->cwmin); | 4927 | params->cwmin = __cpu_to_le32(arg->cwmin); |
4721 | params->cwmax = __cpu_to_le32(arg->cwmax); | 4928 | params->cwmax = __cpu_to_le32(arg->cwmax); |
@@ -4727,7 +4934,7 @@ void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, | |||
4727 | 4934 | ||
4728 | static struct sk_buff * | 4935 | static struct sk_buff * |
4729 | ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar, | 4936 | ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar, |
4730 | const struct wmi_pdev_set_wmm_params_arg *arg) | 4937 | const struct wmi_wmm_params_all_arg *arg) |
4731 | { | 4938 | { |
4732 | struct wmi_pdev_set_wmm_params *cmd; | 4939 | struct wmi_pdev_set_wmm_params *cmd; |
4733 | struct sk_buff *skb; | 4940 | struct sk_buff *skb; |
@@ -4737,10 +4944,10 @@ ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar, | |||
4737 | return ERR_PTR(-ENOMEM); | 4944 | return ERR_PTR(-ENOMEM); |
4738 | 4945 | ||
4739 | cmd = (struct wmi_pdev_set_wmm_params *)skb->data; | 4946 | cmd = (struct wmi_pdev_set_wmm_params *)skb->data; |
4740 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be); | 4947 | ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be); |
4741 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); | 4948 | ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); |
4742 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); | 4949 | ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); |
4743 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); | 4950 | ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); |
4744 | 4951 | ||
4745 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); | 4952 | ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); |
4746 | return skb; | 4953 | return skb; |
@@ -4784,7 +4991,8 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar, | |||
4784 | } | 4991 | } |
4785 | 4992 | ||
4786 | static struct sk_buff * | 4993 | static struct sk_buff * |
4787 | ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable) | 4994 | ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable, |
4995 | u32 log_level) | ||
4788 | { | 4996 | { |
4789 | struct wmi_dbglog_cfg_cmd *cmd; | 4997 | struct wmi_dbglog_cfg_cmd *cmd; |
4790 | struct sk_buff *skb; | 4998 | struct sk_buff *skb; |
@@ -4797,7 +5005,7 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable) | |||
4797 | cmd = (struct wmi_dbglog_cfg_cmd *)skb->data; | 5005 | cmd = (struct wmi_dbglog_cfg_cmd *)skb->data; |
4798 | 5006 | ||
4799 | if (module_enable) { | 5007 | if (module_enable) { |
4800 | cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE, | 5008 | cfg = SM(log_level, |
4801 | ATH10K_DBGLOG_CFG_LOG_LVL); | 5009 | ATH10K_DBGLOG_CFG_LOG_LVL); |
4802 | } else { | 5010 | } else { |
4803 | /* set back defaults, all modules with WARN level */ | 5011 | /* set back defaults, all modules with WARN level */ |
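[Editor's note, illustrative only: SM() is ath10k's shift-and-mask helper, roughly (((_v) << _f##_LSB) & _f##_MASK) as defined in the driver's hw.h, so the hunk above now places the caller-supplied log_level into the LOG_LVL bit-field of the dbglog config word instead of hard-coding ATH10K_DBGLOG_LEVEL_VERBOSE; when module_enable is zero the else branch keeps the previous WARN-level default.]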
@@ -4877,6 +5085,109 @@ ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, | |||
4877 | return skb; | 5085 | return skb; |
4878 | } | 5086 | } |
4879 | 5087 | ||
5088 | static struct sk_buff * | ||
5089 | ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id, | ||
5090 | const u8 *mac) | ||
5091 | { | ||
5092 | struct wmi_addba_clear_resp_cmd *cmd; | ||
5093 | struct sk_buff *skb; | ||
5094 | |||
5095 | if (!mac) | ||
5096 | return ERR_PTR(-EINVAL); | ||
5097 | |||
5098 | skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); | ||
5099 | if (!skb) | ||
5100 | return ERR_PTR(-ENOMEM); | ||
5101 | |||
5102 | cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; | ||
5103 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
5104 | ether_addr_copy(cmd->peer_macaddr.addr, mac); | ||
5105 | |||
5106 | ath10k_dbg(ar, ATH10K_DBG_WMI, | ||
5107 | "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n", | ||
5108 | vdev_id, mac); | ||
5109 | return skb; | ||
5110 | } | ||
5111 | |||
5112 | static struct sk_buff * | ||
5113 | ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
5114 | u32 tid, u32 buf_size) | ||
5115 | { | ||
5116 | struct wmi_addba_send_cmd *cmd; | ||
5117 | struct sk_buff *skb; | ||
5118 | |||
5119 | if (!mac) | ||
5120 | return ERR_PTR(-EINVAL); | ||
5121 | |||
5122 | skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); | ||
5123 | if (!skb) | ||
5124 | return ERR_PTR(-ENOMEM); | ||
5125 | |||
5126 | cmd = (struct wmi_addba_send_cmd *)skb->data; | ||
5127 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
5128 | ether_addr_copy(cmd->peer_macaddr.addr, mac); | ||
5129 | cmd->tid = __cpu_to_le32(tid); | ||
5130 | cmd->buffersize = __cpu_to_le32(buf_size); | ||
5131 | |||
5132 | ath10k_dbg(ar, ATH10K_DBG_WMI, | ||
5133 | "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", | ||
5134 | vdev_id, mac, tid, buf_size); | ||
5135 | return skb; | ||
5136 | } | ||
5137 | |||
5138 | static struct sk_buff * | ||
5139 | ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
5140 | u32 tid, u32 status) | ||
5141 | { | ||
5142 | struct wmi_addba_setresponse_cmd *cmd; | ||
5143 | struct sk_buff *skb; | ||
5144 | |||
5145 | if (!mac) | ||
5146 | return ERR_PTR(-EINVAL); | ||
5147 | |||
5148 | skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); | ||
5149 | if (!skb) | ||
5150 | return ERR_PTR(-ENOMEM); | ||
5151 | |||
5152 | cmd = (struct wmi_addba_setresponse_cmd *)skb->data; | ||
5153 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
5154 | ether_addr_copy(cmd->peer_macaddr.addr, mac); | ||
5155 | cmd->tid = __cpu_to_le32(tid); | ||
5156 | cmd->statuscode = __cpu_to_le32(status); | ||
5157 | |||
5158 | ath10k_dbg(ar, ATH10K_DBG_WMI, | ||
5159 | "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", | ||
5160 | vdev_id, mac, tid, status); | ||
5161 | return skb; | ||
5162 | } | ||
5163 | |||
5164 | static struct sk_buff * | ||
5165 | ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
5166 | u32 tid, u32 initiator, u32 reason) | ||
5167 | { | ||
5168 | struct wmi_delba_send_cmd *cmd; | ||
5169 | struct sk_buff *skb; | ||
5170 | |||
5171 | if (!mac) | ||
5172 | return ERR_PTR(-EINVAL); | ||
5173 | |||
5174 | skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); | ||
5175 | if (!skb) | ||
5176 | return ERR_PTR(-ENOMEM); | ||
5177 | |||
5178 | cmd = (struct wmi_delba_send_cmd *)skb->data; | ||
5179 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
5180 | ether_addr_copy(cmd->peer_macaddr.addr, mac); | ||
5181 | cmd->tid = __cpu_to_le32(tid); | ||
5182 | cmd->initiator = __cpu_to_le32(initiator); | ||
5183 | cmd->reasoncode = __cpu_to_le32(reason); | ||
5184 | |||
5185 | ath10k_dbg(ar, ATH10K_DBG_WMI, | ||
5186 | "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", | ||
5187 | vdev_id, mac, tid, initiator, reason); | ||
5188 | return skb; | ||
5189 | } | ||
5190 | |||
4880 | static const struct wmi_ops wmi_ops = { | 5191 | static const struct wmi_ops wmi_ops = { |
4881 | .rx = ath10k_wmi_op_rx, | 5192 | .rx = ath10k_wmi_op_rx, |
4882 | .map_svc = wmi_main_svc_map, | 5193 | .map_svc = wmi_main_svc_map, |
@@ -4909,6 +5220,7 @@ static const struct wmi_ops wmi_ops = { | |||
4909 | .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, | 5220 | .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, |
4910 | .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, | 5221 | .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, |
4911 | .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, | 5222 | .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, |
5223 | /* .gen_vdev_wmm_conf not implemented */ | ||
4912 | .gen_peer_create = ath10k_wmi_op_gen_peer_create, | 5224 | .gen_peer_create = ath10k_wmi_op_gen_peer_create, |
4913 | .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, | 5225 | .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, |
4914 | .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, | 5226 | .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, |
@@ -4928,6 +5240,13 @@ static const struct wmi_ops wmi_ops = { | |||
4928 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, | 5240 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, |
4929 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, | 5241 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, |
4930 | /* .gen_pdev_get_temperature not implemented */ | 5242 | /* .gen_pdev_get_temperature not implemented */ |
5243 | .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, | ||
5244 | .gen_addba_send = ath10k_wmi_op_gen_addba_send, | ||
5245 | .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, | ||
5246 | .gen_delba_send = ath10k_wmi_op_gen_delba_send, | ||
5247 | /* .gen_bcn_tmpl not implemented */ | ||
5248 | /* .gen_prb_tmpl not implemented */ | ||
5249 | /* .gen_p2p_go_bcn_ie not implemented */ | ||
4931 | }; | 5250 | }; |
4932 | 5251 | ||
4933 | static const struct wmi_ops wmi_10_1_ops = { | 5252 | static const struct wmi_ops wmi_10_1_ops = { |
@@ -4965,6 +5284,7 @@ static const struct wmi_ops wmi_10_1_ops = { | |||
4965 | .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, | 5284 | .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, |
4966 | .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, | 5285 | .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, |
4967 | .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, | 5286 | .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, |
5287 | /* .gen_vdev_wmm_conf not implemented */ | ||
4968 | .gen_peer_create = ath10k_wmi_op_gen_peer_create, | 5288 | .gen_peer_create = ath10k_wmi_op_gen_peer_create, |
4969 | .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, | 5289 | .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, |
4970 | .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, | 5290 | .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, |
@@ -4982,10 +5302,18 @@ static const struct wmi_ops wmi_10_1_ops = { | |||
4982 | .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, | 5302 | .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, |
4983 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, | 5303 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, |
4984 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, | 5304 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, |
5305 | .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, | ||
5306 | .gen_addba_send = ath10k_wmi_op_gen_addba_send, | ||
5307 | .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, | ||
5308 | .gen_delba_send = ath10k_wmi_op_gen_delba_send, | ||
5309 | /* .gen_bcn_tmpl not implemented */ | ||
5310 | /* .gen_prb_tmpl not implemented */ | ||
5311 | /* .gen_p2p_go_bcn_ie not implemented */ | ||
4985 | }; | 5312 | }; |
4986 | 5313 | ||
4987 | static const struct wmi_ops wmi_10_2_ops = { | 5314 | static const struct wmi_ops wmi_10_2_ops = { |
4988 | .rx = ath10k_wmi_10_2_op_rx, | 5315 | .rx = ath10k_wmi_10_2_op_rx, |
5316 | .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats, | ||
4989 | .gen_init = ath10k_wmi_10_2_op_gen_init, | 5317 | .gen_init = ath10k_wmi_10_2_op_gen_init, |
4990 | .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, | 5318 | .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, |
4991 | /* .gen_pdev_get_temperature not implemented */ | 5319 | /* .gen_pdev_get_temperature not implemented */ |
@@ -4993,7 +5321,6 @@ static const struct wmi_ops wmi_10_2_ops = { | |||
4993 | /* shared with 10.1 */ | 5321 | /* shared with 10.1 */ |
4994 | .map_svc = wmi_10x_svc_map, | 5322 | .map_svc = wmi_10x_svc_map, |
4995 | .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, | 5323 | .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, |
4996 | .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats, | ||
4997 | .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, | 5324 | .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, |
4998 | .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, | 5325 | .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, |
4999 | 5326 | ||
@@ -5020,6 +5347,7 @@ static const struct wmi_ops wmi_10_2_ops = { | |||
5020 | .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, | 5347 | .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, |
5021 | .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, | 5348 | .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf, |
5022 | .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, | 5349 | .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable, |
5350 | /* .gen_vdev_wmm_conf not implemented */ | ||
5023 | .gen_peer_create = ath10k_wmi_op_gen_peer_create, | 5351 | .gen_peer_create = ath10k_wmi_op_gen_peer_create, |
5024 | .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, | 5352 | .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, |
5025 | .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, | 5353 | .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, |
@@ -5037,10 +5365,15 @@ static const struct wmi_ops wmi_10_2_ops = { | |||
5037 | .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, | 5365 | .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, |
5038 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, | 5366 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, |
5039 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, | 5367 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, |
5368 | .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, | ||
5369 | .gen_addba_send = ath10k_wmi_op_gen_addba_send, | ||
5370 | .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, | ||
5371 | .gen_delba_send = ath10k_wmi_op_gen_delba_send, | ||
5040 | }; | 5372 | }; |
5041 | 5373 | ||
5042 | static const struct wmi_ops wmi_10_2_4_ops = { | 5374 | static const struct wmi_ops wmi_10_2_4_ops = { |
5043 | .rx = ath10k_wmi_10_2_op_rx, | 5375 | .rx = ath10k_wmi_10_2_op_rx, |
5376 | .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats, | ||
5044 | .gen_init = ath10k_wmi_10_2_op_gen_init, | 5377 | .gen_init = ath10k_wmi_10_2_op_gen_init, |
5045 | .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, | 5378 | .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, |
5046 | .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, | 5379 | .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, |
@@ -5048,7 +5381,6 @@ static const struct wmi_ops wmi_10_2_4_ops = { | |||
5048 | /* shared with 10.1 */ | 5381 | /* shared with 10.1 */ |
5049 | .map_svc = wmi_10x_svc_map, | 5382 | .map_svc = wmi_10x_svc_map, |
5050 | .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, | 5383 | .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev, |
5051 | .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats, | ||
5052 | .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, | 5384 | .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, |
5053 | .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, | 5385 | .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan, |
5054 | 5386 | ||
@@ -5092,6 +5424,13 @@ static const struct wmi_ops wmi_10_2_4_ops = { | |||
5092 | .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, | 5424 | .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, |
5093 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, | 5425 | .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, |
5094 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, | 5426 | .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, |
5427 | .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp, | ||
5428 | .gen_addba_send = ath10k_wmi_op_gen_addba_send, | ||
5429 | .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, | ||
5430 | .gen_delba_send = ath10k_wmi_op_gen_delba_send, | ||
5431 | /* .gen_bcn_tmpl not implemented */ | ||
5432 | /* .gen_prb_tmpl not implemented */ | ||
5433 | /* .gen_p2p_go_bcn_ie not implemented */ | ||
5095 | }; | 5434 | }; |
5096 | 5435 | ||
5097 | int ath10k_wmi_attach(struct ath10k *ar) | 5436 | int ath10k_wmi_attach(struct ath10k *ar) |
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index bd7f29a3a122..20ce3603e64b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h | |||
@@ -551,6 +551,7 @@ struct wmi_cmd_map { | |||
551 | u32 gpio_config_cmdid; | 551 | u32 gpio_config_cmdid; |
552 | u32 gpio_output_cmdid; | 552 | u32 gpio_output_cmdid; |
553 | u32 pdev_get_temperature_cmdid; | 553 | u32 pdev_get_temperature_cmdid; |
554 | u32 vdev_set_wmm_params_cmdid; | ||
554 | }; | 555 | }; |
555 | 556 | ||
556 | /* | 557 | /* |
@@ -2939,14 +2940,14 @@ struct wmi_wmm_params_arg { | |||
2939 | u32 no_ack; | 2940 | u32 no_ack; |
2940 | }; | 2941 | }; |
2941 | 2942 | ||
2942 | struct wmi_pdev_set_wmm_params_arg { | 2943 | struct wmi_wmm_params_all_arg { |
2943 | struct wmi_wmm_params_arg ac_be; | 2944 | struct wmi_wmm_params_arg ac_be; |
2944 | struct wmi_wmm_params_arg ac_bk; | 2945 | struct wmi_wmm_params_arg ac_bk; |
2945 | struct wmi_wmm_params_arg ac_vi; | 2946 | struct wmi_wmm_params_arg ac_vi; |
2946 | struct wmi_wmm_params_arg ac_vo; | 2947 | struct wmi_wmm_params_arg ac_vo; |
2947 | }; | 2948 | }; |
2948 | 2949 | ||
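The rename of wmi_pdev_set_wmm_params_arg to wmi_wmm_params_all_arg makes the per-AC container reusable outside the pdev command (e.g. for the not-yet-implemented vdev WMM op noted in the ops tables above). A hedged sketch of filling it is below; the cwmin/cwmax/aifs/txop/acm members are assumed from the existing wmi_wmm_params_arg definition, and the numbers are arbitrary examples, not driver defaults.

/* Illustrative only: one common parameter set copied to all four ACs.
 * Values are arbitrary; field names other than no_ack are assumed from
 * the existing struct wmi_wmm_params_arg.
 */
static void example_fill_wmm_all(struct wmi_wmm_params_all_arg *wmm)
{
        struct wmi_wmm_params_arg def = {
                .cwmin  = 15,
                .cwmax  = 1023,
                .aifs   = 3,
                .txop   = 0,
                .acm    = 0,
                .no_ack = 0,
        };

        wmm->ac_be = def;
        wmm->ac_bk = def;
        wmm->ac_vi = def;
        wmm->ac_vo = def;
}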
2949 | struct wal_dbg_tx_stats { | 2950 | struct wmi_pdev_stats_tx { |
2950 | /* Num HTT cookies queued to dispatch list */ | 2951 | /* Num HTT cookies queued to dispatch list */ |
2951 | __le32 comp_queued; | 2952 | __le32 comp_queued; |
2952 | 2953 | ||
@@ -3016,7 +3017,7 @@ struct wal_dbg_tx_stats { | |||
3016 | __le32 txop_ovf; | 3017 | __le32 txop_ovf; |
3017 | } __packed; | 3018 | } __packed; |
3018 | 3019 | ||
3019 | struct wal_dbg_rx_stats { | 3020 | struct wmi_pdev_stats_rx { |
3020 | /* Cnts any change in ring routing mid-ppdu */ | 3021 | /* Cnts any change in ring routing mid-ppdu */ |
3021 | __le32 mid_ppdu_route_change; | 3022 | __le32 mid_ppdu_route_change; |
3022 | 3023 | ||
@@ -3050,17 +3051,11 @@ struct wal_dbg_rx_stats { | |||
3050 | __le32 mpdu_errs; | 3051 | __le32 mpdu_errs; |
3051 | } __packed; | 3052 | } __packed; |
3052 | 3053 | ||
3053 | struct wal_dbg_peer_stats { | 3054 | struct wmi_pdev_stats_peer { |
3054 | /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ | 3055 | /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ |
3055 | __le32 dummy; | 3056 | __le32 dummy; |
3056 | } __packed; | 3057 | } __packed; |
3057 | 3058 | ||
3058 | struct wal_dbg_stats { | ||
3059 | struct wal_dbg_tx_stats tx; | ||
3060 | struct wal_dbg_rx_stats rx; | ||
3061 | struct wal_dbg_peer_stats peer; | ||
3062 | } __packed; | ||
3063 | |||
3064 | enum wmi_stats_id { | 3059 | enum wmi_stats_id { |
3065 | WMI_REQUEST_PEER_STAT = 0x01, | 3060 | WMI_REQUEST_PEER_STAT = 0x01, |
3066 | WMI_REQUEST_AP_STAT = 0x02 | 3061 | WMI_REQUEST_AP_STAT = 0x02 |
@@ -3127,23 +3122,38 @@ struct wmi_stats_event { | |||
3127 | u8 data[0]; | 3122 | u8 data[0]; |
3128 | } __packed; | 3123 | } __packed; |
3129 | 3124 | ||
3125 | struct wmi_10_2_stats_event { | ||
3126 | __le32 stats_id; /* %WMI_REQUEST_ */ | ||
3127 | __le32 num_pdev_stats; | ||
3128 | __le32 num_pdev_ext_stats; | ||
3129 | __le32 num_vdev_stats; | ||
3130 | __le32 num_peer_stats; | ||
3131 | __le32 num_bcnflt_stats; | ||
3132 | u8 data[0]; | ||
3133 | } __packed; | ||
3134 | |||
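Compared with the generic wmi_stats_event, the 10.2 header adds num_pdev_ext_stats, so a parser has to step over that block before it reaches the vdev and peer records. A rough sketch of the offset calculation (bounds checking against the skb length is omitted here and required in real code):

/* Sketch: where the vdev/peer records begin in a 10.2 stats payload.
 * No length validation is performed here; real parsing code must check
 * the remaining buffer before each advance.
 */
static const void *
example_skip_10_2_pdev_blocks(const struct wmi_10_2_stats_event *ev)
{
        const void *data = ev->data;

        data += __le32_to_cpu(ev->num_pdev_stats) *
                sizeof(struct wmi_10_2_pdev_stats);
        data += __le32_to_cpu(ev->num_pdev_ext_stats) *
                sizeof(struct wmi_10_2_pdev_ext_stats);

        /* vdev, peer and beacon-filter records follow */
        return data;
}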
3130 | /* | 3135 | /* |
3131 | * PDEV statistics | 3136 | * PDEV statistics |
3132 | * TODO: add all PDEV stats here | 3137 | * TODO: add all PDEV stats here |
3133 | */ | 3138 | */ |
3139 | struct wmi_pdev_stats_base { | ||
3140 | __le32 chan_nf; | ||
3141 | __le32 tx_frame_count; | ||
3142 | __le32 rx_frame_count; | ||
3143 | __le32 rx_clear_count; | ||
3144 | __le32 cycle_count; | ||
3145 | __le32 phy_err_count; | ||
3146 | __le32 chan_tx_pwr; | ||
3147 | } __packed; | ||
3148 | |||
3134 | struct wmi_pdev_stats { | 3149 | struct wmi_pdev_stats { |
3135 | __le32 chan_nf; /* Channel noise floor */ | 3150 | struct wmi_pdev_stats_base base; |
3136 | __le32 tx_frame_count; /* TX frame count */ | 3151 | struct wmi_pdev_stats_tx tx; |
3137 | __le32 rx_frame_count; /* RX frame count */ | 3152 | struct wmi_pdev_stats_rx rx; |
3138 | __le32 rx_clear_count; /* rx clear count */ | 3153 | struct wmi_pdev_stats_peer peer; |
3139 | __le32 cycle_count; /* cycle count */ | ||
3140 | __le32 phy_err_count; /* Phy error count */ | ||
3141 | __le32 chan_tx_pwr; /* channel tx power */ | ||
3142 | struct wal_dbg_stats wal; /* WAL dbg stats */ | ||
3143 | } __packed; | 3154 | } __packed; |
3144 | 3155 | ||
3145 | struct wmi_10x_pdev_stats { | 3156 | struct wmi_pdev_stats_extra { |
3146 | struct wmi_pdev_stats old; | ||
3147 | __le32 ack_rx_bad; | 3157 | __le32 ack_rx_bad; |
3148 | __le32 rts_bad; | 3158 | __le32 rts_bad; |
3149 | __le32 rts_good; | 3159 | __le32 rts_good; |
@@ -3152,6 +3162,30 @@ struct wmi_10x_pdev_stats { | |||
3152 | __le32 mib_int_count; | 3162 | __le32 mib_int_count; |
3153 | } __packed; | 3163 | } __packed; |
3154 | 3164 | ||
3165 | struct wmi_10x_pdev_stats { | ||
3166 | struct wmi_pdev_stats_base base; | ||
3167 | struct wmi_pdev_stats_tx tx; | ||
3168 | struct wmi_pdev_stats_rx rx; | ||
3169 | struct wmi_pdev_stats_peer peer; | ||
3170 | struct wmi_pdev_stats_extra extra; | ||
3171 | } __packed; | ||
3172 | |||
3173 | struct wmi_pdev_stats_mem { | ||
3174 | __le32 dram_free; | ||
3175 | __le32 iram_free; | ||
3176 | } __packed; | ||
3177 | |||
3178 | struct wmi_10_2_pdev_stats { | ||
3179 | struct wmi_pdev_stats_base base; | ||
3180 | struct wmi_pdev_stats_tx tx; | ||
3181 | __le32 mc_drop; | ||
3182 | struct wmi_pdev_stats_rx rx; | ||
3183 | __le32 pdev_rx_timeout; | ||
3184 | struct wmi_pdev_stats_mem mem; | ||
3185 | struct wmi_pdev_stats_peer peer; | ||
3186 | struct wmi_pdev_stats_extra extra; | ||
3187 | } __packed; | ||
3188 | |||
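With wal_dbg_stats gone, a pdev record is now flattened piecewise; the split pull helpers declared near the end of this header map one-to-one onto the new sub-structures. A sketch for the 10.2 layout (the mc_drop, pdev_rx_timeout and mem words are skipped for brevity):

/* Sketch: flattening one 10.2 pdev record with the split pull helpers
 * declared later in this header; the destination is the existing
 * struct ath10k_fw_stats_pdev from core.h.
 */
static void example_pull_10_2_pdev(const struct wmi_10_2_pdev_stats *src,
                                   struct ath10k_fw_stats_pdev *dst)
{
        ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
        ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
        ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
        ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
}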
3155 | /* | 3189 | /* |
3156 | * VDEV statistics | 3190 | * VDEV statistics |
3157 | * TODO: add all VDEV stats here | 3191 | * TODO: add all VDEV stats here |
@@ -3175,6 +3209,32 @@ struct wmi_10x_peer_stats { | |||
3175 | __le32 peer_rx_rate; | 3209 | __le32 peer_rx_rate; |
3176 | } __packed; | 3210 | } __packed; |
3177 | 3211 | ||
3212 | struct wmi_10_2_peer_stats { | ||
3213 | struct wmi_peer_stats old; | ||
3214 | __le32 peer_rx_rate; | ||
3215 | __le32 current_per; | ||
3216 | __le32 retries; | ||
3217 | __le32 tx_rate_count; | ||
3218 | __le32 max_4ms_frame_len; | ||
3219 | __le32 total_sub_frames; | ||
3220 | __le32 tx_bytes; | ||
3221 | __le32 num_pkt_loss_overflow[4]; | ||
3222 | __le32 num_pkt_loss_excess_retry[4]; | ||
3223 | } __packed; | ||
3224 | |||
3225 | struct wmi_10_2_4_peer_stats { | ||
3226 | struct wmi_10_2_peer_stats common; | ||
3227 | __le32 unknown_value; /* FIXME: what is this word? */ | ||
3228 | } __packed; | ||
3229 | |||
3230 | struct wmi_10_2_pdev_ext_stats { | ||
3231 | __le32 rx_rssi_comb; | ||
3232 | __le32 rx_rssi[4]; | ||
3233 | __le32 rx_mcs[10]; | ||
3234 | __le32 tx_mcs[10]; | ||
3235 | __le32 ack_rssi; | ||
3236 | } __packed; | ||
3237 | |||
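The 10.2 and 10.2.4 peer records keep the legacy wmi_peer_stats as their leading member, so the existing pull helper still covers the common prefix. A hedged sketch (the peer_rx_rate member of ath10k_fw_stats_peer is assumed from core.h, as for the 10.x peer stats):

/* Sketch: reuse of the legacy peer pull for the 10.2.4 record's common
 * prefix; dst->peer_rx_rate is assumed to exist in core.h.
 */
static void
example_pull_10_2_4_peer(const struct wmi_10_2_4_peer_stats *src,
                         struct ath10k_fw_stats_peer *dst)
{
        ath10k_wmi_pull_peer_stats(&src->common.old, dst);
        dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
}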
3178 | struct wmi_vdev_create_cmd { | 3238 | struct wmi_vdev_create_cmd { |
3179 | __le32 vdev_id; | 3239 | __le32 vdev_id; |
3180 | __le32 vdev_type; | 3240 | __le32 vdev_type; |
@@ -4060,6 +4120,30 @@ enum wmi_sta_ps_param_uapsd { | |||
4060 | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), | 4120 | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), |
4061 | }; | 4121 | }; |
4062 | 4122 | ||
4123 | #define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX | ||
4124 | |||
4125 | struct wmi_sta_uapsd_auto_trig_param { | ||
4126 | __le32 wmm_ac; | ||
4127 | __le32 user_priority; | ||
4128 | __le32 service_interval; | ||
4129 | __le32 suspend_interval; | ||
4130 | __le32 delay_interval; | ||
4131 | }; | ||
4132 | |||
4133 | struct wmi_sta_uapsd_auto_trig_cmd_fixed_param { | ||
4134 | __le32 vdev_id; | ||
4135 | struct wmi_mac_addr peer_macaddr; | ||
4136 | __le32 num_ac; | ||
4137 | }; | ||
4138 | |||
4139 | struct wmi_sta_uapsd_auto_trig_arg { | ||
4140 | u32 wmm_ac; | ||
4141 | u32 user_priority; | ||
4142 | u32 service_interval; | ||
4143 | u32 suspend_interval; | ||
4144 | u32 delay_interval; | ||
4145 | }; | ||
4146 | |||
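A short sketch of filling the host-side trigger argument; the interval units are taken to be milliseconds per the _MSEC limit above, and using WMI_STA_UAPSD_MAX_INTERVAL_MSEC as the suspend interval is assumed to mean the trigger never suspends. All values here are illustrative.

/* Illustrative values only; the millisecond units and the "never
 * suspend" reading of WMI_STA_UAPSD_MAX_INTERVAL_MSEC are assumptions.
 */
static void example_fill_uapsd_trig(struct wmi_sta_uapsd_auto_trig_arg *arg,
                                    u32 ac, u32 prio)
{
        arg->wmm_ac = ac;
        arg->user_priority = prio;
        arg->service_interval = 10;
        arg->suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
        arg->delay_interval = 0;
}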
4063 | enum wmi_sta_powersave_param { | 4147 | enum wmi_sta_powersave_param { |
4064 | /* | 4148 | /* |
4065 | * Controls how frames are retrieved from AP while STA is sleeping | 4149 | * Controls how frames are retrieved from AP while STA is sleeping |
@@ -4430,7 +4514,7 @@ struct wmi_peer_set_q_empty_callback_cmd { | |||
4430 | #define WMI_PEER_SPATIAL_MUX 0x00200000 | 4514 | #define WMI_PEER_SPATIAL_MUX 0x00200000 |
4431 | #define WMI_PEER_VHT 0x02000000 | 4515 | #define WMI_PEER_VHT 0x02000000 |
4432 | #define WMI_PEER_80MHZ 0x04000000 | 4516 | #define WMI_PEER_80MHZ 0x04000000 |
4433 | #define WMI_PEER_PMF 0x08000000 | 4517 | #define WMI_PEER_VHT_2G 0x08000000 |
4434 | 4518 | ||
4435 | /* | 4519 | /* |
4436 | * Peer rate capabilities. | 4520 | * Peer rate capabilities. |
@@ -4581,6 +4665,11 @@ enum wmi_sta_keepalive_method { | |||
4581 | WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2, | 4665 | WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2, |
4582 | }; | 4666 | }; |
4583 | 4667 | ||
4668 | #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 | ||
4669 | |||
4670 | /* Firmware crashes if keepalive interval exceeds this limit */ | ||
4671 | #define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff | ||
4672 | |||
4584 | /* note: ip4 addresses are in network byte order, i.e. big endian */ | 4673 | /* note: ip4 addresses are in network byte order, i.e. big endian */ |
4585 | struct wmi_sta_keepalive_arp_resp { | 4674 | struct wmi_sta_keepalive_arp_resp { |
4586 | __be32 src_ip4_addr; | 4675 | __be32 src_ip4_addr; |
@@ -4596,6 +4685,16 @@ struct wmi_sta_keepalive_cmd { | |||
4596 | struct wmi_sta_keepalive_arp_resp arp_resp; | 4685 | struct wmi_sta_keepalive_arp_resp arp_resp; |
4597 | } __packed; | 4686 | } __packed; |
4598 | 4687 | ||
4688 | struct wmi_sta_keepalive_arg { | ||
4689 | u32 vdev_id; | ||
4690 | u32 enabled; | ||
4691 | u32 method; | ||
4692 | u32 interval; | ||
4693 | __be32 src_ip4_addr; | ||
4694 | __be32 dest_ip4_addr; | ||
4695 | const u8 dest_mac_addr[ETH_ALEN]; | ||
4696 | }; | ||
4697 | |||
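A minimal sketch of a host-side keepalive request using the new arg struct; WMI_STA_KEEPALIVE_METHOD_NULL_FRAME is assumed from the existing wmi_sta_keepalive_method enum, and 30 seconds is an arbitrary example within the 0xffff limit documented above.

/* Sketch only: a NULL-frame keepalive every 30 s (arbitrary example).
 * The method constant is assumed from the wmi_sta_keepalive_method enum.
 */
static void example_fill_keepalive(struct wmi_sta_keepalive_arg *arg,
                                   u32 vdev_id)
{
        arg->vdev_id = vdev_id;
        arg->enabled = 1;
        arg->method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
        arg->interval = 30;
}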
4599 | enum wmi_force_fw_hang_type { | 4698 | enum wmi_force_fw_hang_type { |
4600 | WMI_FORCE_FW_HANG_ASSERT = 1, | 4699 | WMI_FORCE_FW_HANG_ASSERT = 1, |
4601 | WMI_FORCE_FW_HANG_NO_DETECT, | 4700 | WMI_FORCE_FW_HANG_NO_DETECT, |
@@ -4772,16 +4871,22 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, | |||
4772 | u32 cmd_id); | 4871 | u32 cmd_id); |
4773 | void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); | 4872 | void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); |
4774 | 4873 | ||
4775 | void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src, | 4874 | void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, |
4776 | struct ath10k_fw_stats_pdev *dst); | 4875 | struct ath10k_fw_stats_pdev *dst); |
4876 | void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, | ||
4877 | struct ath10k_fw_stats_pdev *dst); | ||
4878 | void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, | ||
4879 | struct ath10k_fw_stats_pdev *dst); | ||
4880 | void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src, | ||
4881 | struct ath10k_fw_stats_pdev *dst); | ||
4777 | void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, | 4882 | void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, |
4778 | struct ath10k_fw_stats_peer *dst); | 4883 | struct ath10k_fw_stats_peer *dst); |
4779 | void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, | 4884 | void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, |
4780 | struct wmi_host_mem_chunks *chunks); | 4885 | struct wmi_host_mem_chunks *chunks); |
4781 | void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, | 4886 | void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, |
4782 | const struct wmi_start_scan_arg *arg); | 4887 | const struct wmi_start_scan_arg *arg); |
4783 | void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, | 4888 | void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params, |
4784 | const struct wmi_wmm_params_arg *arg); | 4889 | const struct wmi_wmm_params_arg *arg); |
4785 | void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, | 4890 | void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, |
4786 | const struct wmi_channel_arg *arg); | 4891 | const struct wmi_channel_arg *arg); |
4787 | int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg); | 4892 | int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg); |
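
The renamed ath10k_wmi_set_wmm_param() pairs naturally with the wmi_wmm_params_all_arg container introduced earlier in this header. A sketch of applying it per access category when building a WMM command (the surrounding command layout is not shown):

/* Sketch: per-AC application of the renamed helper. The four
 * destination blocks would live inside the actual WMM command payload.
 */
static void example_apply_wmm_all(struct wmi_wmm_params *be,
                                  struct wmi_wmm_params *bk,
                                  struct wmi_wmm_params *vi,
                                  struct wmi_wmm_params *vo,
                                  const struct wmi_wmm_params_all_arg *arg)
{
        ath10k_wmi_set_wmm_param(be, &arg->ac_be);
        ath10k_wmi_set_wmm_param(bk, &arg->ac_bk);
        ath10k_wmi_set_wmm_param(vi, &arg->ac_vi);
        ath10k_wmi_set_wmm_param(vo, &arg->ac_vo);
}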