Diffstat (limited to 'drivers/net/wireless/wl12xx/tx.c')
 -rw-r--r--   drivers/net/wireless/wl12xx/tx.c   371
 1 file changed, 212 insertions(+), 159 deletions(-)
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index bad9e29d49b0..36eb0d66fd66 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -26,22 +26,24 @@
 #include <linux/etherdevice.h>

 #include "wl12xx.h"
+#include "debug.h"
 #include "io.h"
 #include "reg.h"
 #include "ps.h"
 #include "tx.h"
 #include "event.h"

-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+				      struct wl12xx_vif *wlvif, u8 id)
 {
 	int ret;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

 	if (is_ap)
 		ret = wl12xx_cmd_set_default_wep_key(wl, id,
-						     wl->ap_bcast_hlid);
+						     wlvif->ap.bcast_hlid);
 	else
-		ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

 	if (ret < 0)
 		return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
 }

 static int wl1271_tx_update_filters(struct wl1271 *wl,
-				    struct sk_buff *skb)
+				    struct wl12xx_vif *wlvif,
+				    struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
 	int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
 	if (!ieee80211_is_auth(hdr->frame_control))
 		return 0;

-	if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+	if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
 		goto out;

 	wl1271_debug(DEBUG_CMD, "starting device role for roaming");
-	ret = wl12xx_cmd_role_start_dev(wl);
-	if (ret < 0)
-		goto out;
-
-	ret = wl12xx_roc(wl, wl->dev_role_id);
+	ret = wl12xx_start_dev(wl, wlvif);
 	if (ret < 0)
 		goto out;
 out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
 	wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
 }

-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif,
+				    u8 hlid)
 {
 	bool fw_ps, single_sta;
 	u8 tx_pkts;

-	/* only regulate station links */
-	if (hlid < WL1271_AP_STA_HLID_START)
+	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
 		return;

-	if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
-		return;
-
 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
 	tx_pkts = wl->links[hlid].allocated_pkts;
 	single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
 	 * case FW-memory congestion is not a problem.
 	 */
 	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
-		wl1271_ps_link_start(wl, hlid, true);
+		wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }

 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
 	return wl->dummy_packet == skb;
 }

-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			 struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);

@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
 	} else {
 		struct ieee80211_hdr *hdr;

-		if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
 			return wl->system_hlid;

 		hdr = (struct ieee80211_hdr *)skb->data;
 		if (ieee80211_is_mgmt(hdr->frame_control))
-			return wl->ap_global_hlid;
+			return wlvif->ap.global_hlid;
 		else
-			return wl->ap_bcast_hlid;
+			return wlvif->ap.bcast_hlid;
 	}
 }

-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

-	if (wl12xx_is_dummy_packet(wl, skb))
+	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
 		return wl->system_hlid;

-	if (wl->bss_type == BSS_TYPE_AP_BSS)
-		return wl12xx_tx_get_hlid_ap(wl, skb);
+	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);

-	wl1271_tx_update_filters(wl, skb);
+	wl1271_tx_update_filters(wl, wlvif, skb);

-	if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
-	     test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
 	    !ieee80211_is_auth(hdr->frame_control) &&
 	    !ieee80211_is_assoc_req(hdr->frame_control))
-		return wl->sta_hlid;
+		return wlvif->sta.hlid;
 	else
-		return wl->dev_hlid;
+		return wlvif->dev_hlid;
 }

 static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
 						 unsigned int packet_length)
 {
-	if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
-		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
-	else
+	if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
 		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+	else
+		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
 }

-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
-			      u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			      struct sk_buff *skb, u32 extra, u32 buf_offset,
+			      u8 hlid)
 {
 	struct wl1271_tx_hw_descr *desc;
 	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 	u32 total_blocks;
 	int id, ret = -EBUSY, ac;
 	u32 spare_blocks = wl->tx_spare_blocks;
+	bool is_dummy = false;

 	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
 		return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 	len = wl12xx_calc_packet_alignment(wl, total_len);

 	/* in case of a dummy packet, use default amount of spare mem blocks */
-	if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+	if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+		is_dummy = true;
 		spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+	}

 	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
 		spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		wl->tx_allocated_pkts[ac]++;

-		if (wl->bss_type == BSS_TYPE_AP_BSS &&
-		    hlid >= WL1271_AP_STA_HLID_START)
+		if (!is_dummy && wlvif &&
+		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
+		    test_bit(hlid, wlvif->ap.sta_hlid_map))
 			wl->links[hlid].allocated_pkts++;

 		ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 	return ret;
 }

-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
-			       u32 extra, struct ieee80211_tx_info *control,
-			       u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			       struct sk_buff *skb, u32 extra,
+			       struct ieee80211_tx_info *control, u8 hlid)
 {
 	struct timespec ts;
 	struct wl1271_tx_hw_descr *desc;
 	int aligned_len, ac, rate_idx;
 	s64 hosttime;
 	u16 tx_attr;
+	bool is_dummy;

 	desc = (struct wl1271_tx_hw_descr *) skb->data;

@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
 	hosttime = (timespec_to_ns(&ts) >> 10);
 	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

-	if (wl->bss_type != BSS_TYPE_AP_BSS)
+	is_dummy = wl12xx_is_dummy_packet(wl, skb);
+	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
 		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
 	else
 		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
 	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 	desc->tid = skb->priority;

-	if (wl12xx_is_dummy_packet(wl, skb)) {
+	if (is_dummy) {
 		/*
 		 * FW expects the dummy packet to have an invalid session id -
 		 * any session id that is different than the one set in the join
 		 */
-		tx_attr = ((~wl->session_counter) <<
+		tx_attr = (SESSION_COUNTER_INVALID <<
 			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
 			   TX_HW_ATTR_SESSION_COUNTER;

 		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
-	} else {
+	} else if (wlvif) {
 		/* configure the tx attributes */
-		tx_attr =
-			wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+		tx_attr = wlvif->session_counter <<
+			  TX_HW_ATTR_OFST_SESSION_COUNTER;
 	}

 	desc->hlid = hlid;
-
-	if (wl->bss_type != BSS_TYPE_AP_BSS) {
+	if (is_dummy || !wlvif)
+		rate_idx = 0;
+	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
 		/* if the packets are destined for AP (have a STA entry)
 		   send them with AP rate policies, otherwise use default
 		   basic rates */
-		if (control->control.sta)
-			rate_idx = ACX_TX_AP_FULL_RATE;
+		if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+			rate_idx = wlvif->sta.p2p_rate_idx;
+		else if (control->control.sta)
+			rate_idx = wlvif->sta.ap_rate_idx;
 		else
-			rate_idx = ACX_TX_BASIC_RATE;
+			rate_idx = wlvif->sta.basic_rate_idx;
 	} else {
-		if (hlid == wl->ap_global_hlid)
-			rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
-		else if (hlid == wl->ap_bcast_hlid)
-			rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+		if (hlid == wlvif->ap.global_hlid)
+			rate_idx = wlvif->ap.mgmt_rate_idx;
+		else if (hlid == wlvif->ap.bcast_hlid)
+			rate_idx = wlvif->ap.bcast_rate_idx;
 		else
-			rate_idx = ac;
+			rate_idx = wlvif->ap.ucast_rate_idx[ac];
 	}

 	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
 }

 /* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
-				   u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				   struct sk_buff *skb, u32 buf_offset)
 {
 	struct ieee80211_tx_info *info;
 	u32 extra = 0;
 	int ret = 0;
 	u32 total_len;
 	u8 hlid;
+	bool is_dummy;

 	if (!skb)
 		return -EINVAL;

 	info = IEEE80211_SKB_CB(skb);

+	/* TODO: handle dummy packets on multi-vifs */
+	is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
 	if (info->control.hw_key &&
 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
 		extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
 		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
 			 (cipher == WLAN_CIPHER_SUITE_WEP104);

-		if (unlikely(is_wep && wl->default_key != idx)) {
-			ret = wl1271_set_default_wep_key(wl, idx);
+		if (unlikely(is_wep && wlvif->default_key != idx)) {
+			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
 			if (ret < 0)
 				return ret;
-			wl->default_key = idx;
+			wlvif->default_key = idx;
 		}
 	}
-
-	hlid = wl1271_tx_get_hlid(wl, skb);
+	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 	if (hlid == WL12XX_INVALID_LINK_ID) {
 		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
 		return -EINVAL;
 	}

-	ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
 	if (ret < 0)
 		return ret;

-	wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

-	if (wl->bss_type == BSS_TYPE_AP_BSS) {
+	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
 		wl1271_tx_ap_update_inconnection_sta(wl, skb);
-		wl1271_tx_regulate_link(wl, hlid);
+		wl1271_tx_regulate_link(wl, wlvif, hlid);
 	}

 	/*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
 	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

 	/* Revert side effects in the dummy packet skb, so it can be reused */
-	if (wl12xx_is_dummy_packet(wl, skb))
+	if (is_dummy)
 		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

 	return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
 	return &queues[q];
 }

-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+					      struct wl1271_link *lnk)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	unsigned long flags;
 	struct sk_buff_head *queue;

-	queue = wl1271_select_queue(wl, wl->tx_queue);
+	queue = wl1271_select_queue(wl, lnk->tx_queue);
 	if (!queue)
-		goto out;
+		return NULL;

 	skb = skb_dequeue(queue);
-
-out:
 	if (skb) {
 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
 	return skb;
 }

-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+					      struct wl12xx_vif *wlvif)
 {
 	struct sk_buff *skb = NULL;
-	unsigned long flags;
 	int i, h, start_hlid;
-	struct sk_buff_head *queue;

 	/* start from the link after the last one */
-	start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

 	/* dequeue according to AC, round robin on each link */
-	for (i = 0; i < AP_MAX_LINKS; i++) {
-		h = (start_hlid + i) % AP_MAX_LINKS;
+	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+		h = (start_hlid + i) % WL12XX_MAX_LINKS;

 		/* only consider connected stations */
-		if (h >= WL1271_AP_STA_HLID_START &&
-		    !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+		if (!test_bit(h, wlvif->links_map))
 			continue;

-		queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
-		if (!queue)
+		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+		if (!skb)
 			continue;

-		skb = skb_dequeue(queue);
-		if (skb)
-			break;
+		wlvif->last_tx_hlid = h;
+		break;
 	}

-	if (skb) {
-		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-		wl->last_tx_hlid = h;
-		spin_lock_irqsave(&wl->wl_lock, flags);
-		wl->tx_queue_count[q]--;
-		spin_unlock_irqrestore(&wl->wl_lock, flags);
-	} else {
-		wl->last_tx_hlid = 0;
-	}
+	if (!skb)
+		wlvif->last_tx_hlid = 0;

 	return skb;
 }
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 {
 	unsigned long flags;
+	struct wl12xx_vif *wlvif = wl->last_wlvif;
 	struct sk_buff *skb = NULL;

-	if (wl->bss_type == BSS_TYPE_AP_BSS)
-		skb = wl1271_ap_skb_dequeue(wl);
-	else
-		skb = wl1271_sta_skb_dequeue(wl);
+	if (wlvif) {
+		wl12xx_for_each_wlvif_continue(wl, wlvif) {
+			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+			if (skb) {
+				wl->last_wlvif = wlvif;
+				break;
+			}
+		}
+	}
+
+	/* do another pass */
+	if (!skb) {
+		wl12xx_for_each_wlvif(wl, wlvif) {
+			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+			if (skb) {
+				wl->last_wlvif = wlvif;
+				break;
+			}
+		}
+	}
+
+	if (!skb)
+		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);

 	if (!skb &&
 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 	return skb;
 }

-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				  struct sk_buff *skb)
 {
 	unsigned long flags;
 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

 	if (wl12xx_is_dummy_packet(wl, skb)) {
 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
-	} else if (wl->bss_type == BSS_TYPE_AP_BSS) {
-		u8 hlid = wl1271_tx_get_hlid(wl, skb);
+	} else {
+		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

 		/* make sure we dequeue the same packet next time */
-		wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
-	} else {
-		skb_queue_head(&wl->tx_queue[q], skb);
+		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+				      WL12XX_MAX_LINKS;
 	}

 	spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
 	return ieee80211_is_data_present(hdr->frame_control);
 }

+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+	struct wl12xx_vif *wlvif;
+	u32 timeout;
+	u8 hlid;
+
+	if (!wl->conf.rx_streaming.interval)
+		return;
+
+	if (!wl->conf.rx_streaming.always &&
+	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+		return;
+
+	timeout = wl->conf.rx_streaming.duration;
+	wl12xx_for_each_wlvif_sta(wl, wlvif) {
+		bool found = false;
+		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+			if (test_bit(hlid, wlvif->links_map)) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found)
+			continue;
+
+		/* enable rx streaming */
+		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+			ieee80211_queue_work(wl->hw,
+					     &wlvif->rx_streaming_enable_work);
+
+		mod_timer(&wlvif->rx_streaming_timer,
+			  jiffies + msecs_to_jiffies(timeout));
+	}
+}
+
 void wl1271_tx_work_locked(struct wl1271 *wl)
 {
+	struct wl12xx_vif *wlvif;
 	struct sk_buff *skb;
+	struct wl1271_tx_hw_descr *desc;
 	u32 buf_offset = 0;
 	bool sent_packets = false;
-	bool had_data = false;
-	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
 	int ret;

 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		return;

 	while ((skb = wl1271_skb_dequeue(wl))) {
-		if (wl1271_tx_is_data_present(skb))
-			had_data = true;
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		bool has_data = false;

-		ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+		wlvif = NULL;
+		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+			wlvif = wl12xx_vif_to_data(info->control.vif);
+
+		has_data = wlvif && wl1271_tx_is_data_present(skb);
+		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
 		if (ret == -EAGAIN) {
 			/*
 			 * Aggregation buffer is full.
 			 * Flush buffer and try again.
 			 */
-			wl1271_skb_queue_head(wl, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb);
 			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
 				     buf_offset, true);
 			sent_packets = true;
@@ -672,7 +735,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
 			 * Firmware buffer is full.
 			 * Queue back last skb, and stop aggregating.
 			 */
-			wl1271_skb_queue_head(wl, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb);
 			/* No work left, avoid scheduling redundant tx work */
 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 			goto out_ack;
@@ -682,6 +745,10 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
 		}
 		buf_offset += ret;
 		wl->tx_packets_count++;
+		if (has_data) {
+			desc = (struct wl1271_tx_hw_descr *) skb->data;
+			__set_bit(desc->hlid, active_hlids);
+		}
 	}

 out_ack:
@@ -701,19 +768,7 @@ out_ack:

 		wl1271_handle_tx_low_watermark(wl);
 	}
-	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
-	    (wl->conf.rx_streaming.always ||
-	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
-		u32 timeout = wl->conf.rx_streaming.duration;
-
-		/* enable rx streaming */
-		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
-			ieee80211_queue_work(wl->hw,
-					     &wl->rx_streaming_enable_work);
-
-		mod_timer(&wl->rx_streaming_timer,
-			  jiffies + msecs_to_jiffies(timeout));
-	}
+	wl12xx_rearm_rx_streaming(wl, active_hlids);
 }

 void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +792,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 				      struct wl1271_tx_hw_res_descr *result)
 {
 	struct ieee80211_tx_info *info;
+	struct ieee80211_vif *vif;
+	struct wl12xx_vif *wlvif;
 	struct sk_buff *skb;
 	int id = result->id;
 	int rate = -1;
@@ -756,11 +813,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 		return;
 	}

+	/* info->control is valid as long as we don't update info->status */
+	vif = info->control.vif;
+	wlvif = wl12xx_vif_to_data(vif);
+
 	/* update the TX status info */
 	if (result->status == TX_SUCCESS) {
 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
 			info->flags |= IEEE80211_TX_STAT_ACK;
-		rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+		rate = wl1271_rate_to_idx(result->rate_class_index,
+					  wlvif->band);
 		retries = result->ack_failures;
 	} else if (result->status == TX_RETRY_EXCEEDED) {
 		wl->stats.excessive_retries++;
@@ -783,14 +845,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
 	    info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
 		u8 fw_lsb = result->tx_security_sequence_number_lsb;
-		u8 cur_lsb = wl->tx_security_last_seq_lsb;
+		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

 		/*
 		 * update security sequence number, taking care of potential
 		 * wrap-around
 		 */
-		wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
-		wl->tx_security_last_seq_lsb = fw_lsb;
+		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+		wlvif->tx_security_last_seq_lsb = fw_lsb;
 	}

 	/* remove private header from packet */
@@ -886,39 +948,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 }

 /* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int i;
-	struct sk_buff *skb;
-	struct ieee80211_tx_info *info;

 	/* TX failure */
-	if (wl->bss_type == BSS_TYPE_AP_BSS) {
-		for (i = 0; i < AP_MAX_LINKS; i++) {
-			wl1271_free_sta(wl, i);
-			wl1271_tx_reset_link_queues(wl, i);
-			wl->links[i].allocated_pkts = 0;
-			wl->links[i].prev_freed_pkts = 0;
-		}
-
-		wl->last_tx_hlid = 0;
-	} else {
-		for (i = 0; i < NUM_TX_QUEUES; i++) {
-			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
-					     skb);
-
-				if (!wl12xx_is_dummy_packet(wl, skb)) {
-					info = IEEE80211_SKB_CB(skb);
-					info->status.rates[0].idx = -1;
-					info->status.rates[0].count = 0;
-					ieee80211_tx_status_ni(wl->hw, skb);
-				}
-			}
-		}
+	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+			wl1271_free_sta(wl, wlvif, i);
+		else
+			wlvif->sta.ba_rx_bitmap = 0;

-		wl->ba_rx_bitmap = 0;
+		wl1271_tx_reset_link_queues(wl, i);
+		wl->links[i].allocated_pkts = 0;
+		wl->links[i].prev_freed_pkts = 0;
 	}
+	wlvif->last_tx_hlid = 0;
+
+}
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+	int i;
+	struct sk_buff *skb;
+	struct ieee80211_tx_info *info;

 	for (i = 0; i < NUM_TX_QUEUES; i++)
 		wl->tx_queue_count[i] = 0;