aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/wl12xx/tx.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/wireless/wl12xx/tx.c')
-rw-r--r--drivers/net/wireless/wl12xx/tx.c237
1 file changed, 201 insertions, 36 deletions
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 67a00946e3d..ac60d577319 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -70,8 +70,65 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
70 } 70 }
71} 71}
72 72
73static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
74 struct sk_buff *skb)
75{
76 struct ieee80211_hdr *hdr;
77
78 /*
79 * add the station to the known list before transmitting the
80 * authentication response. this way it won't get de-authed by FW
81 * when transmitting too soon.
82 */
83 hdr = (struct ieee80211_hdr *)(skb->data +
84 sizeof(struct wl1271_tx_hw_descr));
85 if (ieee80211_is_auth(hdr->frame_control))
86 wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
87}
88
89static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
90{
91 bool fw_ps;
92 u8 tx_blks;
93
94 /* only regulate station links */
95 if (hlid < WL1271_AP_STA_HLID_START)
96 return;
97
98 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
99 tx_blks = wl->links[hlid].allocated_blks;
100
101 /*
102 * if in FW PS and there is enough data in FW we can put the link
103 * into high-level PS and clean out its TX queues.
104 */
105 if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
106 wl1271_ps_link_start(wl, hlid, true);
107}
108
109u8 wl1271_tx_get_hlid(struct sk_buff *skb)
110{
111 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
112
113 if (control->control.sta) {
114 struct wl1271_station *wl_sta;
115
116 wl_sta = (struct wl1271_station *)
117 control->control.sta->drv_priv;
118 return wl_sta->hlid;
119 } else {
120 struct ieee80211_hdr *hdr;
121
122 hdr = (struct ieee80211_hdr *)skb->data;
123 if (ieee80211_is_mgmt(hdr->frame_control))
124 return WL1271_AP_GLOBAL_HLID;
125 else
126 return WL1271_AP_BROADCAST_HLID;
127 }
128}
129
73static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, 130static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
74 u32 buf_offset) 131 u32 buf_offset, u8 hlid)
75{ 132{
76 struct wl1271_tx_hw_descr *desc; 133 struct wl1271_tx_hw_descr *desc;
77 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 134 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -100,6 +157,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
100 157
101 wl->tx_blocks_available -= total_blocks; 158 wl->tx_blocks_available -= total_blocks;
102 159
160 if (wl->bss_type == BSS_TYPE_AP_BSS)
161 wl->links[hlid].allocated_blks += total_blocks;
162
103 ret = 0; 163 ret = 0;
104 164
105 wl1271_debug(DEBUG_TX, 165 wl1271_debug(DEBUG_TX,
@@ -113,7 +173,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
113} 173}
114 174
115static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 175static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
116 u32 extra, struct ieee80211_tx_info *control) 176 u32 extra, struct ieee80211_tx_info *control,
177 u8 hlid)
117{ 178{
118 struct timespec ts; 179 struct timespec ts;
119 struct wl1271_tx_hw_descr *desc; 180 struct wl1271_tx_hw_descr *desc;
@@ -149,7 +210,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
149 desc->tid = ac; 210 desc->tid = ac;
150 211
151 if (wl->bss_type != BSS_TYPE_AP_BSS) { 212 if (wl->bss_type != BSS_TYPE_AP_BSS) {
152 desc->aid = TX_HW_DEFAULT_AID; 213 desc->aid = hlid;
153 214
154 /* if the packets are destined for AP (have a STA entry) 215 /* if the packets are destined for AP (have a STA entry)
155 send them with AP rate policies, otherwise use default 216 send them with AP rate policies, otherwise use default
@@ -159,25 +220,17 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
159 else 220 else
160 rate_idx = ACX_TX_BASIC_RATE; 221 rate_idx = ACX_TX_BASIC_RATE;
161 } else { 222 } else {
162 if (control->control.sta) { 223 desc->hlid = hlid;
163 struct wl1271_station *wl_sta; 224 switch (hlid) {
164 225 case WL1271_AP_GLOBAL_HLID:
165 wl_sta = (struct wl1271_station *) 226 rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
166 control->control.sta->drv_priv; 227 break;
167 desc->hlid = wl_sta->hlid; 228 case WL1271_AP_BROADCAST_HLID:
229 rate_idx = ACX_TX_AP_MODE_BCST_RATE;
230 break;
231 default:
168 rate_idx = ac; 232 rate_idx = ac;
169 } else { 233 break;
170 struct ieee80211_hdr *hdr;
171
172 hdr = (struct ieee80211_hdr *)
173 (skb->data + sizeof(*desc));
174 if (ieee80211_is_mgmt(hdr->frame_control)) {
175 desc->hlid = WL1271_AP_GLOBAL_HLID;
176 rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
177 } else {
178 desc->hlid = WL1271_AP_BROADCAST_HLID;
179 rate_idx = ACX_TX_AP_MODE_BCST_RATE;
180 }
181 } 234 }
182 } 235 }
183 236
@@ -185,7 +238,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
185 desc->reserved = 0; 238 desc->reserved = 0;
186 239
187 /* align the length (and store in terms of words) */ 240 /* align the length (and store in terms of words) */
188 pad = WL1271_TX_ALIGN(skb->len); 241 pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
189 desc->length = cpu_to_le16(pad >> 2); 242 desc->length = cpu_to_le16(pad >> 2);
190 243
191 /* calculate number of padding bytes */ 244 /* calculate number of padding bytes */
@@ -208,6 +261,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
208 u32 extra = 0; 261 u32 extra = 0;
209 int ret = 0; 262 int ret = 0;
210 u32 total_len; 263 u32 total_len;
264 u8 hlid;
211 265
212 if (!skb) 266 if (!skb)
213 return -EINVAL; 267 return -EINVAL;
@@ -234,18 +288,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
234 } 288 }
235 } 289 }
236 290
237 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset); 291 if (wl->bss_type == BSS_TYPE_AP_BSS)
292 hlid = wl1271_tx_get_hlid(skb);
293 else
294 hlid = TX_HW_DEFAULT_AID;
295
296 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
238 if (ret < 0) 297 if (ret < 0)
239 return ret; 298 return ret;
240 299
241 wl1271_tx_fill_hdr(wl, skb, extra, info); 300 if (wl->bss_type == BSS_TYPE_AP_BSS) {
301 wl1271_tx_ap_update_inconnection_sta(wl, skb);
302 wl1271_tx_regulate_link(wl, hlid);
303 }
304
305 wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
242 306
243 /* 307 /*
244 * The length of each packet is stored in terms of words. Thus, we must 308 * The length of each packet is stored in terms of words. Thus, we must
245 * pad the skb data to make sure its length is aligned. 309 * pad the skb data to make sure its length is aligned.
246 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr 310 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
247 */ 311 */
248 total_len = WL1271_TX_ALIGN(skb->len); 312 total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
249 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); 313 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
250 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 314 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
251 315
@@ -279,7 +343,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
279 return enabled_rates; 343 return enabled_rates;
280} 344}
281 345
282static void handle_tx_low_watermark(struct wl1271 *wl) 346void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
283{ 347{
284 unsigned long flags; 348 unsigned long flags;
285 349
@@ -293,7 +357,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
293 } 357 }
294} 358}
295 359
296static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 360static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
297{ 361{
298 struct sk_buff *skb = NULL; 362 struct sk_buff *skb = NULL;
299 unsigned long flags; 363 unsigned long flags;
@@ -319,12 +383,69 @@ out:
319 return skb; 383 return skb;
320} 384}
321 385
386static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
387{
388 struct sk_buff *skb = NULL;
389 unsigned long flags;
390 int i, h, start_hlid;
391
392 /* start from the link after the last one */
393 start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
394
395 /* dequeue according to AC, round robin on each link */
396 for (i = 0; i < AP_MAX_LINKS; i++) {
397 h = (start_hlid + i) % AP_MAX_LINKS;
398
399 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
400 if (skb)
401 goto out;
402 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
403 if (skb)
404 goto out;
405 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
406 if (skb)
407 goto out;
408 skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
409 if (skb)
410 goto out;
411 }
412
413out:
414 if (skb) {
415 wl->last_tx_hlid = h;
416 spin_lock_irqsave(&wl->wl_lock, flags);
417 wl->tx_queue_count--;
418 spin_unlock_irqrestore(&wl->wl_lock, flags);
419 } else {
420 wl->last_tx_hlid = 0;
421 }
422
423 return skb;
424}
425
426static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
427{
428 if (wl->bss_type == BSS_TYPE_AP_BSS)
429 return wl1271_ap_skb_dequeue(wl);
430
431 return wl1271_sta_skb_dequeue(wl);
432}
433
322static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) 434static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
323{ 435{
324 unsigned long flags; 436 unsigned long flags;
325 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 437 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
326 438
327 skb_queue_head(&wl->tx_queue[q], skb); 439 if (wl->bss_type == BSS_TYPE_AP_BSS) {
440 u8 hlid = wl1271_tx_get_hlid(skb);
441 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
442
443 /* make sure we dequeue the same packet next time */
444 wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
445 } else {
446 skb_queue_head(&wl->tx_queue[q], skb);
447 }
448
328 spin_lock_irqsave(&wl->wl_lock, flags); 449 spin_lock_irqsave(&wl->wl_lock, flags);
329 wl->tx_queue_count++; 450 wl->tx_queue_count++;
330 spin_unlock_irqrestore(&wl->wl_lock, flags); 451 spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -387,7 +508,7 @@ out_ack:
387 if (sent_packets) { 508 if (sent_packets) {
388 /* interrupt the firmware with the new packets */ 509 /* interrupt the firmware with the new packets */
389 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 510 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
390 handle_tx_low_watermark(wl); 511 wl1271_handle_tx_low_watermark(wl);
391 } 512 }
392 513
393out: 514out:
@@ -504,32 +625,76 @@ void wl1271_tx_complete(struct wl1271 *wl)
504 } 625 }
505} 626}
506 627
628void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
629{
630 struct sk_buff *skb;
631 int i, total = 0;
632 unsigned long flags;
633 struct ieee80211_tx_info *info;
634
635 for (i = 0; i < NUM_TX_QUEUES; i++) {
636 while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
637 wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
638 info = IEEE80211_SKB_CB(skb);
639 info->status.rates[0].idx = -1;
640 info->status.rates[0].count = 0;
641 ieee80211_tx_status(wl->hw, skb);
642 total++;
643 }
644 }
645
646 spin_lock_irqsave(&wl->wl_lock, flags);
647 wl->tx_queue_count -= total;
648 spin_unlock_irqrestore(&wl->wl_lock, flags);
649
650 wl1271_handle_tx_low_watermark(wl);
651}
652
/* caller must hold wl->mutex */
void wl1271_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/*
	 * TX failure: fail back every queued and in-flight frame to
	 * mac80211 and reset all TX bookkeeping.
	 */
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		/* AP: flush each per-link queue and clear link accounting */
		for (i = 0; i < AP_MAX_LINKS; i++) {
			wl1271_tx_reset_link_queues(wl, i);
			wl->links[i].allocated_blks = 0;
			wl->links[i].prev_freed_blks = 0;
		}

		wl->last_tx_hlid = 0;
	} else {
		/* STA: drain the global per-AC queues */
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
					     skb);
				/* report the frame as not acked */
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status(wl->hw, skb);
			}
		}
	}

	wl->tx_queue_count = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 */
	wl1271_handle_tx_low_watermark(wl);

	/* free frames already handed to the FW but never completed */
	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
		if (wl->tx_frames[i] != NULL) {
			skb = wl->tx_frames[i];
			wl1271_free_tx_id(wl, i);
			wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
			/* report the frame as not acked */
			info = IEEE80211_SKB_CB(skb);
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;
			ieee80211_tx_status(wl->hw, skb);
		}
}
@@ -544,8 +709,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
544 709
545 while (!time_after(jiffies, timeout)) { 710 while (!time_after(jiffies, timeout)) {
546 mutex_lock(&wl->mutex); 711 mutex_lock(&wl->mutex);
547 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d", 712 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
548 wl->tx_frames_cnt); 713 wl->tx_frames_cnt, wl->tx_queue_count);
549 if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) { 714 if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
550 mutex_unlock(&wl->mutex); 715 mutex_unlock(&wl->mutex);
551 return; 716 return;