author		Arik Nemtsov <arik@wizery.com>		2011-02-22 17:22:26 -0500
committer	Luciano Coelho <coelho@ti.com>		2011-02-23 04:14:56 -0500
commit		a8c0ddb5ba2889e1e11a033ccbadfc600f236a91 (patch)
tree		6daf6085a8e8cd176f8e4d1a823399353caa6f21 /drivers/net/wireless/wl12xx/tx.c
parent		99a2775d02a7accf4cc661a65c76fd7b379d1c7a (diff)
wl12xx: AP-mode - TX queue per link in AC
When operating in AP mode we require a per-link TX queue.
This allows us to implement HW-assisted PS mode for links,
as well as to regulate per-link FW TX block consumption.
Each link is further split into ACs to support future QoS
for AP mode. AC queues are emptied by priority, and per-link
queues are scheduled in a simple round-robin fashion; a
standalone sketch of this scheduling policy follows the
sign-offs below.
Signed-off-by: Arik Nemtsov <arik@wizery.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
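
The dequeue policy is easiest to see in isolation. The following is a minimal, self-contained C sketch of the same scheme, not the driver's code: NUM_LINKS, NUM_ACS, struct pkt, struct link and ap_dequeue() are illustrative stand-ins for the driver's AP_MAX_LINKS, CONF_TX_AC_* constants and per-link sk_buff queues. Within a link, the highest-priority non-empty AC wins; across links, service rotates round-robin starting just after the last link served.

/*
 * A minimal standalone model of the AP dequeue policy.
 * Illustrative names only: NUM_LINKS, NUM_ACS, struct pkt and
 * struct link stand in for the driver's AP_MAX_LINKS,
 * CONF_TX_AC_* and per-link sk_buff queues.
 */
#include <stddef.h>

#define NUM_LINKS 8	/* stand-in for AP_MAX_LINKS */
#define NUM_ACS   4	/* 0 = VO (highest) ... 3 = BK (lowest) */

struct pkt {
	struct pkt *next;
};

struct link {
	struct pkt *tx_queue[NUM_ACS];	/* one FIFO head per AC */
};

/* Detach and return the first packet of a queue, or NULL. */
static struct pkt *pkt_pop(struct pkt **head)
{
	struct pkt *p = *head;

	if (p)
		*head = p->next;
	return p;
}

/*
 * Pick the next packet to transmit: rotate over links starting
 * just after the last one served, and within each link drain the
 * highest-priority non-empty AC first.
 */
static struct pkt *ap_dequeue(struct link *links, int *last_link)
{
	int start = (*last_link + 1) % NUM_LINKS;
	int i, ac;

	for (i = 0; i < NUM_LINKS; i++) {
		int h = (start + i) % NUM_LINKS;

		for (ac = 0; ac < NUM_ACS; ac++) {
			struct pkt *p = pkt_pop(&links[h].tx_queue[ac]);

			if (p) {
				*last_link = h;	/* resume after this link */
				return p;
			}
		}
	}

	*last_link = 0;	/* everything empty: restart the rotation */
	return NULL;
}

Note the trade-off this models: AC priority is enforced within a link only, so a BK frame on the next link in the rotation goes out before a VO frame queued on a later link; per-station fairness is favored over global QoS ordering.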
Diffstat (limited to 'drivers/net/wireless/wl12xx/tx.c')
-rw-r--r--	drivers/net/wireless/wl12xx/tx.c	130
1 file changed, 119 insertions(+), 11 deletions(-)
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 0bb57daac889..8c769500ec5d 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -86,6 +86,27 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
 	wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
 }
 
+u8 wl1271_tx_get_hlid(struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
+
+	if (control->control.sta) {
+		struct wl1271_station *wl_sta;
+
+		wl_sta = (struct wl1271_station *)
+				control->control.sta->drv_priv;
+		return wl_sta->hlid;
+	} else {
+		struct ieee80211_hdr *hdr;
+
+		hdr = (struct ieee80211_hdr *)skb->data;
+		if (ieee80211_is_mgmt(hdr->frame_control))
+			return WL1271_AP_GLOBAL_HLID;
+		else
+			return WL1271_AP_BROADCAST_HLID;
+	}
+}
+
 static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
 			      u32 buf_offset)
 {
@@ -298,7 +319,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
 	return enabled_rates;
 }
 
-static void handle_tx_low_watermark(struct wl1271 *wl)
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
 {
 	unsigned long flags;
 
@@ -312,7 +333,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
 	}
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long flags;
@@ -338,12 +359,69 @@ out:
 	return skb;
 }
 
+static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+{
+	struct sk_buff *skb = NULL;
+	unsigned long flags;
+	int i, h, start_hlid;
+
+	/* start from the link after the last one */
+	start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+
+	/* dequeue according to AC, round robin on each link */
+	for (i = 0; i < AP_MAX_LINKS; i++) {
+		h = (start_hlid + i) % AP_MAX_LINKS;
+
+		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
+		if (skb)
+			goto out;
+		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
+		if (skb)
+			goto out;
+		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
+		if (skb)
+			goto out;
+		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
+		if (skb)
+			goto out;
+	}
+
+out:
+	if (skb) {
+		wl->last_tx_hlid = h;
+		spin_lock_irqsave(&wl->wl_lock, flags);
+		wl->tx_queue_count--;
+		spin_unlock_irqrestore(&wl->wl_lock, flags);
+	} else {
+		wl->last_tx_hlid = 0;
+	}
+
+	return skb;
+}
+
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+{
+	if (wl->bss_type == BSS_TYPE_AP_BSS)
+		return wl1271_ap_skb_dequeue(wl);
+
+	return wl1271_sta_skb_dequeue(wl);
+}
+
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
 {
 	unsigned long flags;
 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
-	skb_queue_head(&wl->tx_queue[q], skb);
+	if (wl->bss_type == BSS_TYPE_AP_BSS) {
+		u8 hlid = wl1271_tx_get_hlid(skb);
+		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
+
+		/* make sure we dequeue the same packet next time */
+		wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
+	} else {
+		skb_queue_head(&wl->tx_queue[q], skb);
+	}
+
 	spin_lock_irqsave(&wl->wl_lock, flags);
 	wl->tx_queue_count++;
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -406,7 +484,7 @@ out_ack:
 	if (sent_packets) {
 		/* interrupt the firmware with the new packets */
 		wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
-		handle_tx_low_watermark(wl);
+		wl1271_handle_tx_low_watermark(wl);
 	}
 
 out:
@@ -523,6 +601,27 @@ void wl1271_tx_complete(struct wl1271 *wl)
 	}
 }
 
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
+{
+	struct sk_buff *skb;
+	int i, total = 0;
+	unsigned long flags;
+
+	for (i = 0; i < NUM_TX_QUEUES; i++) {
+		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
+			ieee80211_tx_status(wl->hw, skb);
+			total++;
+		}
+	}
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	wl->tx_queue_count -= total;
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+	wl1271_handle_tx_low_watermark(wl);
+}
+
 /* caller must hold wl->mutex */
 void wl1271_tx_reset(struct wl1271 *wl)
 {
@@ -530,19 +629,28 @@ void wl1271_tx_reset(struct wl1271 *wl)
 	struct sk_buff *skb;
 
 	/* TX failure */
-	for (i = 0; i < NUM_TX_QUEUES; i++) {
-		while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-			wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
-			ieee80211_tx_status(wl->hw, skb);
+	if (wl->bss_type == BSS_TYPE_AP_BSS) {
+		for (i = 0; i < AP_MAX_LINKS; i++)
+			wl1271_tx_reset_link_queues(wl, i);
+
+		wl->last_tx_hlid = 0;
+	} else {
+		for (i = 0; i < NUM_TX_QUEUES; i++) {
+			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
+				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
+					     skb);
+				ieee80211_tx_status(wl->hw, skb);
+			}
 		}
 	}
+
 	wl->tx_queue_count = 0;
 
 	/*
 	 * Make sure the driver is at a consistent state, in case this
 	 * function is called from a context other than interface removal.
 	 */
-	handle_tx_low_watermark(wl);
+	wl1271_handle_tx_low_watermark(wl);
 
 	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		if (wl->tx_frames[i] != NULL) {
@@ -563,8 +671,8 @@ void wl1271_tx_flush(struct wl1271 *wl)
 
 	while (!time_after(jiffies, timeout)) {
 		mutex_lock(&wl->mutex);
-		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
-			     wl->tx_frames_cnt);
+		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+			     wl->tx_frames_cnt, wl->tx_queue_count);
 		if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
 			mutex_unlock(&wl->mutex);
 			return;