author     Eliad Peller <eliad@wizery.com>      2011-10-10 04:12:51 -0400
committer  Luciano Coelho <coelho@ti.com>       2011-10-11 08:04:22 -0400
commit     d6a3cc2ef962ad4392a2401cae513a18a6d35099
tree       8867208be5c5b895e1921c10765e7ca7f93b56ef
parent     4438aca9e16901d8d32a025ca27ad8284a117e09
wl12xx: unify STA and AP tx_queue mechanism
Make STA use the global wl->links[hlid].tx_queue (selected
via its links map) instead of wl->tx_queue, and then unify
the tx and tx_reset flows for the various vifs.
Signed-off-by: Eliad Peller <eliad@wizery.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
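
The change is easier to follow with the shared dequeue idea written out on its own: after this patch both STA and AP roles queue frames on per-link (per-hlid) queues and transmit by walking the vif's links map round-robin, starting from the link after the last one served. The standalone C sketch below illustrates only that link-selection logic; every name in it (NUM_LINKS, struct link, struct vif, vif_dequeue, the integer frame counter) is an illustrative stand-in, not a wl12xx type or API.

#include <stdio.h>

#define NUM_LINKS 8                     /* stand-in for WL12XX_MAX_LINKS */

struct link {
        int queued;                     /* frames pending on this link (hlid) */
};

struct vif {
        unsigned long links_map;        /* bitmap of links owned by this vif */
        int last_tx_hlid;               /* round-robin cursor, as in the patch */
};

/* Pick one frame, starting from the link after the last one served. */
static int vif_dequeue(struct vif *vif, struct link *links)
{
        int start = (vif->last_tx_hlid + 1) % NUM_LINKS;

        for (int i = 0; i < NUM_LINKS; i++) {
                int h = (start + i) % NUM_LINKS;

                if (!(vif->links_map & (1UL << h)) || links[h].queued == 0)
                        continue;

                links[h].queued--;
                vif->last_tx_hlid = h;  /* remember where to resume next time */
                return h;               /* hlid the frame came from */
        }

        vif->last_tx_hlid = 0;          /* nothing pending, reset the cursor */
        return -1;
}

int main(void)
{
        struct link links[NUM_LINKS] = { {0} };
        struct vif vif = { .links_map = (1UL << 1) | (1UL << 3) };

        links[1].queued = 2;
        links[3].queued = 1;

        /* prints hlid 1, 3, 1: frames are served round-robin across links */
        for (int h = 0; (h = vif_dequeue(&vif, links)) >= 0; )
                printf("dequeued a frame from hlid %d\n", h);

        return 0;
}

In the patch itself this loop is wl12xx_vif_skb_dequeue() calling wl12xx_lnk_skb_dequeue() on real sk_buff queues, with wl->tx_queue_count adjusted under wl->wl_lock; the sketch shows only the link-selection logic.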
-rw-r--r--  drivers/net/wireless/wl12xx/main.c   |  32
-rw-r--r--  drivers/net/wireless/wl12xx/tx.c     | 109
-rw-r--r--  drivers/net/wireless/wl12xx/tx.h     |   5
-rw-r--r--  drivers/net/wireless/wl12xx/wl12xx.h |   1
4 files changed, 55 insertions(+), 92 deletions(-)
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 0606b0d8aabd..abe5ef8807ba 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -1474,31 +1474,26 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
         unsigned long flags;
         int q, mapping;
-        u8 hlid = 0;
+        u8 hlid;
 
         mapping = skb_get_queue_mapping(skb);
         q = wl1271_tx_get_queue(mapping);
 
-        if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-                hlid = wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+        hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 
         spin_lock_irqsave(&wl->wl_lock, flags);
 
         /* queue the packet */
-        if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
-                if (!test_bit(hlid, wlvif->links_map)) {
-                        wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
-                                     hlid, q);
-                        dev_kfree_skb(skb);
-                        goto out;
-                }
-
-                wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
-                skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
-        } else {
-                skb_queue_tail(&wl->tx_queue[q], skb);
+        if (hlid == WL12XX_INVALID_LINK_ID ||
+            !test_bit(hlid, wlvif->links_map)) {
+                wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+                dev_kfree_skb(skb);
+                goto out;
         }
 
+        wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+        skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
         wl->tx_queue_count[q]++;
 
         /*
@@ -2131,7 +2126,8 @@ deinit:
         mutex_lock(&wl->mutex);
 
         /* let's notify MAC80211 about the remaining pending TX frames */
-        wl1271_tx_reset(wl, reset_tx_queues);
+        wl12xx_tx_reset_wlvif(wl, wlvif);
+        wl12xx_tx_reset(wl, reset_tx_queues);
         wl1271_power_off(wl);
 
         wl->band = IEEE80211_BAND_2GHZ;
@@ -3968,7 +3964,6 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
         return 0;
 }
 
-/* TODO: change wl1271_tx_reset(), so we can get sta as param */
 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
         if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
@@ -4868,9 +4863,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
         wl->plat_dev = plat_dev;
 
         for (i = 0; i < NUM_TX_QUEUES; i++)
-                skb_queue_head_init(&wl->tx_queue[i]);
-
-        for (i = 0; i < NUM_TX_QUEUES; i++)
                 for (j = 0; j < WL12XX_MAX_LINKS; j++)
                         skb_queue_head_init(&wl->links[j].tx_queue[i]);
 
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 951ff03b7f42..6c0135b27820 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -179,12 +179,10 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
         }
 }
 
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct ieee80211_vif *vif,
-                             struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      struct sk_buff *skb)
 {
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-
 
         if (wl12xx_is_dummy_packet(wl, skb))
                 return wl->system_hlid;
@@ -429,7 +427,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
                         wlvif->default_key = idx;
                 }
         }
-        hlid = wl1271_tx_get_hlid(wl, vif, skb);
+        hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
         if (hlid == WL12XX_INVALID_LINK_ID) {
                 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
                 return -EINVAL;
@@ -538,19 +536,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
         return &queues[q];
 }
 
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+                                              struct wl1271_link *lnk)
 {
-        struct sk_buff *skb = NULL;
+        struct sk_buff *skb;
         unsigned long flags;
         struct sk_buff_head *queue;
 
-        queue = wl1271_select_queue(wl, wl->tx_queue);
+        queue = wl1271_select_queue(wl, lnk->tx_queue);
         if (!queue)
-                goto out;
+                return NULL;
 
         skb = skb_dequeue(queue);
-
-out:
         if (skb) {
                 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -561,13 +558,11 @@ out:
         return skb;
 }
 
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl,
-                                             struct wl12xx_vif *wlvif)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+                                              struct wl12xx_vif *wlvif)
 {
         struct sk_buff *skb = NULL;
-        unsigned long flags;
         int i, h, start_hlid;
-        struct sk_buff_head *queue;
 
         /* start from the link after the last one */
         start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
@@ -580,24 +575,16 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl,
                 if (!test_bit(h, wlvif->links_map))
                         continue;
 
-                queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
-                if (!queue)
+                skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+                if (!skb)
                         continue;
 
-                skb = skb_dequeue(queue);
-                if (skb)
-                        break;
+                wlvif->last_tx_hlid = h;
+                break;
         }
 
-        if (skb) {
-                int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-                wlvif->last_tx_hlid = h;
-                spin_lock_irqsave(&wl->wl_lock, flags);
-                wl->tx_queue_count[q]--;
-                spin_unlock_irqrestore(&wl->wl_lock, flags);
-        } else {
+        if (!skb)
                 wlvif->last_tx_hlid = 0;
-        }
 
         return skb;
 }
@@ -608,11 +595,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl,
         unsigned long flags;
         struct sk_buff *skb = NULL;
 
-        if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-                skb = wl1271_ap_skb_dequeue(wl, wlvif);
-        else
-                skb = wl1271_sta_skb_dequeue(wl);
-
+        skb = wl12xx_vif_skb_dequeue(wl, wlvif);
         if (!skb &&
             test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
                 int q;
@@ -627,24 +610,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl,
         return skb;
 }
 
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct ieee80211_vif *vif,
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                   struct sk_buff *skb)
 {
-        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
         unsigned long flags;
         int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
         if (wl12xx_is_dummy_packet(wl, skb)) {
                 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
-        } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
-                u8 hlid = wl1271_tx_get_hlid(wl, vif, skb);
+        } else {
+                u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
                 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
                 /* make sure we dequeue the same packet next time */
                 wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
                                       WL12XX_MAX_LINKS;
-        } else {
-                skb_queue_head(&wl->tx_queue[q], skb);
         }
 
         spin_lock_irqsave(&wl->wl_lock, flags);
@@ -682,7 +662,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl, struct ieee80211_vif *vif)
                          * Aggregation buffer is full.
                          * Flush buffer and try again.
                          */
-                        wl1271_skb_queue_head(wl, vif, skb);
+                        wl1271_skb_queue_head(wl, wlvif, skb);
                         wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
                                      buf_offset, true);
                         sent_packets = true;
@@ -693,7 +673,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl, struct ieee80211_vif *vif)
                          * Firmware buffer is full.
                          * Queue back last skb, and stop aggregating.
                          */
-                        wl1271_skb_queue_head(wl, vif, skb);
+                        wl1271_skb_queue_head(wl, wlvif, skb);
                         /* No work left, avoid scheduling redundant tx work */
                         set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                         goto out_ack;
@@ -907,41 +887,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 }
 
 /* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-        struct ieee80211_vif *vif = wl->vif; /* TODO: get as param */
-        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
         int i;
-        struct sk_buff *skb;
-        struct ieee80211_tx_info *info;
 
         /* TX failure */
-        if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
-                for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+        for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+                if (wlvif->bss_type == BSS_TYPE_AP_BSS)
                         wl1271_free_sta(wl, wlvif, i);
-                        wl1271_tx_reset_link_queues(wl, i);
-                        wl->links[i].allocated_pkts = 0;
-                        wl->links[i].prev_freed_pkts = 0;
-                }
-
-                wlvif->last_tx_hlid = 0;
-        } else {
-                for (i = 0; i < NUM_TX_QUEUES; i++) {
-                        while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-                                wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
-                                             skb);
-
-                                if (!wl12xx_is_dummy_packet(wl, skb)) {
-                                        info = IEEE80211_SKB_CB(skb);
-                                        info->status.rates[0].idx = -1;
-                                        info->status.rates[0].count = 0;
-                                        ieee80211_tx_status_ni(wl->hw, skb);
-                                }
-                        }
-                }
+                else
+                        wlvif->sta.ba_rx_bitmap = 0;
 
-                wlvif->sta.ba_rx_bitmap = 0;
+                wl1271_tx_reset_link_queues(wl, i);
+                wl->links[i].allocated_pkts = 0;
+                wl->links[i].prev_freed_pkts = 0;
         }
+        wlvif->last_tx_hlid = 0;
+
+}
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+        int i;
+        struct sk_buff *skb;
+        struct ieee80211_tx_info *info;
 
         for (i = 0; i < NUM_TX_QUEUES; i++)
                 wl->tx_queue_count[i] = 0;
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index add4402d7931..050a04792600 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -206,7 +206,8 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
 void wl1271_tx_work(struct work_struct *work);
 void wl1271_tx_work_locked(struct wl1271 *wl, struct ieee80211_vif *vif);
 void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
@@ -214,6 +215,8 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
 u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                          struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      struct sk_buff *skb);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index b350f0bdd38d..4802f685de63 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -416,7 +416,6 @@ struct wl1271 {
         s64 time_offset;
 
         /* Frames scheduled for transmission, not handled yet */
-        struct sk_buff_head tx_queue[NUM_TX_QUEUES];
         int tx_queue_count[NUM_TX_QUEUES];
         long stopped_queues_map;
 