Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--  net/mac80211/tx.c | 675
1 file changed, 421 insertions(+), 254 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 94de5033f0b6..3fb04a86444d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -34,7 +34,7 @@
 
 #define IEEE80211_TX_OK		0
 #define IEEE80211_TX_AGAIN	1
-#define IEEE80211_TX_FRAG_AGAIN	2
+#define IEEE80211_TX_PENDING	2
 
 /* misc utils */
 
@@ -192,7 +192,19 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
 		return TX_CONTINUE;
 
 	if (unlikely(tx->local->sw_scanning) &&
-	    !ieee80211_is_probe_req(hdr->frame_control))
+	    !ieee80211_is_probe_req(hdr->frame_control) &&
+	    !ieee80211_is_nullfunc(hdr->frame_control))
+		/*
+		 * When software scanning, only nullfunc frames (to notify
+		 * the sleep state to the AP) and probe requests (for the
+		 * active scan) are allowed; all other frames should not be
+		 * sent, and we should not get here, but if we do
+		 * nonetheless, drop them to avoid sending them
+		 * off-channel. See the link below and
+		 * ieee80211_start_scan() for more.
+		 *
+		 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
+		 */
 		return TX_DROP;
 
 	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
@@ -330,6 +342,22 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 	return TX_CONTINUE;
 }
 
+static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
+			     struct sk_buff *skb)
+{
+	if (!ieee80211_is_mgmt(fc))
+		return 0;
+
+	if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
+		return 0;
+
+	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
+					    skb->data))
+		return 0;
+
+	return 1;
+}
+
 static ieee80211_tx_result
 ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
@@ -409,11 +437,17 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 		tx->key = NULL;
 	else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
 		tx->key = key;
+	else if (ieee80211_is_mgmt(hdr->frame_control) &&
+		 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
+		tx->key = key;
 	else if ((key = rcu_dereference(tx->sdata->default_key)))
 		tx->key = key;
 	else if (tx->sdata->drop_unencrypted &&
 		 (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) &&
-		 !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
+		 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+		 (!ieee80211_is_robust_mgmt_frame(hdr) ||
+		  (ieee80211_is_action(hdr->frame_control) &&
+		   tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
 		I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
 		return TX_DROP;
 	} else
@@ -428,10 +462,19 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 			if (ieee80211_is_auth(hdr->frame_control))
 				break;
 		case ALG_TKIP:
-		case ALG_CCMP:
 			if (!ieee80211_is_data_present(hdr->frame_control))
 				tx->key = NULL;
 			break;
+		case ALG_CCMP:
+			if (!ieee80211_is_data_present(hdr->frame_control) &&
+			    !ieee80211_use_mfp(hdr->frame_control, tx->sta,
+					       tx->skb))
+				tx->key = NULL;
+			break;
+		case ALG_AES_CMAC:
+			if (!ieee80211_is_mgmt(hdr->frame_control))
+				tx->key = NULL;
+			break;
 		}
 	}
 
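Taken together, the reworked switch keeps tx->key only when the selected cipher can actually protect the frame at hand. The same decision table in isolation, as a standalone sketch (key_applies() and its boolean parameters are invented for illustration; only the ALG_* names come from mac80211):

    /* Condensed restatement of the key-clearing switch above. */
    #include <stdbool.h>

    enum alg { ALG_WEP, ALG_TKIP, ALG_CCMP, ALG_AES_CMAC };

    static bool key_applies(enum alg alg, bool is_data, bool is_auth,
                            bool is_mgmt, bool mfp)
    {
        switch (alg) {
        case ALG_WEP:
            return is_data || is_auth; /* WEP also covers Authentication */
        case ALG_TKIP:
            return is_data;            /* data frames only */
        case ALG_CCMP:
            return is_data || mfp;     /* data, or robust mgmt under MFP */
        case ALG_AES_CMAC:
            return is_mgmt;            /* BIP: management frames only */
        }
        return false;
    }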
@@ -658,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
 	return TX_CONTINUE;
 }
 
+static int ieee80211_fragment(struct ieee80211_local *local,
+			      struct sk_buff *skb, int hdrlen,
+			      int frag_threshold)
+{
+	struct sk_buff *tail = skb, *tmp;
+	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
+	int pos = hdrlen + per_fragm;
+	int rem = skb->len - hdrlen - per_fragm;
+
+	if (WARN_ON(rem < 0))
+		return -EINVAL;
+
+	while (rem) {
+		int fraglen = per_fragm;
+
+		if (fraglen > rem)
+			fraglen = rem;
+		rem -= fraglen;
+		tmp = dev_alloc_skb(local->tx_headroom +
+				    frag_threshold +
+				    IEEE80211_ENCRYPT_HEADROOM +
+				    IEEE80211_ENCRYPT_TAILROOM);
+		if (!tmp)
+			return -ENOMEM;
+		tail->next = tmp;
+		tail = tmp;
+		skb_reserve(tmp, local->tx_headroom +
+				 IEEE80211_ENCRYPT_HEADROOM);
+		/* copy control information */
+		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+		skb_copy_queue_mapping(tmp, skb);
+		tmp->priority = skb->priority;
+		tmp->do_not_encrypt = skb->do_not_encrypt;
+		tmp->dev = skb->dev;
+		tmp->iif = skb->iif;
+
+		/* copy header and data */
+		memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
+		memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
+
+		pos += fraglen;
+	}
+
+	skb->len = hdrlen + per_fragm;
+	return 0;
+}
+
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-	size_t hdrlen, per_fragm, num_fragm, payload_len, left;
-	struct sk_buff **frags, *first, *frag;
-	int i;
-	u16 seq;
-	u8 *pos;
+	struct sk_buff *skb = tx->skb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
 	int frag_threshold = tx->local->fragmentation_threshold;
+	int hdrlen;
+	int fragnum;
 
 	if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
 		return TX_CONTINUE;
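The arithmetic in ieee80211_fragment() is easiest to check with concrete numbers. A standalone sketch (the threshold, header length, and payload are assumed example values; FCS_LEN really is 4):

    /* Worked example of the per-fragment arithmetic above. */
    #include <stdio.h>

    int main(void)
    {
        int frag_threshold = 256;  /* example dot11FragmentationThreshold */
        int hdrlen = 24;           /* non-QoS 802.11 data header */
        int fcs_len = 4;           /* FCS_LEN */
        int skb_len = hdrlen + 1500;

        int per_fragm = frag_threshold - hdrlen - fcs_len;  /* 228 */
        int rem = skb_len - hdrlen - per_fragm;             /* 1272 */
        int extra = (rem + per_fragm - 1) / per_fragm;      /* DIV_ROUND_UP */

        /* the original skb is trimmed to hdrlen + per_fragm bytes */
        printf("%d fragments: trimmed original + %d allocated\n",
               1 + extra, extra);
        return 0;
    }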
@@ -681,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
 		return TX_DROP;
 
-	first = tx->skb;
-
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
-	payload_len = first->len - hdrlen;
-	per_fragm = frag_threshold - hdrlen - FCS_LEN;
-	num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
-
-	frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
-	if (!frags)
-		goto fail;
-
-	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
-	seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
-	pos = first->data + hdrlen + per_fragm;
-	left = payload_len - per_fragm;
-	for (i = 0; i < num_fragm - 1; i++) {
-		struct ieee80211_hdr *fhdr;
-		size_t copylen;
-
-		if (left <= 0)
-			goto fail;
 
-		/* reserve enough extra head and tail room for possible
-		 * encryption */
-		frag = frags[i] =
-			dev_alloc_skb(tx->local->tx_headroom +
-				      frag_threshold +
-				      IEEE80211_ENCRYPT_HEADROOM +
-				      IEEE80211_ENCRYPT_TAILROOM);
-		if (!frag)
-			goto fail;
-
-		/* Make sure that all fragments use the same priority so
-		 * that they end up using the same TX queue */
-		frag->priority = first->priority;
+	/* internal error, why is TX_FRAGMENTED set? */
+	if (WARN_ON(skb->len <= frag_threshold))
+		return TX_DROP;
 
-		skb_reserve(frag, tx->local->tx_headroom +
-			    IEEE80211_ENCRYPT_HEADROOM);
+	/*
+	 * Now fragment the frame. This will allocate all the fragments and
+	 * chain them (using skb as the first fragment) to skb->next.
+	 * During transmission, we will remove the successfully transmitted
+	 * fragments from this list. When the low-level driver rejects one
+	 * of the fragments then we will simply pretend to accept the skb
+	 * but store it away as pending.
+	 */
+	if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+		return TX_DROP;
 
-		/* copy TX information */
-		info = IEEE80211_SKB_CB(frag);
-		memcpy(info, first->cb, sizeof(frag->cb));
+	/* update duration/seq/flags of fragments */
+	fragnum = 0;
+	do {
+		int next_len;
+		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
 
-		/* copy/fill in 802.11 header */
-		fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
-		memcpy(fhdr, first->data, hdrlen);
-		fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
+		hdr = (void *)skb->data;
+		info = IEEE80211_SKB_CB(skb);
 
-		if (i == num_fragm - 2) {
-			/* clear MOREFRAGS bit for the last fragment */
-			fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
-		} else {
+		if (skb->next) {
+			hdr->frame_control |= morefrags;
+			next_len = skb->next->len;
 			/*
 			 * No multi-rate retries for fragmented frames, that
 			 * would completely throw off the NAV at other STAs.
@@ -743,35 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 			info->control.rates[4].idx = -1;
 			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
 			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+		} else {
+			hdr->frame_control &= ~morefrags;
+			next_len = 0;
 		}
-
-		/* copy data */
-		copylen = left > per_fragm ? per_fragm : left;
-		memcpy(skb_put(frag, copylen), pos, copylen);
-
-		skb_copy_queue_mapping(frag, first);
-
-		frag->do_not_encrypt = first->do_not_encrypt;
-
-		pos += copylen;
-		left -= copylen;
-	}
-	skb_trim(first, hdrlen + per_fragm);
-
-	tx->num_extra_frag = num_fragm - 1;
-	tx->extra_frag = frags;
+		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
+		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
+		fragnum++;
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
-
- fail:
-	if (frags) {
-		for (i = 0; i < num_fragm - 1; i++)
-			if (frags[i])
-				dev_kfree_skb(frags[i]);
-		kfree(frags);
-	}
-	I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
-	return TX_DROP;
 }
 
 static ieee80211_tx_result debug_noinline
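With fragments chained on skb->next instead of an extra_frag array, every error path in this patch disposes of a partially transmitted chain the same way; a minimal sketch of that pattern (free_skb_chain() is an invented name, the patch open-codes this loop in its drop paths):

    /* Free an skb and everything chained behind it. */
    static void free_skb_chain(struct sk_buff *skb)
    {
        while (skb) {
            struct sk_buff *next = skb->next;

            dev_kfree_skb(skb);
            skb = next;
        }
    }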
@@ -787,6 +833,8 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 		return ieee80211_crypto_tkip_encrypt(tx);
 	case ALG_CCMP:
 		return ieee80211_crypto_ccmp_encrypt(tx);
+	case ALG_AES_CMAC:
+		return ieee80211_crypto_aes_cmac_encrypt(tx);
 	}
 
 	/* not reached */
@@ -797,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-	int next_len, i;
-	int group_addr = is_multicast_ether_addr(hdr->addr1);
-
-	if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
-		hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
-		return TX_CONTINUE;
-	}
-
-	hdr->duration_id = ieee80211_duration(tx, group_addr,
-					      tx->extra_frag[0]->len);
+	struct sk_buff *skb = tx->skb;
+	struct ieee80211_hdr *hdr;
+	int next_len;
+	bool group_addr;
 
-	for (i = 0; i < tx->num_extra_frag; i++) {
-		if (i + 1 < tx->num_extra_frag)
-			next_len = tx->extra_frag[i + 1]->len;
-		else
-			next_len = 0;
+	do {
+		hdr = (void *) skb->data;
+		next_len = skb->next ? skb->next->len : 0;
+		group_addr = is_multicast_ether_addr(hdr->addr1);
 
-		hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
-		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
-	}
+		hdr->duration_id =
+			ieee80211_duration(tx, group_addr, next_len);
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
 }
@@ -825,24 +865,20 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 {
-	int i;
+	struct sk_buff *skb = tx->skb;
 
 	if (!tx->sta)
 		return TX_CONTINUE;
 
 	tx->sta->tx_packets++;
-	tx->sta->tx_fragments++;
-	tx->sta->tx_bytes += tx->skb->len;
-	if (tx->extra_frag) {
-		tx->sta->tx_fragments += tx->num_extra_frag;
-		for (i = 0; i < tx->num_extra_frag; i++)
-			tx->sta->tx_bytes += tx->extra_frag[i]->len;
-	}
+	do {
+		tx->sta->tx_fragments++;
+		tx->sta->tx_bytes += skb->len;
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
 }
 
-
 /* actual transmit path */
 
 /*
@@ -948,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
 	int hdrlen, tid;
 	u8 *qc, *state;
+	bool queued = false;
 
 	memset(tx, 0, sizeof(*tx));
 	tx->skb = skb;
@@ -977,17 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
 	 */
 	}
 
+	/*
+	 * If this flag is set to true anywhere, and we get here,
+	 * we are doing the needed processing, so remove the flag
+	 * now.
+	 */
+	info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+
 	hdr = (struct ieee80211_hdr *) skb->data;
 
 	tx->sta = sta_info_get(local, hdr->addr1);
 
-	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) {
+	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
+	    (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
+		unsigned long flags;
+		struct tid_ampdu_tx *tid_tx;
+
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
+		spin_lock_irqsave(&tx->sta->lock, flags);
+		/*
+		 * XXX: This spinlock could be fairly expensive, but see the
+		 * comment in agg-tx.c:ieee80211_agg_tx_operational().
+		 * One way to solve this would be to do something RCU-like
+		 * for managing the tid_tx struct and using atomic bitops
+		 * for the actual state -- by introducing an actual
+		 * 'operational' bit, that would be possible. It would
+		 * require changing ieee80211_agg_tx_operational() to
+		 * set that bit, and changing the way tid_tx is managed
+		 * everywhere, including races between that bit and
+		 * tid_tx going away (tid_tx being added can be easily
+		 * committed to memory before the 'operational' bit).
+		 */
+		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
 		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-		if (*state == HT_AGG_STATE_OPERATIONAL)
+		if (*state == HT_AGG_STATE_OPERATIONAL) {
 			info->flags |= IEEE80211_TX_CTL_AMPDU;
+		} else if (*state != HT_AGG_STATE_IDLE) {
+			/* in progress */
+			queued = true;
+			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+			__skb_queue_tail(&tid_tx->pending, skb);
+		}
+		spin_unlock_irqrestore(&tx->sta->lock, flags);
+
+		if (unlikely(queued))
+			return TX_QUEUED;
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
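The XXX comment above sketches a lock-free alternative: publish tid_tx with RCU-like semantics and track the session state with an atomic 'operational' bit. A rough userspace analogue of that idea using C11 atomics (everything here is hypothetical; none of these names exist in mac80211):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct tid_tx_sketch {
        atomic_bool operational;  /* set once the session is running */
    };

    /* Hot path: two acquire loads, no spinlock taken per frame. */
    static bool tid_operational(_Atomic(struct tid_tx_sketch *) *slot)
    {
        struct tid_tx_sketch *t =
            atomic_load_explicit(slot, memory_order_acquire);

        return t && atomic_load_explicit(&t->operational,
                                         memory_order_acquire);
    }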
@@ -1038,51 +1110,55 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
 	}
 	if (unlikely(!dev))
 		return -ENODEV;
-	/* initialises tx with control */
+	/*
+	 * initialises tx with control
+	 *
+	 * return value is safe to ignore here because this function
+	 * can only be invoked for multicast frames
+	 *
+	 * XXX: clean up
+	 */
 	__ieee80211_tx_prepare(tx, skb, dev);
 	dev_put(dev);
 	return 0;
 }
 
-static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
-			  struct ieee80211_tx_data *tx)
+static int __ieee80211_tx(struct ieee80211_local *local,
+			  struct sk_buff **skbp,
+			  struct sta_info *sta)
 {
+	struct sk_buff *skb = *skbp, *next;
 	struct ieee80211_tx_info *info;
-	int ret, i;
+	int ret, len;
+	bool fragm = false;
 
-	if (skb) {
-		if (netif_subqueue_stopped(local->mdev, skb))
-			return IEEE80211_TX_AGAIN;
-		info = IEEE80211_SKB_CB(skb);
+	local->mdev->trans_start = jiffies;
 
-		ret = local->ops->tx(local_to_hw(local), skb);
-		if (ret)
-			return IEEE80211_TX_AGAIN;
-		local->mdev->trans_start = jiffies;
-		ieee80211_led_tx(local, 1);
-	}
-	if (tx->extra_frag) {
-		for (i = 0; i < tx->num_extra_frag; i++) {
-			if (!tx->extra_frag[i])
-				continue;
-			info = IEEE80211_SKB_CB(tx->extra_frag[i]);
+	while (skb) {
+		if (ieee80211_queue_stopped(&local->hw,
+					    skb_get_queue_mapping(skb)))
+			return IEEE80211_TX_PENDING;
+
+		info = IEEE80211_SKB_CB(skb);
+
+		if (fragm)
 			info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
 					 IEEE80211_TX_CTL_FIRST_FRAGMENT);
-			if (netif_subqueue_stopped(local->mdev,
-						   tx->extra_frag[i]))
-				return IEEE80211_TX_FRAG_AGAIN;
-
-			ret = local->ops->tx(local_to_hw(local),
-					     tx->extra_frag[i]);
-			if (ret)
-				return IEEE80211_TX_FRAG_AGAIN;
-			local->mdev->trans_start = jiffies;
-			ieee80211_led_tx(local, 1);
-			tx->extra_frag[i] = NULL;
+
+		next = skb->next;
+		len = skb->len;
+		ret = local->ops->tx(local_to_hw(local), skb);
+		if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
+			dev_kfree_skb(skb);
+			ret = NETDEV_TX_OK;
 		}
-		kfree(tx->extra_frag);
-		tx->extra_frag = NULL;
+		if (ret != NETDEV_TX_OK)
+			return IEEE80211_TX_AGAIN;
+		*skbp = skb = next;
+		ieee80211_led_tx(local, 1);
+		fragm = true;
 	}
+
 	return IEEE80211_TX_OK;
 }
 
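The WARN_ON above pins down the driver contract this loop depends on: a driver that rejects a frame must leave the skb untouched (hence the length check) so it can be retried, and a driver that consumed the frame must return NETDEV_TX_OK. A hypothetical driver honoring that contract (the mydrv_* names are invented):

    /* Hypothetical ops->tx() callback obeying the contract checked above. */
    static int mydrv_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
    {
        struct mydrv_priv *priv = hw->priv;

        if (mydrv_tx_ring_full(priv))
            return NETDEV_TX_BUSY;  /* rejected: skb left intact */

        mydrv_enqueue(priv, skb);   /* consumed: must report success */
        return NETDEV_TX_OK;
    }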
@@ -1094,7 +1170,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 {
 	struct sk_buff *skb = tx->skb;
 	ieee80211_tx_result res = TX_DROP;
-	int i;
 
 #define CALL_TXH(txh) \
 	res = txh(tx);	\
@@ -1118,11 +1193,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
  txh_done:
 	if (unlikely(res == TX_DROP)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_drop);
-		dev_kfree_skb(skb);
-		for (i = 0; i < tx->num_extra_frag; i++)
-			if (tx->extra_frag[i])
-				dev_kfree_skb(tx->extra_frag[i]);
-		kfree(tx->extra_frag);
+		while (skb) {
+			struct sk_buff *next;
+
+			next = skb->next;
+			dev_kfree_skb(skb);
+			skb = next;
+		}
 		return -1;
 	} else if (unlikely(res == TX_QUEUED)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1132,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	return 0;
 }
 
-static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
+static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
+			 bool txpending)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct sta_info *sta;
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	int ret, i;
+	struct sk_buff *next;
+	unsigned long flags;
+	int ret, retries;
 	u16 queue;
 
 	queue = skb_get_queue_mapping(skb);
 
-	WARN_ON(test_bit(queue, local->queues_pending));
+	WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
 
 	if (unlikely(skb->len < 10)) {
 		dev_kfree_skb(skb);
-		return 0;
+		return;
 	}
 
 	rcu_read_lock();
@@ -1156,10 +1236,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	/* initialises tx */
 	res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
 
-	if (res_prepare == TX_DROP) {
+	if (unlikely(res_prepare == TX_DROP)) {
 		dev_kfree_skb(skb);
 		rcu_read_unlock();
-		return 0;
+		return;
+	} else if (unlikely(res_prepare == TX_QUEUED)) {
+		rcu_read_unlock();
+		return;
 	}
 
 	sta = tx.sta;
@@ -1169,11 +1252,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	if (invoke_tx_handlers(&tx))
 		goto out;
 
-retry:
-	ret = __ieee80211_tx(local, skb, &tx);
-	if (ret) {
-		struct ieee80211_tx_stored_packet *store;
-
+	retries = 0;
+ retry:
+	ret = __ieee80211_tx(local, &tx.skb, tx.sta);
+	switch (ret) {
+	case IEEE80211_TX_OK:
+		break;
+	case IEEE80211_TX_AGAIN:
 		/*
 		 * Since there are no fragmented frames on A-MPDU
 		 * queues, there's no reason for a driver to reject
@@ -1181,46 +1266,57 @@ retry:
 		 */
 		if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
 			goto drop;
+		/* fall through */
+	case IEEE80211_TX_PENDING:
+		skb = tx.skb;
+
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+		if (__netif_subqueue_stopped(local->mdev, queue)) {
+			do {
+				next = skb->next;
+				skb->next = NULL;
+				if (unlikely(txpending))
+					skb_queue_head(&local->pending[queue],
+						       skb);
+				else
+					skb_queue_tail(&local->pending[queue],
+						       skb);
+			} while ((skb = next));
 
-		store = &local->pending_packet[queue];
+			/*
+			 * Make sure nobody will enable the queue on us
+			 * (without going through the tasklet) nor disable the
+			 * netdev queue underneath the pending handling code.
+			 */
+			__set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
+				  &local->queue_stop_reasons[queue]);
 
-		if (ret == IEEE80211_TX_FRAG_AGAIN)
-			skb = NULL;
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+					       flags);
+		} else {
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+					       flags);
 
-		set_bit(queue, local->queues_pending);
-		smp_mb();
-		/*
-		 * When the driver gets out of buffers during sending of
-		 * fragments and calls ieee80211_stop_queue, the netif
-		 * subqueue is stopped. There is, however, a small window
-		 * in which the PENDING bit is not yet set. If a buffer
-		 * gets available in that window (i.e. driver calls
-		 * ieee80211_wake_queue), we would end up with ieee80211_tx
-		 * called with the PENDING bit still set. Prevent this by
-		 * continuing transmitting here when that situation is
-		 * possible to have happened.
-		 */
-		if (!__netif_subqueue_stopped(local->mdev, queue)) {
-			clear_bit(queue, local->queues_pending);
+			retries++;
+			if (WARN(retries > 10, "tx refused but queue active"))
+				goto drop;
 			goto retry;
 		}
-		store->skb = skb;
-		store->extra_frag = tx.extra_frag;
-		store->num_extra_frag = tx.num_extra_frag;
 	}
  out:
 	rcu_read_unlock();
-	return 0;
+	return;
 
  drop:
-	if (skb)
-		dev_kfree_skb(skb);
-	for (i = 0; i < tx.num_extra_frag; i++)
-		if (tx.extra_frag[i])
-			dev_kfree_skb(tx.extra_frag[i]);
-	kfree(tx.extra_frag);
 	rcu_read_unlock();
-	return 0;
+
+	skb = tx.skb;
+	while (skb) {
+		next = skb->next;
+		dev_kfree_skb(skb);
+		skb = next;
+	}
 }
 
 /* device xmit handlers */
@@ -1279,7 +1375,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		FOUND_SDATA,
 		UNKNOWN_ADDRESS,
 	} monitor_iface = NOT_MONITOR;
-	int ret;
 
 	if (skb->iif)
 		odev = dev_get_by_index(&init_net, skb->iif);
@@ -1293,7 +1388,20 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		       "originating device\n", dev->name);
 #endif
 		dev_kfree_skb(skb);
-		return 0;
+		return NETDEV_TX_OK;
+	}
+
+	if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+	    local->hw.conf.dynamic_ps_timeout > 0) {
+		if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+			ieee80211_stop_queues_by_reason(&local->hw,
+					IEEE80211_QUEUE_STOP_REASON_PS);
+			queue_work(local->hw.workqueue,
+					&local->dynamic_ps_disable_work);
+		}
+
+		mod_timer(&local->dynamic_ps_timer, jiffies +
+			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 	}
 
 	memset(info, 0, sizeof(*info));
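For context on what this hunk schedules: dynamic_ps_disable_work takes the hardware out of powersave before traffic flows again, roughly like the following (paraphrased from the mac80211 of this era, not part of this diff):

    /* Approximate shape of the scheduled work item (see mlme.c). */
    static void dynamic_ps_disable_work_sketch(struct work_struct *work)
    {
        struct ieee80211_local *local =
            container_of(work, struct ieee80211_local,
                         dynamic_ps_disable_work);

        if (local->hw.conf.flags & IEEE80211_CONF_PS) {
            local->hw.conf.flags &= ~IEEE80211_CONF_PS;
            ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }

        ieee80211_wake_queues_by_reason(&local->hw,
                                        IEEE80211_QUEUE_STOP_REASON_PS);
    }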
@@ -1309,7 +1417,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		if (mesh_nexthop_lookup(skb, osdata)) {
 			dev_put(odev);
-			return 0;
+			return NETDEV_TX_OK;
 		}
 		if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
 			IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
@@ -1371,7 +1479,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
 		dev_kfree_skb(skb);
 		dev_put(odev);
-		return 0;
+		return NETDEV_TX_OK;
 	}
 
 	if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1380,20 +1488,42 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				     u.ap);
 	if (likely(monitor_iface != UNKNOWN_ADDRESS))
 		info->control.vif = &osdata->vif;
-	ret = ieee80211_tx(odev, skb);
+
+	ieee80211_tx(odev, skb, false);
 	dev_put(odev);
 
-	return ret;
+	return NETDEV_TX_OK;
 }
 
 int ieee80211_monitor_start_xmit(struct sk_buff *skb,
 				 struct net_device *dev)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_channel *chan = local->hw.conf.channel;
 	struct ieee80211_radiotap_header *prthdr =
 		(struct ieee80211_radiotap_header *)skb->data;
 	u16 len_rthdr;
 
+	/*
+	 * Frame injection is not allowed if beaconing is not allowed
+	 * or if we need radar detection. Beaconing is usually not allowed when
+	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
+	 * Passive scan is also used in world regulatory domains where
+	 * your country is not known and as such it should be treated as
+	 * NO TX unless the channel is explicitly allowed in which case
+	 * your current regulatory domain would not have the passive scan
+	 * flag.
+	 *
+	 * Since AP mode uses monitor interfaces to inject/TX management
+	 * frames we can make AP mode the exception to this rule once it
+	 * supports radar detection as its implementation can deal with
+	 * radar detection by itself. We can do that later by adding a
+	 * monitor flag to interfaces used for AP support.
+	 */
+	if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
+	     IEEE80211_CHAN_PASSIVE_SCAN)))
+		goto fail;
+
 	/* check for not even having the fixed radiotap header part */
 	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
 		goto fail; /* too short to be possibly valid */
@@ -1477,19 +1607,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
 		goto fail;
 	}
 
-	if (!(local->hw.flags & IEEE80211_HW_NO_STACK_DYNAMIC_PS) &&
-	    local->dynamic_ps_timeout > 0) {
-		if (local->hw.conf.flags & IEEE80211_CONF_PS) {
-			ieee80211_stop_queues_by_reason(&local->hw,
-					IEEE80211_QUEUE_STOP_REASON_PS);
-			queue_work(local->hw.workqueue,
-					&local->dynamic_ps_disable_work);
-		}
-
-		mod_timer(&local->dynamic_ps_timer, jiffies +
-			  msecs_to_jiffies(local->dynamic_ps_timeout));
-	}
-
 	nh_pos = skb_network_header(skb) - skb->data;
 	h_pos = skb_transport_header(skb) - skb->data;
 
@@ -1570,7 +1687,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
 	case NL80211_IFTYPE_STATION:
 		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
 		/* BSSID SA DA */
-		memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
+		memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
 		memcpy(hdr.addr3, skb->data, ETH_ALEN);
 		hdrlen = 24;
@@ -1579,7 +1696,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
 		/* DA SA BSSID */
 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
 		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
-		memcpy(hdr.addr3, sdata->u.sta.bssid, ETH_ALEN);
+		memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
 		hdrlen = 24;
 		break;
 	default:
@@ -1601,8 +1718,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
 	}
 
 	/* receiver and we are QoS enabled, use a QoS type frame */
-	if (sta_flags & WLAN_STA_WME &&
-	    ieee80211_num_regular_queues(&local->hw) >= 4) {
+	if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
 		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
 		hdrlen += 2;
 	}
@@ -1734,19 +1850,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
 	 */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 {
-	int i, j;
-	struct ieee80211_tx_stored_packet *store;
+	int i;
 
-	for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
-		if (!test_bit(i, local->queues_pending))
-			continue;
-		store = &local->pending_packet[i];
-		kfree_skb(store->skb);
-		for (j = 0; j < store->num_extra_frag; j++)
-			kfree_skb(store->extra_frag[j]);
-		kfree(store->extra_frag);
-		clear_bit(i, local->queues_pending);
+	for (i = 0; i < local->hw.queues; i++)
+		skb_queue_purge(&local->pending[i]);
+}
+
+static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
+				     struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sub_if_data *sdata;
+	struct sta_info *sta;
+	struct ieee80211_hdr *hdr;
+	struct net_device *dev;
+	int ret;
+	bool result = true;
+
+	/* does interface still exist? */
+	dev = dev_get_by_index(&init_net, skb->iif);
+	if (!dev) {
+		dev_kfree_skb(skb);
+		return true;
+	}
+
+	/* validate info->control.vif against skb->iif */
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		sdata = container_of(sdata->bss,
+				     struct ieee80211_sub_if_data,
+				     u.ap);
+
+	if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
+		dev_kfree_skb(skb);
+		result = true;
+		goto out;
 	}
+
+	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
+		ieee80211_tx(dev, skb, true);
+	} else {
+		hdr = (struct ieee80211_hdr *)skb->data;
+		sta = sta_info_get(local, hdr->addr1);
+
+		ret = __ieee80211_tx(local, &skb, sta);
+		if (ret != IEEE80211_TX_OK)
+			result = false;
+	}
+
+ out:
+	dev_put(dev);
+
+	return result;
 }
 
 /*
@@ -1757,40 +1912,53 @@ void ieee80211_tx_pending(unsigned long data)
 {
 	struct ieee80211_local *local = (struct ieee80211_local *)data;
 	struct net_device *dev = local->mdev;
-	struct ieee80211_tx_stored_packet *store;
-	struct ieee80211_tx_data tx;
-	int i, ret;
+	unsigned long flags;
+	int i;
+	bool next;
 
+	rcu_read_lock();
 	netif_tx_lock_bh(dev);
-	for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
-		/* Check that this queue is ok */
-		if (__netif_subqueue_stopped(local->mdev, i) &&
-		    !test_bit(i, local->queues_pending_run))
-			continue;
 
-		if (!test_bit(i, local->queues_pending)) {
-			clear_bit(i, local->queues_pending_run);
-			ieee80211_wake_queue(&local->hw, i);
+	for (i = 0; i < local->hw.queues; i++) {
+		/*
+		 * If queue is stopped by something other than due to pending
+		 * frames, or we have no pending frames, proceed to next queue.
+		 */
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+		next = false;
+		if (local->queue_stop_reasons[i] !=
+		    BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
+		    skb_queue_empty(&local->pending[i]))
+			next = true;
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+		if (next)
 			continue;
-		}
 
-		clear_bit(i, local->queues_pending_run);
+		/*
+		 * start the queue now to allow processing our packets,
+		 * we're under the tx lock here anyway so nothing will
+		 * happen as a result of this
+		 */
 		netif_start_subqueue(local->mdev, i);
 
-		store = &local->pending_packet[i];
-		tx.extra_frag = store->extra_frag;
-		tx.num_extra_frag = store->num_extra_frag;
-		tx.flags = 0;
-		ret = __ieee80211_tx(local, store->skb, &tx);
-		if (ret) {
-			if (ret == IEEE80211_TX_FRAG_AGAIN)
-				store->skb = NULL;
-		} else {
-			clear_bit(i, local->queues_pending);
-			ieee80211_wake_queue(&local->hw, i);
+		while (!skb_queue_empty(&local->pending[i])) {
+			struct sk_buff *skb = skb_dequeue(&local->pending[i]);
+
+			if (!ieee80211_tx_pending_skb(local, skb)) {
+				skb_queue_head(&local->pending[i], skb);
+				break;
+			}
 		}
+
+		/* Start regular packet processing again. */
+		if (skb_queue_empty(&local->pending[i]))
+			ieee80211_wake_queue_by_reason(&local->hw, i,
+					IEEE80211_QUEUE_STOP_REASON_PENDING);
 	}
+
 	netif_tx_unlock_bh(dev);
+	rcu_read_unlock();
 }
 
 /* functions for drivers to get certain frames */
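The tasklet above is what the queue wake path kicks when pending frames exist, so the backlog drains as soon as the driver frees up room; roughly (a paraphrase of __ieee80211_wake_queue() in net/mac80211/util.c of this era, not part of this diff):

    /* Approximate wake path feeding the pending tasklet. */
    static void wake_queue_sketch(struct ieee80211_local *local, int queue)
    {
        if (skb_queue_empty(&local->pending[queue]))
            netif_wake_subqueue(local->mdev, queue);
        else
            tasklet_schedule(&local->tx_pending_tasklet);
    }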
@@ -1865,7 +2033,6 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
 	struct ieee80211_tx_info *info;
 	struct ieee80211_sub_if_data *sdata = NULL;
 	struct ieee80211_if_ap *ap = NULL;
-	struct ieee80211_if_sta *ifsta = NULL;
 	struct beacon_data *beacon;
 	struct ieee80211_supported_band *sband;
 	enum ieee80211_band band = local->hw.conf.channel->band;
@@ -1917,13 +2084,13 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
 	} else
 		goto out;
 	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 		struct ieee80211_hdr *hdr;
-		ifsta = &sdata->u.sta;
 
-		if (!ifsta->probe_resp)
+		if (!ifibss->probe_resp)
 			goto out;
 
-		skb = skb_copy(ifsta->probe_resp, GFP_ATOMIC);
+		skb = skb_copy(ifibss->probe_resp, GFP_ATOMIC);
 		if (!skb)
 			goto out;
 