Diffstat (limited to 'net/mac80211/tx.c')

 -rw-r--r--  net/mac80211/tx.c | 579
 1 file changed, 341 insertions(+), 238 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 457238a2f3fc..3fb04a86444d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -34,8 +34,7 @@
 
 #define IEEE80211_TX_OK		0
 #define IEEE80211_TX_AGAIN	1
-#define IEEE80211_TX_FRAG_AGAIN	2
-#define IEEE80211_TX_PENDING	3
+#define IEEE80211_TX_PENDING	2
 
 /* misc utils */
 
@@ -193,7 +192,19 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
 		return TX_CONTINUE;
 
 	if (unlikely(tx->local->sw_scanning) &&
-	    !ieee80211_is_probe_req(hdr->frame_control))
+	    !ieee80211_is_probe_req(hdr->frame_control) &&
+	    !ieee80211_is_nullfunc(hdr->frame_control))
+		/*
+		 * When software scanning only nullfunc frames (to notify
+		 * the sleep state to the AP) and probe requests (for the
+		 * active scan) are allowed, all other frames should not be
+		 * sent and we should not get here, but if we do
+		 * nonetheless, drop them to avoid sending them
+		 * off-channel. See the link below and
+		 * ieee80211_start_scan() for more.
+		 *
+		 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
+		 */
 		return TX_DROP;
 
 	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
@@ -690,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
 	return TX_CONTINUE;
 }
 
+static int ieee80211_fragment(struct ieee80211_local *local,
+			      struct sk_buff *skb, int hdrlen,
+			      int frag_threshold)
+{
+	struct sk_buff *tail = skb, *tmp;
+	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
+	int pos = hdrlen + per_fragm;
+	int rem = skb->len - hdrlen - per_fragm;
+
+	if (WARN_ON(rem < 0))
+		return -EINVAL;
+
+	while (rem) {
+		int fraglen = per_fragm;
+
+		if (fraglen > rem)
+			fraglen = rem;
+		rem -= fraglen;
+		tmp = dev_alloc_skb(local->tx_headroom +
+				    frag_threshold +
+				    IEEE80211_ENCRYPT_HEADROOM +
+				    IEEE80211_ENCRYPT_TAILROOM);
+		if (!tmp)
+			return -ENOMEM;
+		tail->next = tmp;
+		tail = tmp;
+		skb_reserve(tmp, local->tx_headroom +
+				 IEEE80211_ENCRYPT_HEADROOM);
+		/* copy control information */
+		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+		skb_copy_queue_mapping(tmp, skb);
+		tmp->priority = skb->priority;
+		tmp->do_not_encrypt = skb->do_not_encrypt;
+		tmp->dev = skb->dev;
+		tmp->iif = skb->iif;
+
+		/* copy header and data */
+		memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
+		memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
+
+		pos += fraglen;
+	}
+
+	skb->len = hdrlen + per_fragm;
+	return 0;
+}
+
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-	size_t hdrlen, per_fragm, num_fragm, payload_len, left;
-	struct sk_buff **frags, *first, *frag;
-	int i;
-	u16 seq;
-	u8 *pos;
+	struct sk_buff *skb = tx->skb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
 	int frag_threshold = tx->local->fragmentation_threshold;
+	int hdrlen;
+	int fragnum;
 
 	if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
 		return TX_CONTINUE;
@@ -713,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
 		return TX_DROP;
 
-	first = tx->skb;
-
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
-	payload_len = first->len - hdrlen;
-	per_fragm = frag_threshold - hdrlen - FCS_LEN;
-	num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
-
-	frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
-	if (!frags)
-		goto fail;
-
-	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
-	seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
-	pos = first->data + hdrlen + per_fragm;
-	left = payload_len - per_fragm;
-	for (i = 0; i < num_fragm - 1; i++) {
-		struct ieee80211_hdr *fhdr;
-		size_t copylen;
-
-		if (left <= 0)
-			goto fail;
 
-		/* reserve enough extra head and tail room for possible
-		 * encryption */
-		frag = frags[i] =
-			dev_alloc_skb(tx->local->tx_headroom +
-				      frag_threshold +
-				      IEEE80211_ENCRYPT_HEADROOM +
-				      IEEE80211_ENCRYPT_TAILROOM);
-		if (!frag)
-			goto fail;
-
-		/* Make sure that all fragments use the same priority so
-		 * that they end up using the same TX queue */
-		frag->priority = first->priority;
+	/* internal error, why is TX_FRAGMENTED set? */
+	if (WARN_ON(skb->len <= frag_threshold))
+		return TX_DROP;
 
-		skb_reserve(frag, tx->local->tx_headroom +
-				  IEEE80211_ENCRYPT_HEADROOM);
+	/*
+	 * Now fragment the frame. This will allocate all the fragments and
+	 * chain them (using skb as the first fragment) to skb->next.
+	 * During transmission, we will remove the successfully transmitted
+	 * fragments from this list. When the low-level driver rejects one
+	 * of the fragments then we will simply pretend to accept the skb
+	 * but store it away as pending.
+	 */
+	if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+		return TX_DROP;
 
-		/* copy TX information */
-		info = IEEE80211_SKB_CB(frag);
-		memcpy(info, first->cb, sizeof(frag->cb));
+	/* update duration/seq/flags of fragments */
+	fragnum = 0;
+	do {
+		int next_len;
+		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
 
-		/* copy/fill in 802.11 header */
-		fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
-		memcpy(fhdr, first->data, hdrlen);
-		fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
+		hdr = (void *)skb->data;
+		info = IEEE80211_SKB_CB(skb);
 
-		if (i == num_fragm - 2) {
-			/* clear MOREFRAGS bit for the last fragment */
-			fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
-		} else {
+		if (skb->next) {
+			hdr->frame_control |= morefrags;
+			next_len = skb->next->len;
 			/*
 			 * No multi-rate retries for fragmented frames, that
 			 * would completely throw off the NAV at other STAs.
@@ -775,37 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 			info->control.rates[4].idx = -1;
 			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
 			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+		} else {
+			hdr->frame_control &= ~morefrags;
+			next_len = 0;
 		}
-
-		/* copy data */
-		copylen = left > per_fragm ? per_fragm : left;
-		memcpy(skb_put(frag, copylen), pos, copylen);
-
-		skb_copy_queue_mapping(frag, first);
-
-		frag->do_not_encrypt = first->do_not_encrypt;
-		frag->dev = first->dev;
-		frag->iif = first->iif;
-
-		pos += copylen;
-		left -= copylen;
-	}
-	skb_trim(first, hdrlen + per_fragm);
-
-	tx->num_extra_frag = num_fragm - 1;
-	tx->extra_frag = frags;
+		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
+		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
+		fragnum++;
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
-
- fail:
-	if (frags) {
-		for (i = 0; i < num_fragm - 1; i++)
-			if (frags[i])
-				dev_kfree_skb(frags[i]);
-		kfree(frags);
-	}
-	I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
-	return TX_DROP;
 }
 
 static ieee80211_tx_result debug_noinline
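
Note: the structural change in the two hunks above is that fragments no longer live in a separately allocated tx->extra_frag array; ieee80211_fragment() chains them off skb->next, and the handler then fixes up the fragment number and MOREFRAGS bit in one do/while walk over the chain. The following self-contained C sketch models that pattern in userspace; frag_t, fragment() and all sizes are illustrative stand-ins, not mac80211 API.

/* Standalone model of the skb->next fragment chain: the original
 * buffer keeps the first per_fragm bytes, extra pieces are appended
 * to the tail of a singly linked list. Toy code, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

typedef struct frag {
	struct frag *next;
	size_t len;          /* payload bytes carried by this fragment */
	int morefrags;       /* models IEEE80211_FCTL_MOREFRAGS */
	int fragnum;         /* models the IEEE80211_SCTL_FRAG field */
} frag_t;

/* Split 'total' payload bytes into per_fragm-sized pieces, chaining
 * them the way ieee80211_fragment() chains skbs. */
static frag_t *fragment(size_t total, size_t per_fragm)
{
	frag_t *head = calloc(1, sizeof(*head)), *tail = head;
	size_t rem = total > per_fragm ? total - per_fragm : 0;

	head->len = total - rem;
	while (rem) {
		frag_t *tmp = calloc(1, sizeof(*tmp));

		tmp->len = rem > per_fragm ? per_fragm : rem;
		rem -= tmp->len;
		tail->next = tmp;   /* append; the head stays first */
		tail = tmp;
	}
	return head;
}

int main(void)
{
	frag_t *head = fragment(2500, 1000);
	frag_t *f = head;
	int fragnum = 0;

	/* Same do/while walk as the "update duration/seq/flags" loop:
	 * every fragment but the last gets MOREFRAGS set. */
	do {
		f->morefrags = f->next != NULL;
		f->fragnum = fragnum++;
		printf("frag %d: len=%zu morefrags=%d\n",
		       f->fragnum, f->len, f->morefrags);
	} while ((f = f->next));

	while (head) {              /* free the chain */
		frag_t *next = head->next;
		free(head);
		head = next;
	}
	return 0;
}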
@@ -833,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-	int next_len, i;
-	int group_addr = is_multicast_ether_addr(hdr->addr1);
-
-	if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
-		hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
-		return TX_CONTINUE;
-	}
+	struct sk_buff *skb = tx->skb;
+	struct ieee80211_hdr *hdr;
+	int next_len;
+	bool group_addr;
 
-	hdr->duration_id = ieee80211_duration(tx, group_addr,
-					      tx->extra_frag[0]->len);
+	do {
+		hdr = (void *) skb->data;
+		next_len = skb->next ? skb->next->len : 0;
+		group_addr = is_multicast_ether_addr(hdr->addr1);
 
-	for (i = 0; i < tx->num_extra_frag; i++) {
-		if (i + 1 < tx->num_extra_frag)
-			next_len = tx->extra_frag[i + 1]->len;
-		else
-			next_len = 0;
-
-		hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
-		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
-	}
+		hdr->duration_id =
+			ieee80211_duration(tx, group_addr, next_len);
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
 }
@@ -861,19 +865,16 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 {
-	int i;
+	struct sk_buff *skb = tx->skb;
 
 	if (!tx->sta)
 		return TX_CONTINUE;
 
 	tx->sta->tx_packets++;
-	tx->sta->tx_fragments++;
-	tx->sta->tx_bytes += tx->skb->len;
-	if (tx->extra_frag) {
-		tx->sta->tx_fragments += tx->num_extra_frag;
-		for (i = 0; i < tx->num_extra_frag; i++)
-			tx->sta->tx_bytes += tx->extra_frag[i]->len;
-	}
+	do {
+		tx->sta->tx_fragments++;
+		tx->sta->tx_bytes += skb->len;
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
 }
@@ -983,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
 	int hdrlen, tid;
 	u8 *qc, *state;
+	bool queued = false;
 
 	memset(tx, 0, sizeof(*tx));
 	tx->skb = skb;
@@ -1012,25 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
 		 */
 	}
 
+	/*
+	 * If this flag is set to true anywhere, and we get here,
+	 * we are doing the needed processing, so remove the flag
+	 * now.
+	 */
+	info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+
 	hdr = (struct ieee80211_hdr *) skb->data;
 
 	tx->sta = sta_info_get(local, hdr->addr1);
 
-	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) {
+	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
+	    (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
 		unsigned long flags;
+		struct tid_ampdu_tx *tid_tx;
+
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
 		spin_lock_irqsave(&tx->sta->lock, flags);
+		/*
+		 * XXX: This spinlock could be fairly expensive, but see the
+		 *	comment in agg-tx.c:ieee80211_agg_tx_operational().
+		 *	One way to solve this would be to do something RCU-like
+		 *	for managing the tid_tx struct and using atomic bitops
+		 *	for the actual state -- by introducing an actual
+		 *	'operational' bit that would be possible. It would
+		 *	require changing ieee80211_agg_tx_operational() to
+		 *	set that bit, and changing the way tid_tx is managed
+		 *	everywhere, including races between that bit and
+		 *	tid_tx going away (tid_tx being added can be easily
+		 *	committed to memory before the 'operational' bit).
+		 */
+		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
 		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
 		if (*state == HT_AGG_STATE_OPERATIONAL) {
 			info->flags |= IEEE80211_TX_CTL_AMPDU;
-			if (local->hw.ampdu_queues)
-				skb_set_queue_mapping(
-					skb, tx->local->hw.queues +
-					     tx->sta->tid_to_tx_q[tid]);
+		} else if (*state != HT_AGG_STATE_IDLE) {
+			/* in progress */
+			queued = true;
+			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+			__skb_queue_tail(&tid_tx->pending, skb);
 		}
 		spin_unlock_irqrestore(&tx->sta->lock, flags);
+
+		if (unlikely(queued))
+			return TX_QUEUED;
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
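
Note: the hunk above makes __ieee80211_tx_prepare() park frames on tid_tx->pending while an aggregation session is still being negotiated, marking them IEEE80211_TX_INTFL_NEED_TXPROCESSING and returning TX_QUEUED so the caller simply stops. A toy decision function under assumed names (the enum, struct frame and agg_check are illustrative, not kernel definitions):

/* Model of the per-TID decision made under tx->sta->lock above. */
#include <stdbool.h>
#include <stdio.h>

enum agg_state { AGG_IDLE, AGG_IN_PROGRESS, AGG_OPERATIONAL };

struct frame { bool ampdu; bool need_txprocessing; };

/* Returns true when the caller must stop because the frame was
 * parked on the session's pending queue (maps to TX_QUEUED). */
static bool agg_check(enum agg_state state, struct frame *f)
{
	if (state == AGG_OPERATIONAL) {
		f->ampdu = true;             /* IEEE80211_TX_CTL_AMPDU */
		return false;
	}
	if (state != AGG_IDLE) {
		/* session setup in progress: park the frame and mark it
		 * so the pending tasklet re-runs the full TX path later */
		f->need_txprocessing = true;
		return true;
	}
	return false;                        /* idle: plain non-aggregated TX */
}

int main(void)
{
	struct frame f = { false, false };
	bool queued = agg_check(AGG_IN_PROGRESS, &f);

	printf("queued=%d ampdu=%d txproc=%d\n",
	       queued, f.ampdu, f.need_txprocessing);
	return 0;
}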
@@ -1081,51 +1110,55 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
 	}
 	if (unlikely(!dev))
 		return -ENODEV;
-	/* initialises tx with control */
+	/*
+	 * initialises tx with control
+	 *
+	 * return value is safe to ignore here because this function
+	 * can only be invoked for multicast frames
+	 *
+	 * XXX: clean up
+	 */
 	__ieee80211_tx_prepare(tx, skb, dev);
 	dev_put(dev);
 	return 0;
 }
 
-static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
-			  struct ieee80211_tx_data *tx)
+static int __ieee80211_tx(struct ieee80211_local *local,
+			  struct sk_buff **skbp,
+			  struct sta_info *sta)
 {
+	struct sk_buff *skb = *skbp, *next;
 	struct ieee80211_tx_info *info;
-	int ret, i;
+	int ret, len;
+	bool fragm = false;
 
-	if (skb) {
+	local->mdev->trans_start = jiffies;
+
+	while (skb) {
 		if (ieee80211_queue_stopped(&local->hw,
 					    skb_get_queue_mapping(skb)))
 			return IEEE80211_TX_PENDING;
 
-		ret = local->ops->tx(local_to_hw(local), skb);
-		if (ret)
-			return IEEE80211_TX_AGAIN;
-		local->mdev->trans_start = jiffies;
-		ieee80211_led_tx(local, 1);
-	}
-	if (tx->extra_frag) {
-		for (i = 0; i < tx->num_extra_frag; i++) {
-			if (!tx->extra_frag[i])
-				continue;
-			info = IEEE80211_SKB_CB(tx->extra_frag[i]);
+		info = IEEE80211_SKB_CB(skb);
+
+		if (fragm)
 			info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
 					 IEEE80211_TX_CTL_FIRST_FRAGMENT);
-			if (ieee80211_queue_stopped(&local->hw,
-					skb_get_queue_mapping(tx->extra_frag[i])))
-				return IEEE80211_TX_FRAG_AGAIN;
 
-			ret = local->ops->tx(local_to_hw(local),
-					    tx->extra_frag[i]);
-			if (ret)
-				return IEEE80211_TX_FRAG_AGAIN;
-			local->mdev->trans_start = jiffies;
-			ieee80211_led_tx(local, 1);
-			tx->extra_frag[i] = NULL;
+		next = skb->next;
+		len = skb->len;
+		ret = local->ops->tx(local_to_hw(local), skb);
+		if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
+			dev_kfree_skb(skb);
+			ret = NETDEV_TX_OK;
 		}
-		kfree(tx->extra_frag);
-		tx->extra_frag = NULL;
+		if (ret != NETDEV_TX_OK)
+			return IEEE80211_TX_AGAIN;
+		*skbp = skb = next;
+		ieee80211_led_tx(local, 1);
+		fragm = true;
 	}
+
 	return IEEE80211_TX_OK;
 }
 
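Note: because __ieee80211_tx() now advances *skbp as each fragment is accepted, a rejection leaves the caller holding exactly the unsent tail of the chain, which is what makes the requeue logic in ieee80211_tx() possible. A minimal userspace model of that contract (struct pkt, driver_tx and the ids are invented for illustration):

/* Toy version of the chain-consuming transmit loop: *headp always
 * points at the first untransmitted fragment, so a TX_AGAIN return
 * leaves the caller holding exactly the unsent tail. */
#include <stdio.h>

struct pkt { struct pkt *next; int id; };

enum { TX_OK, TX_AGAIN };

/* Stand-in for local->ops->tx(); refuses everything after the
 * second fragment to show the effect. */
static int driver_tx(struct pkt *p) { return p->id < 2 ? 0 : 1; }

static int tx_chain(struct pkt **headp)
{
	struct pkt *p = *headp;

	while (p) {
		struct pkt *next = p->next;

		if (driver_tx(p))
			return TX_AGAIN;   /* *headp still points at p */
		*headp = p = next;         /* fragment accepted, advance */
	}
	return TX_OK;
}

int main(void)
{
	struct pkt c = { NULL, 2 }, b = { &c, 1 }, a = { &b, 0 };
	struct pkt *head = &a;

	if (tx_chain(&head) == TX_AGAIN)
		printf("driver refused fragment %d; resume from it later\n",
		       head->id);
	return 0;
}
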
@@ -1137,7 +1170,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 {
 	struct sk_buff *skb = tx->skb;
 	ieee80211_tx_result res = TX_DROP;
-	int i;
 
 #define CALL_TXH(txh) \
 	res = txh(tx); \
@@ -1161,11 +1193,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
  txh_done:
 	if (unlikely(res == TX_DROP)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_drop);
-		dev_kfree_skb(skb);
-		for (i = 0; i < tx->num_extra_frag; i++)
-			if (tx->extra_frag[i])
-				dev_kfree_skb(tx->extra_frag[i]);
-		kfree(tx->extra_frag);
+		while (skb) {
+			struct sk_buff *next;
+
+			next = skb->next;
+			dev_kfree_skb(skb);
+			skb = next;
+		}
 		return -1;
 	} else if (unlikely(res == TX_QUEUED)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1175,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	return 0;
 }
 
-static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
+static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
+			 bool txpending)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct sta_info *sta;
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	int ret, i;
+	struct sk_buff *next;
+	unsigned long flags;
+	int ret, retries;
 	u16 queue;
 
 	queue = skb_get_queue_mapping(skb);
 
-	WARN_ON(test_bit(queue, local->queues_pending));
+	WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
 
 	if (unlikely(skb->len < 10)) {
 		dev_kfree_skb(skb);
-		return 0;
+		return;
 	}
 
 	rcu_read_lock();
@@ -1199,10 +1236,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	/* initialises tx */
 	res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
 
-	if (res_prepare == TX_DROP) {
+	if (unlikely(res_prepare == TX_DROP)) {
 		dev_kfree_skb(skb);
 		rcu_read_unlock();
-		return 0;
+		return;
+	} else if (unlikely(res_prepare == TX_QUEUED)) {
+		rcu_read_unlock();
+		return;
 	}
 
 	sta = tx.sta;
@@ -1212,59 +1252,71 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	if (invoke_tx_handlers(&tx))
 		goto out;
 
-retry:
-	ret = __ieee80211_tx(local, skb, &tx);
-	if (ret) {
-		struct ieee80211_tx_stored_packet *store;
-
+	retries = 0;
+ retry:
+	ret = __ieee80211_tx(local, &tx.skb, tx.sta);
+	switch (ret) {
+	case IEEE80211_TX_OK:
+		break;
+	case IEEE80211_TX_AGAIN:
 		/*
 		 * Since there are no fragmented frames on A-MPDU
 		 * queues, there's no reason for a driver to reject
 		 * a frame there, warn and drop it.
 		 */
-		if (ret != IEEE80211_TX_PENDING)
-			if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
-				goto drop;
+		if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
+			goto drop;
+		/* fall through */
+	case IEEE80211_TX_PENDING:
+		skb = tx.skb;
+
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+		if (__netif_subqueue_stopped(local->mdev, queue)) {
+			do {
+				next = skb->next;
+				skb->next = NULL;
+				if (unlikely(txpending))
+					skb_queue_head(&local->pending[queue],
+						       skb);
+				else
+					skb_queue_tail(&local->pending[queue],
+						       skb);
+			} while ((skb = next));
 
-		store = &local->pending_packet[queue];
+			/*
+			 * Make sure nobody will enable the queue on us
+			 * (without going through the tasklet) nor disable the
+			 * netdev queue underneath the pending handling code.
+			 */
+			__set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
+				  &local->queue_stop_reasons[queue]);
 
-		if (ret == IEEE80211_TX_FRAG_AGAIN)
-			skb = NULL;
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+					       flags);
+		} else {
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+					       flags);
 
-		set_bit(queue, local->queues_pending);
-		smp_mb();
-		/*
-		 * When the driver gets out of buffers during sending of
-		 * fragments and calls ieee80211_stop_queue, the netif
-		 * subqueue is stopped. There is, however, a small window
-		 * in which the PENDING bit is not yet set. If a buffer
-		 * gets available in that window (i.e. driver calls
-		 * ieee80211_wake_queue), we would end up with ieee80211_tx
-		 * called with the PENDING bit still set. Prevent this by
-		 * continuing transmitting here when that situation is
-		 * possible to have happened.
-		 */
-		if (!__netif_subqueue_stopped(local->mdev, queue)) {
-			clear_bit(queue, local->queues_pending);
+			retries++;
+			if (WARN(retries > 10, "tx refused but queue active"))
+				goto drop;
 			goto retry;
 		}
-		store->skb = skb;
-		store->extra_frag = tx.extra_frag;
-		store->num_extra_frag = tx.num_extra_frag;
 	}
  out:
 	rcu_read_unlock();
-	return 0;
+	return;
 
  drop:
-	if (skb)
-		dev_kfree_skb(skb);
-	for (i = 0; i < tx.num_extra_frag; i++)
-		if (tx.extra_frag[i])
-			dev_kfree_skb(tx.extra_frag[i]);
-	kfree(tx.extra_frag);
 	rcu_read_unlock();
-	return 0;
+
+	skb = tx.skb;
+	while (skb) {
+		next = skb->next;
+		dev_kfree_skb(skb);
+		skb = next;
+	}
 }
 
 /* device xmit handlers */
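
Note: the requeue above distinguishes the two callers. Frames the pending tasklet had already dequeued (txpending true) go back to the head of local->pending[queue] so on-air ordering is preserved, while frames arriving fresh from the stack are appended at the tail. A small ring-buffer model of that rule (queue size and helpers are toy code, not kernel API):

/* Minimal model of the head-vs-tail requeue rule. */
#include <stdbool.h>
#include <stdio.h>

#define QLEN 8
static int q[QLEN];
static int qhead, qcount;

static void queue_tail(int v) { q[(qhead + qcount++) % QLEN] = v; }
static void queue_head(int v)
{
	qhead = (qhead + QLEN - 1) % QLEN;
	q[qhead] = v;
	qcount++;
}
static int dequeue(void)
{
	int v = q[qhead];
	qhead = (qhead + 1) % QLEN;
	qcount--;
	return v;
}

static void store_unsent(int frame, bool txpending)
{
	if (txpending)
		queue_head(frame);  /* skb_queue_head(): keep original order */
	else
		queue_tail(frame);  /* skb_queue_tail(): new frame waits */
}

int main(void)
{
	queue_tail(1);
	queue_tail(2);
	int f = dequeue();          /* tasklet took frame 1 ... */
	store_unsent(f, true);      /* ... driver refused it: back to front */
	store_unsent(3, false);     /* fresh frame from the stack: to the back */
	while (qcount)
		printf("%d ", dequeue());   /* prints: 1 2 3 */
	printf("\n");
	return 0;
}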
@@ -1323,7 +1375,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		FOUND_SDATA,
 		UNKNOWN_ADDRESS,
 	} monitor_iface = NOT_MONITOR;
-	int ret;
 
 	if (skb->iif)
 		odev = dev_get_by_index(&init_net, skb->iif);
@@ -1337,7 +1388,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		       "originating device\n", dev->name);
 #endif
 		dev_kfree_skb(skb);
-		return 0;
+		return NETDEV_TX_OK;
 	}
 
 	if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
@@ -1366,7 +1417,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		if (mesh_nexthop_lookup(skb, osdata)) {
 			dev_put(odev);
-			return 0;
+			return NETDEV_TX_OK;
 		}
 		if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
 			IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
@@ -1428,7 +1479,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
 		dev_kfree_skb(skb);
 		dev_put(odev);
-		return 0;
+		return NETDEV_TX_OK;
 	}
 
 	if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1437,10 +1488,11 @@
 				     u.ap);
 	if (likely(monitor_iface != UNKNOWN_ADDRESS))
 		info->control.vif = &osdata->vif;
-	ret = ieee80211_tx(odev, skb);
+
+	ieee80211_tx(odev, skb, false);
 	dev_put(odev);
 
-	return ret;
+	return NETDEV_TX_OK;
 }
 
 int ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1666,8 +1718,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
 	}
 
 	/* receiver and we are QoS enabled, use a QoS type frame */
-	if (sta_flags & WLAN_STA_WME &&
-	    ieee80211_num_regular_queues(&local->hw) >= 4) {
+	if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
 		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
 		hdrlen += 2;
 	}
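
Note: the QoS check above keys on local->hw.queues >= 4 because a QoS station needs one hardware queue per 802.11e access category. For reference, the standard TID-to-AC collapse looks roughly like this sketch; mac80211 keeps its own lookup table for this, which is not part of this diff:

/* Map an 802.11e TID (0-7) onto the four access categories. */
#include <stdio.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK };

static enum ac tid_to_ac(int tid)
{
	switch (tid) {
	case 6: case 7: return AC_VO;            /* voice */
	case 4: case 5: return AC_VI;            /* video */
	case 1: case 2: return AC_BK;            /* background */
	case 0: case 3: default: return AC_BE;   /* best effort */
	}
}

int main(void)
{
	for (int tid = 0; tid < 8; tid++)
		printf("TID %d -> AC %d\n", tid, tid_to_ac(tid));
	return 0;
}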
@@ -1799,19 +1850,58 @@
  */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 {
-	int i, j;
-	struct ieee80211_tx_stored_packet *store;
+	int i;
 
-	for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
-		if (!test_bit(i, local->queues_pending))
-			continue;
-		store = &local->pending_packet[i];
-		kfree_skb(store->skb);
-		for (j = 0; j < store->num_extra_frag; j++)
-			kfree_skb(store->extra_frag[j]);
-		kfree(store->extra_frag);
-		clear_bit(i, local->queues_pending);
+	for (i = 0; i < local->hw.queues; i++)
+		skb_queue_purge(&local->pending[i]);
+}
+
+static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
+				     struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sub_if_data *sdata;
+	struct sta_info *sta;
+	struct ieee80211_hdr *hdr;
+	struct net_device *dev;
+	int ret;
+	bool result = true;
+
+	/* does interface still exist? */
+	dev = dev_get_by_index(&init_net, skb->iif);
+	if (!dev) {
+		dev_kfree_skb(skb);
+		return true;
 	}
+
+	/* validate info->control.vif against skb->iif */
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		sdata = container_of(sdata->bss,
+				     struct ieee80211_sub_if_data,
+				     u.ap);
+
+	if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
+		dev_kfree_skb(skb);
+		result = true;
+		goto out;
+	}
+
+	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
+		ieee80211_tx(dev, skb, true);
+	} else {
+		hdr = (struct ieee80211_hdr *)skb->data;
+		sta = sta_info_get(local, hdr->addr1);
+
+		ret = __ieee80211_tx(local, &skb, sta);
+		if (ret != IEEE80211_TX_OK)
+			result = false;
+	}
+
+ out:
+	dev_put(dev);
+
+	return result;
 }
 
 /*
@@ -1822,40 +1912,53 @@ void ieee80211_tx_pending(unsigned long data)
 {
 	struct ieee80211_local *local = (struct ieee80211_local *)data;
 	struct net_device *dev = local->mdev;
-	struct ieee80211_tx_stored_packet *store;
-	struct ieee80211_tx_data tx;
-	int i, ret;
+	unsigned long flags;
+	int i;
+	bool next;
 
+	rcu_read_lock();
 	netif_tx_lock_bh(dev);
-	for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
-		/* Check that this queue is ok */
-		if (__netif_subqueue_stopped(local->mdev, i) &&
-		    !test_bit(i, local->queues_pending_run))
-			continue;
 
-		if (!test_bit(i, local->queues_pending)) {
-			clear_bit(i, local->queues_pending_run);
-			ieee80211_wake_queue(&local->hw, i);
+	for (i = 0; i < local->hw.queues; i++) {
+		/*
+		 * If queue is stopped by something other than due to pending
+		 * frames, or we have no pending frames, proceed to next queue.
+		 */
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+		next = false;
+		if (local->queue_stop_reasons[i] !=
+			BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
+		    skb_queue_empty(&local->pending[i]))
+			next = true;
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+		if (next)
 			continue;
-		}
 
-		clear_bit(i, local->queues_pending_run);
+		/*
+		 * start the queue now to allow processing our packets,
+		 * we're under the tx lock here anyway so nothing will
+		 * happen as a result of this
+		 */
 		netif_start_subqueue(local->mdev, i);
 
-		store = &local->pending_packet[i];
-		tx.extra_frag = store->extra_frag;
-		tx.num_extra_frag = store->num_extra_frag;
-		tx.flags = 0;
-		ret = __ieee80211_tx(local, store->skb, &tx);
-		if (ret) {
-			if (ret == IEEE80211_TX_FRAG_AGAIN)
-				store->skb = NULL;
-		} else {
-			clear_bit(i, local->queues_pending);
-			ieee80211_wake_queue(&local->hw, i);
+		while (!skb_queue_empty(&local->pending[i])) {
+			struct sk_buff *skb = skb_dequeue(&local->pending[i]);
+
+			if (!ieee80211_tx_pending_skb(local, skb)) {
+				skb_queue_head(&local->pending[i], skb);
+				break;
+			}
 		}
+
+		/* Start regular packet processing again. */
+		if (skb_queue_empty(&local->pending[i]))
+			ieee80211_wake_queue_by_reason(&local->hw, i,
+					IEEE80211_QUEUE_STOP_REASON_PENDING);
 	}
+
 	netif_tx_unlock_bh(dev);
+	rcu_read_unlock();
 }
 
 /* functions for drivers to get certain frames */
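
Note: the tasklet above only drains a queue when IEEE80211_QUEUE_STOP_REASON_PENDING is the sole stop reason; if the driver or powersave also stopped the queue, waking and flushing it here would be wrong. A compact model of that test (the reason bit names and values are illustrative, not the kernel's actual definitions):

/* Sketch of the stop-reason test in ieee80211_tx_pending(): a queue
 * is flushed only when PENDING is the *only* reason it is stopped. */
#include <stdbool.h>
#include <stdio.h>

enum {
	STOP_REASON_DRIVER  = 1 << 0,
	STOP_REASON_PS      = 1 << 1,
	STOP_REASON_PENDING = 1 << 2,
};

static bool should_flush(unsigned stop_reasons, bool have_pending)
{
	return stop_reasons == STOP_REASON_PENDING && have_pending;
}

int main(void)
{
	printf("%d\n", should_flush(STOP_REASON_PENDING, true));   /* 1 */
	printf("%d\n", should_flush(STOP_REASON_PENDING |
				    STOP_REASON_PS, true));        /* 0: PS also stopped it */
	printf("%d\n", should_flush(STOP_REASON_PENDING, false));  /* 0: nothing queued */
	return 0;
}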