author		Johannes Berg <johannes@sipsolutions.net>	2009-03-23 12:28:35 -0400
committer	John W. Linville <linville@tuxdriver.com>	2009-03-27 20:13:21 -0400
commit		2de8e0d999b8790861cd3749bec2236ccc1c8110 (patch)
tree		18b7f0127b7e1d938b41d1120803cb0af528058f /net/mac80211/tx.c
parent		08df05aa9b25f3079585855506022bb33a011183 (diff)
mac80211: rewrite fragmentation
Fragmentation currently uses an allocated array to store the
fragment skbs, and then keeps track of which have been sent
and which are still pending etc. This is rather complicated;
make it simpler by just chaining the fragments into skb->next
and removing from that list when sent. Also simplifies all
code that needs to touch fragments, since it now only needs
to walk the skb->next list.
This is a prerequisite for fixing the stored packet code,
which I need to do for proper aggregation packet storing.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Luis R. Rodriguez <lrodriguez@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
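
The idea of the rewrite, in miniature: instead of an extra_frag[] array that has to be allocated, indexed and freed separately, each fragment hangs off the previous one's next pointer, and the sender unlinks fragments from the front of the chain as they are accepted. Below is a minimal, self-contained userspace sketch of that list-handling pattern; the struct frag type, the fragment() helper and the per_frag parameter are invented stand-ins for struct sk_buff and the real mac80211 code, not part of this patch.

/*
 * Illustrative userspace sketch only -- NOT from the patch.
 * "struct frag" stands in for struct sk_buff; "next" plays the
 * role of skb->next in the rewritten fragmentation code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frag {
	struct frag *next;		/* the chain link, like skb->next */
	size_t len;
	unsigned char data[64];
};

/* Split payload into chunks of at most per_frag bytes, chained via ->next. */
static struct frag *fragment(const unsigned char *payload, size_t len,
			     size_t per_frag)
{
	struct frag *head = NULL, **tail = &head;

	while (len) {
		size_t chunk = len < per_frag ? len : per_frag;
		struct frag *f = calloc(1, sizeof(*f));

		if (!f)
			return head;	/* caller frees whatever was built */
		f->len = chunk;
		memcpy(f->data, payload, chunk);
		*tail = f;		/* append without rewalking the list */
		tail = &f->next;
		payload += chunk;
		len -= chunk;
	}
	return head;
}

int main(void)
{
	const unsigned char msg[] = "fragments chained through a next pointer";
	struct frag *head = fragment(msg, sizeof(msg) - 1, 10);

	/*
	 * Walk the chain the way the rewritten handlers walk skb->next,
	 * unlinking each fragment once it has been "sent".
	 */
	while (head) {
		struct frag *next = head->next;

		printf("sent %zu bytes: %.*s\n",
		       head->len, (int)head->len, head->data);
		free(head);
		head = next;
	}
	return 0;
}

If a "driver" rejected a fragment, the loop would simply stop and keep head as the still-pending chain, which is the same shape __ieee80211_tx() below gives to tx->skb when the queue is stopped.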
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--	net/mac80211/tx.c	322
1 file changed, 151 insertions(+), 171 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f3f240c69018..51bf49cc75bc 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -34,8 +34,7 @@
 
 #define IEEE80211_TX_OK		0
 #define IEEE80211_TX_AGAIN	1
-#define IEEE80211_TX_FRAG_AGAIN	2
-#define IEEE80211_TX_PENDING	3
+#define IEEE80211_TX_PENDING	2
 
 /* misc utils */
 
@@ -702,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
 	return TX_CONTINUE;
 }
 
+static int ieee80211_fragment(struct ieee80211_local *local,
+			      struct sk_buff *skb, int hdrlen,
+			      int frag_threshold)
+{
+	struct sk_buff *tail = skb, *tmp;
+	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
+	int pos = hdrlen + per_fragm;
+	int rem = skb->len - hdrlen - per_fragm;
+
+	if (WARN_ON(rem < 0))
+		return -EINVAL;
+
+	while (rem) {
+		int fraglen = per_fragm;
+
+		if (fraglen > rem)
+			fraglen = rem;
+		rem -= fraglen;
+		tmp = dev_alloc_skb(local->tx_headroom +
+				    frag_threshold +
+				    IEEE80211_ENCRYPT_HEADROOM +
+				    IEEE80211_ENCRYPT_TAILROOM);
+		if (!tmp)
+			return -ENOMEM;
+		tail->next = tmp;
+		tail = tmp;
+		skb_reserve(tmp, local->tx_headroom +
+				 IEEE80211_ENCRYPT_HEADROOM);
+		/* copy control information */
+		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+		skb_copy_queue_mapping(tmp, skb);
+		tmp->priority = skb->priority;
+		tmp->do_not_encrypt = skb->do_not_encrypt;
+		tmp->dev = skb->dev;
+		tmp->iif = skb->iif;
+
+		/* copy header and data */
+		memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
+		memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
+
+		pos += fraglen;
+	}
+
+	skb->len = hdrlen + per_fragm;
+	return 0;
+}
+
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-	size_t hdrlen, per_fragm, num_fragm, payload_len, left;
-	struct sk_buff **frags, *first, *frag;
-	int i;
-	u16 seq;
-	u8 *pos;
+	struct sk_buff *skb = tx->skb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
 	int frag_threshold = tx->local->fragmentation_threshold;
+	int hdrlen;
+	int fragnum;
 
 	if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
 		return TX_CONTINUE;
@@ -725,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
 		return TX_DROP;
 
-	first = tx->skb;
-
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
-	payload_len = first->len - hdrlen;
-	per_fragm = frag_threshold - hdrlen - FCS_LEN;
-	num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
-
-	frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
-	if (!frags)
-		goto fail;
-
-	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
-	seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
-	pos = first->data + hdrlen + per_fragm;
-	left = payload_len - per_fragm;
-	for (i = 0; i < num_fragm - 1; i++) {
-		struct ieee80211_hdr *fhdr;
-		size_t copylen;
-
-		if (left <= 0)
-			goto fail;
 
-		/* reserve enough extra head and tail room for possible
-		 * encryption */
-		frag = frags[i] =
-			dev_alloc_skb(tx->local->tx_headroom +
-				      frag_threshold +
-				      IEEE80211_ENCRYPT_HEADROOM +
-				      IEEE80211_ENCRYPT_TAILROOM);
-		if (!frag)
-			goto fail;
-
-		/* Make sure that all fragments use the same priority so
-		 * that they end up using the same TX queue */
-		frag->priority = first->priority;
+	/* internal error, why is TX_FRAGMENTED set? */
+	if (WARN_ON(skb->len <= frag_threshold))
+		return TX_DROP;
 
-		skb_reserve(frag, tx->local->tx_headroom +
-				  IEEE80211_ENCRYPT_HEADROOM);
+	/*
+	 * Now fragment the frame. This will allocate all the fragments and
+	 * chain them (using skb as the first fragment) to skb->next.
+	 * During transmission, we will remove the successfully transmitted
+	 * fragments from this list. When the low-level driver rejects one
+	 * of the fragments then we will simply pretend to accept the skb
+	 * but store it away as pending.
+	 */
+	if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+		return TX_DROP;
 
-		/* copy TX information */
-		info = IEEE80211_SKB_CB(frag);
-		memcpy(info, first->cb, sizeof(frag->cb));
+	/* update duration/seq/flags of fragments */
+	fragnum = 0;
+	do {
+		int next_len;
+		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
 
-		/* copy/fill in 802.11 header */
-		fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
-		memcpy(fhdr, first->data, hdrlen);
-		fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
+		hdr = (void *)skb->data;
+		info = IEEE80211_SKB_CB(skb);
 
-		if (i == num_fragm - 2) {
-			/* clear MOREFRAGS bit for the last fragment */
-			fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
-		} else {
+		if (skb->next) {
+			hdr->frame_control |= morefrags;
+			next_len = skb->next->len;
 			/*
 			 * No multi-rate retries for fragmented frames, that
 			 * would completely throw off the NAV at other STAs.
@@ -787,37 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 			info->control.rates[4].idx = -1;
 			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
 			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+		} else {
+			hdr->frame_control &= ~morefrags;
+			next_len = 0;
 		}
-
-		/* copy data */
-		copylen = left > per_fragm ? per_fragm : left;
-		memcpy(skb_put(frag, copylen), pos, copylen);
-
-		skb_copy_queue_mapping(frag, first);
-
-		frag->do_not_encrypt = first->do_not_encrypt;
-		frag->dev = first->dev;
-		frag->iif = first->iif;
-
-		pos += copylen;
-		left -= copylen;
-	}
-	skb_trim(first, hdrlen + per_fragm);
-
-	tx->num_extra_frag = num_fragm - 1;
-	tx->extra_frag = frags;
+		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
+		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
+		fragnum++;
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
-
-fail:
-	if (frags) {
-		for (i = 0; i < num_fragm - 1; i++)
-			if (frags[i])
-				dev_kfree_skb(frags[i]);
-		kfree(frags);
-	}
-	I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
-	return TX_DROP;
 }
 
 static ieee80211_tx_result debug_noinline
@@ -845,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-	int next_len, i;
-	int group_addr = is_multicast_ether_addr(hdr->addr1);
-
-	if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
-		hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
-		return TX_CONTINUE;
-	}
-
-	hdr->duration_id = ieee80211_duration(tx, group_addr,
-					      tx->extra_frag[0]->len);
+	struct sk_buff *skb = tx->skb;
+	struct ieee80211_hdr *hdr;
+	int next_len;
+	bool group_addr;
 
-	for (i = 0; i < tx->num_extra_frag; i++) {
-		if (i + 1 < tx->num_extra_frag)
-			next_len = tx->extra_frag[i + 1]->len;
-		else
-			next_len = 0;
+	do {
+		hdr = (void *) skb->data;
+		next_len = skb->next ? skb->next->len : 0;
+		group_addr = is_multicast_ether_addr(hdr->addr1);
 
-		hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
-		hdr->duration_id = ieee80211_duration(tx, 0, next_len);
-	}
+		hdr->duration_id =
+			ieee80211_duration(tx, group_addr, next_len);
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
 }
@@ -873,19 +865,16 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 {
-	int i;
+	struct sk_buff *skb = tx->skb;
 
 	if (!tx->sta)
 		return TX_CONTINUE;
 
 	tx->sta->tx_packets++;
-	tx->sta->tx_fragments++;
-	tx->sta->tx_bytes += tx->skb->len;
-	if (tx->extra_frag) {
-		tx->sta->tx_fragments += tx->num_extra_frag;
-		for (i = 0; i < tx->num_extra_frag; i++)
-			tx->sta->tx_bytes += tx->extra_frag[i]->len;
-	}
+	do {
+		tx->sta->tx_fragments++;
+		tx->sta->tx_bytes += skb->len;
+	} while ((skb = skb->next));
 
 	return TX_CONTINUE;
 }
@@ -1099,45 +1088,36 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
 	return 0;
 }
 
-static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
+static int __ieee80211_tx(struct ieee80211_local *local,
 			  struct ieee80211_tx_data *tx)
 {
+	struct sk_buff *skb = tx->skb, *next;
 	struct ieee80211_tx_info *info;
-	int ret, i;
+	int ret;
+	bool fragm = false;
 
-	if (skb) {
+	local->mdev->trans_start = jiffies;
+
+	while (skb) {
 		if (ieee80211_queue_stopped(&local->hw,
 					    skb_get_queue_mapping(skb)))
 			return IEEE80211_TX_PENDING;
 
-		ret = local->ops->tx(local_to_hw(local), skb);
-		if (ret)
-			return IEEE80211_TX_AGAIN;
-		local->mdev->trans_start = jiffies;
-		ieee80211_led_tx(local, 1);
-	}
-	if (tx->extra_frag) {
-		for (i = 0; i < tx->num_extra_frag; i++) {
-			if (!tx->extra_frag[i])
-				continue;
-			info = IEEE80211_SKB_CB(tx->extra_frag[i]);
+		if (fragm) {
+			info = IEEE80211_SKB_CB(skb);
 			info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
 					 IEEE80211_TX_CTL_FIRST_FRAGMENT);
-			if (ieee80211_queue_stopped(&local->hw,
-				skb_get_queue_mapping(tx->extra_frag[i])))
-				return IEEE80211_TX_FRAG_AGAIN;
-
-			ret = local->ops->tx(local_to_hw(local),
-					     tx->extra_frag[i]);
-			if (ret)
-				return IEEE80211_TX_FRAG_AGAIN;
-			local->mdev->trans_start = jiffies;
-			ieee80211_led_tx(local, 1);
-			tx->extra_frag[i] = NULL;
 		}
-		kfree(tx->extra_frag);
-		tx->extra_frag = NULL;
+
+		next = skb->next;
+		ret = local->ops->tx(local_to_hw(local), skb);
+		if (ret != NETDEV_TX_OK)
+			return IEEE80211_TX_AGAIN;
+		tx->skb = skb = next;
+		ieee80211_led_tx(local, 1);
+		fragm = true;
 	}
+
 	return IEEE80211_TX_OK;
 }
 
@@ -1149,7 +1129,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 {
 	struct sk_buff *skb = tx->skb;
 	ieee80211_tx_result res = TX_DROP;
-	int i;
 
 #define CALL_TXH(txh) \
 	res = txh(tx);	\
@@ -1173,11 +1152,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 txh_done:
 	if (unlikely(res == TX_DROP)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_drop);
-		dev_kfree_skb(skb);
-		for (i = 0; i < tx->num_extra_frag; i++)
-			if (tx->extra_frag[i])
-				dev_kfree_skb(tx->extra_frag[i]);
-		kfree(tx->extra_frag);
+		while (skb) {
+			struct sk_buff *next;
+
+			next = skb->next;
+			dev_kfree_skb(skb);
+			skb = next;
+		}
 		return -1;
 	} else if (unlikely(res == TX_QUEUED)) {
 		I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1194,7 +1175,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	int ret, i;
+	int ret;
 	u16 queue;
 
 	queue = skb_get_queue_mapping(skb);
@@ -1225,7 +1206,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 		goto out;
 
 retry:
-	ret = __ieee80211_tx(local, skb, &tx);
+	ret = __ieee80211_tx(local, &tx);
 	if (ret) {
 		struct ieee80211_tx_stored_packet *store;
 
@@ -1240,9 +1221,6 @@ retry:
 
 		store = &local->pending_packet[queue];
 
-		if (ret == IEEE80211_TX_FRAG_AGAIN)
-			skb = NULL;
-
 		set_bit(queue, local->queues_pending);
 		smp_mb();
 		/*
@@ -1260,22 +1238,23 @@ retry:
 			clear_bit(queue, local->queues_pending);
 			goto retry;
 		}
-		store->skb = skb;
-		store->extra_frag = tx.extra_frag;
-		store->num_extra_frag = tx.num_extra_frag;
+		store->skb = tx.skb;
 	}
 out:
 	rcu_read_unlock();
 	return 0;
 
 drop:
-	if (skb)
-		dev_kfree_skb(skb);
-	for (i = 0; i < tx.num_extra_frag; i++)
-		if (tx.extra_frag[i])
-			dev_kfree_skb(tx.extra_frag[i]);
-	kfree(tx.extra_frag);
 	rcu_read_unlock();
+
+	skb = tx.skb;
+	while (skb) {
+		struct sk_buff *next;
+
+		next = skb->next;
+		dev_kfree_skb(skb);
+		skb = next;
+	}
 	return 0;
 }
 
@@ -1810,17 +1789,21 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
  */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 {
-	int i, j;
-	struct ieee80211_tx_stored_packet *store;
+	struct sk_buff *skb;
+	int i;
 
 	for (i = 0; i < local->hw.queues; i++) {
 		if (!test_bit(i, local->queues_pending))
 			continue;
-		store = &local->pending_packet[i];
-		kfree_skb(store->skb);
-		for (j = 0; j < store->num_extra_frag; j++)
-			kfree_skb(store->extra_frag[j]);
-		kfree(store->extra_frag);
+
+		skb = local->pending_packet[i].skb;
+		while (skb) {
+			struct sk_buff *next;
+
+			next = skb->next;
+			dev_kfree_skb(skb);
+			skb = next;
+		}
 		clear_bit(i, local->queues_pending);
 	}
 }
@@ -1854,14 +1837,11 @@ void ieee80211_tx_pending(unsigned long data)
 		netif_start_subqueue(local->mdev, i);
 
 		store = &local->pending_packet[i];
-		tx.extra_frag = store->extra_frag;
-		tx.num_extra_frag = store->num_extra_frag;
 		tx.flags = 0;
-		ret = __ieee80211_tx(local, store->skb, &tx);
-		if (ret) {
-			if (ret == IEEE80211_TX_FRAG_AGAIN)
-				store->skb = NULL;
-		} else {
+		tx.skb = store->skb;
+		ret = __ieee80211_tx(local, &tx);
+		store->skb = tx.skb;
+		if (!ret) {
 			clear_bit(i, local->queues_pending);
 			ieee80211_wake_queue(&local->hw, i);
 		}