Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--    drivers/net/wireless/b43/dma.c    440
1 file changed, 209 insertions(+), 231 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 48e912487b16..21c886a9a1d9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -38,6 +38,7 @@
 #include <linux/delay.h>
 #include <linux/skbuff.h>
 #include <linux/etherdevice.h>
+#include <asm/div64.h>
 
 
 /* 32bit DMA ops. */
@@ -291,52 +292,6 @@ static inline int request_slot(struct b43_dmaring *ring)
         return slot;
 }
 
-/* Mac80211-queue to b43-ring mapping */
-static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
-                                              int queue_priority)
-{
-        struct b43_dmaring *ring;
-
-/*FIXME: For now we always run on TX-ring-1 */
-        return dev->dma.tx_ring1;
-
-        /* 0 = highest priority */
-        switch (queue_priority) {
-        default:
-                B43_WARN_ON(1);
-                /* fallthrough */
-        case 0:
-                ring = dev->dma.tx_ring3;
-                break;
-        case 1:
-                ring = dev->dma.tx_ring2;
-                break;
-        case 2:
-                ring = dev->dma.tx_ring1;
-                break;
-        case 3:
-                ring = dev->dma.tx_ring0;
-                break;
-        }
-
-        return ring;
-}
-
-/* b43-ring to mac80211-queue mapping */
-static inline int txring_to_priority(struct b43_dmaring *ring)
-{
-        static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
-        unsigned int index;
-
-/*FIXME: have only one queue, for now */
-        return 0;
-
-        index = ring->index;
-        if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
-                index = 0;
-        return idx_to_prio[index];
-}
-
 static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
 {
         static const u16 map64[] = {
@@ -596,7 +551,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
                                struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
 {
         struct b43_rxhdr_fw4 *rxhdr;
-        struct b43_hwtxstatus *txstat;
         dma_addr_t dmaaddr;
         struct sk_buff *skb;
 
@@ -632,8 +586,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 
         rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
         rxhdr->frame_len = 0;
-        txstat = (struct b43_hwtxstatus *)(skb->data);
-        txstat->cookie = 0;
 
         return 0;
 }
@@ -822,6 +774,18 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
         return DMA_30BIT_MASK;
 }
 
+static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
+{
+        if (dmamask == DMA_30BIT_MASK)
+                return B43_DMA_30BIT;
+        if (dmamask == DMA_32BIT_MASK)
+                return B43_DMA_32BIT;
+        if (dmamask == DMA_64BIT_MASK)
+                return B43_DMA_64BIT;
+        B43_WARN_ON(1);
+        return B43_DMA_30BIT;
+}
+
 /* Main initialization function. */
 static
 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
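For reference, dma_mask_to_engine_type() centralizes a mapping that b43_dma_init() open-coded as a switch until now (see further down in this patch) and that the new PIO hook at the bottom of the file also needs. The masks it compares are plain bit masks in kernels of this era; a compilable sketch with local stand-in constants (the names below are stand-ins, the values match linux/dma-mapping.h of the time):

#include <stdint.h>

/* Local stand-ins for the era's DMA mask constants. */
#define DMA_30BIT_MASK ((1ULL << 30) - 1)   /* 0x000000003fffffff */
#define DMA_32BIT_MASK ((1ULL << 32) - 1)   /* 0x00000000ffffffff */
#define DMA_64BIT_MASK (~0ULL)              /* all 64 address bits usable */

On an unrecognized mask the helper warns and falls back to the most conservative engine type, 30-bit.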
@@ -937,16 +901,52 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
         goto out;
 }
 
+#define divide(a, b)    ({      \
+        typeof(a) __a = a;      \
+        do_div(__a, b);         \
+        __a;                    \
+  })
+
+#define modulo(a, b)    ({      \
+        typeof(a) __a = a;      \
+        do_div(__a, b);         \
+  })
+
 /* Main cleanup function. */
-static void b43_destroy_dmaring(struct b43_dmaring *ring)
+static void b43_destroy_dmaring(struct b43_dmaring *ring,
+                                const char *ringname)
 {
         if (!ring)
                 return;
 
-        b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
-               (unsigned int)(ring->type),
-               ring->mmio_base,
-               (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
+#ifdef CONFIG_B43_DEBUG
+        {
+                /* Print some statistics. */
+                u64 failed_packets = ring->nr_failed_tx_packets;
+                u64 succeed_packets = ring->nr_succeed_tx_packets;
+                u64 nr_packets = failed_packets + succeed_packets;
+                u64 permille_failed = 0, average_tries = 0;
+
+                if (nr_packets)
+                        permille_failed = divide(failed_packets * 1000, nr_packets);
+                if (nr_packets)
+                        average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
+
+                b43dbg(ring->dev->wl, "DMA-%u %s: "
+                       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
+                       "Average tries %llu.%02llu\n",
+                       (unsigned int)(ring->type), ringname,
+                       ring->max_used_slots,
+                       ring->nr_slots,
+                       (unsigned long long)failed_packets,
+                       (unsigned long long)nr_packets,
+                       (unsigned long long)divide(permille_failed, 10),
+                       (unsigned long long)modulo(permille_failed, 10),
+                       (unsigned long long)divide(average_tries, 100),
+                       (unsigned long long)modulo(average_tries, 100));
+        }
+#endif /* DEBUG */
+
         /* Device IRQs are disabled prior entering this function,
          * so no need to take care of concurrency with rx handler stuff.
          */
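A note on the divide()/modulo() wrappers above: the kernel's do_div() (from asm/div64.h, newly included by this patch) is a macro, not a function. It divides its 64-bit lvalue argument in place and evaluates to the 32-bit remainder, which is why divide() hands back __a (the quotient) while modulo() hands back do_div()'s own result. A minimal userspace sketch of the same semantics, with a stand-in do_div():

#include <stdint.h>
#include <stdio.h>

/* Stand-in with do_div()'s contract: divide the 64-bit lvalue n by
 * base in place, evaluate to the remainder (GNU statement expression). */
#define do_div(n, base) ({                         \
        uint32_t __rem = (uint32_t)((n) % (base)); \
        (n) /= (base);                             \
        __rem;                                     \
})

int main(void)
{
        uint64_t permille_failed = 127; /* e.g. 127 permille = 12.7% */
        uint64_t quot = permille_failed;
        uint32_t rem = do_div(quot, 10); /* quot = 12, rem = 7 */

        printf("%llu.%01u%%\n", (unsigned long long)quot, rem);
        return 0;
}

This is also why the statistics code funnels every 64-bit division through these macros instead of plain '/' and '%': on 32-bit targets a native 64-bit division would pull in libgcc helpers the kernel does not link against.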
@@ -959,51 +959,36 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
         kfree(ring);
 }
 
+#define destroy_ring(dma, ring) do {                            \
+        b43_destroy_dmaring((dma)->ring, __stringify(ring));    \
+        (dma)->ring = NULL;                                     \
+    } while (0)
+
 void b43_dma_free(struct b43_wldev *dev)
 {
-        struct b43_dma *dma = &dev->dma;
+        struct b43_dma *dma;
 
-        b43_destroy_dmaring(dma->rx_ring3);
-        dma->rx_ring3 = NULL;
-        b43_destroy_dmaring(dma->rx_ring0);
-        dma->rx_ring0 = NULL;
-
-        b43_destroy_dmaring(dma->tx_ring5);
-        dma->tx_ring5 = NULL;
-        b43_destroy_dmaring(dma->tx_ring4);
-        dma->tx_ring4 = NULL;
-        b43_destroy_dmaring(dma->tx_ring3);
-        dma->tx_ring3 = NULL;
-        b43_destroy_dmaring(dma->tx_ring2);
-        dma->tx_ring2 = NULL;
-        b43_destroy_dmaring(dma->tx_ring1);
-        dma->tx_ring1 = NULL;
-        b43_destroy_dmaring(dma->tx_ring0);
-        dma->tx_ring0 = NULL;
+        if (b43_using_pio_transfers(dev))
+                return;
+        dma = &dev->dma;
+
+        destroy_ring(dma, rx_ring);
+        destroy_ring(dma, tx_ring_AC_BK);
+        destroy_ring(dma, tx_ring_AC_BE);
+        destroy_ring(dma, tx_ring_AC_VI);
+        destroy_ring(dma, tx_ring_AC_VO);
+        destroy_ring(dma, tx_ring_mcast);
 }
 
 int b43_dma_init(struct b43_wldev *dev)
 {
         struct b43_dma *dma = &dev->dma;
-        struct b43_dmaring *ring;
         int err;
         u64 dmamask;
         enum b43_dmatype type;
 
         dmamask = supported_dma_mask(dev);
-        switch (dmamask) {
-        default:
-                B43_WARN_ON(1);
-        case DMA_30BIT_MASK:
-                type = B43_DMA_30BIT;
-                break;
-        case DMA_32BIT_MASK:
-                type = B43_DMA_32BIT;
-                break;
-        case DMA_64BIT_MASK:
-                type = B43_DMA_64BIT;
-                break;
-        }
+        type = dma_mask_to_engine_type(dmamask);
         err = ssb_dma_set_mask(dev->dev, dmamask);
         if (err) {
                 b43err(dev->wl, "The machine/kernel does not support "
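The destroy_ring() helper above uses __stringify() (from linux/stringify.h) so that a single argument supplies both the struct member to tear down and the name printed by the new destroy-time statistics. For example, destroy_ring(dma, rx_ring) preprocesses to:

        do {
                b43_destroy_dmaring((dma)->rx_ring, "rx_ring");
                (dma)->rx_ring = NULL;
        } while (0)

The do/while (0) wrapper keeps the expansion safe as a single statement, e.g. after an unbraced if.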
@@ -1015,83 +1000,57 @@ int b43_dma_init(struct b43_wldev *dev)
 
         err = -ENOMEM;
         /* setup TX DMA channels. */
-        ring = b43_setup_dmaring(dev, 0, 1, type);
-        if (!ring)
+        dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
+        if (!dma->tx_ring_AC_BK)
                 goto out;
-        dma->tx_ring0 = ring;
 
-        ring = b43_setup_dmaring(dev, 1, 1, type);
-        if (!ring)
-                goto err_destroy_tx0;
-        dma->tx_ring1 = ring;
+        dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
+        if (!dma->tx_ring_AC_BE)
+                goto err_destroy_bk;
 
-        ring = b43_setup_dmaring(dev, 2, 1, type);
-        if (!ring)
-                goto err_destroy_tx1;
-        dma->tx_ring2 = ring;
+        dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
+        if (!dma->tx_ring_AC_VI)
+                goto err_destroy_be;
 
-        ring = b43_setup_dmaring(dev, 3, 1, type);
-        if (!ring)
-                goto err_destroy_tx2;
-        dma->tx_ring3 = ring;
+        dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
+        if (!dma->tx_ring_AC_VO)
+                goto err_destroy_vi;
 
-        ring = b43_setup_dmaring(dev, 4, 1, type);
-        if (!ring)
-                goto err_destroy_tx3;
-        dma->tx_ring4 = ring;
+        dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
+        if (!dma->tx_ring_mcast)
+                goto err_destroy_vo;
 
-        ring = b43_setup_dmaring(dev, 5, 1, type);
-        if (!ring)
-                goto err_destroy_tx4;
-        dma->tx_ring5 = ring;
+        /* setup RX DMA channel. */
+        dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
+        if (!dma->rx_ring)
+                goto err_destroy_mcast;
 
-        /* setup RX DMA channels. */
-        ring = b43_setup_dmaring(dev, 0, 0, type);
-        if (!ring)
-                goto err_destroy_tx5;
-        dma->rx_ring0 = ring;
-
-        if (dev->dev->id.revision < 5) {
-                ring = b43_setup_dmaring(dev, 3, 0, type);
-                if (!ring)
-                        goto err_destroy_rx0;
-                dma->rx_ring3 = ring;
-        }
+        /* No support for the TX status DMA ring. */
+        B43_WARN_ON(dev->dev->id.revision < 5);
 
         b43dbg(dev->wl, "%u-bit DMA initialized\n",
                (unsigned int)type);
         err = 0;
-      out:
+out:
         return err;
 
-      err_destroy_rx0:
-        b43_destroy_dmaring(dma->rx_ring0);
-        dma->rx_ring0 = NULL;
-      err_destroy_tx5:
-        b43_destroy_dmaring(dma->tx_ring5);
-        dma->tx_ring5 = NULL;
-      err_destroy_tx4:
-        b43_destroy_dmaring(dma->tx_ring4);
-        dma->tx_ring4 = NULL;
-      err_destroy_tx3:
-        b43_destroy_dmaring(dma->tx_ring3);
-        dma->tx_ring3 = NULL;
-      err_destroy_tx2:
-        b43_destroy_dmaring(dma->tx_ring2);
-        dma->tx_ring2 = NULL;
-      err_destroy_tx1:
-        b43_destroy_dmaring(dma->tx_ring1);
-        dma->tx_ring1 = NULL;
-      err_destroy_tx0:
-        b43_destroy_dmaring(dma->tx_ring0);
-        dma->tx_ring0 = NULL;
-        goto out;
+err_destroy_mcast:
+        destroy_ring(dma, tx_ring_mcast);
+err_destroy_vo:
+        destroy_ring(dma, tx_ring_AC_VO);
+err_destroy_vi:
+        destroy_ring(dma, tx_ring_AC_VI);
+err_destroy_be:
+        destroy_ring(dma, tx_ring_AC_BE);
+err_destroy_bk:
+        destroy_ring(dma, tx_ring_AC_BK);
+        return err;
 }
 
 /* Generate a cookie for the TX header. */
 static u16 generate_cookie(struct b43_dmaring *ring, int slot)
 {
-        u16 cookie = 0x1000;
+        u16 cookie;
 
         /* Use the upper 4 bits of the cookie as
          * DMA controller ID and store the slot number
@@ -1101,30 +1060,9 @@ static u16 generate_cookie(struct b43_dmaring *ring, int slot)
          * It can also not be 0xFFFF because that is special
          * for multicast frames.
          */
-        switch (ring->index) {
-        case 0:
-                cookie = 0x1000;
-                break;
-        case 1:
-                cookie = 0x2000;
-                break;
-        case 2:
-                cookie = 0x3000;
-                break;
-        case 3:
-                cookie = 0x4000;
-                break;
-        case 4:
-                cookie = 0x5000;
-                break;
-        case 5:
-                cookie = 0x6000;
-                break;
-        default:
-                B43_WARN_ON(1);
-        }
+        cookie = (((u16)ring->index + 1) << 12);
         B43_WARN_ON(slot & ~0x0FFF);
-        cookie |= (u16) slot;
+        cookie |= (u16)slot;
 
         return cookie;
 }
@@ -1138,22 +1076,19 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
 
         switch (cookie & 0xF000) {
         case 0x1000:
-                ring = dma->tx_ring0;
+                ring = dma->tx_ring_AC_BK;
                 break;
         case 0x2000:
-                ring = dma->tx_ring1;
+                ring = dma->tx_ring_AC_BE;
                 break;
         case 0x3000:
-                ring = dma->tx_ring2;
+                ring = dma->tx_ring_AC_VI;
                 break;
         case 0x4000:
-                ring = dma->tx_ring3;
+                ring = dma->tx_ring_AC_VO;
                 break;
         case 0x5000:
-                ring = dma->tx_ring4;
-                break;
-        case 0x6000:
-                ring = dma->tx_ring5;
+                ring = dma->tx_ring_mcast;
                 break;
         default:
                 B43_WARN_ON(1);
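The cookie layout, in short: ring index plus one in the top nibble (so a valid cookie is never 0x0000, and with only rings 0..4 in use the top nibble never reaches 0xF, keeping 0xFFFF reserved for multicast), slot number in the low 12 bits. A standalone sketch of the round trip that generate_cookie() and parse_cookie() implement between them (the helper names here are hypothetical):

#include <assert.h>
#include <stdint.h>

static uint16_t cookie_encode(unsigned int ring_index, int slot)
{
        /* Rings 0..4 map to top nibbles 0x1..0x5; the slot fits 12 bits. */
        return (uint16_t)(((ring_index + 1) << 12) | (slot & 0x0FFF));
}

static void cookie_decode(uint16_t cookie, unsigned int *ring_index, int *slot)
{
        *ring_index = (cookie >> 12) - 1; /* 0x1000..0x5000 -> 0..4 */
        *slot = cookie & 0x0FFF;
}

int main(void)
{
        unsigned int ring;
        int slot;

        cookie_decode(cookie_encode(3, 42), &ring, &slot); /* 0x402A */
        assert(ring == 3 && slot == 42);
        return 0;
}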
@@ -1180,7 +1115,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         size_t hdrsize = b43_txhdr_size(ring->dev);
 
 #define SLOTS_PER_PACKET 2
-        B43_WARN_ON(skb_shinfo(skb)->nr_frags);
 
         old_top_slot = ring->current_slot;
         old_used_slots = ring->used_slots;
@@ -1285,6 +1219,37 @@ static inline int should_inject_overflow(struct b43_dmaring *ring)
         return 0;
 }
 
+/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
+static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
+                                                    u8 queue_prio)
+{
+        struct b43_dmaring *ring;
+
+        if (b43_modparam_qos) {
+                /* 0 = highest priority */
+                switch (queue_prio) {
+                default:
+                        B43_WARN_ON(1);
+                        /* fallthrough */
+                case 0:
+                        ring = dev->dma.tx_ring_AC_VO;
+                        break;
+                case 1:
+                        ring = dev->dma.tx_ring_AC_VI;
+                        break;
+                case 2:
+                        ring = dev->dma.tx_ring_AC_BE;
+                        break;
+                case 3:
+                        ring = dev->dma.tx_ring_AC_BK;
+                        break;
+                }
+        } else
+                ring = dev->dma.tx_ring_AC_BE;
+
+        return ring;
+}
+
 int b43_dma_tx(struct b43_wldev *dev,
                struct sk_buff *skb, struct ieee80211_tx_control *ctl)
 {
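The mapping mirrors the 802.11e access categories: the highest-priority mac80211 queue (0) lands on the voice ring, the lowest (3) on background, and with the qos module parameter disabled everything funnels into best effort. Condensed as a lookup table (illustrative only; the enum is hypothetical and the driver keeps the explicit switch):

enum qos_ring { RING_AC_VO, RING_AC_VI, RING_AC_BE, RING_AC_BK };

static const enum qos_ring prio_to_ring[4] = {
        RING_AC_VO,     /* mac80211 queue 0: voice (highest priority) */
        RING_AC_VI,     /* queue 1: video */
        RING_AC_BE,     /* queue 2: best effort */
        RING_AC_BK,     /* queue 3: background (lowest priority) */
};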
@@ -1293,21 +1258,16 @@ int b43_dma_tx(struct b43_wldev *dev,
         int err = 0;
         unsigned long flags;
 
-        if (unlikely(skb->len < 2 + 2 + 6)) {
-                /* Too short, this can't be a valid frame. */
-                return -EINVAL;
-        }
-
         hdr = (struct ieee80211_hdr *)skb->data;
         if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
                 /* The multicast ring will be sent after the DTIM */
-                ring = dev->dma.tx_ring4;
+                ring = dev->dma.tx_ring_mcast;
                 /* Set the more-data bit. Ucode will clear it on
                  * the last frame for us. */
                 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
         } else {
                 /* Decide by priority where to put this frame. */
-                ring = priority_to_txring(dev, ctl->queue);
+                ring = select_ring_by_priority(dev, ctl->queue);
         }
 
         spin_lock_irqsave(&ring->lock, flags);
@@ -1322,6 +1282,11 @@ int b43_dma_tx(struct b43_wldev *dev,
          * That would be a mac80211 bug. */
         B43_WARN_ON(ring->stopped);
 
+        /* Assign the queue number to the ring (if not already done before)
+         * so TX status handling can use it. The queue to ring mapping is
+         * static, so we don't need to store it per frame. */
+        ring->queue_prio = ctl->queue;
+
         err = dma_tx_fragment(ring, skb, ctl);
         if (unlikely(err == -ENOKEY)) {
                 /* Drop this packet, as we don't have the encryption key
@@ -1338,7 +1303,7 @@ int b43_dma_tx(struct b43_wldev *dev,
         if ((free_slots(ring) < SLOTS_PER_PACKET) ||
             should_inject_overflow(ring)) {
                 /* This TX ring is full. */
-                ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
+                ieee80211_stop_queue(dev->wl->hw, ctl->queue);
                 ring->stopped = 1;
                 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                         b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1359,6 +1324,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
         struct b43_dmadesc_generic *desc;
         struct b43_dmadesc_meta *meta;
         int slot;
+        bool frame_succeed;
 
         ring = parse_cookie(dev, status->cookie, &slot);
         if (unlikely(!ring))
@@ -1385,18 +1351,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
          * status of the transmission.
          * Some fields of txstat are already filled in dma_tx().
          */
-        if (status->acked) {
-                meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
-        } else {
-                if (!(meta->txstat.control.flags
-                      & IEEE80211_TXCTL_NO_ACK))
-                        meta->txstat.excessive_retries = 1;
-        }
-        if (status->frame_count == 0) {
-                /* The frame was not transmitted at all. */
-                meta->txstat.retry_count = 0;
-        } else
-                meta->txstat.retry_count = status->frame_count - 1;
+        frame_succeed = b43_fill_txstatus_report(
+                        &(meta->txstat), status);
+#ifdef CONFIG_B43_DEBUG
+        if (frame_succeed)
+                ring->nr_succeed_tx_packets++;
+        else
+                ring->nr_failed_tx_packets++;
+        ring->nr_total_packet_tries += status->frame_count;
+#endif /* DEBUG */
         ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
                                     &(meta->txstat));
         /* skb is freed by ieee80211_tx_status_irqsafe() */
@@ -1418,7 +1381,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
         dev->stats.last_tx = jiffies;
         if (ring->stopped) {
                 B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
-                ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
+                ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
                 ring->stopped = 0;
                 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                         b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
@@ -1439,7 +1402,7 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
 
         for (i = 0; i < nr_queues; i++) {
                 data = &(stats->data[i]);
-                ring = priority_to_txring(dev, i);
+                ring = select_ring_by_priority(dev, i);
 
                 spin_lock_irqsave(&ring->lock, flags);
                 data->len = ring->used_slots / SLOTS_PER_PACKET;
@@ -1465,25 +1428,6 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
         sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
         skb = meta->skb;
 
-        if (ring->index == 3) {
-                /* We received an xmit status. */
-                struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
-                int i = 0;
-
-                while (hw->cookie == 0) {
-                        if (i > 100)
-                                break;
-                        i++;
-                        udelay(2);
-                        barrier();
-                }
-                b43_handle_hwtxstatus(ring->dev, hw);
-                /* recycle the descriptor buffer. */
-                sync_descbuffer_for_device(ring, meta->dmaaddr,
-                                           ring->rx_buffersize);
-
-                return;
-        }
         rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
         len = le16_to_cpu(rxhdr->frame_len);
         if (len == 0) {
@@ -1540,7 +1484,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
         skb_pull(skb, ring->frameoffset);
 
         b43_rx(ring->dev, skb, rxhdr);
-      drop:
+drop:
         return;
 }
 
@@ -1586,21 +1530,55 @@ static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
 void b43_dma_tx_suspend(struct b43_wldev *dev)
 {
         b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
-        b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
-        b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
-        b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
-        b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
-        b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
-        b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
+        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
+        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
+        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
+        b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
+        b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
 }
 
 void b43_dma_tx_resume(struct b43_wldev *dev)
 {
-        b43_dma_tx_resume_ring(dev->dma.tx_ring5);
-        b43_dma_tx_resume_ring(dev->dma.tx_ring4);
-        b43_dma_tx_resume_ring(dev->dma.tx_ring3);
-        b43_dma_tx_resume_ring(dev->dma.tx_ring2);
-        b43_dma_tx_resume_ring(dev->dma.tx_ring1);
-        b43_dma_tx_resume_ring(dev->dma.tx_ring0);
+        b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
+        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
+        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
+        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
+        b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
         b43_power_saving_ctl_bits(dev, 0);
 }
+
+#ifdef CONFIG_B43_PIO
+static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
+                           u16 mmio_base, bool enable)
+{
+        u32 ctl;
+
+        if (type == B43_DMA_64BIT) {
+                ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
+                ctl &= ~B43_DMA64_RXDIRECTFIFO;
+                if (enable)
+                        ctl |= B43_DMA64_RXDIRECTFIFO;
+                b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
+        } else {
+                ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
+                ctl &= ~B43_DMA32_RXDIRECTFIFO;
+                if (enable)
+                        ctl |= B43_DMA32_RXDIRECTFIFO;
+                b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
+        }
+}
+
+/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
+ * This is called from PIO code, so DMA structures are not available. */
+void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+                            unsigned int engine_index, bool enable)
+{
+        enum b43_dmatype type;
+        u16 mmio_base;
+
+        type = dma_mask_to_engine_type(supported_dma_mask(dev));
+
+        mmio_base = b43_dmacontroller_base(type, engine_index);
+        direct_fifo_rx(dev, type, mmio_base, enable);
+}
+#endif /* CONFIG_B43_PIO */
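The read-modify-write in direct_fifo_rx() toggles only the RXDIRECTFIFO bit of the engine's RX control register, so the rest of the controller configuration survives. Per the comment in the patch, the exported helper is meant to be called per RX engine from PIO code before any DMA structures exist; a hypothetical call site (not part of this patch) would look like:

        /* Put RX engine 0 into direct-FIFO (PIO) receive mode. */
        b43_dma_direct_fifo_rx(dev, 0, 1);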