about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/net/wireless/b43/dma.c
diff options
context:
space:
mode:
author: Michael Buesch <mb@bu3sch.de>  2007-12-26 12:26:17 -0500
committer: David S. Miller <davem@davemloft.net>  2008-01-28 18:09:45 -0500
commit: 280d0e16bcbf5893505a0d0897f3ca1ddc0764fa (patch)
tree: efa557090ba744de7736c1c6576bf4a67c11795c /drivers/net/wireless/b43/dma.c
parent: d4df6f1a9edb80c99913548467397617ccee7855 (diff)
b43: Put multicast frames on the mcast queue
This queues frames flagged as "send after DTIM" by mac80211 on the special
multicast queue. The firmware will take care to send the packet after the DTIM.

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--  drivers/net/wireless/b43/dma.c | 82
1 file changed, 55 insertions(+), 27 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 63217b1e312d..cf92853a2180 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -37,6 +37,8 @@
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/etherdevice.h>
41
40 42
41/* 32bit DMA ops. */ 43/* 32bit DMA ops. */
42static 44static
@@ -315,26 +317,24 @@ static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
315 case 3: 317 case 3:
316 ring = dev->dma.tx_ring0; 318 ring = dev->dma.tx_ring0;
317 break; 319 break;
318 case 4:
319 ring = dev->dma.tx_ring4;
320 break;
321 case 5:
322 ring = dev->dma.tx_ring5;
323 break;
324 } 320 }
325 321
326 return ring; 322 return ring;
327} 323}
328 324
329/* Bcm43xx-ring to mac80211-queue mapping */ 325/* b43-ring to mac80211-queue mapping */
330static inline int txring_to_priority(struct b43_dmaring *ring) 326static inline int txring_to_priority(struct b43_dmaring *ring)
331{ 327{
332 static const u8 idx_to_prio[] = { 3, 2, 1, 0, 4, 5, }; 328 static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
329 unsigned int index;
333 330
334/*FIXME: have only one queue, for now */ 331/*FIXME: have only one queue, for now */
335 return 0; 332 return 0;
336 333
337 return idx_to_prio[ring->index]; 334 index = ring->index;
335 if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
336 index = 0;
337 return idx_to_prio[index];
338} 338}
339 339
340u16 b43_dmacontroller_base(int dma64bit, int controller_idx) 340u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
@@ -1043,26 +1043,30 @@ static u16 generate_cookie(struct b43_dmaring *ring, int slot)
1043 * in the lower 12 bits. 1043 * in the lower 12 bits.
1044 * Note that the cookie must never be 0, as this 1044 * Note that the cookie must never be 0, as this
1045 * is a special value used in RX path. 1045 * is a special value used in RX path.
1046 * It can also not be 0xFFFF because that is special
1047 * for multicast frames.
1046 */ 1048 */
1047 switch (ring->index) { 1049 switch (ring->index) {
1048 case 0: 1050 case 0:
1049 cookie = 0xA000; 1051 cookie = 0x1000;
1050 break; 1052 break;
1051 case 1: 1053 case 1:
1052 cookie = 0xB000; 1054 cookie = 0x2000;
1053 break; 1055 break;
1054 case 2: 1056 case 2:
1055 cookie = 0xC000; 1057 cookie = 0x3000;
1056 break; 1058 break;
1057 case 3: 1059 case 3:
1058 cookie = 0xD000; 1060 cookie = 0x4000;
1059 break; 1061 break;
1060 case 4: 1062 case 4:
1061 cookie = 0xE000; 1063 cookie = 0x5000;
1062 break; 1064 break;
1063 case 5: 1065 case 5:
1064 cookie = 0xF000; 1066 cookie = 0x6000;
1065 break; 1067 break;
1068 default:
1069 B43_WARN_ON(1);
1066 } 1070 }
1067 B43_WARN_ON(slot & ~0x0FFF); 1071 B43_WARN_ON(slot & ~0x0FFF);
1068 cookie |= (u16) slot; 1072 cookie |= (u16) slot;
@@ -1078,22 +1082,22 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1078 struct b43_dmaring *ring = NULL; 1082 struct b43_dmaring *ring = NULL;
1079 1083
1080 switch (cookie & 0xF000) { 1084 switch (cookie & 0xF000) {
1081 case 0xA000: 1085 case 0x1000:
1082 ring = dma->tx_ring0; 1086 ring = dma->tx_ring0;
1083 break; 1087 break;
1084 case 0xB000: 1088 case 0x2000:
1085 ring = dma->tx_ring1; 1089 ring = dma->tx_ring1;
1086 break; 1090 break;
1087 case 0xC000: 1091 case 0x3000:
1088 ring = dma->tx_ring2; 1092 ring = dma->tx_ring2;
1089 break; 1093 break;
1090 case 0xD000: 1094 case 0x4000:
1091 ring = dma->tx_ring3; 1095 ring = dma->tx_ring3;
1092 break; 1096 break;
1093 case 0xE000: 1097 case 0x5000:
1094 ring = dma->tx_ring4; 1098 ring = dma->tx_ring4;
1095 break; 1099 break;
1096 case 0xF000: 1100 case 0x6000:
1097 ring = dma->tx_ring5; 1101 ring = dma->tx_ring5;
1098 break; 1102 break;
1099 default: 1103 default:
@@ -1117,6 +1121,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1117 struct b43_dmadesc_meta *meta; 1121 struct b43_dmadesc_meta *meta;
1118 struct b43_dmadesc_meta *meta_hdr; 1122 struct b43_dmadesc_meta *meta_hdr;
1119 struct sk_buff *bounce_skb; 1123 struct sk_buff *bounce_skb;
1124 u16 cookie;
1120 1125
1121#define SLOTS_PER_PACKET 2 1126#define SLOTS_PER_PACKET 2
1122 B43_WARN_ON(skb_shinfo(skb)->nr_frags); 1127 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
@@ -1127,9 +1132,9 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1127 memset(meta_hdr, 0, sizeof(*meta_hdr)); 1132 memset(meta_hdr, 0, sizeof(*meta_hdr));
1128 1133
1129 header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]); 1134 header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]);
1135 cookie = generate_cookie(ring, slot);
1130 b43_generate_txhdr(ring->dev, header, 1136 b43_generate_txhdr(ring->dev, header,
1131 skb->data, skb->len, ctl, 1137 skb->data, skb->len, ctl, cookie);
1132 generate_cookie(ring, slot));
1133 1138
1134 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, 1139 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1135 sizeof(struct b43_txhdr_fw4), 1); 1140 sizeof(struct b43_txhdr_fw4), 1);
@@ -1169,14 +1174,20 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1169 1174
1170 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); 1175 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
1171 1176
1177 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
1178 /* Tell the firmware about the cookie of the last
1179 * mcast frame, so it can clear the more-data bit in it. */
1180 b43_shm_write16(ring->dev, B43_SHM_SHARED,
1181 B43_SHM_SH_MCASTCOOKIE, cookie);
1182 }
1172 /* Now transfer the whole frame. */ 1183 /* Now transfer the whole frame. */
1173 wmb(); 1184 wmb();
1174 ops->poke_tx(ring, next_slot(ring, slot)); 1185 ops->poke_tx(ring, next_slot(ring, slot));
1175 return 0; 1186 return 0;
1176 1187
1177 out_free_bounce: 1188out_free_bounce:
1178 dev_kfree_skb_any(skb); 1189 dev_kfree_skb_any(skb);
1179 out_unmap_hdr: 1190out_unmap_hdr:
1180 unmap_descbuffer(ring, meta_hdr->dmaaddr, 1191 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1181 sizeof(struct b43_txhdr_fw4), 1); 1192 sizeof(struct b43_txhdr_fw4), 1);
1182 return err; 1193 return err;
@@ -1207,10 +1218,27 @@ int b43_dma_tx(struct b43_wldev *dev,
1207 struct sk_buff *skb, struct ieee80211_tx_control *ctl) 1218 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
1208{ 1219{
1209 struct b43_dmaring *ring; 1220 struct b43_dmaring *ring;
1221 struct ieee80211_hdr *hdr;
1210 int err = 0; 1222 int err = 0;
1211 unsigned long flags; 1223 unsigned long flags;
1212 1224
1213 ring = priority_to_txring(dev, ctl->queue); 1225 if (unlikely(skb->len < 2 + 2 + 6)) {
1226 /* Too short, this can't be a valid frame. */
1227 return -EINVAL;
1228 }
1229
1230 hdr = (struct ieee80211_hdr *)skb->data;
1231 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
1232 /* The multicast ring will be sent after the DTIM */
1233 ring = dev->dma.tx_ring4;
1234 /* Set the more-data bit. Ucode will clear it on
1235 * the last frame for us. */
1236 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1237 } else {
1238 /* Decide by priority where to put this frame. */
1239 ring = priority_to_txring(dev, ctl->queue);
1240 }
1241
1214 spin_lock_irqsave(&ring->lock, flags); 1242 spin_lock_irqsave(&ring->lock, flags);
1215 B43_WARN_ON(!ring->tx); 1243 B43_WARN_ON(!ring->tx);
1216 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { 1244 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
@@ -1238,7 +1266,7 @@ int b43_dma_tx(struct b43_wldev *dev,
1238 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); 1266 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
1239 } 1267 }
1240 } 1268 }
1241 out_unlock: 1269out_unlock:
1242 spin_unlock_irqrestore(&ring->lock, flags); 1270 spin_unlock_irqrestore(&ring->lock, flags);
1243 1271
1244 return err; 1272 return err;