Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
 drivers/net/wireless/b43/dma.c | 45 +++++++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index f50e2014ffbe..aced9866d815 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1131,10 +1131,10 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
 }
 
 static int dma_tx_fragment(struct b43_dmaring *ring,
-			   struct sk_buff *skb,
-			   struct ieee80211_tx_control *ctl)
+			   struct sk_buff *skb)
 {
 	const struct b43_dma_ops *ops = ring->ops;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	u8 *header;
 	int slot, old_top_slot, old_used_slots;
 	int err;
@@ -1158,7 +1158,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	header = &(ring->txhdr_cache[slot * hdrsize]);
 	cookie = generate_cookie(ring, slot);
 	err = b43_generate_txhdr(ring->dev, header,
-				 skb->data, skb->len, ctl, cookie);
+				 skb->data, skb->len, info, cookie);
 	if (unlikely(err)) {
 		ring->current_slot = old_top_slot;
 		ring->used_slots = old_used_slots;
@@ -1180,7 +1180,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	desc = ops->idx2desc(ring, slot, &meta);
 	memset(meta, 0, sizeof(*meta));
 
-	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
 	meta->skb = skb;
 	meta->is_last_fragment = 1;
 
@@ -1210,7 +1209,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
 
-	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
+	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
 		/* Tell the firmware about the cookie of the last
 		 * mcast frame, so it can clear the more-data bit in it. */
 		b43_shm_write16(ring->dev, B43_SHM_SHARED,
@@ -1281,16 +1280,16 @@ static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
 	return ring;
 }
 
-int b43_dma_tx(struct b43_wldev *dev,
-	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
+int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 {
 	struct b43_dmaring *ring;
 	struct ieee80211_hdr *hdr;
 	int err = 0;
 	unsigned long flags;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	hdr = (struct ieee80211_hdr *)skb->data;
-	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
+	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
 		/* The multicast ring will be sent after the DTIM */
 		ring = dev->dma.tx_ring_mcast;
 		/* Set the more-data bit. Ucode will clear it on
@@ -1298,7 +1297,7 @@ int b43_dma_tx(struct b43_wldev *dev,
 		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
 	} else {
 		/* Decide by priority where to put this frame. */
-		ring = select_ring_by_priority(dev, ctl->queue);
+		ring = select_ring_by_priority(dev, info->queue);
 	}
 
 	spin_lock_irqsave(&ring->lock, flags);
@@ -1316,9 +1315,9 @@ int b43_dma_tx(struct b43_wldev *dev,
 	/* Assign the queue number to the ring (if not already done before)
 	 * so TX status handling can use it. The queue to ring mapping is
 	 * static, so we don't need to store it per frame. */
-	ring->queue_prio = ctl->queue;
+	ring->queue_prio = info->queue;
 
-	err = dma_tx_fragment(ring, skb, ctl);
+	err = dma_tx_fragment(ring, skb);
 	if (unlikely(err == -ENOKEY)) {
 		/* Drop this packet, as we don't have the encryption key
 		 * anymore and must not transmit it unencrypted. */
@@ -1334,7 +1333,7 @@ int b43_dma_tx(struct b43_wldev *dev,
 	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
 	    should_inject_overflow(ring)) {
 		/* This TX ring is full. */
-		ieee80211_stop_queue(dev->wl->hw, ctl->queue);
+		ieee80211_stop_queue(dev->wl->hw, info->queue);
 		ring->stopped = 1;
 		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
 			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1377,13 +1376,19 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 				 b43_txhdr_size(dev), 1);
 
 		if (meta->is_last_fragment) {
-			B43_WARN_ON(!meta->skb);
-			/* Call back to inform the ieee80211 subsystem about the
-			 * status of the transmission.
-			 * Some fields of txstat are already filled in dma_tx().
+			struct ieee80211_tx_info *info;
+
+			BUG_ON(!meta->skb);
+
+			info = IEEE80211_SKB_CB(meta->skb);
+
+			memset(&info->status, 0, sizeof(info->status));
+
+			/*
+			 * Call back to inform the ieee80211 subsystem about
+			 * the status of the transmission.
 			 */
-			frame_succeed = b43_fill_txstatus_report(
-				&(meta->txstat), status);
+			frame_succeed = b43_fill_txstatus_report(info, status);
 #ifdef CONFIG_B43_DEBUG
 			if (frame_succeed)
 				ring->nr_succeed_tx_packets++;
@@ -1391,8 +1396,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 				ring->nr_failed_tx_packets++;
 			ring->nr_total_packet_tries += status->frame_count;
 #endif /* DEBUG */
-			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
-						    &(meta->txstat));
+			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
+
 			/* skb is freed by ieee80211_tx_status_irqsafe() */
 			meta->skb = NULL;
 		} else {