Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
 drivers/net/wireless/b43/dma.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------------
 1 file changed, 64 insertions(+), 55 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index e23f2f172bd7..098f886976f6 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -328,11 +328,11 @@ static inline
         dma_addr_t dmaaddr;
 
         if (tx) {
-                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-                                         buf, len, DMA_TO_DEVICE);
+                dmaaddr = ssb_dma_map_single(ring->dev->dev,
+                                             buf, len, DMA_TO_DEVICE);
         } else {
-                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-                                         buf, len, DMA_FROM_DEVICE);
+                dmaaddr = ssb_dma_map_single(ring->dev->dev,
+                                             buf, len, DMA_FROM_DEVICE);
         }
 
         return dmaaddr;
@@ -343,11 +343,11 @@ static inline
                      dma_addr_t addr, size_t len, int tx)
 {
         if (tx) {
-                dma_unmap_single(ring->dev->dev->dma_dev,
-                                 addr, len, DMA_TO_DEVICE);
+                ssb_dma_unmap_single(ring->dev->dev,
+                                     addr, len, DMA_TO_DEVICE);
         } else {
-                dma_unmap_single(ring->dev->dev->dma_dev,
-                                 addr, len, DMA_FROM_DEVICE);
+                ssb_dma_unmap_single(ring->dev->dev,
+                                     addr, len, DMA_FROM_DEVICE);
         }
 }
 
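[Editor's note] The ssb_dma_* wrappers introduced in the hunks above take the struct ssb_device itself rather than the raw DMA struct device, so the SSB layer can route the operation to whichever bus actually hosts the core. A minimal sketch of that dispatch, assuming the 2.6.27-era include/linux/ssb/ssb.h inlines (simplified and renamed, not a verbatim copy):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/ssb/ssb.h>

/* Hypothetical sketch of the wrapper's bus dispatch. */
static inline dma_addr_t sketch_ssb_dma_map_single(struct ssb_device *dev,
                                                   void *p, size_t size,
                                                   enum dma_data_direction dir)
{
        switch (dev->bus->bustype) {
        case SSB_BUSTYPE_PCI:
                /* Core sits behind a PCI host bridge: map against
                 * the PCI device. */
                return pci_map_single(dev->bus->host_pci, p, size, dir);
        case SSB_BUSTYPE_SSB:
                /* Native SSB bus: map against the SSB device itself. */
                return dma_map_single(dev->dev, p, size, dir);
        default:
                WARN_ON(1);
        }
        return 0;
}

This is why the call sites above drop the ->dma_dev dereference: choosing the right struct device is now the wrapper's job, not the driver's.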
@@ -356,8 +356,8 @@ static inline
                         dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
-                                addr, len, DMA_FROM_DEVICE);
+        ssb_dma_sync_single_for_cpu(ring->dev->dev,
+                                    addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -365,8 +365,8 @@ static inline
                            dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        dma_sync_single_for_device(ring->dev->dev->dma_dev,
-                                   addr, len, DMA_FROM_DEVICE);
+        ssb_dma_sync_single_for_device(ring->dev->dev,
+                                       addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -381,7 +381,6 @@ static inline
 
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-        struct device *dma_dev = ring->dev->dev->dma_dev;
         gfp_t flags = GFP_KERNEL;
 
         /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
@@ -392,11 +391,14 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
          * For unknown reasons - possibly a hardware error - the BCM4311 rev
          * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
          * which accounts for the GFP_DMA flag below.
+         *
+         * The flags here must match the flags in free_ringmemory below!
          */
         if (ring->type == B43_DMA_64BIT)
                 flags |= GFP_DMA;
-        ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
-                                            &(ring->dmabase), flags);
+        ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+                                                  B43_DMA_RINGMEMSIZE,
+                                                  &(ring->dmabase), flags);
         if (!ring->descbase) {
                 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                 return -ENOMEM;
@@ -408,10 +410,13 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-        struct device *dma_dev = ring->dev->dev->dma_dev;
+        gfp_t flags = GFP_KERNEL;
+
+        if (ring->type == B43_DMA_64BIT)
+                flags |= GFP_DMA;
 
-        dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
-                          ring->descbase, ring->dmabase);
+        ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
+                                ring->descbase, ring->dmabase, flags);
 }
 
 /* Reset the RX DMA channel */
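[Editor's note] Why free_ringmemory() now recomputes the GFP flags: pci_alloc_consistent()/pci_free_consistent() have no GFP parameter, so when GFP_DMA is required the wrapper has to bypass the PCI helpers and call the generic coherent-DMA API directly; the free path must make the same choice, which is only possible if it sees the same flags. A sketch of the presumed dispatch, simplified from the 2.6.27-era ssb.h (renamed, not verbatim):

/* Hypothetical sketch: free must mirror the allocation path. */
static inline void sketch_ssb_dma_free_consistent(struct ssb_device *dev,
                                                  size_t size, void *vaddr,
                                                  dma_addr_t dma_addr,
                                                  gfp_t gfp_flags)
{
        switch (dev->bus->bustype) {
        case SSB_BUSTYPE_PCI:
                if (gfp_flags & GFP_DMA) {
                        /* Allocated via dma_alloc_coherent() because the
                         * PCI API cannot pass GFP_DMA; free the same way. */
                        dma_free_coherent(&dev->bus->host_pci->dev,
                                          size, vaddr, dma_addr);
                        return;
                }
                pci_free_consistent(dev->bus->host_pci, size,
                                    vaddr, dma_addr);
                return;
        case SSB_BUSTYPE_SSB:
                dma_free_coherent(dev->dev, size, vaddr, dma_addr);
                return;
        default:
                WARN_ON(1);
        }
}

Hence the "flags here must match" comment added in alloc_ringmemory(): if the two sides disagreed on GFP_DMA, the buffer would be freed through the wrong path.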
@@ -518,7 +523,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                   dma_addr_t addr,
                                   size_t buffersize, bool dma_to_device)
 {
-        if (unlikely(dma_mapping_error(addr)))
+        if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
                 return 1;
 
         switch (ring->type) {
@@ -844,10 +849,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                 goto err_kfree_meta;
 
         /* test for ability to dma to txhdr_cache */
-        dma_test = dma_map_single(dev->dev->dma_dev,
-                                  ring->txhdr_cache,
-                                  b43_txhdr_size(dev),
-                                  DMA_TO_DEVICE);
+        dma_test = ssb_dma_map_single(dev->dev,
+                                      ring->txhdr_cache,
+                                      b43_txhdr_size(dev),
+                                      DMA_TO_DEVICE);
 
         if (b43_dma_mapping_error(ring, dma_test,
                                   b43_txhdr_size(dev), 1)) {
@@ -859,10 +864,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                 if (!ring->txhdr_cache)
                         goto err_kfree_meta;
 
-                dma_test = dma_map_single(dev->dev->dma_dev,
-                                          ring->txhdr_cache,
-                                          b43_txhdr_size(dev),
-                                          DMA_TO_DEVICE);
+                dma_test = ssb_dma_map_single(dev->dev,
+                                              ring->txhdr_cache,
+                                              b43_txhdr_size(dev),
+                                              DMA_TO_DEVICE);
 
                 if (b43_dma_mapping_error(ring, dma_test,
                                           b43_txhdr_size(dev), 1)) {
@@ -873,9 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         }
                 }
 
-                dma_unmap_single(dev->dev->dma_dev,
-                                 dma_test, b43_txhdr_size(dev),
-                                 DMA_TO_DEVICE);
+                ssb_dma_unmap_single(dev->dev,
+                                     dma_test, b43_txhdr_size(dev),
+                                     DMA_TO_DEVICE);
         }
 
         err = alloc_ringmemory(ring);
@@ -1130,10 +1135,10 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
 }
 
 static int dma_tx_fragment(struct b43_dmaring *ring,
-                           struct sk_buff *skb,
-                           struct ieee80211_tx_control *ctl)
+                           struct sk_buff *skb)
 {
         const struct b43_dma_ops *ops = ring->ops;
+        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
         u8 *header;
         int slot, old_top_slot, old_used_slots;
         int err;
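[Editor's note] The ieee80211_tx_control parameter disappears because mac80211 now carries per-packet TX metadata inside the skb itself: IEEE80211_SKB_CB() is simply a cast of the skb control buffer. Roughly, per the 2.6.27-era include/net/mac80211.h accessor:

#include <linux/skbuff.h>
#include <net/mac80211.h>

/* TX metadata lives in skb->cb instead of a separate structure. */
static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
{
        return (struct ieee80211_tx_info *)skb->cb;
}

Since the info travels with the skb, dma_tx_fragment() can recover it locally, and both the extra parameter and the memcpy into meta->txstat (removed below) become unnecessary.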
@@ -1157,7 +1162,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         header = &(ring->txhdr_cache[slot * hdrsize]);
         cookie = generate_cookie(ring, slot);
         err = b43_generate_txhdr(ring->dev, header,
-                                 skb->data, skb->len, ctl, cookie);
+                                 skb->data, skb->len, info, cookie);
         if (unlikely(err)) {
                 ring->current_slot = old_top_slot;
                 ring->used_slots = old_used_slots;
@@ -1179,7 +1184,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         desc = ops->idx2desc(ring, slot, &meta);
         memset(meta, 0, sizeof(*meta));
 
-        memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
         meta->skb = skb;
         meta->is_last_fragment = 1;
 
@@ -1209,7 +1213,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
         ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
 
-        if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
+        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                 /* Tell the firmware about the cookie of the last
                  * mcast frame, so it can clear the more-data bit in it. */
                 b43_shm_write16(ring->dev, B43_SHM_SHARED,
@@ -1280,16 +1284,16 @@ static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
         return ring;
 }
 
-int b43_dma_tx(struct b43_wldev *dev,
-               struct sk_buff *skb, struct ieee80211_tx_control *ctl)
+int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 {
         struct b43_dmaring *ring;
         struct ieee80211_hdr *hdr;
         int err = 0;
         unsigned long flags;
+        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
         hdr = (struct ieee80211_hdr *)skb->data;
-        if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
+        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                 /* The multicast ring will be sent after the DTIM */
                 ring = dev->dma.tx_ring_mcast;
                 /* Set the more-data bit. Ucode will clear it on
@@ -1297,7 +1301,8 @@ int b43_dma_tx(struct b43_wldev *dev,
                 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
         } else {
                 /* Decide by priority where to put this frame. */
-                ring = select_ring_by_priority(dev, ctl->queue);
+                ring = select_ring_by_priority(
+                        dev, skb_get_queue_mapping(skb));
         }
 
         spin_lock_irqsave(&ring->lock, flags);
@@ -1315,9 +1320,9 @@ int b43_dma_tx(struct b43_wldev *dev,
         /* Assign the queue number to the ring (if not already done before)
          * so TX status handling can use it. The queue to ring mapping is
          * static, so we don't need to store it per frame. */
-        ring->queue_prio = ctl->queue;
+        ring->queue_prio = skb_get_queue_mapping(skb);
 
-        err = dma_tx_fragment(ring, skb, ctl);
+        err = dma_tx_fragment(ring, skb);
         if (unlikely(err == -ENOKEY)) {
                 /* Drop this packet, as we don't have the encryption key
                  * anymore and must not transmit it unencrypted. */
@@ -1333,7 +1338,7 @@ int b43_dma_tx(struct b43_wldev *dev,
         if ((free_slots(ring) < SLOTS_PER_PACKET) ||
             should_inject_overflow(ring)) {
                 /* This TX ring is full. */
-                ieee80211_stop_queue(dev->wl->hw, ctl->queue);
+                ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
                 ring->stopped = 1;
                 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
                         b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
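[Editor's note] The queue index likewise no longer arrives in a control structure: mac80211 records it in the skb when it selects the TX queue, and skb_get_queue_mapping() reads it back. Per the include/linux/skbuff.h helper of that era:

#include <linux/skbuff.h>

/* Reads the queue index mac80211 stored when queueing the frame. */
static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
        return skb->queue_mapping;
}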
@@ -1376,13 +1381,19 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                           b43_txhdr_size(dev), 1);
 
         if (meta->is_last_fragment) {
-                B43_WARN_ON(!meta->skb);
-                /* Call back to inform the ieee80211 subsystem about the
-                 * status of the transmission.
-                 * Some fields of txstat are already filled in dma_tx().
+                struct ieee80211_tx_info *info;
+
+                BUG_ON(!meta->skb);
+
+                info = IEEE80211_SKB_CB(meta->skb);
+
+                memset(&info->status, 0, sizeof(info->status));
+
+                /*
+                 * Call back to inform the ieee80211 subsystem about
+                 * the status of the transmission.
                  */
-                frame_succeed = b43_fill_txstatus_report(
-                        &(meta->txstat), status);
+                frame_succeed = b43_fill_txstatus_report(info, status);
 #ifdef CONFIG_B43_DEBUG
                 if (frame_succeed)
                         ring->nr_succeed_tx_packets++;
@@ -1390,8 +1401,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                         ring->nr_failed_tx_packets++;
                 ring->nr_total_packet_tries += status->frame_count;
 #endif /* DEBUG */
-                ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
-                                            &(meta->txstat));
+                ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
+
                 /* skb is freed by ieee80211_tx_status_irqsafe() */
                 meta->skb = NULL;
         } else {
@@ -1426,18 +1437,16 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
 {
         const int nr_queues = dev->wl->hw->queues;
         struct b43_dmaring *ring;
-        struct ieee80211_tx_queue_stats_data *data;
         unsigned long flags;
         int i;
 
         for (i = 0; i < nr_queues; i++) {
-                data = &(stats->data[i]);
                 ring = select_ring_by_priority(dev, i);
 
                 spin_lock_irqsave(&ring->lock, flags);
-                data->len = ring->used_slots / SLOTS_PER_PACKET;
-                data->limit = ring->nr_slots / SLOTS_PER_PACKET;
-                data->count = ring->nr_tx_packets;
+                stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
+                stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
+                stats[i].count = ring->nr_tx_packets;
                 spin_unlock_irqrestore(&ring->lock, flags);
         }
 }
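[Editor's note] b43_dma_get_tx_stats() now fills a flat array of struct ieee80211_tx_queue_stats, one entry per queue; the intermediate *_stats_data wrapper is gone. For reference, the 2.6.27-era mac80211 struct appears to be just the three counters written above (treat this as illustrative, not authoritative):

#include <net/mac80211.h>

/* Sketch of the per-queue stats as filled in by the loop above. */
struct ieee80211_tx_queue_stats {
        unsigned int len;       /* packets currently queued */
        unsigned int limit;     /* queue depth in packets */
        unsigned int count;     /* frames sent on this queue */
};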