Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
 drivers/net/wireless/b43/dma.c | 264 ++++++++++++++++++++++++++++++----------
 1 file changed, 206 insertions(+), 58 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index b5cd7f57055b..027be275e035 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,44 +383,160 @@ static inline
 	}
 }
 
+/* Check if a DMA region fits the device constraints.
+ * Returns true, if the region is OK for usage with this device. */
+static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
+				      dma_addr_t addr, size_t size)
+{
+	switch (ring->type) {
+	case B43_DMA_30BIT:
+		if ((u64)addr + size > (1ULL << 30))
+			return 0;
+		break;
+	case B43_DMA_32BIT:
+		if ((u64)addr + size > (1ULL << 32))
+			return 0;
+		break;
+	case B43_DMA_64BIT:
+		/* Currently we can't have addresses beyond
+		 * 64bit in the kernel. */
+		break;
+	}
+	return 1;
+}
+
+#define is_4k_aligned(addr)	(((u64)(addr) & 0x0FFFull) == 0)
+#define is_8k_aligned(addr)	(((u64)(addr) & 0x1FFFull) == 0)
+
+static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
+				       dma_addr_t dmaaddr, size_t size)
+{
+	ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
+	free_pages((unsigned long)base, get_order(size));
+}
+
+static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
+					dma_addr_t *dmaaddr, size_t size,
+					gfp_t gfp_flags)
+{
+	void *base;
+
+	base = (void *)__get_free_pages(gfp_flags, get_order(size));
+	if (!base)
+		return NULL;
+	memset(base, 0, size);
+	*dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
+				      DMA_TO_DEVICE);
+	if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
+		free_pages((unsigned long)base, get_order(size));
+		return NULL;
+	}
+
+	return base;
+}
+
+static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
+				      dma_addr_t *dmaaddr, size_t size)
+{
+	void *base;
+
+	base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+					 GFP_KERNEL);
+	if (!base) {
+		b43err(ring->dev->wl, "Failed to allocate or map pages "
+		       "for DMA ringmemory\n");
+		return NULL;
+	}
+	if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+		/* The memory does not fit our device constraints.
+		 * Retry with GFP_DMA set to get lower memory. */
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+						 GFP_KERNEL | GFP_DMA);
+		if (!base) {
+			b43err(ring->dev->wl, "Failed to allocate or map pages "
+			       "in the GFP_DMA region for DMA ringmemory\n");
+			return NULL;
+		}
+		if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+			b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+			b43err(ring->dev->wl, "Failed to allocate DMA "
+			       "ringmemory that fits device constraints\n");
+			return NULL;
+		}
+	}
+	/* We expect the memory to be 4k aligned, at least. */
+	if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		return NULL;
+	}
+
+	return base;
+}
+
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
-	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
-	 * has shown that 4K is sufficient for the latter as long as the buffer
-	 * does not cross an 8K boundary.
-	 *
-	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
-	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
-	 * which accounts for the GFP_DMA flag below.
-	 *
-	 * The flags here must match the flags in free_ringmemory below!
-	 */
-	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
-	if (!ring->descbase) {
-		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
-		return -ENOMEM;
-	}
-	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+	unsigned int required;
+	void *base;
+	dma_addr_t dmaaddr;
+
+	/* There are several requirements to the descriptor ring memory:
+	 * - The memory region needs to fit the address constraints for the
+	 *   device (same as for frame buffers).
+	 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
+	 * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
+	 */
+
+	if (ring->type == B43_DMA_64BIT)
+		required = ring->nr_slots * sizeof(struct b43_dmadesc64);
+	else
+		required = ring->nr_slots * sizeof(struct b43_dmadesc32);
+	if (B43_WARN_ON(required > 0x1000))
+		return -ENOMEM;
+
+	ring->alloc_descsize = 0x1000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
+		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
+		/* We're on <=32bit DMA, or we already got 8k aligned memory.
+		 * That's all we need, so we're fine. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
+	}
+	b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
+
+	/* Ok, we failed at the 8k alignment requirement.
+	 * Try to force-align the memory region now. */
+	ring->alloc_descsize = 0x2000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
+		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if (is_8k_aligned(dmaaddr)) {
+		/* We're already 8k aligned. That's OK, too. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
+	}
+	/* Force-align it to 8k */
+	ring->descbase = (void *)((u8 *)base + 0x1000);
+	ring->dmabase = dmaaddr + 0x1000;
+	B43_WARN_ON(!is_8k_aligned(ring->dmabase));
 
 	return 0;
 }
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
+				   ring->alloc_dmabase, ring->alloc_descsize);
 }
 
 /* Reset the RX DMA channel */
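Note: the interesting part of this hunk is the alignment fallback. A 4k allocation that happens not to be 8k-aligned is retried as an 8k allocation, and the ring is then placed at base + 0x1000: any 4k-aligned address either is 8k-aligned already or becomes 8k-aligned after adding 0x1000. A minimal userspace sketch of the same trick, with hypothetical names and C11 aligned_alloc() standing in for __get_free_pages():

	#include <stdint.h>
	#include <stdlib.h>

	#define IS_8K_ALIGNED(a)	((((uintptr_t)(a)) & 0x1FFF) == 0)

	static void *get_8k_aligned_ring(void **raw_out)
	{
		/* Over-allocate: two 4k pages, 4k-aligned. */
		void *raw = aligned_alloc(0x1000, 0x2000);

		if (!raw)
			return NULL;
		*raw_out = raw;	/* keep the true base for free() */
		/* A 4k-aligned address is either 8k-aligned, or
		 * 8k-aligned after adding 0x1000. */
		return IS_8K_ALIGNED(raw) ? raw : (uint8_t *)raw + 0x1000;
	}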
@@ -530,29 +646,14 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
-	switch (ring->type) {
-	case B43_DMA_30BIT:
-		if ((u64)addr + buffersize > (1ULL << 30))
-			goto address_error;
-		break;
-	case B43_DMA_32BIT:
-		if ((u64)addr + buffersize > (1ULL << 32))
-			goto address_error;
-		break;
-	case B43_DMA_64BIT:
-		/* Currently we can't have addresses beyond
-		 * 64bit in the kernel. */
-		break;
-	}
+	if (!b43_dma_address_ok(ring, addr, buffersize)) {
+		/* We can't support this address. Unmap it again. */
+		unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+		return 1;
+	}
 
 	/* The address is OK. */
 	return 0;
-
-address_error:
-	/* We can't support this address. Unmap it again. */
-	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-
-	return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
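Note: the range check now shared through b43_dma_address_ok() rejects a buffer only if its last byte would fall beyond the addressable window, so a buffer ending exactly at the 1GiB mark still passes on a 30-bit device. A standalone illustration with made-up addresses:

	#include <assert.h>
	#include <stdint.h>

	/* Same condition as the driver's 30-bit case, inverted:
	 * OK if the region does not extend past the 2^30 boundary. */
	static int fits_30bit(uint64_t addr, uint64_t size)
	{
		return !(addr + size > (1ULL << 30));
	}

	int main(void)
	{
		assert(fits_30bit(0x3FFFF000, 0x1000));		/* ends at 2^30: OK */
		assert(!fits_30bit(0x3FFFF800, 0x1000));	/* crosses 2^30: bad */
		return 0;
	}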
@@ -614,6 +715,9 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	meta->dmaaddr = dmaaddr;
 	ring->ops->fill_descriptor(ring, desc, dmaaddr,
 				   ring->rx_buffersize, 0, 0, 0);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 
 	return 0;
 }
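Note: this sync (and the matching one added in dma_tx_fragment() below) follows from the allocation rewrite: the descriptor ring now lives in a streaming mapping from ssb_dma_map_single() rather than a coherent ssb_dma_alloc_consistent() buffer, so CPU writes to a descriptor must be explicitly handed back to the device. A sketch of the general ownership pattern, written against the generic kernel DMA API rather than the ssb_dma_* wrappers the driver uses, with a hypothetical descriptor layout:

	#include <linux/dma-mapping.h>

	/* Hypothetical two-word descriptor, for illustration only. */
	struct demo_desc {
		__le32 control;
		__le32 address;
	};

	static void demo_post_descriptor(struct device *dev, struct demo_desc *ring,
					 dma_addr_t ring_handle, size_t ring_size,
					 int slot, u32 buf_addr, u32 buf_len)
	{
		/* CPU owns the streaming mapping: write the slot... */
		ring[slot].address = cpu_to_le32(buf_addr);
		ring[slot].control = cpu_to_le32(buf_len);
		/* ...then hand ownership back so the device sees the update. */
		dma_sync_single_for_device(dev, ring_handle, ring_size,
					   DMA_TO_DEVICE);
	}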
@@ -770,7 +874,7 @@ static void free_all_descbuffers(struct b43_dmaring *ring)
 	for (i = 0; i < ring->nr_slots; i++) {
 		desc = ring->ops->idx2desc(ring, i, &meta);
 
-		if (!meta->skb) {
+		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
 			B43_WARN_ON(!ring->tx);
 			continue;
 		}
@@ -822,7 +926,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 				      enum b43_dmatype type)
 {
 	struct b43_dmaring *ring;
-	int err;
+	int i, err;
 	dma_addr_t dma_test;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -837,6 +941,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			      GFP_KERNEL);
 	if (!ring->meta)
 		goto err_kfree_ring;
+	for (i = 0; i < ring->nr_slots; i++)
+		ring->meta[i].skb = B43_DMA_PTR_POISON;
 
 	ring->type = type;
 	ring->dev = dev;
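Note: with this change meta->skb becomes a three-state field: B43_DMA_PTR_POISON marks a slot that never carried a frame, NULL marks a TX slot that only holds the per-frame header, and anything else is a live skb. The poison idiom in a nutshell, as a sketch; the real sentinel and helper live in the driver's dma.h, and the names below are hypothetical:

	#include <linux/err.h>

	/* Any pointer value that can never be a valid skb works
	 * as the sentinel; an ERR_PTR-style value is one option. */
	#define DEMO_PTR_POISON		(ERR_PTR(-ENOMEM))

	static inline bool demo_ptr_is_poisoned(const void *ptr)
	{
		return unlikely(ptr == DEMO_PTR_POISON);
	}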
@@ -1147,11 +1253,13 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
 	case 0x5000:
 		ring = dma->tx_ring_mcast;
 		break;
-	default:
-		B43_WARN_ON(1);
 	}
 	*slot = (cookie & 0x0FFF);
-	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
+	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
+		b43dbg(dev->wl, "TX-status contains "
+		       "invalid cookie: 0x%04X\n", cookie);
+		return NULL;
+	}
 
 	return ring;
 }
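Note: the cookie being validated packs the TX ring number into the top nibble (the 0x1000 through 0x5000 cases in the switch above) and the slot index into the low 12 bits. A hypothetical encoder consistent with that layout:

	#include <assert.h>
	#include <stdint.h>

	/* Hypothetical encoder matching the decode in parse_cookie():
	 * ring number + 1 in the top four bits, slot in the low twelve. */
	static uint16_t demo_make_cookie(unsigned int ring_index, unsigned int slot)
	{
		assert(slot <= 0x0FFF);
		return (uint16_t)(((ring_index + 1) << 12) | slot);
	}

	/* demo_make_cookie(4, 7) == 0x5007: mcast ring, slot 7. */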
@@ -1246,6 +1354,9 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	}
 	/* Now transfer the whole frame. */
 	wmb();
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 	ops->poke_tx(ring, next_slot(ring, slot));
 	return 0;
 
@@ -1387,19 +1498,40 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	struct b43_dmaring *ring;
 	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
-	int slot;
+	int slot, firstused;
 	bool frame_succeed;
 
 	ring = parse_cookie(dev, status->cookie, &slot);
 	if (unlikely(!ring))
 		return;
-
 	B43_WARN_ON(!ring->tx);
+
+	/* Sanity check: TX packets are processed in-order on one ring.
+	 * Check if the slot deduced from the cookie really is the first
+	 * used slot. */
+	firstused = ring->current_slot - ring->used_slots + 1;
+	if (firstused < 0)
+		firstused = ring->nr_slots + firstused;
+	if (unlikely(slot != firstused)) {
+		/* This possibly is a firmware bug and will result in
+		 * malfunction, memory leaks and/or stall of DMA functionality. */
+		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+		       "Expected %d, but got %d\n",
+		       ring->index, firstused, slot);
+		return;
+	}
+
 	ops = ring->ops;
 	while (1) {
-		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
+		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
 		desc = ops->idx2desc(ring, slot, &meta);
 
+		if (b43_dma_ptr_is_poisoned(meta->skb)) {
+			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
+			       "on ring %d\n",
+			       slot, firstused, ring->index);
+			break;
+		}
 		if (meta->skb) {
 			struct b43_private_tx_info *priv_info =
 				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
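Note: the firstused computation is plain ring-buffer arithmetic: the oldest in-flight slot is current_slot - used_slots + 1, wrapped back into [0, nr_slots). A standalone check with illustrative numbers:

	#include <assert.h>

	/* Oldest in-flight slot on a ring, mirroring the firstused
	 * computation in the hunk above. */
	static int first_used_slot(int current_slot, int used_slots, int nr_slots)
	{
		int first = current_slot - used_slots + 1;

		if (first < 0)
			first += nr_slots;
		return first;
	}

	int main(void)
	{
		/* 10 frames in flight ending at slot 3 on a 256-slot ring:
		 * the oldest wrapped around to slot 250. */
		assert(first_used_slot(3, 10, 256) == 250);
		assert(first_used_slot(9, 10, 256) == 0);
		return 0;
	}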
@@ -1415,7 +1547,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 		if (meta->is_last_fragment) {
 			struct ieee80211_tx_info *info;
 
-			BUG_ON(!meta->skb);
+			if (unlikely(!meta->skb)) {
+				/* This is a scatter-gather fragment of a frame, so
+				 * the skb pointer must not be NULL. */
+				b43dbg(dev->wl, "TX status unexpected NULL skb "
+				       "at slot %d (first=%d) on ring %d\n",
+				       slot, firstused, ring->index);
+				break;
+			}
 
 			info = IEEE80211_SKB_CB(meta->skb);
 
@@ -1433,20 +1572,29 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 #endif /* DEBUG */
 			ieee80211_tx_status(dev->wl->hw, meta->skb);
 
-			/* skb is freed by ieee80211_tx_status() */
-			meta->skb = NULL;
+			/* skb will be freed by ieee80211_tx_status().
+			 * Poison our pointer. */
+			meta->skb = B43_DMA_PTR_POISON;
 		} else {
 			/* No need to call free_descriptor_buffer here, as
 			 * this is only the txhdr, which is not allocated.
 			 */
-			B43_WARN_ON(meta->skb);
+			if (unlikely(meta->skb)) {
+				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
+				       "at slot %d (first=%d) on ring %d\n",
+				       slot, firstused, ring->index);
+				break;
+			}
 		}
 
 		/* Everything unmapped and free'd. So it's not used anymore. */
 		ring->used_slots--;
 
-		if (meta->is_last_fragment)
+		if (meta->is_last_fragment) {
+			/* This is the last scatter-gather
+			 * fragment of the frame. We are done. */
 			break;
+		}
 		slot = next_slot(ring, slot);
 	}
 	if (ring->stopped) {