author    Michael Buesch <mb@bu3sch.de>    2008-02-05 06:50:41 -0500
committer John W. Linville <linville@tuxdriver.com>    2008-02-05 14:35:47 -0500
commit    b79caa68c0d48477453a90d12be34b47cb75f3a8 (patch)
tree      103b22e57178cdfcdab2afd2fbd2d9ca45644fc7 /drivers
parent    532031d7f426eb02f854d13184416cabcb01bdd5 (diff)
b43: Fix DMA for 30/32-bit DMA engines
This checks if the DMA address is bigger than what the controller can manage. It will reallocate the buffers in the GFP_DMA zone in that case.

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
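The core of the new check is a plain range comparison: a mapping of buffersize bytes starting at addr is only usable if it ends inside the engine's addressable window (2^30 bytes for 30-bit engines, 2^32 bytes for 32-bit ones, effectively unlimited for 64-bit ones). A minimal standalone sketch of that arithmetic, with hypothetical names and outside the driver itself (the patch implements it as b43_dma_mapping_error(), shown in the diff below):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the bounds check the patch adds.
 * addr_bits is 30, 32 or 64, matching the DMA engine type. */
static bool dma_addr_out_of_range(uint64_t addr, size_t buffersize,
                                  unsigned int addr_bits)
{
        if (addr_bits >= 64)
                return false; /* A kernel address cannot exceed 64 bits anyway. */
        return addr + buffersize > (1ULL << addr_bits);
}

int main(void)
{
        /* A buffer ending just past the 1 GiB boundary is rejected by a
         * 30-bit engine but accepted by a 32-bit one. */
        printf("%d\n", dma_addr_out_of_range(0x3FFFF000ULL, 0x2000, 30)); /* 1 */
        printf("%d\n", dma_addr_out_of_range(0x3FFFF000ULL, 0x2000, 32)); /* 0 */
        return 0;
}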
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/net/wireless/b43/dma.c    137
-rw-r--r--    drivers/net/wireless/b43/dma.h    20
2 files changed, 99 insertions(+), 58 deletions(-)
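For context, the patch also replaces the old dma64 boolean with an enum b43_dmatype (30, 32 or 64) that b43_dma_init() derives from the supported DMA mask. A hedged, standalone sketch of that classification, using stand-in names and the literal values that the kernel's DMA_30BIT_MASK / DMA_32BIT_MASK / DMA_64BIT_MASK constants expand to:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the driver switches on the DMA_*BIT_MASK constants;
 * the literal masks below are the values those constants expand to. */
enum dmatype { DMA_TYPE_30BIT = 30, DMA_TYPE_32BIT = 32, DMA_TYPE_64BIT = 64 };

static enum dmatype mask_to_type(uint64_t dmamask)
{
        if (dmamask == ((1ULL << 30) - 1))
                return DMA_TYPE_30BIT;
        if (dmamask == 0xFFFFFFFFULL)
                return DMA_TYPE_32BIT;
        return DMA_TYPE_64BIT; /* ~0ULL, the full 64-bit mask */
}

int main(void)
{
        printf("%d-bit DMA initialized\n", (int)mask_to_type(0xFFFFFFFFULL));
        return 0;
}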
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 8a708b77925d..3dfb28a34be9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -337,7 +337,7 @@ static inline int txring_to_priority(struct b43_dmaring *ring)
         return idx_to_prio[index];
 }
 
-u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
+static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
 {
         static const u16 map64[] = {
                 B43_MMIO_DMA64_BASE0,
@@ -356,7 +356,7 @@ u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
                 B43_MMIO_DMA32_BASE5,
         };
 
-        if (dma64bit) {
+        if (type == B43_DMA_64BIT) {
                 B43_WARN_ON(!(controller_idx >= 0 &&
                               controller_idx < ARRAY_SIZE(map64)));
                 return map64[controller_idx];
@@ -437,7 +437,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
          * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
          * which accounts for the GFP_DMA flag below.
          */
-        if (ring->dma64)
+        if (ring->type == B43_DMA_64BIT)
                 flags |= GFP_DMA;
         ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
                                             &(ring->dmabase), flags);
@@ -459,7 +459,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
 }
 
 /* Reset the RX DMA channel */
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
+                                      enum b43_dmatype type)
 {
         int i;
         u32 value;
@@ -467,12 +468,13 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 
         might_sleep();
 
-        offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
+        offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
         b43_write32(dev, mmio_base + offset, 0);
         for (i = 0; i < 10; i++) {
-                offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
+                offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
+                                                   B43_DMA32_RXSTATUS;
                 value = b43_read32(dev, mmio_base + offset);
-                if (dma64) {
+                if (type == B43_DMA_64BIT) {
                         value &= B43_DMA64_RXSTAT;
                         if (value == B43_DMA64_RXSTAT_DISABLED) {
                                 i = -1;
@@ -496,7 +498,8 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 }
 
 /* Reset the TX DMA channel */
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
+                                      enum b43_dmatype type)
 {
         int i;
         u32 value;
@@ -505,9 +508,10 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
         might_sleep();
 
         for (i = 0; i < 10; i++) {
-                offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+                                                   B43_DMA32_TXSTATUS;
                 value = b43_read32(dev, mmio_base + offset);
-                if (dma64) {
+                if (type == B43_DMA_64BIT) {
                         value &= B43_DMA64_TXSTAT;
                         if (value == B43_DMA64_TXSTAT_DISABLED ||
                             value == B43_DMA64_TXSTAT_IDLEWAIT ||
@@ -522,12 +526,13 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
                 }
                 msleep(1);
         }
-        offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
+        offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
         b43_write32(dev, mmio_base + offset, 0);
         for (i = 0; i < 10; i++) {
-                offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+                offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+                                                   B43_DMA32_TXSTATUS;
                 value = b43_read32(dev, mmio_base + offset);
-                if (dma64) {
+                if (type == B43_DMA_64BIT) {
                         value &= B43_DMA64_TXSTAT;
                         if (value == B43_DMA64_TXSTAT_DISABLED) {
                                 i = -1;
@@ -552,6 +557,33 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
         return 0;
 }
 
+/* Check if a DMA mapping address is invalid. */
+static bool b43_dma_mapping_error(struct b43_dmaring *ring,
+                                  dma_addr_t addr,
+                                  size_t buffersize)
+{
+        if (unlikely(dma_mapping_error(addr)))
+                return 1;
+
+        switch (ring->type) {
+        case B43_DMA_30BIT:
+                if ((u64)addr + buffersize > (1ULL << 30))
+                        return 1;
+                break;
+        case B43_DMA_32BIT:
+                if ((u64)addr + buffersize > (1ULL << 32))
+                        return 1;
+                break;
+        case B43_DMA_64BIT:
+                /* Currently we can't have addresses beyond
+                 * 64bit in the kernel. */
+                break;
+        }
+
+        /* The address is OK. */
+        return 0;
+}
+
 static int setup_rx_descbuffer(struct b43_dmaring *ring,
                                struct b43_dmadesc_generic *desc,
                                struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
@@ -567,7 +599,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
         if (unlikely(!skb))
                 return -ENOMEM;
         dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-        if (dma_mapping_error(dmaaddr)) {
+        if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
                 /* ugh. try to realloc in zone_dma */
                 gfp_flags |= GFP_DMA;
 
@@ -580,7 +612,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
                                          ring->rx_buffersize, 0);
         }
 
-        if (dma_mapping_error(dmaaddr)) {
+        if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
                 dev_kfree_skb_any(skb);
                 return -EIO;
         }
@@ -645,7 +677,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
         u32 trans = ssb_dma_translation(ring->dev->dev);
 
         if (ring->tx) {
-                if (ring->dma64) {
+                if (ring->type == B43_DMA_64BIT) {
                         u64 ringbase = (u64) (ring->dmabase);
 
                         addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -677,7 +709,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
                 err = alloc_initial_descbuffers(ring);
                 if (err)
                         goto out;
-                if (ring->dma64) {
+                if (ring->type == B43_DMA_64BIT) {
                         u64 ringbase = (u64) (ring->dmabase);
 
                         addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -722,16 +754,16 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
 {
         if (ring->tx) {
                 b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
-                                           ring->dma64);
-                if (ring->dma64) {
+                                           ring->type);
+                if (ring->type == B43_DMA_64BIT) {
                         b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
                         b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
                 } else
                         b43_dma_write(ring, B43_DMA32_TXRING, 0);
         } else {
                 b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
-                                           ring->dma64);
-                if (ring->dma64) {
+                                           ring->type);
+                if (ring->type == B43_DMA_64BIT) {
                         b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
                         b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
                 } else
@@ -786,7 +818,8 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
 static
 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                       int controller_index,
-                                      int for_tx, int dma64)
+                                      int for_tx,
+                                      enum b43_dmatype type)
 {
         struct b43_dmaring *ring;
         int err;
@@ -796,6 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
         ring = kzalloc(sizeof(*ring), GFP_KERNEL);
         if (!ring)
                 goto out;
+        ring->type = type;
 
         nr_slots = B43_RXRING_SLOTS;
         if (for_tx)
@@ -818,7 +852,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                   b43_txhdr_size(dev),
                                   DMA_TO_DEVICE);
 
-        if (dma_mapping_error(dma_test)) {
+        if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
                 /* ugh realloc */
                 kfree(ring->txhdr_cache);
                 ring->txhdr_cache = kcalloc(nr_slots,
@@ -832,7 +866,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                           b43_txhdr_size(dev),
                                           DMA_TO_DEVICE);
 
-                if (dma_mapping_error(dma_test))
+                if (b43_dma_mapping_error(ring, dma_test,
+                                          b43_txhdr_size(dev)))
                         goto err_kfree_txhdr_cache;
         }
 
@@ -843,10 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 
         ring->dev = dev;
         ring->nr_slots = nr_slots;
-        ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
+        ring->mmio_base = b43_dmacontroller_base(type, controller_index);
         ring->index = controller_index;
-        ring->dma64 = !!dma64;
-        if (dma64)
+        if (type == B43_DMA_64BIT)
                 ring->ops = &dma64_ops;
         else
                 ring->ops = &dma32_ops;
@@ -896,8 +930,8 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
         if (!ring)
                 return;
 
-        b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
-               (ring->dma64) ? "64" : "32",
+        b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
+               (unsigned int)(ring->type),
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
         /* Device IRQs are disabled prior entering this function,
@@ -941,12 +975,22 @@ int b43_dma_init(struct b43_wldev *dev)
         struct b43_dmaring *ring;
         int err;
         u64 dmamask;
-        int dma64 = 0;
+        enum b43_dmatype type;
 
         dmamask = supported_dma_mask(dev);
-        if (dmamask == DMA_64BIT_MASK)
-                dma64 = 1;
-
+        switch (dmamask) {
+        default:
+                B43_WARN_ON(1);
+        case DMA_30BIT_MASK:
+                type = B43_DMA_30BIT;
+                break;
+        case DMA_32BIT_MASK:
+                type = B43_DMA_32BIT;
+                break;
+        case DMA_64BIT_MASK:
+                type = B43_DMA_64BIT;
+                break;
+        }
         err = ssb_dma_set_mask(dev->dev, dmamask);
         if (err) {
                 b43err(dev->wl, "The machine/kernel does not support "
@@ -958,52 +1002,51 @@ int b43_dma_init(struct b43_wldev *dev)
 
         err = -ENOMEM;
         /* setup TX DMA channels. */
-        ring = b43_setup_dmaring(dev, 0, 1, dma64);
+        ring = b43_setup_dmaring(dev, 0, 1, type);
         if (!ring)
                 goto out;
         dma->tx_ring0 = ring;
 
-        ring = b43_setup_dmaring(dev, 1, 1, dma64);
+        ring = b43_setup_dmaring(dev, 1, 1, type);
         if (!ring)
                 goto err_destroy_tx0;
         dma->tx_ring1 = ring;
 
-        ring = b43_setup_dmaring(dev, 2, 1, dma64);
+        ring = b43_setup_dmaring(dev, 2, 1, type);
         if (!ring)
                 goto err_destroy_tx1;
         dma->tx_ring2 = ring;
 
-        ring = b43_setup_dmaring(dev, 3, 1, dma64);
+        ring = b43_setup_dmaring(dev, 3, 1, type);
         if (!ring)
                 goto err_destroy_tx2;
         dma->tx_ring3 = ring;
 
-        ring = b43_setup_dmaring(dev, 4, 1, dma64);
+        ring = b43_setup_dmaring(dev, 4, 1, type);
         if (!ring)
                 goto err_destroy_tx3;
         dma->tx_ring4 = ring;
 
-        ring = b43_setup_dmaring(dev, 5, 1, dma64);
+        ring = b43_setup_dmaring(dev, 5, 1, type);
         if (!ring)
                 goto err_destroy_tx4;
         dma->tx_ring5 = ring;
 
         /* setup RX DMA channels. */
-        ring = b43_setup_dmaring(dev, 0, 0, dma64);
+        ring = b43_setup_dmaring(dev, 0, 0, type);
         if (!ring)
                 goto err_destroy_tx5;
         dma->rx_ring0 = ring;
 
         if (dev->dev->id.revision < 5) {
-                ring = b43_setup_dmaring(dev, 3, 0, dma64);
+                ring = b43_setup_dmaring(dev, 3, 0, type);
                 if (!ring)
                         goto err_destroy_rx0;
                 dma->rx_ring3 = ring;
         }
 
-        b43dbg(dev->wl, "%d-bit DMA initialized\n",
-               (dmamask == DMA_64BIT_MASK) ? 64 :
-               (dmamask == DMA_32BIT_MASK) ? 32 : 30);
+        b43dbg(dev->wl, "%u-bit DMA initialized\n",
+               (unsigned int)type);
         err = 0;
 out:
         return err;
@@ -1146,7 +1189,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
         meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                            hdrsize, 1);
-        if (dma_mapping_error(meta_hdr->dmaaddr)) {
+        if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
                 ring->current_slot = old_top_slot;
                 ring->used_slots = old_used_slots;
                 return -EIO;
@@ -1165,7 +1208,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
         meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
         /* create a bounce buffer in zone_dma on mapping failure. */
-        if (dma_mapping_error(meta->dmaaddr)) {
+        if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
                 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                 if (!bounce_skb) {
                         ring->current_slot = old_top_slot;
@@ -1179,7 +1222,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
                 skb = bounce_skb;
                 meta->skb = skb;
                 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-                if (dma_mapping_error(meta->dmaaddr)) {
+                if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
                         ring->current_slot = old_top_slot;
                         ring->used_slots = old_used_slots;
                         err = -EIO;
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 58db03ac536e..c0d6b69e6501 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -203,6 +203,12 @@ struct b43_dma_ops {
         void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
 };
 
+enum b43_dmatype {
+        B43_DMA_30BIT = 30,
+        B43_DMA_32BIT = 32,
+        B43_DMA_64BIT = 64,
+};
+
 struct b43_dmaring {
         /* Lowlevel DMA ops. */
         const struct b43_dma_ops *ops;
@@ -235,8 +241,8 @@ struct b43_dmaring {
         int index;
         /* Boolean. Is this a TX ring? */
         bool tx;
-        /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
-        bool dma64;
+        /* The type of DMA engine used. */
+        enum b43_dmatype type;
         /* Boolean. Is this ring stopped at ieee80211 level? */
         bool stopped;
         /* Lock, only used for TX. */
@@ -255,8 +261,7 @@ static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
         return b43_read32(ring->dev, ring->mmio_base + offset);
 }
 
-static inline
-    void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
+static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
 {
         b43_write32(ring->dev, ring->mmio_base + offset, value);
 }
@@ -264,13 +269,6 @@ static inline
 int b43_dma_init(struct b43_wldev *dev);
 void b43_dma_free(struct b43_wldev *dev);
 
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev,
-                               u16 dmacontroller_mmio_base, int dma64);
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev,
-                               u16 dmacontroller_mmio_base, int dma64);
-
-u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx);
-
 void b43_dma_tx_suspend(struct b43_wldev *dev);
 void b43_dma_tx_resume(struct b43_wldev *dev);
 