author		Stefano Brivio <stefano.brivio@polimi.it>	2008-02-08 00:31:53 -0500
committer	John W. Linville <linville@tuxdriver.com>	2008-02-15 13:44:18 -0500
commit		8e118f0ed866afab654a438b85643d4f1472f28b (patch)
tree		562364b18b16c98a7809ebce7be6b0d20c32eaec /drivers/net
parent		d68ab68066805fdfee1f5f29ec2ec0179fd92fe1 (diff)
b43legacy: fix DMA for 30/32-bit DMA engines
This checks whether the DMA address is bigger than what the controller can
manage, and reallocates the buffers in the GFP_DMA zone in that case. The
patch by Michael Buesch has been ported to b43legacy. Thanks to Matti
Viljanen for reporting this.

Cc: Matti Viljanen <viljanen.matti@gmail.com>
Signed-off-by: Stefano Brivio <stefano.brivio@polimi.it>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
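The heart of the change is a bounds check run after every DMA mapping: a
mapping is rejected not only when dma_mapping_error() reports a failure, but
also when the buffer would end beyond what the DMA engine can address. The
sketch below is illustrative only; the helper name dma_addr_exceeds_engine and
its 'bits' parameter are made up for this note, and the real check is
b43legacy_dma_mapping_error() in the diff below.

	#include <linux/types.h>

	/*
	 * Illustrative sketch, not the driver code: reject a mapping whose
	 * buffer would end past the engine's reach.  'bits' would be 30, 32
	 * or 64 depending on the DMA engine type.
	 */
	static bool dma_addr_exceeds_engine(dma_addr_t addr, size_t len,
					    unsigned int bits)
	{
		if (bits >= 64)
			return false;	/* nothing in the kernel exceeds 64 bits */
		return (u64)addr + len > (1ULL << bits);
	}

When such a check fails, the RX and TX paths in the patch free the buffer they
just mapped and retry the allocation with GFP_DMA, so the replacement lands in
ZONE_DMA (the low 16 MB on x86), which even a 30-bit engine can address.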
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/wireless/b43legacy/dma.c	167
-rw-r--r--	drivers/net/wireless/b43legacy/dma.h	33
2 files changed, 113 insertions, 87 deletions
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 6e08405e8026..e87b427d5e43 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -354,7 +354,8 @@ return 0;
 }
 
 
-u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx)
+static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
+					int controller_idx)
 {
 	static const u16 map64[] = {
 		B43legacy_MMIO_DMA64_BASE0,
@@ -373,7 +374,7 @@ u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx)
 		B43legacy_MMIO_DMA32_BASE5,
 	};
 
-	if (dma64bit) {
+	if (type == B43legacy_DMA_64BIT) {
 		B43legacy_WARN_ON(!(controller_idx >= 0 &&
 				    controller_idx < ARRAY_SIZE(map64)));
 		return map64[controller_idx];
@@ -480,8 +481,9 @@ static void free_ringmemory(struct b43legacy_dmaring *ring)
 }
 
 /* Reset the RX DMA channel */
-int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
-				     u16 mmio_base, int dma64)
+static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
+					    u16 mmio_base,
+					    enum b43legacy_dmatype type)
 {
 	int i;
 	u32 value;
@@ -489,13 +491,14 @@ int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
 
 	might_sleep();
 
-	offset = dma64 ? B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
+	offset = (type == B43legacy_DMA_64BIT) ?
+		 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
 	b43legacy_write32(dev, mmio_base + offset, 0);
 	for (i = 0; i < 10; i++) {
-		offset = dma64 ? B43legacy_DMA64_RXSTATUS :
-				 B43legacy_DMA32_RXSTATUS;
+		offset = (type == B43legacy_DMA_64BIT) ?
+			 B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
 		value = b43legacy_read32(dev, mmio_base + offset);
-		if (dma64) {
+		if (type == B43legacy_DMA_64BIT) {
 			value &= B43legacy_DMA64_RXSTAT;
 			if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
 				i = -1;
@@ -519,8 +522,9 @@ int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
 }
 
 /* Reset the RX DMA channel */
-int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
-				     u16 mmio_base, int dma64)
+static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
+					    u16 mmio_base,
+					    enum b43legacy_dmatype type)
 {
 	int i;
 	u32 value;
@@ -529,10 +533,10 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
 	might_sleep();
 
 	for (i = 0; i < 10; i++) {
-		offset = dma64 ? B43legacy_DMA64_TXSTATUS :
-				 B43legacy_DMA32_TXSTATUS;
+		offset = (type == B43legacy_DMA_64BIT) ?
+			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
 		value = b43legacy_read32(dev, mmio_base + offset);
-		if (dma64) {
+		if (type == B43legacy_DMA_64BIT) {
 			value &= B43legacy_DMA64_TXSTAT;
 			if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
 			    value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
@@ -547,13 +551,14 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
 		}
 		msleep(1);
 	}
-	offset = dma64 ? B43legacy_DMA64_TXCTL : B43legacy_DMA32_TXCTL;
+	offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
+						 B43legacy_DMA32_TXCTL;
 	b43legacy_write32(dev, mmio_base + offset, 0);
 	for (i = 0; i < 10; i++) {
-		offset = dma64 ? B43legacy_DMA64_TXSTATUS :
-				 B43legacy_DMA32_TXSTATUS;
+		offset = (type == B43legacy_DMA_64BIT) ?
+			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
 		value = b43legacy_read32(dev, mmio_base + offset);
-		if (dma64) {
+		if (type == B43legacy_DMA_64BIT) {
 			value &= B43legacy_DMA64_TXSTAT;
 			if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
 				i = -1;
@@ -578,6 +583,32 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
 	return 0;
 }
 
+/* Check if a DMA mapping address is invalid. */
+static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
+					dma_addr_t addr,
+					size_t buffersize)
+{
+	if (unlikely(dma_mapping_error(addr)))
+		return 1;
+
+	switch (ring->type) {
+	case B43legacy_DMA_30BIT:
+		if ((u64)addr + buffersize > (1ULL << 30))
+			return 1;
+		break;
+	case B43legacy_DMA_32BIT:
+		if ((u64)addr + buffersize > (1ULL << 32))
+			return 1;
+		break;
+	case B43legacy_DMA_64BIT:
+		/* Currently we can't have addresses beyond 64 bits in the kernel. */
+		break;
+	}
+
+	/* The address is OK. */
+	return 0;
+}
+
 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
 			       struct b43legacy_dmadesc_generic *desc,
 			       struct b43legacy_dmadesc_meta *meta,
@@ -595,7 +626,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
 		return -ENOMEM;
 	dmaaddr = map_descbuffer(ring, skb->data,
 				 ring->rx_buffersize, 0);
-	if (dma_mapping_error(dmaaddr)) {
+	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
 		/* ugh. try to realloc in zone_dma */
 		gfp_flags |= GFP_DMA;
 
@@ -608,7 +639,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
 					 ring->rx_buffersize, 0);
 	}
 
-	if (dma_mapping_error(dmaaddr)) {
+	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
@@ -674,7 +705,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
 	u32 trans = ssb_dma_translation(ring->dev->dev);
 
 	if (ring->tx) {
-		if (ring->dma64) {
+		if (ring->type == B43legacy_DMA_64BIT) {
 			u64 ringbase = (u64)(ring->dmabase);
 
 			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -709,7 +740,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
 		err = alloc_initial_descbuffers(ring);
 		if (err)
 			goto out;
-		if (ring->dma64) {
+		if (ring->type == B43legacy_DMA_64BIT) {
 			u64 ringbase = (u64)(ring->dmabase);
 
 			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -760,16 +791,16 @@ static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
 {
 	if (ring->tx) {
 		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
-						 ring->dma64);
-		if (ring->dma64) {
+						 ring->type);
+		if (ring->type == B43legacy_DMA_64BIT) {
 			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
 			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
 		} else
 			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
 	} else {
 		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
-						 ring->dma64);
-		if (ring->dma64) {
+						 ring->type);
+		if (ring->type == B43legacy_DMA_64BIT) {
 			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
 			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
 		} else
@@ -824,11 +855,10 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
 
 /* Main initialization function. */
 static
-struct b43legacy_dmaring *b43legacy_setup_dmaring(
-					struct b43legacy_wldev *dev,
-					int controller_index,
-					int for_tx,
-					int dma64)
+struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
+						  int controller_index,
+						  int for_tx,
+						  enum b43legacy_dmatype type)
 {
 	struct b43legacy_dmaring *ring;
 	int err;
@@ -838,6 +868,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
 		goto out;
+	ring->type = type;
 
 	nr_slots = B43legacy_RXRING_SLOTS;
 	if (for_tx)
@@ -855,12 +886,12 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->dev->dev,
-					  ring->txhdr_cache,
-					  sizeof(struct b43legacy_txhdr_fw3),
-					  DMA_TO_DEVICE);
+		dma_test = dma_map_single(dev->dev->dev, ring->txhdr_cache,
+					  sizeof(struct b43legacy_txhdr_fw3),
+					  DMA_TO_DEVICE);
 
-		if (dma_mapping_error(dma_test)) {
+		if (b43legacy_dma_mapping_error(ring, dma_test,
+						sizeof(struct b43legacy_txhdr_fw3))) {
 			/* ugh realloc */
 			kfree(ring->txhdr_cache);
 			ring->txhdr_cache = kcalloc(nr_slots,
@@ -874,7 +905,8 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
 						  sizeof(struct b43legacy_txhdr_fw3),
 						  DMA_TO_DEVICE);
 
-			if (dma_mapping_error(dma_test))
+			if (b43legacy_dma_mapping_error(ring, dma_test,
+					sizeof(struct b43legacy_txhdr_fw3)))
 				goto err_kfree_txhdr_cache;
 		}
 
@@ -885,11 +917,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
 
 	ring->dev = dev;
 	ring->nr_slots = nr_slots;
-	ring->mmio_base = b43legacy_dmacontroller_base(dma64,
-						       controller_index);
+	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
 	ring->index = controller_index;
-	ring->dma64 = !!dma64;
-	if (dma64)
+	if (type == B43legacy_DMA_64BIT)
 		ring->ops = &dma64_ops;
 	else
 		ring->ops = &dma32_ops;
@@ -939,10 +969,10 @@ static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
 	if (!ring)
 		return;
 
-	b43legacydbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots:"
-		     " %d/%d\n", (ring->dma64) ? "64" : "32", ring->mmio_base,
-		     (ring->tx) ? "TX" : "RX",
-		     ring->max_used_slots, ring->nr_slots);
+	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
+		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
+		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
+		     ring->nr_slots);
 	/* Device IRQs are disabled prior entering this function,
 	 * so no need to take care of concurrency with rx handler stuff.
 	 */
@@ -988,11 +1018,22 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
 	struct b43legacy_dmaring *ring;
 	int err;
 	u64 dmamask;
-	int dma64 = 0;
+	enum b43legacy_dmatype type;
 
 	dmamask = supported_dma_mask(dev);
-	if (dmamask == DMA_64BIT_MASK)
-		dma64 = 1;
+	switch (dmamask) {
+	default:
+		B43legacy_WARN_ON(1);
+	case DMA_30BIT_MASK:
+		type = B43legacy_DMA_30BIT;
+		break;
+	case DMA_32BIT_MASK:
+		type = B43legacy_DMA_32BIT;
+		break;
+	case DMA_64BIT_MASK:
+		type = B43legacy_DMA_64BIT;
+		break;
+	}
 
 	err = ssb_dma_set_mask(dev->dev, dmamask);
 	if (err) {
@@ -1010,52 +1051,50 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
 
 	err = -ENOMEM;
 	/* setup TX DMA channels. */
-	ring = b43legacy_setup_dmaring(dev, 0, 1, dma64);
+	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
 	if (!ring)
 		goto out;
 	dma->tx_ring0 = ring;
 
-	ring = b43legacy_setup_dmaring(dev, 1, 1, dma64);
+	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
 	if (!ring)
 		goto err_destroy_tx0;
 	dma->tx_ring1 = ring;
 
-	ring = b43legacy_setup_dmaring(dev, 2, 1, dma64);
+	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
 	if (!ring)
 		goto err_destroy_tx1;
 	dma->tx_ring2 = ring;
 
-	ring = b43legacy_setup_dmaring(dev, 3, 1, dma64);
+	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
 	if (!ring)
 		goto err_destroy_tx2;
 	dma->tx_ring3 = ring;
 
-	ring = b43legacy_setup_dmaring(dev, 4, 1, dma64);
+	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
 	if (!ring)
 		goto err_destroy_tx3;
 	dma->tx_ring4 = ring;
 
-	ring = b43legacy_setup_dmaring(dev, 5, 1, dma64);
+	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
 	if (!ring)
 		goto err_destroy_tx4;
 	dma->tx_ring5 = ring;
 
 	/* setup RX DMA channels. */
-	ring = b43legacy_setup_dmaring(dev, 0, 0, dma64);
+	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
 	if (!ring)
 		goto err_destroy_tx5;
 	dma->rx_ring0 = ring;
 
 	if (dev->dev->id.revision < 5) {
-		ring = b43legacy_setup_dmaring(dev, 3, 0, dma64);
+		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
 		if (!ring)
 			goto err_destroy_rx0;
 		dma->rx_ring3 = ring;
 	}
 
-	b43legacydbg(dev->wl, "%d-bit DMA initialized\n",
-		     (dmamask == DMA_64BIT_MASK) ? 64 :
-		     (dmamask == DMA_32BIT_MASK) ? 32 : 30);
+	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
 	err = 0;
 out:
 	return err;
@@ -1194,9 +1233,13 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	}
 
 	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
 					   sizeof(struct b43legacy_txhdr_fw3), 1);
-	if (dma_mapping_error(meta_hdr->dmaaddr))
+	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
+					sizeof(struct b43legacy_txhdr_fw3))) {
+		ring->current_slot = old_top_slot;
+		ring->used_slots = old_used_slots;
 		return -EIO;
+	}
 	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
 			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
 
@@ -1211,7 +1254,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
-	if (dma_mapping_error(meta->dmaaddr)) {
+	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
 		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
@@ -1225,7 +1268,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 		skb = bounce_skb;
 		meta->skb = skb;
 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-		if (dma_mapping_error(meta->dmaaddr)) {
+		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -EIO;
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 26f6ab08de75..2dd488c5be2d 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -218,6 +218,12 @@ struct b43legacy_dma_ops {
 	void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot);
 };
 
+enum b43legacy_dmatype {
+	B43legacy_DMA_30BIT = 30,
+	B43legacy_DMA_32BIT = 32,
+	B43legacy_DMA_64BIT = 64,
+};
+
 struct b43legacy_dmaring {
 	/* Lowlevel DMA ops. */
 	const struct b43legacy_dma_ops *ops;
@@ -250,8 +256,8 @@ struct b43legacy_dmaring {
 	int index;
 	/* Boolean. Is this a TX ring? */
 	bool tx;
-	/* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
-	bool dma64;
+	/* The type of DMA engine used. */
+	enum b43legacy_dmatype type;
 	/* Boolean. Is this ring stopped at ieee80211 level? */
 	bool stopped;
 	/* Lock, only used for TX. */
@@ -284,15 +290,6 @@ void b43legacy_dma_write(struct b43legacy_dmaring *ring,
 int b43legacy_dma_init(struct b43legacy_wldev *dev);
 void b43legacy_dma_free(struct b43legacy_wldev *dev);
 
-int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
-				     u16 dmacontroller_mmio_base,
-				     int dma64);
-int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
-				     u16 dmacontroller_mmio_base,
-				     int dma64);
-
-u16 b43legacy_dmacontroller_base(int dma64bit, int dmacontroller_idx);
-
 void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
 void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
 
@@ -320,20 +317,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
 {
 }
 static inline
-int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
-				     u16 dmacontroller_mmio_base,
-				     int dma64)
-{
-	return 0;
-}
-static inline
-int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
-				     u16 dmacontroller_mmio_base,
-				     int dma64)
-{
-	return 0;
-}
-static inline
 void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
 				struct ieee80211_tx_queue_stats *stats)
 {