author		Michael Buesch <mb@bu3sch.de>	2008-04-11 05:59:00 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-04-15 15:04:35 -0400
commit		4ac58469f13028e1eb97f8bc7b0fca5072591d8d (patch)
tree		c955e1b753e1f86c570d2d6f5f6095d1182d5c35 /drivers/net
parent		2d4543fdb487b1301ae48703dea3e66ead2d3c75 (diff)
ssb: Fix usage of struct device used for DMAing
This fixes DMA on architectures where DMA is nontrivial, like PPC64.
We must use the host device's (PCI) struct device for any DMA operation
instead of the SSB device. For this we add a new struct device pointer
to the SSB device structure that will always point to the right device
for DMAing.

Without this patch the b43 and b44 drivers won't work on complex-DMA
architectures that, for example, need dev->archdata for DMA operations.

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
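The diff below covers only the driver side; the ssb-core change that actually introduces the new pointer lives in include/linux/ssb/ssb.h and the ssb bus code, outside the 'drivers/net' limit of this diffstat. As a minimal sketch of what the drivers rely on — the dma_dev field name is taken from the calls below, but ssb_set_dma_dev and the exact core-side wiring are assumptions, not part of this patch:

/* Sketch only: struct ssb_device gains a dma_dev pointer that callers
 * hand to the DMA API instead of the SSB device's own struct device. */
struct ssb_device {
	/* ... existing members ... */
	struct device *dev;	/* the SSB device itself */
	struct device *dma_dev;	/* device to use for DMA operations */
};

/* Hypothetical core-side wiring: on a PCI host bus, DMA must go through
 * the host PCI device so that host-specific state (e.g. dev->archdata
 * on PPC64) is visible to the DMA API; on other bus types the SSB
 * device itself can serve as the DMA device. */
static void ssb_set_dma_dev(struct ssb_device *sdev)
{
	struct ssb_bus *bus = sdev->bus;

	if (bus->bustype == SSB_BUSTYPE_PCI)
		sdev->dma_dev = &bus->host_pci->dev;
	else
		sdev->dma_dev = sdev->dev;
}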
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/b44.c		52
-rw-r--r--	drivers/net/wireless/b43/dma.c	27
2 files changed, 40 insertions(+), 39 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 25f1337cd02c..59dce6aa0865 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -148,7 +148,7 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 						unsigned long offset,
 						enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_device(sdev->dev, dma_base,
+	dma_sync_single_range_for_device(sdev->dma_dev, dma_base,
 					 offset & dma_desc_align_mask,
 					 dma_desc_sync_size, dir);
 }
@@ -158,7 +158,7 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_cpu(sdev->dev, dma_base,
+	dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base,
 				      offset & dma_desc_align_mask,
 				      dma_desc_sync_size, dir);
 }
@@ -613,7 +613,7 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		dma_unmap_single(bp->sdev->dev,
+		dma_unmap_single(bp->sdev->dma_dev,
 				 rp->mapping,
 				 skb->len,
 				 DMA_TO_DEVICE);
@@ -653,7 +653,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = dma_map_single(bp->sdev->dev, skb->data,
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 				 RX_PKT_BUF_SZ,
 				 DMA_FROM_DEVICE);
 
@@ -663,19 +663,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
 		/* Sigh... */
 		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dev, mapping,
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = dma_map_single(bp->sdev->dev, skb->data,
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 					 RX_PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
 		if (dma_mapping_error(mapping) ||
 		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 				     dest_idx * sizeof(dest_desc),
 				     DMA_BIDIRECTIONAL);
 
-	dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
+	dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr),
 				   RX_PKT_BUF_SZ,
 				   DMA_FROM_DEVICE);
 }
@@ -772,7 +772,7 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		dma_sync_single_for_cpu(bp->sdev->dev, map,
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
 					RX_PKT_BUF_SZ,
 					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
@@ -806,7 +806,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			dma_unmap_single(bp->sdev->dev, map,
+			dma_unmap_single(bp->sdev->dma_dev, map,
 					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
@@ -966,24 +966,24 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dev, mapping, len,
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 					 DMA_TO_DEVICE);
 
 		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
 					 len, DMA_TO_DEVICE);
 		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dev, mapping,
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
 						 len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1082,7 +1082,7 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
@@ -1094,7 +1094,7 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
 				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
@@ -1117,12 +1117,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
 					   DMA_TABLE_BYTES,
 					   DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
 					   DMA_TABLE_BYTES,
 					   DMA_TO_DEVICE);
 
@@ -1144,24 +1144,24 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
 					 DMA_TABLE_BYTES,
 					 DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
 					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
 					 DMA_TABLE_BYTES,
 					 DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
 					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
@@ -1187,7 +1187,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1199,7 +1199,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
 					     DMA_TABLE_BYTES,
 					     DMA_BIDIRECTIONAL);
 
@@ -1214,7 +1214,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to dma_alloc_coherent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1226,7 +1226,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
 					     DMA_TABLE_BYTES,
 					     DMA_TO_DEVICE);
 
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 948eb1fe916b..48e912487b16 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -373,10 +373,10 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = dma_map_single(ring->dev->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = dma_map_single(ring->dev->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_FROM_DEVICE);
 	}
 
@@ -388,9 +388,10 @@ static inline
 				    dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_TO_DEVICE);
 	} else {
-		dma_unmap_single(ring->dev->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_FROM_DEVICE);
 	}
 }
@@ -400,7 +401,7 @@ static inline
 					   dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_cpu(ring->dev->dev->dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				addr, len, DMA_FROM_DEVICE);
 }
 
@@ -409,7 +410,7 @@ static inline
 					      dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_device(ring->dev->dev->dev,
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 				   addr, len, DMA_FROM_DEVICE);
 }
 
@@ -425,7 +426,7 @@ static inline
 
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	struct device *dev = ring->dev->dev->dev;
+	struct device *dma_dev = ring->dev->dev->dma_dev;
 	gfp_t flags = GFP_KERNEL;
 
 	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
@@ -439,7 +440,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
+	ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
 					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
 		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
@@ -452,9 +453,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	struct device *dev = ring->dev->dev->dev;
+	struct device *dma_dev = ring->dev->dev->dma_dev;
 
-	dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
+	dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
 			  ring->descbase, ring->dmabase);
 }
 
@@ -854,7 +855,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->dev->dev,
+		dma_test = dma_map_single(dev->dev->dma_dev,
 					  ring->txhdr_cache,
 					  b43_txhdr_size(dev),
 					  DMA_TO_DEVICE);
@@ -869,7 +870,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = dma_map_single(dev->dev->dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
 						  ring->txhdr_cache,
 						  b43_txhdr_size(dev),
 						  DMA_TO_DEVICE);
@@ -883,7 +884,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			}
 		}
 
-		dma_unmap_single(dev->dev->dev,
+		dma_unmap_single(dev->dev->dma_dev,
 				 dma_test, b43_txhdr_size(dev),
 				 DMA_TO_DEVICE);
 	}