author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-06-03 22:37:36 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-06-04 16:00:42 -0400
commit		718e8898af2c523b1785f025350c34c59750734d (patch)
tree		b1cec0d591fbd6b6fe1ea11bcc00446386252821 /drivers/net/wireless/b43/dma.c
parent		4e8031328be3e19de937354b76a9e69878c3101e (diff)
b43: replace the ssb_dma API with the generic DMA API
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Stefano Brivio <stefano.brivio@polimi.it>
Cc: John W. Linville <linville@tuxdriver.com>
Acked-by: Michael Buesch <mb@bu3sch.de>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
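The conversion follows one pattern throughout: every ssb_dma_* wrapper that took the ssb_device is replaced by the corresponding generic dma_* call on the underlying struct device, reached via the device's dma_dev pointer (ring->dev->dev->dma_dev in the ring helpers below). A minimal sketch of the before/after idiom (illustrative only, not code from this patch; sketch_map_rx_buffer() is a hypothetical name, the field names follow the driver):

/* Illustrative sketch: map an RX buffer the old way (ssb wrapper)
 * vs. the new way (generic DMA API). Hypothetical helper, not
 * from the patch. */
static dma_addr_t sketch_map_rx_buffer(struct b43_dmaring *ring,
				       void *buf, size_t len)
{
	/* Before: dmaaddr = ssb_dma_map_single(ring->dev->dev, buf,
	 *					 len, DMA_FROM_DEVICE);
	 * After: call the DMA API directly on the struct device
	 * that the ssb device wraps: */
	return dma_map_single(ring->dev->dev->dma_dev, buf, len,
			      DMA_FROM_DEVICE);
}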
Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r--	drivers/net/wireless/b43/dma.c | 65
1 file changed, 34 insertions(+), 31 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index fdfeab0c21a0..10d0aaf754c5 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,11 +333,11 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len, DMA_TO_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len, DMA_FROM_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len, DMA_FROM_DEVICE);
 	}
 
 	return dmaaddr;
@@ -348,11 +348,11 @@ static inline
 			  dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len, DMA_TO_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_TO_DEVICE);
 	} else {
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len, DMA_FROM_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_FROM_DEVICE);
 	}
 }
 
@@ -361,7 +361,7 @@ static inline
 			     dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_cpu(ring->dev->dev,
-				    addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
+				addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,8 +370,8 @@ static inline
 			       dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_device(ring->dev->dev,
-				       addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
+				   addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -401,9 +401,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+					    B43_DMA_RINGMEMSIZE,
+					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
 		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
 		return -ENOMEM;
@@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
 
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
@@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 					  dma_addr_t addr,
 					  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -874,10 +874,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = ssb_dma_map_single(dev->dev,
-					      ring->txhdr_cache,
-					      b43_txhdr_size(dev),
-					      DMA_TO_DEVICE);
+		dma_test = dma_map_single(dev->dev->dma_dev,
+					  ring->txhdr_cache,
+					  b43_txhdr_size(dev),
+					  DMA_TO_DEVICE);
 
 		if (b43_dma_mapping_error(ring, dma_test,
 					  b43_txhdr_size(dev), 1)) {
@@ -889,10 +889,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = ssb_dma_map_single(dev->dev,
-						      ring->txhdr_cache,
-						      b43_txhdr_size(dev),
-						      DMA_TO_DEVICE);
+			dma_test = dma_map_single(dev->dev->dma_dev,
+						  ring->txhdr_cache,
+						  b43_txhdr_size(dev),
+						  DMA_TO_DEVICE);
 
 			if (b43_dma_mapping_error(ring, dma_test,
 						  b43_txhdr_size(dev), 1)) {
@@ -903,9 +903,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			}
 		}
 
-		ssb_dma_unmap_single(dev->dev,
-				     dma_test, b43_txhdr_size(dev),
-				     DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev->dma_dev,
+				 dma_test, b43_txhdr_size(dev),
+				 DMA_TO_DEVICE);
 	}
 
 	err = alloc_ringmemory(ring);
@@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = ssb_dma_set_mask(dev->dev, mask);
-		if (!err)
-			break;
+		err = dma_set_mask(dev->dev->dma_dev, mask);
+		if (!err) {
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+			if (!err)
+				break;
+		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = 1;
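The last hunk carries the one semantic addition of the patch: the generic dma_set_mask() covers only streaming mappings, so the coherent mask used by dma_alloc_coherent() must now be set explicitly as well, and a mask width is accepted only when both calls succeed. A condensed, hypothetical sketch of that fallback idiom (the real logic lives in b43_dma_set_mask() above; only the 64-to-32-bit step is visible in this hunk):

/* Condensed sketch of the mask-fallback idiom; hypothetical helper,
 * not the driver's code. */
static int sketch_set_dma_mask(struct device *dma_dev, u64 mask)
{
	while (1) {
		/* Both the streaming and the coherent mask must be
		 * accepted before the width is considered usable. */
		if (!dma_set_mask(dma_dev, mask) &&
		    !dma_set_coherent_mask(dma_dev, mask))
			return 0;
		if (mask != DMA_BIT_MASK(64))
			return -EOPNOTSUPP;	/* nothing lower to try */
		mask = DMA_BIT_MASK(32);	/* retry with a 32-bit mask */
	}
}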