author     David S. Miller <davem@davemloft.net>  2010-06-11 14:34:06 -0400
committer  David S. Miller <davem@davemloft.net>  2010-06-11 14:34:06 -0400
commit     14599f1e341ee219abdd15f4eee5872d6f2d29f1 (patch)
tree       3875181429010e58416ab34e6c06ef42de52e756 /drivers/net/wireless/b43
parent     d8d1f30b95a635dbd610dcc5eb641aca8f4768cf (diff)
parent     832c10fd733893f86c63bde1c65b005d5a2fe346 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6
Conflicts:
        drivers/net/wireless/wl12xx/wl1271.h
        drivers/net/wireless/wl12xx/wl1271_cmd.h
Diffstat (limited to 'drivers/net/wireless/b43')
-rw-r--r--  drivers/net/wireless/b43/dma.c  69
1 file changed, 36 insertions(+), 33 deletions(-)
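The b43 half of this merge converts the driver from the ssb_dma_* wrapper API to the generic DMA API: every call now operates on the underlying struct device (ring->dev->dev->dma_dev or dev->dev->dma_dev) instead of the ssb_device. A minimal before/after sketch of the pattern, using names from the diff below (error handling elided; the err_unmap label is assumed, not the driver's):

        /* before: SSB-specific wrapper, takes the ssb_device */
        dmaaddr = ssb_dma_map_single(ring->dev->dev, buf, len, DMA_TO_DEVICE);

        /* after: generic DMA API, takes the underlying struct device */
        dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(ring->dev->dev->dma_dev, dmaaddr))
                goto err_unmap;         /* mapping failed; unwind (label assumed) */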
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index fa40fdfea719..10d0aaf754c5 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,11 +333,11 @@ static inline
         dma_addr_t dmaaddr;
 
         if (tx) {
-                dmaaddr = ssb_dma_map_single(ring->dev->dev,
-                                             buf, len, DMA_TO_DEVICE);
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+                                         buf, len, DMA_TO_DEVICE);
         } else {
-                dmaaddr = ssb_dma_map_single(ring->dev->dev,
-                                             buf, len, DMA_FROM_DEVICE);
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+                                         buf, len, DMA_FROM_DEVICE);
         }
 
         return dmaaddr;
@@ -348,11 +348,11 @@ static inline
                               dma_addr_t addr, size_t len, int tx)
 {
         if (tx) {
-                ssb_dma_unmap_single(ring->dev->dev,
-                                     addr, len, DMA_TO_DEVICE);
+                dma_unmap_single(ring->dev->dev->dma_dev,
+                                 addr, len, DMA_TO_DEVICE);
         } else {
-                ssb_dma_unmap_single(ring->dev->dev,
-                                     addr, len, DMA_FROM_DEVICE);
+                dma_unmap_single(ring->dev->dev->dma_dev,
+                                 addr, len, DMA_FROM_DEVICE);
         }
 }
 
@@ -361,7 +361,7 @@ static inline
                                  dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        ssb_dma_sync_single_for_cpu(ring->dev->dev,
-                                    addr, len, DMA_FROM_DEVICE);
+        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
+                                addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,8 +370,8 @@ static inline
                                     dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        ssb_dma_sync_single_for_device(ring->dev->dev,
-                                       addr, len, DMA_FROM_DEVICE);
+        dma_sync_single_for_device(ring->dev->dev->dma_dev,
+                                   addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -401,9 +401,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
          */
         if (ring->type == B43_DMA_64BIT)
                 flags |= GFP_DMA;
-        ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-                                                  B43_DMA_RINGMEMSIZE,
-                                                  &(ring->dmabase), flags);
+        ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+                                            B43_DMA_RINGMEMSIZE,
+                                            &(ring->dmabase), flags);
         if (!ring->descbase) {
                 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                 return -ENOMEM;
@@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
         if (ring->type == B43_DMA_64BIT)
                 flags |= GFP_DMA;
 
-        ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-                                ring->descbase, ring->dmabase, flags);
+        dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+                          ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
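Two asymmetries are worth noting in the ring-memory hunks above: dma_alloc_coherent() still takes GFP flags (the driver forces GFP_DMA for 64-bit rings so the descriptors land in addressable memory), but dma_free_coherent(), unlike ssb_dma_free_consistent(), takes none, so the flags argument drops out on the free side. A minimal sketch of the pairing, assuming the field names from the diff:

        void *descbase;
        dma_addr_t dmabase;

        descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
                                      &dmabase, GFP_KERNEL | GFP_DMA);
        if (!descbase)
                return -ENOMEM;
        /* ... use the descriptor ring ... */
        dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE, descbase, dmabase);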
@@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                   dma_addr_t addr,
                                   size_t buffersize, bool dma_to_device)
 {
-        if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+        if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
                 return 1;
 
         switch (ring->type) {
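b43_dma_mapping_error() layers a driver-specific check on top of the generic helper: dma_mapping_error() only catches mappings the DMA layer itself rejected, while the switch (ring->type) that follows also rejects addresses beyond what the ring's DMA engine (30-, 32-, or 64-bit) can reach. A hedged sketch of that layering; the limit parameter is illustrative, not the driver's exact test:

        static bool buffer_mapping_ok(struct device *d, dma_addr_t addr,
                                      size_t len, u64 limit)
        {
                if (dma_mapping_error(d, addr))
                        return false;   /* rejected by the DMA layer */
                if ((u64)addr + len > limit)
                        return false;   /* beyond the engine's reach */
                return true;
        }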
@@ -874,10 +874,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         goto err_kfree_meta;
 
                 /* test for ability to dma to txhdr_cache */
-                dma_test = ssb_dma_map_single(dev->dev,
-                                              ring->txhdr_cache,
-                                              b43_txhdr_size(dev),
-                                              DMA_TO_DEVICE);
+                dma_test = dma_map_single(dev->dev->dma_dev,
+                                          ring->txhdr_cache,
+                                          b43_txhdr_size(dev),
+                                          DMA_TO_DEVICE);
 
                 if (b43_dma_mapping_error(ring, dma_test,
                                           b43_txhdr_size(dev), 1)) {
@@ -889,10 +889,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         if (!ring->txhdr_cache)
                                 goto err_kfree_meta;
 
-                        dma_test = ssb_dma_map_single(dev->dev,
-                                                      ring->txhdr_cache,
-                                                      b43_txhdr_size(dev),
-                                                      DMA_TO_DEVICE);
+                        dma_test = dma_map_single(dev->dev->dma_dev,
+                                                  ring->txhdr_cache,
+                                                  b43_txhdr_size(dev),
+                                                  DMA_TO_DEVICE);
 
                         if (b43_dma_mapping_error(ring, dma_test,
                                                   b43_txhdr_size(dev), 1)) {
@@ -903,9 +903,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         }
                 }
 
-                ssb_dma_unmap_single(dev->dev,
-                                     dma_test, b43_txhdr_size(dev),
-                                     DMA_TO_DEVICE);
+                dma_unmap_single(dev->dev->dma_dev,
+                                 dma_test, b43_txhdr_size(dev),
+                                 DMA_TO_DEVICE);
         }
 
         err = alloc_ringmemory(ring);
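The three hunks above implement a probe-and-retry idiom: map txhdr_cache once to see whether the buffer is DMA-reachable, and if not, re-allocate it with GFP_DMA and try again before tearing down the test mapping. A condensed sketch; the kfree/kcalloc re-allocation between the hunks is not shown in this excerpt and is assumed from context:

        dma_test = dma_map_single(dma_dev, ring->txhdr_cache,
                                  b43_txhdr_size(dev), DMA_TO_DEVICE);
        if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) {
                kfree(ring->txhdr_cache);       /* buffer not DMA-reachable */
                ring->txhdr_cache = kcalloc(ring->nr_slots, b43_txhdr_size(dev),
                                            GFP_KERNEL | GFP_DMA);
                if (!ring->txhdr_cache)
                        goto err_kfree_meta;
                dma_test = dma_map_single(dma_dev, ring->txhdr_cache,
                                          b43_txhdr_size(dev), DMA_TO_DEVICE);
                if (b43_dma_mapping_error(ring, dma_test,
                                          b43_txhdr_size(dev), 1))
                        goto err_kfree_txhdr_cache;  /* second failure is fatal */
        }
        dma_unmap_single(dma_dev, dma_test, b43_txhdr_size(dev), DMA_TO_DEVICE);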
@@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
         /* Try to set the DMA mask. If it fails, try falling back to a
          * lower mask, as we can always also support a lower one. */
         while (1) {
-                err = ssb_dma_set_mask(dev->dev, mask);
-                if (!err)
-                        break;
+                err = dma_set_mask(dev->dev->dma_dev, mask);
+                if (!err) {
+                        err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+                        if (!err)
+                                break;
+                }
                 if (mask == DMA_BIT_MASK(64)) {
                         mask = DMA_BIT_MASK(32);
                         fallback = 1;
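With the generic API the streaming and coherent masks must be set separately, which is why this hunk grows the loop: dma_set_mask() alone no longer covers the coherent mask the way ssb_dma_set_mask() did, and the loop retries with a narrower mask whenever either call fails. A condensed sketch of the fallback (the 64 -> 32 -> 30 order follows the driver's DMA types; later kernels added dma_set_mask_and_coherent() to fuse the two calls):

        u64 mask = DMA_BIT_MASK(64);

        while (dma_set_mask(dma_dev, mask) ||
               dma_set_coherent_mask(dma_dev, mask)) {
                if (mask == DMA_BIT_MASK(64))
                        mask = DMA_BIT_MASK(32);        /* fall back to 32-bit */
                else if (mask == DMA_BIT_MASK(32))
                        mask = DMA_BIT_MASK(30);        /* fall back to 30-bit */
                else
                        return -EOPNOTSUPP;             /* nothing lower to try */
        }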
@@ -1221,14 +1224,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
         meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
         /* create a bounce buffer in zone_dma on mapping failure. */
         if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-                priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+                priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
+                                                  GFP_ATOMIC | GFP_DMA);
                 if (!priv_info->bouncebuffer) {
                         ring->current_slot = old_top_slot;
                         ring->used_slots = old_used_slots;
                         err = -ENOMEM;
                         goto out_unmap_hdr;
                 }
-                memcpy(priv_info->bouncebuffer, skb->data, skb->len);
 
                 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
                 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
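The final hunk is a pure simplification: kmemdup() is kmalloc() plus memcpy() in one call, so the separate copy after the NULL check disappears; behavior, including the GFP_ATOMIC | GFP_DMA allocation for the zone_dma bounce buffer, is unchanged. The equivalence, as a minimal sketch:

        /* old form: allocate, then copy after the NULL check */
        buf = kmalloc(len, GFP_ATOMIC | GFP_DMA);
        if (buf)
                memcpy(buf, src, len);

        /* new form: allocate and copy in one call */
        buf = kmemdup(src, len, GFP_ATOMIC | GFP_DMA);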