Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/wireless/b43/dma.c | 27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 3dfb28a34be9..766d955a9355 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -560,7 +560,7 @@ static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
 /* Check if a DMA mapping address is invalid. */
 static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 				  dma_addr_t addr,
-				  size_t buffersize)
+				  size_t buffersize, bool dma_to_device)
 {
 	if (unlikely(dma_mapping_error(addr)))
 		return 1;
@@ -568,11 +568,11 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 	switch (ring->type) {
 	case B43_DMA_30BIT:
 		if ((u64)addr + buffersize > (1ULL << 30))
-			return 1;
+			goto address_error;
 		break;
 	case B43_DMA_32BIT:
 		if ((u64)addr + buffersize > (1ULL << 32))
-			return 1;
+			goto address_error;
 		break;
 	case B43_DMA_64BIT:
 		/* Currently we can't have addresses beyond
@@ -582,6 +582,12 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 
 	/* The address is OK. */
 	return 0;
+
+address_error:
+	/* We can't support this address. Unmap it again. */
+	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+	return 1;
 }
 
 static int setup_rx_descbuffer(struct b43_dmaring *ring,
@@ -599,7 +605,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	if (unlikely(!skb))
 		return -ENOMEM;
 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 		/* ugh. try to realloc in zone_dma */
 		gfp_flags |= GFP_DMA;
 
@@ -612,7 +618,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 					 ring->rx_buffersize, 0);
 	}
 
-	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
@@ -852,7 +858,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 					      b43_txhdr_size(dev),
 					      DMA_TO_DEVICE);
 
-	if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
+	if (b43_dma_mapping_error(ring, dma_test,
+				  b43_txhdr_size(dev), 1)) {
 		/* ugh realloc */
 		kfree(ring->txhdr_cache);
 		ring->txhdr_cache = kcalloc(nr_slots,
@@ -867,7 +874,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 					      DMA_TO_DEVICE);
 
 		if (b43_dma_mapping_error(ring, dma_test,
-					  b43_txhdr_size(dev)))
+					  b43_txhdr_size(dev), 1))
 			goto err_kfree_txhdr_cache;
 	}
 
@@ -1189,7 +1196,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
 					   hdrsize, 1);
-	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
+	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
 		ring->current_slot = old_top_slot;
 		ring->used_slots = old_used_slots;
 		return -EIO;
@@ -1208,7 +1215,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
-	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
 		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
@@ -1222,7 +1229,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 		skb = bounce_skb;
 		meta->skb = skb;
 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -EIO;
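
For reference, the sketch below is a minimal, self-contained illustration of the pattern this patch establishes, not the driver's real code: when the mapping itself succeeds but the resulting bus address lies beyond what the ring's DMA engine can reach, the buffer has to be unmapped again before the error is reported, and unmapping requires knowing the mapping direction, which is why the dma_to_device flag is threaded through b43_dma_mapping_error(). Every name in the sketch (fake_map, fake_unmap, mapping_error, active_mappings) is a made-up stand-in, not a kernel API.

/* dma_check_sketch.c: simplified user-space stand-ins, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;

static int active_mappings;	/* counts mappings that have not been undone */

static dma_addr_t fake_map(void *buf, size_t len, bool to_device)
{
	(void)len; (void)to_device;
	active_mappings++;
	return (dma_addr_t)(uintptr_t)buf;	/* pretend bus addr == CPU addr */
}

static void fake_unmap(dma_addr_t addr, size_t len, bool to_device)
{
	(void)addr; (void)len; (void)to_device;
	active_mappings--;
}

/*
 * Mirrors the patched helper: returns true on error and, unlike the old
 * code, unmaps the buffer before rejecting an unreachable address, so a
 * rejected buffer no longer leaves a mapping outstanding.
 */
static bool mapping_error(dma_addr_t addr, size_t len, bool to_device)
{
	if (addr + len > (1ULL << 30)) {	/* e.g. a 30-bit DMA engine */
		fake_unmap(addr, len, to_device);
		return true;
	}
	return false;
}

int main(void)
{
	char *buf = malloc(4096);
	dma_addr_t addr = fake_map(buf, 4096, true);

	if (mapping_error(addr, 4096, true))
		printf("address rejected, mapping already undone\n");
	else
		fake_unmap(addr, 4096, true);	/* normal teardown path */

	printf("outstanding mappings: %d\n", active_mappings);	/* prints 0 */
	free(buf);
	return 0;
}

In the driver itself, the callers pass the same direction they used for map_descbuffer(): 0 for the RX buffers in setup_rx_descbuffer() and 1 for the TX headers and skb data, so the cleanup in the new address_error path always matches the original mapping.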