Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
 -rw-r--r--  drivers/net/wireless/b43/dma.c | 32
 1 file changed, 20 insertions, 12 deletions
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 663aed4e9e05..8a9776b52daf 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -515,7 +515,7 @@ static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
 /* Check if a DMA mapping address is invalid. */
 static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 				  dma_addr_t addr,
-				  size_t buffersize)
+				  size_t buffersize, bool dma_to_device)
 {
 	if (unlikely(dma_mapping_error(addr)))
 		return 1;
@@ -523,11 +523,11 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 	switch (ring->type) {
 	case B43_DMA_30BIT:
 		if ((u64)addr + buffersize > (1ULL << 30))
-			return 1;
+			goto address_error;
 		break;
 	case B43_DMA_32BIT:
 		if ((u64)addr + buffersize > (1ULL << 32))
-			return 1;
+			goto address_error;
 		break;
 	case B43_DMA_64BIT:
 		/* Currently we can't have addresses beyond
@@ -537,6 +537,12 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 
 	/* The address is OK. */
 	return 0;
+
+address_error:
+	/* We can't support this address. Unmap it again. */
+	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+	return 1;
 }
 
 static int setup_rx_descbuffer(struct b43_dmaring *ring,
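
For reference, this is roughly how b43_dma_mapping_error() reads with the three hunks above applied. It is a sketch assembled from the diff context, not a verbatim excerpt of the file: the B43_DMA_64BIT arm is assumed to remain an unchanged no-op (its comment is truncated in the hunk), and unmap_descbuffer() is the driver's existing unmap helper. The new dma_to_device argument lets the error path undo the mapping with the same DMA direction the caller used, instead of leaking it.

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond 64 bits,
		 * so this arm is assumed to stay a plain no-op. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again, using the
	 * same direction the caller used when mapping the buffer. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
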
@@ -554,7 +560,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	if (unlikely(!skb))
 		return -ENOMEM;
 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 		/* ugh. try to realloc in zone_dma */
 		gfp_flags |= GFP_DMA;
 
@@ -567,7 +573,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 					 ring->rx_buffersize, 0);
 	}
 
-	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
+	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
@@ -807,7 +813,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 				  b43_txhdr_size(dev),
 				  DMA_TO_DEVICE);
 
-	if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
+	if (b43_dma_mapping_error(ring, dma_test,
+				  b43_txhdr_size(dev), 1)) {
 		/* ugh realloc */
 		kfree(ring->txhdr_cache);
 		ring->txhdr_cache = kcalloc(nr_slots,
@@ -822,7 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 					  DMA_TO_DEVICE);
 
 		if (b43_dma_mapping_error(ring, dma_test,
-					  b43_txhdr_size(dev)))
+					  b43_txhdr_size(dev), 1))
 			goto err_kfree_txhdr_cache;
 	}
 
@@ -1123,7 +1130,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
 					   hdrsize, 1);
-	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
+	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
 		ring->current_slot = old_top_slot;
 		ring->used_slots = old_used_slots;
 		return -EIO;
@@ -1142,7 +1149,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
-	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
 		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
@@ -1156,7 +1163,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 		skb = bounce_skb;
 		meta->skb = skb;
 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
+		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -EIO;
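
Taken together, the caller changes above just keep the new flag in sync with the direction used for the mapping: the RX call sites in setup_rx_descbuffer() pass 0 (device-to-CPU), the TX call sites pass 1 (CPU-to-device), so a rejected address is unmapped with the right direction inside the helper. A condensed sketch of that convention, with the error-handling bodies reduced to comments (the rollback and bounce-buffer details are in the hunks above):

	/* RX: buffer was mapped "from device", so the check passes 0. */
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* The bad mapping has already been undone by the helper;
		 * retry with GFP_DMA or fail with -EIO, but do not call
		 * unmap_descbuffer() here. */
	}

	/* TX: buffer was mapped "to device", so the check passes 1. */
	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		/* Likewise already unmapped; fall back to a bounce
		 * buffer allocated with GFP_ATOMIC | GFP_DMA. */
	}
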
@@ -1339,6 +1346,7 @@ static void b43_fill_txstatus_report(struct b43_dmaring *ring,
 	}
 }
 
+/* Called with IRQs disabled. */
 void b43_dma_handle_txstatus(struct b43_wldev *dev,
 			     const struct b43_txstatus *status)
 {
@@ -1351,8 +1359,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	ring = parse_cookie(dev, status->cookie, &slot);
 	if (unlikely(!ring))
 		return;
-	B43_WARN_ON(!irqs_disabled());
-	spin_lock(&ring->lock);
+
+	spin_lock(&ring->lock);	/* IRQs are already disabled. */
 
 	B43_WARN_ON(!ring->tx);
 	ops = ring->ops;
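
The last two hunks only change how the locking requirement is documented: b43_dma_handle_txstatus() is reached from the interrupt path with local IRQs already off, so the runtime irqs_disabled() warning is dropped in favour of a comment, and a plain spin_lock() remains sufficient. A minimal sketch of that general pattern, using hypothetical names (my_lock, my_handle_event) rather than anything from the driver:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

/* Called with IRQs disabled (e.g. from the hard-IRQ handler). */
static void my_handle_event(void)
{
	/* spin_lock_irqsave() is not needed here: interrupts are already
	 * disabled on this CPU, so holding the plain spin_lock() cannot
	 * be interrupted by an IRQ handler that also takes my_lock. */
	spin_lock(&my_lock);
	/* ... touch state shared with the IRQ path ... */
	spin_unlock(&my_lock);
}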