about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/net/b44.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/b44.c')
-rw-r--r--  drivers/net/b44.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 5c84541e0737..b70b81ec34c3 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -660,7 +660,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
 	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
 		if (!ssb_dma_mapping_error(bp->sdev, mapping))
 			ssb_dma_unmap_single(bp->sdev, mapping,
@@ -673,7 +673,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					 RX_PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
 		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 			if (!ssb_dma_mapping_error(bp->sdev, mapping))
 				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
@@ -703,7 +703,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
-					     dest_idx * sizeof(dp),
+					     dest_idx * sizeof(*dp),
 					     DMA_BIDIRECTIONAL);
 
 	return RX_PKT_BUF_SZ;
@@ -731,7 +731,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
-					  src_idx * sizeof(src_desc),
+					  src_idx * sizeof(*src_desc),
 					  DMA_BIDIRECTIONAL);
 
 	ctrl = src_desc->ctrl;
@@ -747,10 +747,10 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
-					     dest_idx * sizeof(dest_desc),
+					     dest_idx * sizeof(*dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
+	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
 				       RX_PKT_BUF_SZ,
 				       DMA_FROM_DEVICE);
 }
@@ -965,7 +965,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
@@ -979,7 +979,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
 					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 			if (!ssb_dma_mapping_error(bp->sdev, mapping))
 				ssb_dma_unmap_single(bp->sdev, mapping,
 						     len, DMA_TO_DEVICE);
@@ -1204,7 +1204,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 					DMA_BIDIRECTIONAL);
 
 	if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
-		rx_ring_dma + size > DMA_30BIT_MASK) {
+		rx_ring_dma + size > DMA_BIT_MASK(30)) {
 		kfree(rx_ring);
 		goto out_err;
 	}
@@ -1231,7 +1231,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 					DMA_TO_DEVICE);
 
 	if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
-		tx_ring_dma + size > DMA_30BIT_MASK) {
+		tx_ring_dma + size > DMA_BIT_MASK(30)) {
 		kfree(tx_ring);
 		goto out_err;
 	}
@@ -2180,7 +2180,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 	}
-	err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
+	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
 	if (err) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system.\n");