Diffstat (limited to 'drivers/net/b44.c')
-rw-r--r--  drivers/net/b44.c  140
1 file changed, 71 insertions(+), 69 deletions(-)
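This change converts the b44 driver from calling the generic DMA API directly on bp->sdev->dma_dev to the ssb_dma_* wrappers, which take the struct ssb_device itself: dma_map_single() becomes ssb_dma_map_single(), dma_mapping_error() becomes ssb_dma_mapping_error(), and dma_alloc_coherent()/dma_free_coherent() become ssb_dma_alloc_consistent()/ssb_dma_free_consistent(), the free variant now taking an explicit GFP_KERNEL argument. The point of the wrappers is to let the SSB layer route each DMA operation to whatever host bus the core actually sits behind. As a hedged sketch only (not the kernel's verbatim header), such a wrapper plausibly dispatches on the bus type; the SSB_BUSTYPE_* constants and the bus->host_pci field below are assumptions about the SSB core of this era, while struct ssb_device and sdev->dma_dev appear in the diff itself:

/*
 * Hedged sketch of one ssb_dma_* wrapper -- an illustration, not the
 * kernel's actual implementation.  Field and constant names other than
 * struct ssb_device are assumptions.
 */
static inline dma_addr_t ssb_dma_map_single(struct ssb_device *sdev, void *p,
                                            size_t size,
                                            enum dma_data_direction dir)
{
        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_PCI:
                /* Core behind a PCI host: go through the PCI DMA API.
                 * (The DMA_* and PCI_DMA_* direction values coincide.) */
                return pci_map_single(sdev->bus->host_pci, p, size, dir);
        case SSB_BUSTYPE_SSB:
                /* Native SSB bus (e.g. embedded BCM47xx): generic DMA API. */
                return dma_map_single(sdev->dev, p, size, dir);
        default:
                BUG();
                return 0;       /* unreachable */
        }
}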
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 59dce6aa0865..c3bda5ce67c4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -148,9 +148,9 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                 unsigned long offset,
                                                 enum dma_data_direction dir)
 {
-        dma_sync_single_range_for_device(sdev->dma_dev, dma_base,
-                                         offset & dma_desc_align_mask,
-                                         dma_desc_sync_size, dir);
+        ssb_dma_sync_single_range_for_device(sdev, dma_base,
+                                             offset & dma_desc_align_mask,
+                                             dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -158,9 +158,9 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                              unsigned long offset,
                                              enum dma_data_direction dir)
 {
-        dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base,
-                                      offset & dma_desc_align_mask,
-                                      dma_desc_sync_size, dir);
+        ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
+                                          offset & dma_desc_align_mask,
+                                          dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -613,10 +613,10 @@ static void b44_tx(struct b44 *bp)
 
                 BUG_ON(skb == NULL);
 
-                dma_unmap_single(bp->sdev->dma_dev,
-                                 rp->mapping,
-                                 skb->len,
-                                 DMA_TO_DEVICE);
+                ssb_dma_unmap_single(bp->sdev,
+                                     rp->mapping,
+                                     skb->len,
+                                     DMA_TO_DEVICE);
                 rp->skb = NULL;
                 dev_kfree_skb_irq(skb);
         }
@@ -653,29 +653,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
         if (skb == NULL)
                 return -ENOMEM;
 
-        mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
-                                 RX_PKT_BUF_SZ,
-                                 DMA_FROM_DEVICE);
+        mapping = ssb_dma_map_single(bp->sdev, skb->data,
+                                     RX_PKT_BUF_SZ,
+                                     DMA_FROM_DEVICE);
 
         /* Hardware bug work-around, the chip is unable to do PCI DMA
            to/from anything above 1GB :-( */
-        if (dma_mapping_error(mapping) ||
+        if (ssb_dma_mapping_error(bp->sdev, mapping) ||
             mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                 /* Sigh... */
-                if (!dma_mapping_error(mapping))
-                        dma_unmap_single(bp->sdev->dma_dev, mapping,
-                                         RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+                if (!ssb_dma_mapping_error(bp->sdev, mapping))
+                        ssb_dma_unmap_single(bp->sdev, mapping,
+                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                 dev_kfree_skb_any(skb);
                 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
                 if (skb == NULL)
                         return -ENOMEM;
-                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
-                                         RX_PKT_BUF_SZ,
-                                         DMA_FROM_DEVICE);
-                if (dma_mapping_error(mapping) ||
+                mapping = ssb_dma_map_single(bp->sdev, skb->data,
+                                             RX_PKT_BUF_SZ,
+                                             DMA_FROM_DEVICE);
+                if (ssb_dma_mapping_error(bp->sdev, mapping) ||
                     mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
-                        if (!dma_mapping_error(mapping))
-                                dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+                        if (!ssb_dma_mapping_error(bp->sdev, mapping))
+                                ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
                         dev_kfree_skb_any(skb);
                         return -ENOMEM;
                 }
@@ -750,9 +750,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
                                      dest_idx * sizeof(dest_desc),
                                      DMA_BIDIRECTIONAL);
 
-        dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr),
-                                   RX_PKT_BUF_SZ,
-                                   DMA_FROM_DEVICE);
+        ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
+                                       RX_PKT_BUF_SZ,
+                                       DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -772,7 +772,7 @@ static int b44_rx(struct b44 *bp, int budget)
                 struct rx_header *rh;
                 u16 len;
 
-                dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+                ssb_dma_sync_single_for_cpu(bp->sdev, map,
                                             RX_PKT_BUF_SZ,
                                             DMA_FROM_DEVICE);
                 rh = (struct rx_header *) skb->data;
@@ -806,8 +806,8 @@ static int b44_rx(struct b44 *bp, int budget)
                         skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                         if (skb_size < 0)
                                 goto drop_it;
-                        dma_unmap_single(bp->sdev->dma_dev, map,
-                                         skb_size, DMA_FROM_DEVICE);
+                        ssb_dma_unmap_single(bp->sdev, map,
+                                             skb_size, DMA_FROM_DEVICE);
                         /* Leave out rx_header */
                         skb_put(skb, len + RX_PKT_OFFSET);
                         skb_pull(skb, RX_PKT_OFFSET);
@@ -966,25 +966,25 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 goto err_out;
         }
 
-        mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
-        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
+        mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
+        if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
                 struct sk_buff *bounce_skb;
 
                 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
-                if (!dma_mapping_error(mapping))
-                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
-                                         DMA_TO_DEVICE);
+                if (!ssb_dma_mapping_error(bp->sdev, mapping))
+                        ssb_dma_unmap_single(bp->sdev, mapping, len,
+                                             DMA_TO_DEVICE);
 
                 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                 if (!bounce_skb)
                         goto err_out;
 
-                mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
-                                         len, DMA_TO_DEVICE);
-                if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
-                        if (!dma_mapping_error(mapping))
-                                dma_unmap_single(bp->sdev->dma_dev, mapping,
-                                                 len, DMA_TO_DEVICE);
+                mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
+                                             len, DMA_TO_DEVICE);
+                if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+                        if (!ssb_dma_mapping_error(bp->sdev, mapping))
+                                ssb_dma_unmap_single(bp->sdev, mapping,
+                                                     len, DMA_TO_DEVICE);
                         dev_kfree_skb_any(bounce_skb);
                         goto err_out;
                 }
@@ -1082,8 +1082,8 @@ static void b44_free_rings(struct b44 *bp)
 
                 if (rp->skb == NULL)
                         continue;
-                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
-                                 DMA_FROM_DEVICE);
+                ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
+                                     DMA_FROM_DEVICE);
                 dev_kfree_skb_any(rp->skb);
                 rp->skb = NULL;
         }
@@ -1094,8 +1094,8 @@ static void b44_free_rings(struct b44 *bp)
 
                 if (rp->skb == NULL)
                         continue;
-                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
-                                 DMA_TO_DEVICE);
+                ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
+                                     DMA_TO_DEVICE);
                 dev_kfree_skb_any(rp->skb);
                 rp->skb = NULL;
         }
@@ -1117,14 +1117,14 @@ static void b44_init_rings(struct b44 *bp)
         memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
         if (bp->flags & B44_FLAG_RX_RING_HACK)
-                dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
-                                           DMA_TABLE_BYTES,
-                                           DMA_BIDIRECTIONAL);
+                ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
+                                               DMA_TABLE_BYTES,
+                                               DMA_BIDIRECTIONAL);
 
         if (bp->flags & B44_FLAG_TX_RING_HACK)
-                dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
-                                           DMA_TABLE_BYTES,
-                                           DMA_TO_DEVICE);
+                ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
+                                               DMA_TABLE_BYTES,
+                                               DMA_TO_DEVICE);
 
         for (i = 0; i < bp->rx_pending; i++) {
                 if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1144,25 +1144,27 @@ static void b44_free_consistent(struct b44 *bp)
         bp->tx_buffers = NULL;
         if (bp->rx_ring) {
                 if (bp->flags & B44_FLAG_RX_RING_HACK) {
-                        dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
-                                         DMA_TABLE_BYTES,
-                                         DMA_BIDIRECTIONAL);
+                        ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
+                                             DMA_TABLE_BYTES,
+                                             DMA_BIDIRECTIONAL);
                         kfree(bp->rx_ring);
                 } else
-                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
-                                          bp->rx_ring, bp->rx_ring_dma);
+                        ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+                                                bp->rx_ring, bp->rx_ring_dma,
+                                                GFP_KERNEL);
                 bp->rx_ring = NULL;
                 bp->flags &= ~B44_FLAG_RX_RING_HACK;
         }
         if (bp->tx_ring) {
                 if (bp->flags & B44_FLAG_TX_RING_HACK) {
-                        dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
-                                         DMA_TABLE_BYTES,
-                                         DMA_TO_DEVICE);
+                        ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
+                                             DMA_TABLE_BYTES,
+                                             DMA_TO_DEVICE);
                         kfree(bp->tx_ring);
                 } else
-                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
-                                          bp->tx_ring, bp->tx_ring_dma);
+                        ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+                                                bp->tx_ring, bp->tx_ring_dma,
+                                                GFP_KERNEL);
                 bp->tx_ring = NULL;
                 bp->flags &= ~B44_FLAG_TX_RING_HACK;
         }
@@ -1187,7 +1189,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 goto out_err;
 
         size = DMA_TABLE_BYTES;
-        bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp);
+        bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
         if (!bp->rx_ring) {
                 /* Allocation may have failed due to pci_alloc_consistent
                    insisting on use of GFP_DMA, which is more restrictive
@@ -1199,11 +1201,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 if (!rx_ring)
                         goto out_err;
 
-                rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
-                                             DMA_TABLE_BYTES,
-                                             DMA_BIDIRECTIONAL);
+                rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
+                                                 DMA_TABLE_BYTES,
+                                                 DMA_BIDIRECTIONAL);
 
-                if (dma_mapping_error(rx_ring_dma) ||
+                if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
                     rx_ring_dma + size > DMA_30BIT_MASK) {
                         kfree(rx_ring);
                         goto out_err;
@@ -1214,9 +1216,9 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 bp->flags |= B44_FLAG_RX_RING_HACK;
         }
 
-        bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp);
+        bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
         if (!bp->tx_ring) {
-                /* Allocation may have failed due to dma_alloc_coherent
+                /* Allocation may have failed due to ssb_dma_alloc_consistent
                    insisting on use of GFP_DMA, which is more restrictive
                    than necessary... */
                 struct dma_desc *tx_ring;
@@ -1226,11 +1228,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
                 if (!tx_ring)
                         goto out_err;
 
-                tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
-                                             DMA_TABLE_BYTES,
-                                             DMA_TO_DEVICE);
+                tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
+                                                 DMA_TABLE_BYTES,
+                                                 DMA_TO_DEVICE);
 
-                if (dma_mapping_error(tx_ring_dma) ||
+                if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
                     tx_ring_dma + size > DMA_30BIT_MASK) {
                         kfree(tx_ring);
                         goto out_err;
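The buffer-allocation paths in this diff repeat one pattern worth calling out: the chip cannot DMA to or from addresses above 1GB, so every mapping is checked both with ssb_dma_mapping_error() and against DMA_30BIT_MASK, and on failure the buffer is unmapped, freed, and reallocated from GFP_DMA (or, in b44_start_xmit(), copied into a GFP_DMA bounce buffer) before being mapped again. A minimal sketch of that map-check-retry fallback, factored into a hypothetical helper -- b44_map_rx_buf() does not exist in the driver; bp, RX_PKT_BUF_SZ, and the ssb_dma_* calls are taken from the diff above:

/*
 * Hedged sketch of the map-check-bounce pattern, assuming the b44
 * driver context.  Not a drop-in replacement for the code above.
 */
static int b44_map_rx_buf(struct b44 *bp, struct sk_buff **skbp,
                          dma_addr_t *mappingp)
{
        struct sk_buff *skb = *skbp;
        dma_addr_t mapping;

        mapping = ssb_dma_map_single(bp->sdev, skb->data,
                                     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
        if (ssb_dma_mapping_error(bp->sdev, mapping) ||
            mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Bad mapping: undo it (if it exists) and retry from
                 * ZONE_DMA, which is low enough for the chip. */
                if (!ssb_dma_mapping_error(bp->sdev, mapping))
                        ssb_dma_unmap_single(bp->sdev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                *skbp = NULL;
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ,
                                         GFP_ATOMIC | GFP_DMA);
                if (!skb)
                        return -ENOMEM;
                mapping = ssb_dma_map_single(bp->sdev, skb->data,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                if (ssb_dma_mapping_error(bp->sdev, mapping) ||
                    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!ssb_dma_mapping_error(bp->sdev, mapping))
                                ssb_dma_unmap_single(bp->sdev, mapping,
                                                     RX_PKT_BUF_SZ,
                                                     DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }
        *skbp = skb;
        *mappingp = mapping;
        return 0;
}

The second attempt is allowed to fail for real: if even a ZONE_DMA buffer cannot be mapped below the 1GB boundary there is nothing further to try, and the callers in the diff simply drop the packet or fail the allocation.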