Diffstat (limited to 'drivers/net/b44.c')
-rw-r--r--  drivers/net/b44.c  146
1 files changed, 70 insertions, 76 deletions
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 293f9c16e786..37617abc1647 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *);
 
 static void b44_init_hw(struct b44 *, int);
 
-static int dma_desc_align_mask;
 static int dma_desc_sync_size;
 static int instance;
 
@@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 						unsigned long offset,
 						enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_device(sdev, dma_base,
-					     offset & dma_desc_align_mask,
-					     dma_desc_sync_size, dir);
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
-					  offset & dma_desc_align_mask,
-					  dma_desc_sync_size, dir);
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		ssb_dma_unmap_single(bp->sdev,
-				     rp->mapping,
-				     skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
 		rp->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
@@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data,
-				     RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping,
-					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
+					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = ssb_dma_map_single(bp->sdev, skb->data,
-					     RX_PKT_BUF_SZ,
-					     DMA_FROM_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 				     dest_idx * sizeof(*dest_desc),
 				     DMA_BIDIRECTIONAL);
 
-	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
-				       RX_PKT_BUF_SZ,
-				       DMA_FROM_DEVICE);
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		ssb_dma_sync_single_for_cpu(bp->sdev, map,
-					    RX_PKT_BUF_SZ,
-					    DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			ssb_dma_unmap_single(bp->sdev, map,
-					     skb_size, DMA_FROM_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping, len,
-					     DMA_TO_DEVICE);
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+					 DMA_TO_DEVICE);
 
 		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
-					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping,
-						     len, DMA_TO_DEVICE);
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
+						 len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1068,8 +1065,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1080,8 +1077,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1103,14 +1100,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_BIDIRECTIONAL);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_TO_DEVICE);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
 
 	for (i = 0; i < bp->rx_pending; i++) {
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1125,23 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_BIDIRECTIONAL);
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->rx_ring, bp->rx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_TO_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->tx_ring, bp->tx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 	}
@@ -1175,7 +1166,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1179,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_BIDIRECTIONAL);
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
 
-		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
 		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(rx_ring);
 			goto out_err;
@@ -1202,7 +1194,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1207,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_TO_DEVICE);
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
 
-		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
 		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(tx_ring);
 			goto out_err;
@@ -2176,12 +2169,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 	}
-	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
-	if (err) {
+
+	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
 	}
+
 	err = b44_get_invariants(bp);
 	if (err) {
 		dev_err(sdev->dev,
@@ -2344,7 +2339,6 @@ static int __init b44_init(void)
 	int err;
 
 	/* Setup paramaters for syncing RX/TX DMA descriptors */
-	dma_desc_align_mask = ~(dma_desc_align_size - 1);
 	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
 	err = b44_pci_init();
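
For readers unfamiliar with the generic DMA mapping API that this patch switches the driver to, the sketch below shows the streaming-mapping pattern the converted call sites follow: map a buffer for device access, check the mapping with dma_mapping_error(), and unmap it when the transfer is done. It is an illustrative fragment against a generic struct device (the function name, buffer, and direction are placeholders), not part of the patch itself.

	/* Minimal sketch of the generic DMA API pattern used above:
	 * map, verify the mapping, use it, then unmap. Illustrative only. */
	#include <linux/dma-mapping.h>

	static int example_map_buffer(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t mapping;

		mapping = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mapping))
			return -ENOMEM;	/* mapping failed, nothing to unmap */

		/* ... program the device with 'mapping', wait for completion ... */

		dma_unmap_single(dev, mapping, len, DMA_FROM_DEVICE);
		return 0;
	}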