author	Michael Buesch <mb@bu3sch.de>	2008-06-20 05:50:29 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-06-27 09:09:15 -0400
commit	f225763a7d6c92c4932dbd528437997078496fcc (patch)
tree	5d787c7c0bdabcf72e98603a85672ebe95a3682e
parent	316af76f3475bb73dbb11f1c6d549ae589efb3d0 (diff)
ssb, b43, b43legacy, b44: Rewrite SSB DMA API
This is a rewrite of the DMA API for SSB devices.
It is needed because the old (non-existent) "API" made too many bad
assumptions about the API of the host bus (PCI).
This introduces an almost complete SSB DMA API that maps to the
low-level bus API based on the bustype.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--	drivers/net/b44.c			| 140
-rw-r--r--	drivers/net/wireless/b43/dma.c		|  65
-rw-r--r--	drivers/net/wireless/b43legacy/dma.c	|  63
-rw-r--r--	drivers/ssb/Kconfig			|   2
-rw-r--r--	drivers/ssb/main.c			|  75
-rw-r--r--	include/linux/ssb/ssb.h			| 143
6 files changed, 336 insertions(+), 152 deletions(-)
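For drivers, the conversion below is mechanical: every direct dma_* call on the old sdev->dma_dev pointer becomes an ssb_dma_* call on the ssb_device itself, and the wrapper dispatches to the PCI or generic DMA API based on dev->bus->bustype. A minimal before/after sketch (hypothetical driver code, not part of this patch; sdev, buf and len are assumed):

	/* Before: the driver called the generic DMA API directly and relied
	 * on the bus glue having picked a usable struct device. */
	dma_addr_t addr = dma_map_single(sdev->dma_dev, buf, len, DMA_TO_DEVICE);

	/* After: the driver calls the SSB wrapper, which routes to
	 * pci_map_single() or dma_map_single() depending on the host bus. */
	dma_addr_t addr = ssb_dma_map_single(sdev, buf, len, DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(sdev, addr))
		return -EIO;
	/* ... do the DMA ... */
	ssb_dma_unmap_single(sdev, addr, len, DMA_TO_DEVICE);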
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 59dce6aa0865..c3bda5ce67c4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -148,9 +148,9 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_device(sdev->dma_dev, dma_base,
-					 offset & dma_desc_align_mask,
-					 dma_desc_sync_size, dir);
+	ssb_dma_sync_single_range_for_device(sdev, dma_base,
+					     offset & dma_desc_align_mask,
+					     dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -158,9 +158,9 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base,
-				      offset & dma_desc_align_mask,
-				      dma_desc_sync_size, dir);
+	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
+					  offset & dma_desc_align_mask,
+					  dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -613,10 +613,10 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		dma_unmap_single(bp->sdev->dma_dev,
-				 rp->mapping,
-				 skb->len,
-				 DMA_TO_DEVICE);
+		ssb_dma_unmap_single(bp->sdev,
+				     rp->mapping,
+				     skb->len,
+				     DMA_TO_DEVICE);
 		rp->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
@@ -653,29 +653,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
-				 RX_PKT_BUF_SZ,
-				 DMA_FROM_DEVICE);
+	mapping = ssb_dma_map_single(bp->sdev, skb->data,
+				     RX_PKT_BUF_SZ,
+				     DMA_FROM_DEVICE);
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (dma_mapping_error(mapping) ||
+	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
 	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
 		/* Sigh... */
-		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dma_dev, mapping,
-					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+		if (!ssb_dma_mapping_error(bp->sdev, mapping))
+			ssb_dma_unmap_single(bp->sdev, mapping,
+					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
-					 RX_PKT_BUF_SZ,
-					 DMA_FROM_DEVICE);
-		if (dma_mapping_error(mapping) ||
+		mapping = ssb_dma_map_single(bp->sdev, skb->data,
+					     RX_PKT_BUF_SZ,
+					     DMA_FROM_DEVICE);
+		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
 		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
-			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+			if (!ssb_dma_mapping_error(bp->sdev, mapping))
+				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -750,9 +750,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					     dest_idx * sizeof(dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr),
-				   RX_PKT_BUF_SZ,
-				   DMA_FROM_DEVICE);
+	ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
+				       RX_PKT_BUF_SZ,
+				       DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -772,7 +772,7 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+		ssb_dma_sync_single_for_cpu(bp->sdev, map,
 					RX_PKT_BUF_SZ,
 					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
@@ -806,8 +806,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			dma_unmap_single(bp->sdev->dma_dev, map,
-					 skb_size, DMA_FROM_DEVICE);
+			ssb_dma_unmap_single(bp->sdev, map,
+					     skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
@@ -966,25 +966,25 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
+	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
+	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
-					 DMA_TO_DEVICE);
+		if (!ssb_dma_mapping_error(bp->sdev, mapping))
+			ssb_dma_unmap_single(bp->sdev, mapping, len,
+					     DMA_TO_DEVICE);
 
 		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
-					 len, DMA_TO_DEVICE);
-		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
-			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dma_dev, mapping,
-						 len, DMA_TO_DEVICE);
+		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
+					     len, DMA_TO_DEVICE);
+		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+			if (!ssb_dma_mapping_error(bp->sdev, mapping))
+				ssb_dma_unmap_single(bp->sdev, mapping,
+						     len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
 		}
@@ -1082,8 +1082,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
-				 DMA_FROM_DEVICE);
+		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
+				     DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1094,8 +1094,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
-				 DMA_TO_DEVICE);
+		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
+				     DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1117,14 +1117,14 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
-					   DMA_TABLE_BYTES,
-					   DMA_BIDIRECTIONAL);
+		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
+					       DMA_TABLE_BYTES,
+					       DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
-					   DMA_TABLE_BYTES,
-					   DMA_TO_DEVICE);
+		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
+					       DMA_TABLE_BYTES,
+					       DMA_TO_DEVICE);
 
 	for (i = 0; i < bp->rx_pending; i++) {
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1144,25 +1144,27 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
-					 DMA_TABLE_BYTES,
-					 DMA_BIDIRECTIONAL);
+			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
-					  bp->rx_ring, bp->rx_ring_dma);
+			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+						bp->rx_ring, bp->rx_ring_dma,
+						GFP_KERNEL);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
-					 DMA_TABLE_BYTES,
-					 DMA_TO_DEVICE);
+			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
-					  bp->tx_ring, bp->tx_ring_dma);
+			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
+						bp->tx_ring, bp->tx_ring_dma,
+						GFP_KERNEL);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 	}
@@ -1187,7 +1189,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1199,11 +1201,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
-					     DMA_TABLE_BYTES,
-					     DMA_BIDIRECTIONAL);
+		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
+						 DMA_TABLE_BYTES,
+						 DMA_BIDIRECTIONAL);
 
-		if (dma_mapping_error(rx_ring_dma) ||
+		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
 		    rx_ring_dma + size > DMA_30BIT_MASK) {
 			kfree(rx_ring);
 			goto out_err;
@@ -1214,9 +1216,9 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
-		/* Allocation may have failed due to dma_alloc_coherent
+		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
 		   than necessary... */
 		struct dma_desc *tx_ring;
@@ -1226,11 +1228,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
-					     DMA_TABLE_BYTES,
-					     DMA_TO_DEVICE);
+		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
+						 DMA_TABLE_BYTES,
+						 DMA_TO_DEVICE);
 
-		if (dma_mapping_error(tx_ring_dma) ||
+		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
 		    tx_ring_dma + size > DMA_30BIT_MASK) {
 			kfree(tx_ring);
 			goto out_err;
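All of the b44 error paths above share one pattern: a mapping is rejected either when the DMA API reports a mapping error or when the mapped address lands above the chip's 1GB (30-bit) limit, and a successful-but-unusable mapping must still be unmapped before falling back to a GFP_DMA bounce buffer. A condensed sketch of that check (hypothetical helper, not in the patch; names taken from the diff above):

	/* Returns 0 if the mapping is usable for the 30-bit-limited chip;
	 * otherwise undoes the mapping and asks the caller to bounce. */
	static int b44_mapping_ok(struct b44 *bp, dma_addr_t mapping, size_t len)
	{
		if (!ssb_dma_mapping_error(bp->sdev, mapping) &&
		    mapping + len <= DMA_30BIT_MASK)
			return 0;
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping, len,
					     DMA_TO_DEVICE);
		return -ENOMEM;	/* caller retries from a GFP_DMA buffer */
	}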
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 8a09a1db08db..098f886976f6 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -328,11 +328,11 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-					 buf, len, DMA_TO_DEVICE);
+		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+					     buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-					 buf, len, DMA_FROM_DEVICE);
+		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+					     buf, len, DMA_FROM_DEVICE);
 	}
 
 	return dmaaddr;
@@ -343,11 +343,11 @@ static inline
 		      dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		dma_unmap_single(ring->dev->dev->dma_dev,
-				 addr, len, DMA_TO_DEVICE);
+		ssb_dma_unmap_single(ring->dev->dev,
+				     addr, len, DMA_TO_DEVICE);
 	} else {
-		dma_unmap_single(ring->dev->dev->dma_dev,
-				 addr, len, DMA_FROM_DEVICE);
+		ssb_dma_unmap_single(ring->dev->dev,
+				     addr, len, DMA_FROM_DEVICE);
 	}
 }
 
@@ -356,8 +356,8 @@ static inline
 			     dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
-				addr, len, DMA_FROM_DEVICE);
+	ssb_dma_sync_single_for_cpu(ring->dev->dev,
+				    addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -365,8 +365,8 @@ static inline
 				dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_device(ring->dev->dev->dma_dev,
-				   addr, len, DMA_FROM_DEVICE);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -381,7 +381,6 @@ static inline
 
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	struct device *dma_dev = ring->dev->dev->dma_dev;
 	gfp_t flags = GFP_KERNEL;
 
 	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
@@ -392,11 +391,14 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
 	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
 	 * which accounts for the GFP_DMA flag below.
+	 *
+	 * The flags here must match the flags in free_ringmemory below!
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
-					    &(ring->dmabase), flags);
+	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+						  B43_DMA_RINGMEMSIZE,
+						  &(ring->dmabase), flags);
 	if (!ring->descbase) {
 		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
 		return -ENOMEM;
@@ -408,10 +410,13 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	struct device *dma_dev = ring->dev->dev->dma_dev;
+	gfp_t flags = GFP_KERNEL;
+
+	if (ring->type == B43_DMA_64BIT)
+		flags |= GFP_DMA;
 
-	dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
-			  ring->descbase, ring->dmabase);
+	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
+				ring->descbase, ring->dmabase, flags);
 }
 
 /* Reset the RX DMA channel */
@@ -518,7 +523,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 				  dma_addr_t addr,
 				  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(dma_mapping_error(addr)))
+	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -844,10 +849,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->dev->dma_dev,
-					  ring->txhdr_cache,
-					  b43_txhdr_size(dev),
-					  DMA_TO_DEVICE);
+		dma_test = ssb_dma_map_single(dev->dev,
+					      ring->txhdr_cache,
+					      b43_txhdr_size(dev),
+					      DMA_TO_DEVICE);
 
 		if (b43_dma_mapping_error(ring, dma_test,
 					  b43_txhdr_size(dev), 1)) {
@@ -859,10 +864,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = dma_map_single(dev->dev->dma_dev,
-						  ring->txhdr_cache,
-						  b43_txhdr_size(dev),
-						  DMA_TO_DEVICE);
+			dma_test = ssb_dma_map_single(dev->dev,
+						      ring->txhdr_cache,
+						      b43_txhdr_size(dev),
+						      DMA_TO_DEVICE);
 
 			if (b43_dma_mapping_error(ring, dma_test,
 						  b43_txhdr_size(dev), 1)) {
@@ -873,9 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			}
 		}
 
-		dma_unmap_single(dev->dev->dma_dev,
-				 dma_test, b43_txhdr_size(dev),
-				 DMA_TO_DEVICE);
+		ssb_dma_unmap_single(dev->dev,
+				     dma_test, b43_txhdr_size(dev),
+				     DMA_TO_DEVICE);
 	}
 
 	err = alloc_ringmemory(ring);
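Note why alloc_ringmemory() and free_ringmemory() now compute identical GFP flags: on a PCI host, ssb_dma_free_consistent() uses the flags to decide whether the memory came from dma_alloc_coherent() (the GFP_DMA workaround) or pci_alloc_consistent(), so freeing with different flags would release the memory through the wrong API. A short sketch of the paired calls (ring fields as in the b43 code above; a sketch, not the full driver logic):

	gfp_t flags = GFP_KERNEL;

	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;	/* must be identical at alloc and free */
	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
						  B43_DMA_RINGMEMSIZE,
						  &ring->dmabase, flags);
	/* ... use the ring ... */
	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
				ring->descbase, ring->dmabase, flags);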
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 33cc256c5baf..9736b2f56a75 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -393,13 +393,13 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
 	dma_addr_t dmaaddr;
 
 	if (tx)
-		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-					 buf, len,
-					 DMA_TO_DEVICE);
+		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+					     buf, len,
+					     DMA_TO_DEVICE);
 	else
-		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
-					 buf, len,
-					 DMA_FROM_DEVICE);
+		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+					     buf, len,
+					     DMA_FROM_DEVICE);
 
 	return dmaaddr;
 }
@@ -411,13 +411,13 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring,
 		      int tx)
 {
 	if (tx)
-		dma_unmap_single(ring->dev->dev->dma_dev,
-				 addr, len,
-				 DMA_TO_DEVICE);
+		ssb_dma_unmap_single(ring->dev->dev,
+				     addr, len,
+				     DMA_TO_DEVICE);
 	else
-		dma_unmap_single(ring->dev->dev->dma_dev,
-				 addr, len,
-				 DMA_FROM_DEVICE);
+		ssb_dma_unmap_single(ring->dev->dev,
+				     addr, len,
+				     DMA_FROM_DEVICE);
 }
 
 static inline
@@ -427,8 +427,8 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
-				addr, len, DMA_FROM_DEVICE);
+	ssb_dma_sync_single_for_cpu(ring->dev->dev,
+				    addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -438,8 +438,8 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
 {
 	B43legacy_WARN_ON(ring->tx);
 
-	dma_sync_single_for_device(ring->dev->dev->dma_dev,
-				   addr, len, DMA_FROM_DEVICE);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -458,10 +458,11 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
 
 static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 {
-	struct device *dma_dev = ring->dev->dev->dma_dev;
-
-	ring->descbase = dma_alloc_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
-					    &(ring->dmabase), GFP_KERNEL);
+	/* GFP flags must match the flags in free_ringmemory()! */
+	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+						  B43legacy_DMA_RINGMEMSIZE,
+						  &(ring->dmabase),
+						  GFP_KERNEL);
 	if (!ring->descbase) {
 		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
 			     " failed\n");
@@ -474,10 +475,8 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
 
 static void free_ringmemory(struct b43legacy_dmaring *ring)
 {
-	struct device *dma_dev = ring->dev->dev->dma_dev;
-
-	dma_free_coherent(dma_dev, B43legacy_DMA_RINGMEMSIZE,
-			  ring->descbase, ring->dmabase);
+	ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE,
+				ring->descbase, ring->dmabase, GFP_KERNEL);
 }
 
 /* Reset the RX DMA channel */
@@ -589,7 +588,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
 					size_t buffersize,
 					bool dma_to_device)
 {
-	if (unlikely(dma_mapping_error(addr)))
+	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -893,9 +892,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
-					  sizeof(struct b43legacy_txhdr_fw3),
-					  DMA_TO_DEVICE);
+		dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache,
+					      sizeof(struct b43legacy_txhdr_fw3),
+					      DMA_TO_DEVICE);
 
 		if (b43legacy_dma_mapping_error(ring, dma_test,
 				sizeof(struct b43legacy_txhdr_fw3), 1)) {
@@ -907,7 +906,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = dma_map_single(dev->dev->dma_dev,
+			dma_test = ssb_dma_map_single(dev->dev,
 					  ring->txhdr_cache,
 					  sizeof(struct b43legacy_txhdr_fw3),
 					  DMA_TO_DEVICE);
@@ -917,9 +916,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
 				goto err_kfree_txhdr_cache;
 			}
 
-		dma_unmap_single(dev->dev->dma_dev,
-				 dma_test, sizeof(struct b43legacy_txhdr_fw3),
-				 DMA_TO_DEVICE);
+		ssb_dma_unmap_single(dev->dev, dma_test,
+				     sizeof(struct b43legacy_txhdr_fw3),
+				     DMA_TO_DEVICE);
 	}
 
 	ring->dev = dev;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index cd845b8acd17..307b1f62d949 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -2,7 +2,7 @@ menu "Sonics Silicon Backplane"
 
 config SSB_POSSIBLE
 	bool
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
 	default y
 
 config SSB
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index d184f2aea78d..d831a2beff39 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -462,18 +462,15 @@ static int ssb_devices_register(struct ssb_bus *bus)
 #ifdef CONFIG_SSB_PCIHOST
 			sdev->irq = bus->host_pci->irq;
 			dev->parent = &bus->host_pci->dev;
-			sdev->dma_dev = &bus->host_pci->dev;
 #endif
 			break;
 		case SSB_BUSTYPE_PCMCIA:
 #ifdef CONFIG_SSB_PCMCIAHOST
 			sdev->irq = bus->host_pcmcia->irq.AssignedIRQ;
 			dev->parent = &bus->host_pcmcia->dev;
-			sdev->dma_dev = &bus->host_pcmcia->dev;
 #endif
 			break;
 		case SSB_BUSTYPE_SSB:
-			sdev->dma_dev = dev;
 			break;
 		}
 
@@ -1156,36 +1153,82 @@ u32 ssb_dma_translation(struct ssb_device *dev)
 {
 	switch (dev->bus->bustype) {
 	case SSB_BUSTYPE_SSB:
-	case SSB_BUSTYPE_PCMCIA:
 		return 0;
 	case SSB_BUSTYPE_PCI:
 		return SSB_PCI_DMA;
+	default:
+		__ssb_dma_not_implemented(dev);
 	}
 	return 0;
 }
 EXPORT_SYMBOL(ssb_dma_translation);
 
-int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask)
+int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
 {
-	struct device *dma_dev = ssb_dev->dma_dev;
-	int err = 0;
+	int err;
 
-#ifdef CONFIG_SSB_PCIHOST
-	if (ssb_dev->bus->bustype == SSB_BUSTYPE_PCI) {
-		err = pci_set_dma_mask(ssb_dev->bus->host_pci, mask);
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		err = pci_set_dma_mask(dev->bus->host_pci, mask);
 		if (err)
 			return err;
-		err = pci_set_consistent_dma_mask(ssb_dev->bus->host_pci, mask);
+		err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
 		return err;
+	case SSB_BUSTYPE_SSB:
+		return dma_set_mask(dev->dev, mask);
+	default:
+		__ssb_dma_not_implemented(dev);
 	}
-#endif
-	dma_dev->coherent_dma_mask = mask;
-	dma_dev->dma_mask = &dma_dev->coherent_dma_mask;
-
-	return err;
+	return -ENOSYS;
 }
 EXPORT_SYMBOL(ssb_dma_set_mask);
 
+void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp_flags)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		if (gfp_flags & GFP_DMA) {
+			/* Workaround: The PCI API does not support passing
+			 * a GFP flag. */
+			return dma_alloc_coherent(&dev->bus->host_pci->dev,
+						  size, dma_handle, gfp_flags);
+		}
+		return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
+	case SSB_BUSTYPE_SSB:
+		return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(ssb_dma_alloc_consistent);
+
+void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
+			     void *vaddr, dma_addr_t dma_handle,
+			     gfp_t gfp_flags)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		if (gfp_flags & GFP_DMA) {
+			/* Workaround: The PCI API does not support passing
+			 * a GFP flag. */
+			dma_free_coherent(&dev->bus->host_pci->dev,
+					  size, vaddr, dma_handle);
+			return;
+		}
+		pci_free_consistent(dev->bus->host_pci, size,
+				    vaddr, dma_handle);
+		return;
+	case SSB_BUSTYPE_SSB:
+		dma_free_coherent(dev->dev, size, vaddr, dma_handle);
+		return;
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+}
+EXPORT_SYMBOL(ssb_dma_free_consistent);
+
 int ssb_bus_may_powerdown(struct ssb_bus *bus)
 {
 	struct ssb_chipcommon *cc;
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 50dfd0dc4093..0fe5a0ded3ea 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -137,9 +137,6 @@ struct ssb_device {
 	const struct ssb_bus_ops *ops;
 
 	struct device *dev;
-	/* Pointer to the device that has to be used for
-	 * any DMA related operation. */
-	struct device *dma_dev;
 
 	struct ssb_bus *bus;
 	struct ssb_device_id id;
@@ -399,13 +396,151 @@ static inline void ssb_block_write(struct ssb_device *dev, const void *buffer,
 #endif /* CONFIG_SSB_BLOCKIO */
 
 
+/* The SSB DMA API. Use this API for any DMA operation on the device.
+ * This API basically is a wrapper that calls the correct DMA API for
+ * the host device type the SSB device is attached to. */
+
 /* Translation (routing) bits that need to be ORed to DMA
  * addresses before they are given to a device. */
 extern u32 ssb_dma_translation(struct ssb_device *dev);
 #define SSB_DMA_TRANSLATION_MASK	0xC0000000
 #define SSB_DMA_TRANSLATION_SHIFT	30
 
-extern int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask);
+extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
+
+extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t gfp_flags);
+extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
+				    void *vaddr, dma_addr_t dma_handle,
+				    gfp_t gfp_flags);
+
+static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
+{
+#ifdef CONFIG_SSB_DEBUG
+	printk(KERN_ERR "SSB: BUG! Calling DMA API for "
+	       "unsupported bustype %d\n", dev->bus->bustype);
+#endif /* DEBUG */
+}
+
+static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		return pci_dma_mapping_error(addr);
+	case SSB_BUSTYPE_SSB:
+		return dma_mapping_error(addr);
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+	return -ENOSYS;
+}
+
+static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
+					    size_t size, enum dma_data_direction dir)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		return pci_map_single(dev->bus->host_pci, p, size, dir);
+	case SSB_BUSTYPE_SSB:
+		return dma_map_single(dev->dev, p, size, dir);
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+	return 0;
+}
+
+static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
+					size_t size, enum dma_data_direction dir)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
+		return;
+	case SSB_BUSTYPE_SSB:
+		dma_unmap_single(dev->dev, dma_addr, size, dir);
+		return;
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+}
+
+static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
+					       dma_addr_t dma_addr,
+					       size_t size,
+					       enum dma_data_direction dir)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+					    size, dir);
+		return;
+	case SSB_BUSTYPE_SSB:
+		dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
+		return;
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+}
+
+static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
+						  dma_addr_t dma_addr,
+						  size_t size,
+						  enum dma_data_direction dir)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+					       size, dir);
+		return;
+	case SSB_BUSTYPE_SSB:
+		dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
+		return;
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+}
+
+static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
+						     dma_addr_t dma_addr,
+						     unsigned long offset,
+						     size_t size,
+						     enum dma_data_direction dir)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		/* Just sync everything. That's all the PCI API can do. */
+		pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+					    offset + size, dir);
+		return;
+	case SSB_BUSTYPE_SSB:
+		dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
+					      size, dir);
+		return;
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+}
+
+static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
+							dma_addr_t dma_addr,
+							unsigned long offset,
+							size_t size,
+							enum dma_data_direction dir)
+{
+	switch (dev->bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		/* Just sync everything. That's all the PCI API can do. */
+		pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+					       offset + size, dir);
+		return;
+	case SSB_BUSTYPE_SSB:
+		dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
+						 size, dir);
+		return;
+	default:
+		__ssb_dma_not_implemented(dev);
+	}
+}
 
 
 #ifdef CONFIG_SSB_PCIHOST