author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2010-06-03 22:37:40 -0400
committer  John W. Linville <linville@tuxdriver.com>         2010-06-04 16:00:42 -0400
commit     39a6f4bce6b437046edf042f78f7a0529e253bff (patch)
tree       a479bd071e464360275293d8d350205bac8135f9 /drivers/net/b44.c
parent     718e8898af2c523b1785f025350c34c59750734d (diff)
b44: replace the ssb_dma API with the generic DMA API
Note that dma_sync_single_for_device and dma_sync_single_for_cpu support a
partial sync, so the ssb_dma_sync_single_range_for_{cpu,device} calls can be
replaced with them directly.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Gary Zambrano <zambrano@broadcom.com>
Acked-by: Michael Buesch <mb@bu3sch.de>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
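
The conversion itself is mechanical: every ssb_dma_* wrapper call taking the struct ssb_device is replaced by the corresponding generic DMA API call on sdev->dma_dev. A rough summary of the substitutions appearing in the diff below (illustrative and condensed from the hunks; argument lists abbreviated):

/*
 * ssb_dma_map_single(sdev, ...)          ->  dma_map_single(sdev->dma_dev, ...)
 * ssb_dma_unmap_single(sdev, ...)        ->  dma_unmap_single(sdev->dma_dev, ...)
 * ssb_dma_mapping_error(sdev, addr)      ->  dma_mapping_error(sdev->dma_dev, addr)
 * ssb_dma_sync_single_for_cpu/_device    ->  dma_sync_single_for_cpu/_device(sdev->dma_dev, ...)
 * ssb_dma_sync_single_range_for_cpu/
 *   _device(sdev, base, offset, len, dir)->  dma_sync_single_for_cpu/_device(sdev->dma_dev,
 *                                             base + offset, len, dir)  /* partial sync */
 * ssb_dma_alloc_consistent(sdev, ...)    ->  dma_alloc_coherent(sdev->dma_dev, ...)
 * ssb_dma_free_consistent(sdev, ..., gfp)->  dma_free_coherent(sdev->dma_dev, ...)
 * ssb_dma_set_mask(sdev, mask)           ->  dma_set_mask(sdev->dma_dev, mask) and
 *                                             dma_set_coherent_mask(sdev->dma_dev, mask)
 */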
Diffstat (limited to 'drivers/net/b44.c')
-rw-r--r--  drivers/net/b44.c  144
1 file changed, 70 insertions(+), 74 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 293f9c16e786..3d52538df6c4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -150,9 +150,8 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_device(sdev, dma_base,
-					     offset & dma_desc_align_mask,
-					     dma_desc_sync_size, dir);
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +159,8 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
-					  offset & dma_desc_align_mask,
-					  dma_desc_sync_size, dir);
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
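
The two hunks above are where the partial-sync remark from the commit message matters: the old ssb range helpers took a base address plus an offset, while the generic calls simply take the address to sync, which may point anywhere inside a streaming mapping. A minimal sketch of the ownership hand-off for a single descriptor, assuming a descriptor ring mapped with dma_map_single(); the helper name b44_touch_one_desc is illustrative and not part of the driver:

#include <linux/dma-mapping.h>

/* Sync one descriptor out of a larger streaming mapping: hand it to the
 * CPU, let the CPU read or update it, then hand it back to the device.
 * Both calls accept a partial length at an offset inside the mapping. */
static void b44_touch_one_desc(struct device *dma_dev, dma_addr_t dma_base,
			       unsigned long offset, size_t desc_sync_size,
			       enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dma_dev, dma_base + offset, desc_sync_size, dir);

	/* ... CPU reads or updates the descriptor here ... */

	dma_sync_single_for_device(dma_dev, dma_base + offset, desc_sync_size, dir);
}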
@@ -608,10 +606,10 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		ssb_dma_unmap_single(bp->sdev,
-				     rp->mapping,
-				     skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
 		rp->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
@@ -648,29 +646,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data,
-				     RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping,
-					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
+					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = ssb_dma_map_single(bp->sdev, skb->data,
-					     RX_PKT_BUF_SZ,
-					     DMA_FROM_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
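
The hunk above only swaps the API; the driver's 30-bit (1GB) workaround logic is unchanged. A condensed sketch of the fallback path it implements, assuming the driver's struct b44, RX_PKT_BUF_SZ and related definitions; the helper name b44_alloc_rx_buf_low is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* If a freshly mapped RX buffer fails to map, or lands above the 1GB
 * boundary the chip can address, retry with a GFP_DMA allocation. */
static struct sk_buff *b44_alloc_rx_buf_low(struct b44 *bp, dma_addr_t *mapping)
{
	struct sk_buff *skb;

	/* Retry from ZONE_DMA: more restrictive than strictly needed,
	 * but guaranteed to sit low enough for the chip. */
	skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
	if (!skb)
		return NULL;

	*mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				  RX_PKT_BUF_SZ, DMA_FROM_DEVICE);

	/* Still unusable: the mapping failed outright or exceeds the
	 * 30-bit limit; undo whatever was mapped and give up. */
	if (dma_mapping_error(bp->sdev->dma_dev, *mapping) ||
	    *mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		if (!dma_mapping_error(bp->sdev->dma_dev, *mapping))
			dma_unmap_single(bp->sdev->dma_dev, *mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return NULL;
	}

	return skb;
}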
@@ -745,9 +743,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 				       dest_idx * sizeof(*dest_desc),
 				       DMA_BIDIRECTIONAL);
 
-	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
-				       RX_PKT_BUF_SZ,
-				       DMA_FROM_DEVICE);
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +765,9 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		ssb_dma_sync_single_for_cpu(bp->sdev, map,
-					    RX_PKT_BUF_SZ,
-					    DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +799,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			ssb_dma_unmap_single(bp->sdev, map,
-					     skb_size, DMA_FROM_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
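
In b44_rx() the receive buffer stays mapped while the chip owns it: the buffer is synced for the CPU before the rx_header is read, and the mapping is only torn down once the packet is really handed up the stack, as the two hunks above show. A minimal sketch of that sync-before-read step, assuming the driver's struct b44, struct rx_header and RX_PKT_BUF_SZ; the helper name b44_peek_rx_len is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Make the DMA'd data visible to the CPU before parsing the receive
 * header; the mapping itself stays in place until the packet is kept. */
static u16 b44_peek_rx_len(struct b44 *bp, dma_addr_t map, struct sk_buff *skb)
{
	struct rx_header *rh;

	dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
				RX_PKT_BUF_SZ, DMA_FROM_DEVICE);

	rh = (struct rx_header *) skb->data;
	return le16_to_cpu(rh->len);
}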
@@ -954,24 +952,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping, len,
-					     DMA_TO_DEVICE);
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+					 DMA_TO_DEVICE);
 
 		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
-					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping,
-						     len, DMA_TO_DEVICE);
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
+						 len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1068,8 +1066,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1080,8 +1078,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1103,14 +1101,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_BIDIRECTIONAL);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_TO_DEVICE);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
 
 	for (i = 0; i < bp->rx_pending; i++) {
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
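
The B44_FLAG_RX_RING_HACK / B44_FLAG_TX_RING_HACK paths in the hunk above cover rings that were kmalloc'ed and streaming-mapped instead of allocated coherently, so after the CPU writes the descriptor tables in b44_init_rings() they have to be pushed back to the device explicitly. A sketch of that sequence for the TX table, using the driver's names; the wrapper function itself is illustrative:

#include <linux/dma-mapping.h>
#include <linux/string.h>

static void b44_flush_tx_table(struct b44 *bp)
{
	/* The CPU fills the streaming-mapped table first ... */
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	/* ... then pushes it toward the device, mirroring what
	 * b44_init_rings() does for the "ring hack" case. */
	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
}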
@@ -1130,27 +1126,23 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_BIDIRECTIONAL);
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->rx_ring, bp->rx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_TO_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->tx_ring, bp->tx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 	}
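
The consistent-memory pair in the hunk above is the one place where the argument list changes rather than just the callee: ssb_dma_free_consistent() took a gfp argument (GFP_KERNEL above), while dma_free_coherent() does not, and only dma_alloc_coherent() still accepts one. A minimal sketch of the allocate/release pairing for one descriptor table, assuming the driver's DMA_TABLE_BYTES; the helper name b44_table_example is hypothetical and error handling is trimmed:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int b44_table_example(struct device *dma_dev, gfp_t gfp)
{
	dma_addr_t ring_dma;
	void *ring;

	/* dma_alloc_coherent() still takes a gfp argument ... */
	ring = dma_alloc_coherent(dma_dev, DMA_TABLE_BYTES, &ring_dma, gfp);
	if (!ring)
		return -ENOMEM;

	/* ... the device is given ring_dma, the CPU writes through ring ... */

	/* ... but dma_free_coherent() no longer wants one. */
	dma_free_coherent(dma_dev, DMA_TABLE_BYTES, ring, ring_dma);
	return 0;
}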
@@ -1175,7 +1167,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1180,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_BIDIRECTIONAL);
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
 
-		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
 		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(rx_ring);
 			goto out_err;
@@ -1202,7 +1195,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1208,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_TO_DEVICE);
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
 
-		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
 		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(tx_ring);
 			goto out_err;
@@ -2176,12 +2170,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 	}
-	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
-	if (err) {
+
+	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
 	}
+
 	err = b44_get_invariants(bp);
 	if (err) {
 		dev_err(sdev->dev,
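
The final hunk turns one ssb_dma_set_mask() call into two because the generic API keeps separate masks for streaming and coherent allocations; both must accept the chip's 30-bit limit or the probe fails. A minimal sketch of that probe-time check, assuming a struct device *dma_dev as used in the patch; the helper name b44_set_dma_masks is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Both the streaming and the coherent DMA mask must fit the chip's
 * 30-bit (1GB) addressing limit, or the probe has to bail out. */
static int b44_set_dma_masks(struct device *dma_dev)
{
	if (dma_set_mask(dma_dev, DMA_BIT_MASK(30)) ||
	    dma_set_coherent_mask(dma_dev, DMA_BIT_MASK(30)))
		return -EIO;

	return 0;
}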