| author | Steve French <sfrench@us.ibm.com> | 2008-04-17 19:38:45 -0400 |
|---|---|---|
| committer | Steve French <sfrench@us.ibm.com> | 2008-04-17 19:38:45 -0400 |
| commit | 20e673810c69d18bee2ed74d19af3806ec2504f5 | |
| tree | 7c22dc5246295a82f2688a23ae1c7f3a4f424302 | |
| parent | 8d142137b4fe87188f211042b16a5993964226f9 | |
| parent | 4b119e21d0c66c22e8ca03df05d9de623d0eb50f | |
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/net/b44.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/net/b44.c | 52 |

1 file changed, 26 insertions, 26 deletions
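The b44.c portion of this merge is a mechanical rename: every DMA API call in the driver now passes the SSB device's dedicated `dma_dev` pointer instead of its generic embedded `dev`, so mappings, syncs, and coherent allocations are all issued against the `struct device` the SSB core designates for DMA. The first group of hunks covers the data path: the descriptor sync helpers, TX completion, RX buffer allocation, RX processing, and `b44_start_xmit`.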
```diff
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 25f1337cd02c..59dce6aa0865 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -148,7 +148,7 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_device(sdev->dev, dma_base,
+	dma_sync_single_range_for_device(sdev->dma_dev, dma_base,
 					 offset & dma_desc_align_mask,
 					 dma_desc_sync_size, dir);
 }
@@ -158,7 +158,7 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					  unsigned long offset,
 					  enum dma_data_direction dir)
 {
-	dma_sync_single_range_for_cpu(sdev->dev, dma_base,
+	dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base,
 				      offset & dma_desc_align_mask,
 				      dma_desc_sync_size, dir);
 }
@@ -613,7 +613,7 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		dma_unmap_single(bp->sdev->dev,
+		dma_unmap_single(bp->sdev->dma_dev,
 				 rp->mapping,
 				 skb->len,
 				 DMA_TO_DEVICE);
@@ -653,7 +653,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = dma_map_single(bp->sdev->dev, skb->data,
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 				 RX_PKT_BUF_SZ,
 				 DMA_FROM_DEVICE);
 
@@ -663,19 +663,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
 		/* Sigh... */
 		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dev, mapping,
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = dma_map_single(bp->sdev->dev, skb->data,
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 					 RX_PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
 		if (dma_mapping_error(mapping) ||
 		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					     dest_idx * sizeof(dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
+	dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr),
 				   RX_PKT_BUF_SZ,
 				   DMA_FROM_DEVICE);
 }
@@ -772,7 +772,7 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		dma_sync_single_for_cpu(bp->sdev->dev, map,
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
 					RX_PKT_BUF_SZ,
 					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
@@ -806,7 +806,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			dma_unmap_single(bp->sdev->dev, map,
+			dma_unmap_single(bp->sdev->dma_dev, map,
 					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
@@ -966,24 +966,24 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 		if (!dma_mapping_error(mapping))
-			dma_unmap_single(bp->sdev->dev, mapping, len,
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 					 DMA_TO_DEVICE);
 
 		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
 					 len, DMA_TO_DEVICE);
 		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
-				dma_unmap_single(bp->sdev->dev, mapping,
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
 						 len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
```
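The `b44_start_xmit` hunk shows the driver's bounce-buffer fallback in full: the chip can only DMA below 1 GB, so a mapping that fails, or that succeeds but lands above `DMA_30BIT_MASK`, is redone from a `GFP_DMA` allocation. Below is a minimal sketch of that pattern, kept to the 2008-era single-argument `dma_mapping_error()`; the function name `b44_map_tx_skb` and its parameters are illustrative stand-ins, not code from this patch:

```c
/*
 * Sketch of the map-check-bounce pattern from b44_start_xmit().
 * Assumes the 2008-era DMA API (single-argument dma_mapping_error(),
 * DMA_30BIT_MASK); names here are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int b44_map_tx_skb(struct device *dma_dev, struct sk_buff **pskb,
			  dma_addr_t *pmapping)
{
	struct sk_buff *skb = *pskb;
	unsigned int len = skb->len;
	dma_addr_t mapping;

	mapping = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
		struct sk_buff *bounce_skb;

		/* A mapping may "succeed" yet sit above the 30-bit
		 * window; it still must be unmapped before retrying. */
		if (!dma_mapping_error(mapping))
			dma_unmap_single(dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		/* GFP_DMA steers the bounce buffer into the low zone. */
		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			return -ENOMEM;

		mapping = dma_map_single(dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(mapping) ||
		    mapping + len > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				dma_unmap_single(dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			return -ENOMEM;
		}

		/* Copy the payload over and drop the original skb. */
		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		*pskb = bounce_skb;
	}

	*pmapping = mapping;
	return 0;
}
```

The remaining hunks make the same `dev` to `dma_dev` substitution in ring teardown, initialization, and allocation: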
```diff
@@ -1082,7 +1082,7 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
@@ -1094,7 +1094,7 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
 				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
@@ -1117,12 +1117,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
 					   DMA_TABLE_BYTES,
 					   DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
 					   DMA_TABLE_BYTES,
 					   DMA_TO_DEVICE);
 
@@ -1144,24 +1144,24 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
 					 DMA_TABLE_BYTES,
 					 DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
 					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
 					 DMA_TABLE_BYTES,
 					 DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
 					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
@@ -1187,7 +1187,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1199,7 +1199,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
 					     DMA_TABLE_BYTES,
 					     DMA_BIDIRECTIONAL);
 
@@ -1214,7 +1214,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to dma_alloc_coherent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1226,7 +1226,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
 					     DMA_TABLE_BYTES,
 					     DMA_TO_DEVICE);
 
```
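`b44_alloc_consistent()` and `b44_free_consistent()` carry the other recurring pattern here: try `dma_alloc_coherent()` first, and when it fails (the in-code comments blame the allocator insisting on `GFP_DMA`), fall back to an ordinary allocation that is streaming-mapped by hand, recording the fallback in a `B44_FLAG_*_RING_HACK` flag so teardown calls the matching unmap-and-kfree path instead of `dma_free_coherent()`. A minimal sketch of that pairing, assuming hypothetical helpers `ring_alloc()`/`ring_free()` and the same old-style `dma_mapping_error()`:

```c
/*
 * Sketch of the ring-allocation fallback in b44_alloc_consistent():
 * prefer a coherent allocation, else map a kzalloc'd table by hand
 * and flag it so the free path mirrors the right API. The helper
 * names and the `hack` flag are illustrative, not the driver's own.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/types.h>

static void *ring_alloc(struct device *dma_dev, size_t size,
			dma_addr_t *dma, bool *hack, gfp_t gfp)
{
	void *ring = dma_alloc_coherent(dma_dev, size, dma, gfp);

	if (ring) {
		*hack = false;
		return ring;
	}

	/* Coherent path failed; fall back to a streaming mapping. */
	ring = kzalloc(size, gfp);
	if (!ring)
		return NULL;

	*dma = dma_map_single(dma_dev, ring, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(*dma)) {
		kfree(ring);
		return NULL;
	}

	*hack = true;
	return ring;
}

static void ring_free(struct device *dma_dev, void *ring, size_t size,
		      dma_addr_t dma, bool hack)
{
	if (hack) {
		/* Streaming-mapped table: unmap, then free normally. */
		dma_unmap_single(dma_dev, dma, size, DMA_BIDIRECTIONAL);
		kfree(ring);
	} else {
		dma_free_coherent(dma_dev, size, ring, dma);
	}
}
```

Whichever path produced the table, a streaming-mapped ring is not coherent, so CPU writes must be pushed out before the chip reads it; that is why `b44_init_rings()` issues `dma_sync_single_for_device()` behind the `B44_FLAG_RX_RING_HACK`/`B44_FLAG_TX_RING_HACK` checks in the hunks above.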
