author | David S. Miller <davem@davemloft.net> | 2008-08-27 21:09:11 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-08-29 05:13:15 -0400 |
commit | 738f2b7b813913e651f39387d007dd961755dee2 (patch) | |
tree | 022ca4d144cba51495e6f26a8f55d3046d16c2e3 | |
parent | 944c67dff7a88f0a775e5b604937f9e30d2de555 (diff) | |
sparc: Convert all SBUS drivers to dma_*() interfaces.
All of the SBUS DMA interfaces are deleted by this change.
A private implementation remains inside the 32-bit sparc port; it
exists only for the sake of implementing dma_*() there.
Signed-off-by: David S. Miller <davem@davemloft.net>
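Every driver hunk below applies the same mechanical substitution: the SBUS-private wrappers and SBUS_DMA_* direction constants give way to the generic DMA API from <linux/dma-mapping.h>, with dma_alloc_coherent() picking up an explicit GFP_ATOMIC argument to match what the deleted sbus_alloc_consistent() wrapper passed. The sketch that follows is illustrative only (the helper name, device pointer, and buffer arguments are placeholders rather than code from any single driver), but the calling convention is the one the patch installs everywhere:

```c
#include <linux/dma-mapping.h>
#include <asm/sbus.h>		/* struct sbus_dev on sparc at this point in time */

/* Old SBUS-private call (deleted below)               Generic replacement
 * sbus_alloc_consistent(dev, size, &dvma)             dma_alloc_coherent(dev, size, &dvma, GFP_ATOMIC)
 * sbus_free_consistent(dev, size, cpu, dvma)          dma_free_coherent(dev, size, cpu, dvma)
 * sbus_map_single(dev, ptr, len, SBUS_DMA_TODEVICE)   dma_map_single(dev, ptr, len, DMA_TO_DEVICE)
 * sbus_unmap_single(dev, addr, len, dir)              dma_unmap_single(dev, addr, len, dir)
 * sbus_map_sg() / sbus_unmap_sg()                     dma_map_sg() / dma_unmap_sg()
 * sbus_dma_sync_single_for_{cpu,device}()             dma_sync_single_for_{cpu,device}()
 */

/* Hypothetical helper showing the converted calling convention; "sdev",
 * "buf", and "len" stand in for any SBUS device and streaming buffer. */
static int example_dma_setup(struct sbus_dev *sdev, void *buf, size_t len)
{
	struct device *dev = &sdev->ofdev.dev;
	void *block;
	dma_addr_t block_dvma, buf_dvma;

	/* Consistent (coherent) descriptor memory, formerly sbus_alloc_consistent(). */
	block = dma_alloc_coherent(dev, PAGE_SIZE, &block_dvma, GFP_ATOMIC);
	if (!block)
		return -ENOMEM;

	/* Streaming mapping, formerly sbus_map_single(..., SBUS_DMA_TODEVICE). */
	buf_dvma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program block_dvma / buf_dvma into the hardware here ... */

	dma_unmap_single(dev, buf_dvma, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, block, block_dvma);
	return 0;
}
```

GFP_ATOMIC mirrors what the deleted sbus_64.h inline passed unconditionally, which is why it appears verbatim at every converted allocation site in the hunks below.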
-rw-r--r-- | arch/sparc/include/asm/sbus_32.h | 17 |
-rw-r--r-- | arch/sparc/include/asm/sbus_64.h | 63 |
-rw-r--r-- | arch/sparc/kernel/sparc_ksyms.c | 8 |
-rw-r--r-- | arch/sparc64/kernel/sparc64_ksyms.c | 8 |
-rw-r--r-- | drivers/atm/fore200e.c | 16 |
-rw-r--r-- | drivers/net/myri_sbus.c | 63 |
-rw-r--r-- | drivers/net/sunbmac.c | 68 |
-rw-r--r-- | drivers/net/sunhme.c | 85 |
-rw-r--r-- | drivers/net/sunlance.c | 15 |
-rw-r--r-- | drivers/net/sunqe.c | 45 |
-rw-r--r-- | drivers/scsi/qlogicpti.c | 53 |
-rw-r--r-- | drivers/scsi/sun_esp.c | 26 |
-rw-r--r-- | sound/sparc/dbri.c | 41 |
-rw-r--r-- | sound/core/memalloc.c | 8 |
14 files changed, 206 insertions(+), 310 deletions(-)
diff --git a/arch/sparc/include/asm/sbus_32.h b/arch/sparc/include/asm/sbus_32.h
index 61d99f1bb23c..b09284b5ee06 100644
--- a/arch/sparc/include/asm/sbus_32.h
+++ b/arch/sparc/include/asm/sbus_32.h
@@ -109,26 +109,9 @@ extern void sbus_set_sbus64(struct sbus_dev *, int); | |||
109 | extern void sbus_fill_device_irq(struct sbus_dev *); | 109 | extern void sbus_fill_device_irq(struct sbus_dev *); |
110 | 110 | ||
111 | /* These yield IOMMU mappings in consistent mode. */ | 111 | /* These yield IOMMU mappings in consistent mode. */ |
112 | extern void *sbus_alloc_consistent(struct device *, long, u32 *dma_addrp); | ||
113 | extern void sbus_free_consistent(struct device *, long, void *, u32); | ||
114 | void prom_adjust_ranges(struct linux_prom_ranges *, int, | 112 | void prom_adjust_ranges(struct linux_prom_ranges *, int, |
115 | struct linux_prom_ranges *, int); | 113 | struct linux_prom_ranges *, int); |
116 | 114 | ||
117 | #define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL | ||
118 | #define SBUS_DMA_TODEVICE DMA_TO_DEVICE | ||
119 | #define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE | ||
120 | #define SBUS_DMA_NONE DMA_NONE | ||
121 | |||
122 | /* All the rest use streaming mode mappings. */ | ||
123 | extern dma_addr_t sbus_map_single(struct device *, void *, size_t, int); | ||
124 | extern void sbus_unmap_single(struct device *, dma_addr_t, size_t, int); | ||
125 | extern int sbus_map_sg(struct device *, struct scatterlist *, int, int); | ||
126 | extern void sbus_unmap_sg(struct device *, struct scatterlist *, int, int); | ||
127 | |||
128 | /* Finally, allow explicit synchronization of streamable mappings. */ | ||
129 | extern void sbus_dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, int); | ||
130 | extern void sbus_dma_sync_single_for_device(struct device *, dma_addr_t, size_t, int); | ||
131 | |||
132 | /* Eric Brower (ebrower@usa.net) | 115 | /* Eric Brower (ebrower@usa.net) |
133 | * Translate SBus interrupt levels to ino values-- | 116 | * Translate SBus interrupt levels to ino values-- |
134 | * this is used when converting sbus "interrupts" OBP | 117 | * this is used when converting sbus "interrupts" OBP |
diff --git a/arch/sparc/include/asm/sbus_64.h b/arch/sparc/include/asm/sbus_64.h
index b22e99da49d8..9a2f27188f6a 100644
--- a/arch/sparc/include/asm/sbus_64.h
+++ b/arch/sparc/include/asm/sbus_64.h
@@ -100,69 +100,6 @@ extern struct sbus_bus *sbus_root; | |||
100 | extern void sbus_set_sbus64(struct sbus_dev *, int); | 100 | extern void sbus_set_sbus64(struct sbus_dev *, int); |
101 | extern void sbus_fill_device_irq(struct sbus_dev *); | 101 | extern void sbus_fill_device_irq(struct sbus_dev *); |
102 | 102 | ||
103 | static inline void *sbus_alloc_consistent(struct device *dev , size_t size, | ||
104 | dma_addr_t *dma_handle) | ||
105 | { | ||
106 | return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC); | ||
107 | } | ||
108 | |||
109 | static inline void sbus_free_consistent(struct device *dev, size_t size, | ||
110 | void *vaddr, dma_addr_t dma_handle) | ||
111 | { | ||
112 | return dma_free_coherent(dev, size, vaddr, dma_handle); | ||
113 | } | ||
114 | |||
115 | #define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL | ||
116 | #define SBUS_DMA_TODEVICE DMA_TO_DEVICE | ||
117 | #define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE | ||
118 | #define SBUS_DMA_NONE DMA_NONE | ||
119 | |||
120 | /* All the rest use streaming mode mappings. */ | ||
121 | static inline dma_addr_t sbus_map_single(struct device *dev, void *ptr, | ||
122 | size_t size, int direction) | ||
123 | { | ||
124 | return dma_map_single(dev, ptr, size, | ||
125 | (enum dma_data_direction) direction); | ||
126 | } | ||
127 | |||
128 | static inline void sbus_unmap_single(struct device *dev, | ||
129 | dma_addr_t dma_addr, size_t size, | ||
130 | int direction) | ||
131 | { | ||
132 | dma_unmap_single(dev, dma_addr, size, | ||
133 | (enum dma_data_direction) direction); | ||
134 | } | ||
135 | |||
136 | static inline int sbus_map_sg(struct device *dev, struct scatterlist *sg, | ||
137 | int nents, int direction) | ||
138 | { | ||
139 | return dma_map_sg(dev, sg, nents, | ||
140 | (enum dma_data_direction) direction); | ||
141 | } | ||
142 | |||
143 | static inline void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
144 | int nents, int direction) | ||
145 | { | ||
146 | dma_unmap_sg(dev, sg, nents, | ||
147 | (enum dma_data_direction) direction); | ||
148 | } | ||
149 | |||
150 | /* Finally, allow explicit synchronization of streamable mappings. */ | ||
151 | static inline void sbus_dma_sync_single_for_cpu(struct device *dev, | ||
152 | dma_addr_t dma_handle, | ||
153 | size_t size, int direction) | ||
154 | { | ||
155 | dma_sync_single_for_cpu(dev, dma_handle, size, | ||
156 | (enum dma_data_direction) direction); | ||
157 | } | ||
158 | |||
159 | static inline void sbus_dma_sync_single_for_device(struct device *dev, | ||
160 | dma_addr_t dma_handle, | ||
161 | size_t size, int direction) | ||
162 | { | ||
163 | /* No flushing needed to sync cpu writes to the device. */ | ||
164 | } | ||
165 | |||
166 | extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *); | 103 | extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *); |
167 | extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *); | 104 | extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *); |
168 | extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *); | 105 | extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *); |
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 9d85a83586a1..b1d2c975b32c 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -155,14 +155,6 @@ EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached)); | |||
155 | #ifdef CONFIG_SBUS | 155 | #ifdef CONFIG_SBUS |
156 | EXPORT_SYMBOL(sbus_root); | 156 | EXPORT_SYMBOL(sbus_root); |
157 | EXPORT_SYMBOL(sbus_set_sbus64); | 157 | EXPORT_SYMBOL(sbus_set_sbus64); |
158 | EXPORT_SYMBOL(sbus_alloc_consistent); | ||
159 | EXPORT_SYMBOL(sbus_free_consistent); | ||
160 | EXPORT_SYMBOL(sbus_map_single); | ||
161 | EXPORT_SYMBOL(sbus_unmap_single); | ||
162 | EXPORT_SYMBOL(sbus_map_sg); | ||
163 | EXPORT_SYMBOL(sbus_unmap_sg); | ||
164 | EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu); | ||
165 | EXPORT_SYMBOL(sbus_dma_sync_single_for_device); | ||
166 | EXPORT_SYMBOL(sbus_iounmap); | 158 | EXPORT_SYMBOL(sbus_iounmap); |
167 | EXPORT_SYMBOL(sbus_ioremap); | 159 | EXPORT_SYMBOL(sbus_ioremap); |
168 | #endif | 160 | #endif |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 1c56c8b854d5..901c26437b61 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -162,14 +162,6 @@ EXPORT_SYMBOL(auxio_set_lte); | |||
162 | #ifdef CONFIG_SBUS | 162 | #ifdef CONFIG_SBUS |
163 | EXPORT_SYMBOL(sbus_root); | 163 | EXPORT_SYMBOL(sbus_root); |
164 | EXPORT_SYMBOL(sbus_set_sbus64); | 164 | EXPORT_SYMBOL(sbus_set_sbus64); |
165 | EXPORT_SYMBOL(sbus_alloc_consistent); | ||
166 | EXPORT_SYMBOL(sbus_free_consistent); | ||
167 | EXPORT_SYMBOL(sbus_map_single); | ||
168 | EXPORT_SYMBOL(sbus_unmap_single); | ||
169 | EXPORT_SYMBOL(sbus_map_sg); | ||
170 | EXPORT_SYMBOL(sbus_unmap_sg); | ||
171 | EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu); | ||
172 | EXPORT_SYMBOL(sbus_dma_sync_single_for_device); | ||
173 | #endif | 165 | #endif |
174 | EXPORT_SYMBOL(outsb); | 166 | EXPORT_SYMBOL(outsb); |
175 | EXPORT_SYMBOL(outsw); | 167 | EXPORT_SYMBOL(outsw); |
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index c5ab44fc13df..f607e59bffae 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -680,7 +680,7 @@ fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int d | |||
680 | { | 680 | { |
681 | struct sbus_dev *sdev = fore200e->bus_dev; | 681 | struct sbus_dev *sdev = fore200e->bus_dev; |
682 | struct device *dev = &sdev->ofdev.dev; | 682 | struct device *dev = &sdev->ofdev.dev; |
683 | u32 dma_addr = sbus_map_single(dev, virt_addr, size, direction); | 683 | u32 dma_addr = dma_map_single(dev, virt_addr, size, direction); |
684 | 684 | ||
685 | DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n", | 685 | DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n", |
686 | virt_addr, size, direction, dma_addr); | 686 | virt_addr, size, direction, dma_addr); |
@@ -698,7 +698,7 @@ fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int di | |||
698 | DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n", | 698 | DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n", |
699 | dma_addr, size, direction); | 699 | dma_addr, size, direction); |
700 | 700 | ||
701 | sbus_unmap_single(dev, dma_addr, size, direction); | 701 | dma_unmap_single(dev, dma_addr, size, direction); |
702 | } | 702 | } |
703 | 703 | ||
704 | 704 | ||
@@ -710,7 +710,7 @@ fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, | |||
710 | 710 | ||
711 | DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); | 711 | DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); |
712 | 712 | ||
713 | sbus_dma_sync_single_for_cpu(dev, dma_addr, size, direction); | 713 | dma_sync_single_for_cpu(dev, dma_addr, size, direction); |
714 | } | 714 | } |
715 | 715 | ||
716 | static void | 716 | static void |
@@ -721,7 +721,7 @@ fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int si | |||
721 | 721 | ||
722 | DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); | 722 | DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); |
723 | 723 | ||
724 | sbus_dma_sync_single_for_device(dev, dma_addr, size, direction); | 724 | dma_sync_single_for_device(dev, dma_addr, size, direction); |
725 | } | 725 | } |
726 | 726 | ||
727 | 727 | ||
@@ -738,8 +738,8 @@ fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, | |||
738 | chunk->alloc_size = chunk->align_size = size * nbr; | 738 | chunk->alloc_size = chunk->align_size = size * nbr; |
739 | 739 | ||
740 | /* returned chunks are page-aligned */ | 740 | /* returned chunks are page-aligned */ |
741 | chunk->alloc_addr = sbus_alloc_consistent(dev, chunk->alloc_size, | 741 | chunk->alloc_addr = dma_alloc_coherent(dev, chunk->alloc_size, |
742 | &chunk->dma_addr); | 742 | &chunk->dma_addr, GFP_ATOMIC); |
743 | 743 | ||
744 | if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) | 744 | if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) |
745 | return -ENOMEM; | 745 | return -ENOMEM; |
@@ -758,8 +758,8 @@ fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) | |||
758 | struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev; | 758 | struct sbus_dev *sdev = (struct sbus_dev *) fore200e->bus_dev; |
759 | struct device *dev = &sdev->ofdev.dev; | 759 | struct device *dev = &sdev->ofdev.dev; |
760 | 760 | ||
761 | sbus_free_consistent(dev, chunk->alloc_size, | 761 | dma_free_coherent(dev, chunk->alloc_size, |
762 | chunk->alloc_addr, chunk->dma_addr); | 762 | chunk->alloc_addr, chunk->dma_addr); |
763 | } | 763 | } |
764 | 764 | ||
765 | 765 | ||
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index c17462159d9d..858880b619ce 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -22,6 +22,7 @@ static char version[] = | |||
22 | #include <linux/etherdevice.h> | 22 | #include <linux/etherdevice.h> |
23 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
24 | #include <linux/bitops.h> | 24 | #include <linux/bitops.h> |
25 | #include <linux/dma-mapping.h> | ||
25 | 26 | ||
26 | #include <net/dst.h> | 27 | #include <net/dst.h> |
27 | #include <net/arp.h> | 28 | #include <net/arp.h> |
@@ -243,8 +244,8 @@ static void myri_clean_rings(struct myri_eth *mp) | |||
243 | u32 dma_addr; | 244 | u32 dma_addr; |
244 | 245 | ||
245 | dma_addr = sbus_readl(&rxd->myri_scatters[0].addr); | 246 | dma_addr = sbus_readl(&rxd->myri_scatters[0].addr); |
246 | sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr, | 247 | dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr, |
247 | RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE); | 248 | RX_ALLOC_SIZE, DMA_FROM_DEVICE); |
248 | dev_kfree_skb(mp->rx_skbs[i]); | 249 | dev_kfree_skb(mp->rx_skbs[i]); |
249 | mp->rx_skbs[i] = NULL; | 250 | mp->rx_skbs[i] = NULL; |
250 | } | 251 | } |
@@ -260,9 +261,9 @@ static void myri_clean_rings(struct myri_eth *mp) | |||
260 | u32 dma_addr; | 261 | u32 dma_addr; |
261 | 262 | ||
262 | dma_addr = sbus_readl(&txd->myri_gathers[0].addr); | 263 | dma_addr = sbus_readl(&txd->myri_gathers[0].addr); |
263 | sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr, | 264 | dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr, |
264 | (skb->len + 3) & ~3, | 265 | (skb->len + 3) & ~3, |
265 | SBUS_DMA_TODEVICE); | 266 | DMA_TO_DEVICE); |
266 | dev_kfree_skb(mp->tx_skbs[i]); | 267 | dev_kfree_skb(mp->tx_skbs[i]); |
267 | mp->tx_skbs[i] = NULL; | 268 | mp->tx_skbs[i] = NULL; |
268 | } | 269 | } |
@@ -291,9 +292,9 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq) | |||
291 | skb->dev = dev; | 292 | skb->dev = dev; |
292 | skb_put(skb, RX_ALLOC_SIZE); | 293 | skb_put(skb, RX_ALLOC_SIZE); |
293 | 294 | ||
294 | dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev, | 295 | dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev, |
295 | skb->data, RX_ALLOC_SIZE, | 296 | skb->data, RX_ALLOC_SIZE, |
296 | SBUS_DMA_FROMDEVICE); | 297 | DMA_FROM_DEVICE); |
297 | sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr); | 298 | sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr); |
298 | sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len); | 299 | sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len); |
299 | sbus_writel(i, &rxd[i].ctx); | 300 | sbus_writel(i, &rxd[i].ctx); |
@@ -349,8 +350,8 @@ static void myri_tx(struct myri_eth *mp, struct net_device *dev) | |||
349 | 350 | ||
350 | DTX(("SKB[%d] ", entry)); | 351 | DTX(("SKB[%d] ", entry)); |
351 | dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr); | 352 | dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr); |
352 | sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr, | 353 | dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr, |
353 | skb->len, SBUS_DMA_TODEVICE); | 354 | skb->len, DMA_TO_DEVICE); |
354 | dev_kfree_skb(skb); | 355 | dev_kfree_skb(skb); |
355 | mp->tx_skbs[entry] = NULL; | 356 | mp->tx_skbs[entry] = NULL; |
356 | dev->stats.tx_packets++; | 357 | dev->stats.tx_packets++; |
@@ -429,9 +430,9 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
429 | 430 | ||
430 | /* Check for errors. */ | 431 | /* Check for errors. */ |
431 | DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum)); | 432 | DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum)); |
432 | sbus_dma_sync_single_for_cpu(&mp->myri_sdev->ofdev.dev, | 433 | dma_sync_single_for_cpu(&mp->myri_sdev->ofdev.dev, |
433 | sbus_readl(&rxd->myri_scatters[0].addr), | 434 | sbus_readl(&rxd->myri_scatters[0].addr), |
434 | RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE); | 435 | RX_ALLOC_SIZE, DMA_FROM_DEVICE); |
435 | if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) { | 436 | if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) { |
436 | DRX(("ERROR[")); | 437 | DRX(("ERROR[")); |
437 | dev->stats.rx_errors++; | 438 | dev->stats.rx_errors++; |
@@ -448,10 +449,10 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
448 | drops++; | 449 | drops++; |
449 | DRX(("DROP ")); | 450 | DRX(("DROP ")); |
450 | dev->stats.rx_dropped++; | 451 | dev->stats.rx_dropped++; |
451 | sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev, | 452 | dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev, |
452 | sbus_readl(&rxd->myri_scatters[0].addr), | 453 | sbus_readl(&rxd->myri_scatters[0].addr), |
453 | RX_ALLOC_SIZE, | 454 | RX_ALLOC_SIZE, |
454 | SBUS_DMA_FROMDEVICE); | 455 | DMA_FROM_DEVICE); |
455 | sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); | 456 | sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); |
456 | sbus_writel(index, &rxd->ctx); | 457 | sbus_writel(index, &rxd->ctx); |
457 | sbus_writel(1, &rxd->num_sg); | 458 | sbus_writel(1, &rxd->num_sg); |
@@ -470,17 +471,17 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
470 | DRX(("skb_alloc(FAILED) ")); | 471 | DRX(("skb_alloc(FAILED) ")); |
471 | goto drop_it; | 472 | goto drop_it; |
472 | } | 473 | } |
473 | sbus_unmap_single(&mp->myri_sdev->ofdev.dev, | 474 | dma_unmap_single(&mp->myri_sdev->ofdev.dev, |
474 | sbus_readl(&rxd->myri_scatters[0].addr), | 475 | sbus_readl(&rxd->myri_scatters[0].addr), |
475 | RX_ALLOC_SIZE, | 476 | RX_ALLOC_SIZE, |
476 | SBUS_DMA_FROMDEVICE); | 477 | DMA_FROM_DEVICE); |
477 | mp->rx_skbs[index] = new_skb; | 478 | mp->rx_skbs[index] = new_skb; |
478 | new_skb->dev = dev; | 479 | new_skb->dev = dev; |
479 | skb_put(new_skb, RX_ALLOC_SIZE); | 480 | skb_put(new_skb, RX_ALLOC_SIZE); |
480 | dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev, | 481 | dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev, |
481 | new_skb->data, | 482 | new_skb->data, |
482 | RX_ALLOC_SIZE, | 483 | RX_ALLOC_SIZE, |
483 | SBUS_DMA_FROMDEVICE); | 484 | DMA_FROM_DEVICE); |
484 | sbus_writel(dma_addr, &rxd->myri_scatters[0].addr); | 485 | sbus_writel(dma_addr, &rxd->myri_scatters[0].addr); |
485 | sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); | 486 | sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); |
486 | sbus_writel(index, &rxd->ctx); | 487 | sbus_writel(index, &rxd->ctx); |
@@ -506,10 +507,10 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) | |||
506 | 507 | ||
507 | /* Reuse original ring buffer. */ | 508 | /* Reuse original ring buffer. */ |
508 | DRX(("reuse ")); | 509 | DRX(("reuse ")); |
509 | sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev, | 510 | dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev, |
510 | sbus_readl(&rxd->myri_scatters[0].addr), | 511 | sbus_readl(&rxd->myri_scatters[0].addr), |
511 | RX_ALLOC_SIZE, | 512 | RX_ALLOC_SIZE, |
512 | SBUS_DMA_FROMDEVICE); | 513 | DMA_FROM_DEVICE); |
513 | sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); | 514 | sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len); |
514 | sbus_writel(index, &rxd->ctx); | 515 | sbus_writel(index, &rxd->ctx); |
515 | sbus_writel(1, &rxd->num_sg); | 516 | sbus_writel(1, &rxd->num_sg); |
@@ -658,8 +659,8 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
658 | sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]); | 659 | sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]); |
659 | } | 660 | } |
660 | 661 | ||
661 | dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev, skb->data, | 662 | dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev, skb->data, |
662 | len, SBUS_DMA_TODEVICE); | 663 | len, DMA_TO_DEVICE); |
663 | sbus_writel(dma_addr, &txd->myri_gathers[0].addr); | 664 | sbus_writel(dma_addr, &txd->myri_gathers[0].addr); |
664 | sbus_writel(len, &txd->myri_gathers[0].len); | 665 | sbus_writel(len, &txd->myri_gathers[0].len); |
665 | sbus_writel(1, &txd->num_sg); | 666 | sbus_writel(1, &txd->num_sg); |
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index b92218c2f76c..8fe4c49b0623 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/etherdevice.h> | 23 | #include <linux/etherdevice.h> |
24 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
25 | #include <linux/bitops.h> | 25 | #include <linux/bitops.h> |
26 | #include <linux/dma-mapping.h> | ||
26 | 27 | ||
27 | #include <asm/auxio.h> | 28 | #include <asm/auxio.h> |
28 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
@@ -239,9 +240,10 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq) | |||
239 | skb_reserve(skb, 34); | 240 | skb_reserve(skb, 34); |
240 | 241 | ||
241 | bb->be_rxd[i].rx_addr = | 242 | bb->be_rxd[i].rx_addr = |
242 | sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data, | 243 | dma_map_single(&bp->bigmac_sdev->ofdev.dev, |
243 | RX_BUF_ALLOC_SIZE - 34, | 244 | skb->data, |
244 | SBUS_DMA_FROMDEVICE); | 245 | RX_BUF_ALLOC_SIZE - 34, |
246 | DMA_FROM_DEVICE); | ||
245 | bb->be_rxd[i].rx_flags = | 247 | bb->be_rxd[i].rx_flags = |
246 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); | 248 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); |
247 | } | 249 | } |
@@ -776,9 +778,9 @@ static void bigmac_tx(struct bigmac *bp) | |||
776 | skb = bp->tx_skbs[elem]; | 778 | skb = bp->tx_skbs[elem]; |
777 | bp->enet_stats.tx_packets++; | 779 | bp->enet_stats.tx_packets++; |
778 | bp->enet_stats.tx_bytes += skb->len; | 780 | bp->enet_stats.tx_bytes += skb->len; |
779 | sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev, | 781 | dma_unmap_single(&bp->bigmac_sdev->ofdev.dev, |
780 | this->tx_addr, skb->len, | 782 | this->tx_addr, skb->len, |
781 | SBUS_DMA_TODEVICE); | 783 | DMA_TO_DEVICE); |
782 | 784 | ||
783 | DTX(("skb(%p) ", skb)); | 785 | DTX(("skb(%p) ", skb)); |
784 | bp->tx_skbs[elem] = NULL; | 786 | bp->tx_skbs[elem] = NULL; |
@@ -831,19 +833,19 @@ static void bigmac_rx(struct bigmac *bp) | |||
831 | drops++; | 833 | drops++; |
832 | goto drop_it; | 834 | goto drop_it; |
833 | } | 835 | } |
834 | sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev, | 836 | dma_unmap_single(&bp->bigmac_sdev->ofdev.dev, |
835 | this->rx_addr, | 837 | this->rx_addr, |
836 | RX_BUF_ALLOC_SIZE - 34, | 838 | RX_BUF_ALLOC_SIZE - 34, |
837 | SBUS_DMA_FROMDEVICE); | 839 | DMA_FROM_DEVICE); |
838 | bp->rx_skbs[elem] = new_skb; | 840 | bp->rx_skbs[elem] = new_skb; |
839 | new_skb->dev = bp->dev; | 841 | new_skb->dev = bp->dev; |
840 | skb_put(new_skb, ETH_FRAME_LEN); | 842 | skb_put(new_skb, ETH_FRAME_LEN); |
841 | skb_reserve(new_skb, 34); | 843 | skb_reserve(new_skb, 34); |
842 | this->rx_addr = | 844 | this->rx_addr = |
843 | sbus_map_single(&bp->bigmac_sdev->ofdev.dev, | 845 | dma_map_single(&bp->bigmac_sdev->ofdev.dev, |
844 | new_skb->data, | 846 | new_skb->data, |
845 | RX_BUF_ALLOC_SIZE - 34, | 847 | RX_BUF_ALLOC_SIZE - 34, |
846 | SBUS_DMA_FROMDEVICE); | 848 | DMA_FROM_DEVICE); |
847 | this->rx_flags = | 849 | this->rx_flags = |
848 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); | 850 | (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); |
849 | 851 | ||
@@ -858,13 +860,13 @@ static void bigmac_rx(struct bigmac *bp) | |||
858 | } | 860 | } |
859 | skb_reserve(copy_skb, 2); | 861 | skb_reserve(copy_skb, 2); |
860 | skb_put(copy_skb, len); | 862 | skb_put(copy_skb, len); |
861 | sbus_dma_sync_single_for_cpu(&bp->bigmac_sdev->ofdev.dev, | 863 | dma_sync_single_for_cpu(&bp->bigmac_sdev->ofdev.dev, |
862 | this->rx_addr, len, | 864 | this->rx_addr, len, |
863 | SBUS_DMA_FROMDEVICE); | 865 | DMA_FROM_DEVICE); |
864 | skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); | 866 | skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); |
865 | sbus_dma_sync_single_for_device(&bp->bigmac_sdev->ofdev.dev, | 867 | dma_sync_single_for_device(&bp->bigmac_sdev->ofdev.dev, |
866 | this->rx_addr, len, | 868 | this->rx_addr, len, |
867 | SBUS_DMA_FROMDEVICE); | 869 | DMA_FROM_DEVICE); |
868 | 870 | ||
869 | /* Reuse original ring buffer. */ | 871 | /* Reuse original ring buffer. */ |
870 | this->rx_flags = | 872 | this->rx_flags = |
@@ -960,8 +962,8 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
960 | u32 mapping; | 962 | u32 mapping; |
961 | 963 | ||
962 | len = skb->len; | 964 | len = skb->len; |
963 | mapping = sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data, | 965 | mapping = dma_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data, |
964 | len, SBUS_DMA_TODEVICE); | 966 | len, DMA_TO_DEVICE); |
965 | 967 | ||
966 | /* Avoid a race... */ | 968 | /* Avoid a race... */ |
967 | spin_lock_irq(&bp->lock); | 969 | spin_lock_irq(&bp->lock); |
@@ -1185,9 +1187,9 @@ static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev) | |||
1185 | bigmac_stop(bp); | 1187 | bigmac_stop(bp); |
1186 | 1188 | ||
1187 | /* Allocate transmit/receive descriptor DVMA block. */ | 1189 | /* Allocate transmit/receive descriptor DVMA block. */ |
1188 | bp->bmac_block = sbus_alloc_consistent(&bp->bigmac_sdev->ofdev.dev, | 1190 | bp->bmac_block = dma_alloc_coherent(&bp->bigmac_sdev->ofdev.dev, |
1189 | PAGE_SIZE, | 1191 | PAGE_SIZE, |
1190 | &bp->bblock_dvma); | 1192 | &bp->bblock_dvma, GFP_ATOMIC); |
1191 | if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { | 1193 | if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { |
1192 | printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n"); | 1194 | printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n"); |
1193 | goto fail_and_cleanup; | 1195 | goto fail_and_cleanup; |
@@ -1247,10 +1249,10 @@ fail_and_cleanup: | |||
1247 | sbus_iounmap(bp->tregs, TCVR_REG_SIZE); | 1249 | sbus_iounmap(bp->tregs, TCVR_REG_SIZE); |
1248 | 1250 | ||
1249 | if (bp->bmac_block) | 1251 | if (bp->bmac_block) |
1250 | sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev, | 1252 | dma_free_coherent(&bp->bigmac_sdev->ofdev.dev, |
1251 | PAGE_SIZE, | 1253 | PAGE_SIZE, |
1252 | bp->bmac_block, | 1254 | bp->bmac_block, |
1253 | bp->bblock_dvma); | 1255 | bp->bblock_dvma); |
1254 | 1256 | ||
1255 | /* This also frees the co-located 'dev->priv' */ | 1257 | /* This also frees the co-located 'dev->priv' */ |
1256 | free_netdev(dev); | 1258 | free_netdev(dev); |
@@ -1282,10 +1284,10 @@ static int __devexit bigmac_sbus_remove(struct of_device *dev) | |||
1282 | sbus_iounmap(bp->creg, CREG_REG_SIZE); | 1284 | sbus_iounmap(bp->creg, CREG_REG_SIZE); |
1283 | sbus_iounmap(bp->bregs, BMAC_REG_SIZE); | 1285 | sbus_iounmap(bp->bregs, BMAC_REG_SIZE); |
1284 | sbus_iounmap(bp->tregs, TCVR_REG_SIZE); | 1286 | sbus_iounmap(bp->tregs, TCVR_REG_SIZE); |
1285 | sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev, | 1287 | dma_free_coherent(&bp->bigmac_sdev->ofdev.dev, |
1286 | PAGE_SIZE, | 1288 | PAGE_SIZE, |
1287 | bp->bmac_block, | 1289 | bp->bmac_block, |
1288 | bp->bblock_dvma); | 1290 | bp->bblock_dvma); |
1289 | 1291 | ||
1290 | free_netdev(net_dev); | 1292 | free_netdev(net_dev); |
1291 | 1293 | ||
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index cd93fc5e826a..69cc77192961 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/skbuff.h> | 34 | #include <linux/skbuff.h> |
35 | #include <linux/mm.h> | 35 | #include <linux/mm.h> |
36 | #include <linux/bitops.h> | 36 | #include <linux/bitops.h> |
37 | #include <linux/dma-mapping.h> | ||
37 | 38 | ||
38 | #include <asm/system.h> | 39 | #include <asm/system.h> |
39 | #include <asm/io.h> | 40 | #include <asm/io.h> |
@@ -277,13 +278,13 @@ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ | |||
277 | } while(0) | 278 | } while(0) |
278 | #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) | 279 | #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) |
279 | #define hme_dma_map(__hp, __ptr, __size, __dir) \ | 280 | #define hme_dma_map(__hp, __ptr, __size, __dir) \ |
280 | sbus_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) | 281 | dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) |
281 | #define hme_dma_unmap(__hp, __addr, __size, __dir) \ | 282 | #define hme_dma_unmap(__hp, __addr, __size, __dir) \ |
282 | sbus_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) | 283 | dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) |
283 | #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ | 284 | #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ |
284 | sbus_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) | 285 | dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) |
285 | #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ | 286 | #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ |
286 | sbus_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) | 287 | dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) |
287 | #else | 288 | #else |
288 | /* PCI only compilation */ | 289 | /* PCI only compilation */ |
289 | #define hme_write32(__hp, __reg, __val) \ | 290 | #define hme_write32(__hp, __reg, __val) \ |
@@ -316,25 +317,6 @@ static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) | |||
316 | #endif | 317 | #endif |
317 | 318 | ||
318 | 319 | ||
319 | #ifdef SBUS_DMA_BIDIRECTIONAL | ||
320 | # define DMA_BIDIRECTIONAL SBUS_DMA_BIDIRECTIONAL | ||
321 | #else | ||
322 | # define DMA_BIDIRECTIONAL 0 | ||
323 | #endif | ||
324 | |||
325 | #ifdef SBUS_DMA_FROMDEVICE | ||
326 | # define DMA_FROMDEVICE SBUS_DMA_FROMDEVICE | ||
327 | #else | ||
328 | # define DMA_TODEVICE 1 | ||
329 | #endif | ||
330 | |||
331 | #ifdef SBUS_DMA_TODEVICE | ||
332 | # define DMA_TODEVICE SBUS_DMA_TODEVICE | ||
333 | #else | ||
334 | # define DMA_FROMDEVICE 2 | ||
335 | #endif | ||
336 | |||
337 | |||
338 | /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */ | 320 | /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */ |
339 | static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit) | 321 | static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit) |
340 | { | 322 | { |
@@ -1224,7 +1206,7 @@ static void happy_meal_clean_rings(struct happy_meal *hp) | |||
1224 | 1206 | ||
1225 | rxd = &hp->happy_block->happy_meal_rxd[i]; | 1207 | rxd = &hp->happy_block->happy_meal_rxd[i]; |
1226 | dma_addr = hme_read_desc32(hp, &rxd->rx_addr); | 1208 | dma_addr = hme_read_desc32(hp, &rxd->rx_addr); |
1227 | hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE); | 1209 | hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); |
1228 | dev_kfree_skb_any(skb); | 1210 | dev_kfree_skb_any(skb); |
1229 | hp->rx_skbs[i] = NULL; | 1211 | hp->rx_skbs[i] = NULL; |
1230 | } | 1212 | } |
@@ -1245,7 +1227,7 @@ static void happy_meal_clean_rings(struct happy_meal *hp) | |||
1245 | hme_dma_unmap(hp, dma_addr, | 1227 | hme_dma_unmap(hp, dma_addr, |
1246 | (hme_read_desc32(hp, &txd->tx_flags) | 1228 | (hme_read_desc32(hp, &txd->tx_flags) |
1247 | & TXFLAG_SIZE), | 1229 | & TXFLAG_SIZE), |
1248 | DMA_TODEVICE); | 1230 | DMA_TO_DEVICE); |
1249 | 1231 | ||
1250 | if (frag != skb_shinfo(skb)->nr_frags) | 1232 | if (frag != skb_shinfo(skb)->nr_frags) |
1251 | i++; | 1233 | i++; |
@@ -1287,7 +1269,7 @@ static void happy_meal_init_rings(struct happy_meal *hp) | |||
1287 | skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); | 1269 | skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); |
1288 | hme_write_rxd(hp, &hb->happy_meal_rxd[i], | 1270 | hme_write_rxd(hp, &hb->happy_meal_rxd[i], |
1289 | (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), | 1271 | (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), |
1290 | hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE)); | 1272 | hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE)); |
1291 | skb_reserve(skb, RX_OFFSET); | 1273 | skb_reserve(skb, RX_OFFSET); |
1292 | } | 1274 | } |
1293 | 1275 | ||
@@ -1966,7 +1948,7 @@ static void happy_meal_tx(struct happy_meal *hp) | |||
1966 | dma_len = hme_read_desc32(hp, &this->tx_flags); | 1948 | dma_len = hme_read_desc32(hp, &this->tx_flags); |
1967 | 1949 | ||
1968 | dma_len &= TXFLAG_SIZE; | 1950 | dma_len &= TXFLAG_SIZE; |
1969 | hme_dma_unmap(hp, dma_addr, dma_len, DMA_TODEVICE); | 1951 | hme_dma_unmap(hp, dma_addr, dma_len, DMA_TO_DEVICE); |
1970 | 1952 | ||
1971 | elem = NEXT_TX(elem); | 1953 | elem = NEXT_TX(elem); |
1972 | this = &txbase[elem]; | 1954 | this = &txbase[elem]; |
@@ -2044,13 +2026,13 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) | |||
2044 | drops++; | 2026 | drops++; |
2045 | goto drop_it; | 2027 | goto drop_it; |
2046 | } | 2028 | } |
2047 | hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE); | 2029 | hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); |
2048 | hp->rx_skbs[elem] = new_skb; | 2030 | hp->rx_skbs[elem] = new_skb; |
2049 | new_skb->dev = dev; | 2031 | new_skb->dev = dev; |
2050 | skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); | 2032 | skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); |
2051 | hme_write_rxd(hp, this, | 2033 | hme_write_rxd(hp, this, |
2052 | (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), | 2034 | (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), |
2053 | hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE)); | 2035 | hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE)); |
2054 | skb_reserve(new_skb, RX_OFFSET); | 2036 | skb_reserve(new_skb, RX_OFFSET); |
2055 | 2037 | ||
2056 | /* Trim the original skb for the netif. */ | 2038 | /* Trim the original skb for the netif. */ |
@@ -2065,9 +2047,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) | |||
2065 | 2047 | ||
2066 | skb_reserve(copy_skb, 2); | 2048 | skb_reserve(copy_skb, 2); |
2067 | skb_put(copy_skb, len); | 2049 | skb_put(copy_skb, len); |
2068 | hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE); | 2050 | hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROM_DEVICE); |
2069 | skb_copy_from_linear_data(skb, copy_skb->data, len); | 2051 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
2070 | hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE); | 2052 | hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROM_DEVICE); |
2071 | 2053 | ||
2072 | /* Reuse original ring buffer. */ | 2054 | /* Reuse original ring buffer. */ |
2073 | hme_write_rxd(hp, this, | 2055 | hme_write_rxd(hp, this, |
@@ -2300,7 +2282,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2300 | u32 mapping, len; | 2282 | u32 mapping, len; |
2301 | 2283 | ||
2302 | len = skb->len; | 2284 | len = skb->len; |
2303 | mapping = hme_dma_map(hp, skb->data, len, DMA_TODEVICE); | 2285 | mapping = hme_dma_map(hp, skb->data, len, DMA_TO_DEVICE); |
2304 | tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); | 2286 | tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); |
2305 | hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], | 2287 | hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], |
2306 | (tx_flags | (len & TXFLAG_SIZE)), | 2288 | (tx_flags | (len & TXFLAG_SIZE)), |
@@ -2314,7 +2296,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2314 | * Otherwise we could race with the device. | 2296 | * Otherwise we could race with the device. |
2315 | */ | 2297 | */ |
2316 | first_len = skb_headlen(skb); | 2298 | first_len = skb_headlen(skb); |
2317 | first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TODEVICE); | 2299 | first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TO_DEVICE); |
2318 | entry = NEXT_TX(entry); | 2300 | entry = NEXT_TX(entry); |
2319 | 2301 | ||
2320 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 2302 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
@@ -2325,7 +2307,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2325 | mapping = hme_dma_map(hp, | 2307 | mapping = hme_dma_map(hp, |
2326 | ((void *) page_address(this_frag->page) + | 2308 | ((void *) page_address(this_frag->page) + |
2327 | this_frag->page_offset), | 2309 | this_frag->page_offset), |
2328 | len, DMA_TODEVICE); | 2310 | len, DMA_TO_DEVICE); |
2329 | this_txflags = tx_flags; | 2311 | this_txflags = tx_flags; |
2330 | if (frag == skb_shinfo(skb)->nr_frags - 1) | 2312 | if (frag == skb_shinfo(skb)->nr_frags - 1) |
2331 | this_txflags |= TXFLAG_EOP; | 2313 | this_txflags |= TXFLAG_EOP; |
@@ -2786,9 +2768,10 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe | |||
2786 | hp->happy_bursts = of_getintprop_default(sdev->bus->ofdev.node, | 2768 | hp->happy_bursts = of_getintprop_default(sdev->bus->ofdev.node, |
2787 | "burst-sizes", 0x00); | 2769 | "burst-sizes", 0x00); |
2788 | 2770 | ||
2789 | hp->happy_block = sbus_alloc_consistent(hp->dma_dev, | 2771 | hp->happy_block = dma_alloc_coherent(hp->dma_dev, |
2790 | PAGE_SIZE, | 2772 | PAGE_SIZE, |
2791 | &hp->hblock_dvma); | 2773 | &hp->hblock_dvma, |
2774 | GFP_ATOMIC); | ||
2792 | err = -ENOMEM; | 2775 | err = -ENOMEM; |
2793 | if (!hp->happy_block) { | 2776 | if (!hp->happy_block) { |
2794 | printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n"); | 2777 | printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n"); |
@@ -2824,12 +2807,12 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe | |||
2824 | hp->read_desc32 = sbus_hme_read_desc32; | 2807 | hp->read_desc32 = sbus_hme_read_desc32; |
2825 | hp->write_txd = sbus_hme_write_txd; | 2808 | hp->write_txd = sbus_hme_write_txd; |
2826 | hp->write_rxd = sbus_hme_write_rxd; | 2809 | hp->write_rxd = sbus_hme_write_rxd; |
2827 | hp->dma_map = (u32 (*)(void *, void *, long, int))sbus_map_single; | 2810 | hp->dma_map = (u32 (*)(void *, void *, long, int))dma_map_single; |
2828 | hp->dma_unmap = (void (*)(void *, u32, long, int))sbus_unmap_single; | 2811 | hp->dma_unmap = (void (*)(void *, u32, long, int))dma_unmap_single; |
2829 | hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int)) | 2812 | hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int)) |
2830 | sbus_dma_sync_single_for_cpu; | 2813 | dma_sync_single_for_cpu; |
2831 | hp->dma_sync_for_device = (void (*)(void *, u32, long, int)) | 2814 | hp->dma_sync_for_device = (void (*)(void *, u32, long, int)) |
2832 | sbus_dma_sync_single_for_device; | 2815 | dma_sync_single_for_device; |
2833 | hp->read32 = sbus_hme_read32; | 2816 | hp->read32 = sbus_hme_read32; |
2834 | hp->write32 = sbus_hme_write32; | 2817 | hp->write32 = sbus_hme_write32; |
2835 | #endif | 2818 | #endif |
@@ -2844,7 +2827,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe | |||
2844 | if (register_netdev(hp->dev)) { | 2827 | if (register_netdev(hp->dev)) { |
2845 | printk(KERN_ERR "happymeal: Cannot register net device, " | 2828 | printk(KERN_ERR "happymeal: Cannot register net device, " |
2846 | "aborting.\n"); | 2829 | "aborting.\n"); |
2847 | goto err_out_free_consistent; | 2830 | goto err_out_free_coherent; |
2848 | } | 2831 | } |
2849 | 2832 | ||
2850 | dev_set_drvdata(&sdev->ofdev.dev, hp); | 2833 | dev_set_drvdata(&sdev->ofdev.dev, hp); |
@@ -2860,11 +2843,11 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe | |||
2860 | 2843 | ||
2861 | return 0; | 2844 | return 0; |
2862 | 2845 | ||
2863 | err_out_free_consistent: | 2846 | err_out_free_coherent: |
2864 | sbus_free_consistent(hp->dma_dev, | 2847 | dma_free_coherent(hp->dma_dev, |
2865 | PAGE_SIZE, | 2848 | PAGE_SIZE, |
2866 | hp->happy_block, | 2849 | hp->happy_block, |
2867 | hp->hblock_dvma); | 2850 | hp->hblock_dvma); |
2868 | 2851 | ||
2869 | err_out_iounmap: | 2852 | err_out_iounmap: |
2870 | if (hp->gregs) | 2853 | if (hp->gregs) |
@@ -3308,10 +3291,10 @@ static int __devexit hme_sbus_remove(struct of_device *dev) | |||
3308 | sbus_iounmap(hp->erxregs, ERX_REG_SIZE); | 3291 | sbus_iounmap(hp->erxregs, ERX_REG_SIZE); |
3309 | sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE); | 3292 | sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE); |
3310 | sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE); | 3293 | sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE); |
3311 | sbus_free_consistent(hp->dma_dev, | 3294 | dma_free_coherent(hp->dma_dev, |
3312 | PAGE_SIZE, | 3295 | PAGE_SIZE, |
3313 | hp->happy_block, | 3296 | hp->happy_block, |
3314 | hp->hblock_dvma); | 3297 | hp->hblock_dvma); |
3315 | 3298 | ||
3316 | free_netdev(net_dev); | 3299 | free_netdev(net_dev); |
3317 | 3300 | ||
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 4f4baf9f4ec8..65758881d7aa 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -91,6 +91,7 @@ static char lancestr[] = "LANCE"; | |||
91 | #include <linux/skbuff.h> | 91 | #include <linux/skbuff.h> |
92 | #include <linux/ethtool.h> | 92 | #include <linux/ethtool.h> |
93 | #include <linux/bitops.h> | 93 | #include <linux/bitops.h> |
94 | #include <linux/dma-mapping.h> | ||
94 | 95 | ||
95 | #include <asm/system.h> | 96 | #include <asm/system.h> |
96 | #include <asm/io.h> | 97 | #include <asm/io.h> |
@@ -1283,10 +1284,10 @@ static void lance_free_hwresources(struct lance_private *lp) | |||
1283 | sbus_iounmap(lp->init_block_iomem, | 1284 | sbus_iounmap(lp->init_block_iomem, |
1284 | sizeof(struct lance_init_block)); | 1285 | sizeof(struct lance_init_block)); |
1285 | } else if (lp->init_block_mem) { | 1286 | } else if (lp->init_block_mem) { |
1286 | sbus_free_consistent(&lp->sdev->ofdev.dev, | 1287 | dma_free_coherent(&lp->sdev->ofdev.dev, |
1287 | sizeof(struct lance_init_block), | 1288 | sizeof(struct lance_init_block), |
1288 | lp->init_block_mem, | 1289 | lp->init_block_mem, |
1289 | lp->init_block_dvma); | 1290 | lp->init_block_dvma); |
1290 | } | 1291 | } |
1291 | } | 1292 | } |
1292 | 1293 | ||
@@ -1384,9 +1385,9 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev, | |||
1384 | lp->tx = lance_tx_pio; | 1385 | lp->tx = lance_tx_pio; |
1385 | } else { | 1386 | } else { |
1386 | lp->init_block_mem = | 1387 | lp->init_block_mem = |
1387 | sbus_alloc_consistent(&sdev->ofdev.dev, | 1388 | dma_alloc_coherent(&sdev->ofdev.dev, |
1388 | sizeof(struct lance_init_block), | 1389 | sizeof(struct lance_init_block), |
1389 | &lp->init_block_dvma); | 1390 | &lp->init_block_dvma, GFP_ATOMIC); |
1390 | if (!lp->init_block_mem || lp->init_block_dvma == 0) { | 1391 | if (!lp->init_block_mem || lp->init_block_dvma == 0) { |
1391 | printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n"); | 1392 | printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n"); |
1392 | goto fail; | 1393 | goto fail; |
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index ac8049cab247..66f66ee8ca63 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
25 | #include <linux/ethtool.h> | 25 | #include <linux/ethtool.h> |
26 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
27 | #include <linux/dma-mapping.h> | ||
27 | 28 | ||
28 | #include <asm/system.h> | 29 | #include <asm/system.h> |
29 | #include <asm/io.h> | 30 | #include <asm/io.h> |
@@ -879,12 +880,12 @@ static int __devinit qec_ether_init(struct sbus_dev *sdev) | |||
879 | goto fail; | 880 | goto fail; |
880 | } | 881 | } |
881 | 882 | ||
882 | qe->qe_block = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev, | 883 | qe->qe_block = dma_alloc_coherent(&qe->qe_sdev->ofdev.dev, |
883 | PAGE_SIZE, | 884 | PAGE_SIZE, |
884 | &qe->qblock_dvma); | 885 | &qe->qblock_dvma, GFP_ATOMIC); |
885 | qe->buffers = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev, | 886 | qe->buffers = dma_alloc_coherent(&qe->qe_sdev->ofdev.dev, |
886 | sizeof(struct sunqe_buffers), | 887 | sizeof(struct sunqe_buffers), |
887 | &qe->buffers_dvma); | 888 | &qe->buffers_dvma, GFP_ATOMIC); |
888 | if (qe->qe_block == NULL || qe->qblock_dvma == 0 || | 889 | if (qe->qe_block == NULL || qe->qblock_dvma == 0 || |
889 | qe->buffers == NULL || qe->buffers_dvma == 0) | 890 | qe->buffers == NULL || qe->buffers_dvma == 0) |
890 | goto fail; | 891 | goto fail; |
@@ -926,15 +927,15 @@ fail: | |||
926 | if (qe->mregs) | 927 | if (qe->mregs) |
927 | sbus_iounmap(qe->mregs, MREGS_REG_SIZE); | 928 | sbus_iounmap(qe->mregs, MREGS_REG_SIZE); |
928 | if (qe->qe_block) | 929 | if (qe->qe_block) |
929 | sbus_free_consistent(&qe->qe_sdev->ofdev.dev, | 930 | dma_free_coherent(&qe->qe_sdev->ofdev.dev, |
930 | PAGE_SIZE, | 931 | PAGE_SIZE, |
931 | qe->qe_block, | 932 | qe->qe_block, |
932 | qe->qblock_dvma); | 933 | qe->qblock_dvma); |
933 | if (qe->buffers) | 934 | if (qe->buffers) |
934 | sbus_free_consistent(&qe->qe_sdev->ofdev.dev, | 935 | dma_free_coherent(&qe->qe_sdev->ofdev.dev, |
935 | sizeof(struct sunqe_buffers), | 936 | sizeof(struct sunqe_buffers), |
936 | qe->buffers, | 937 | qe->buffers, |
937 | qe->buffers_dvma); | 938 | qe->buffers_dvma); |
938 | 939 | ||
939 | free_netdev(dev); | 940 | free_netdev(dev); |
940 | 941 | ||
@@ -957,14 +958,14 @@ static int __devexit qec_sbus_remove(struct of_device *dev) | |||
957 | 958 | ||
958 | sbus_iounmap(qp->qcregs, CREG_REG_SIZE); | 959 | sbus_iounmap(qp->qcregs, CREG_REG_SIZE); |
959 | sbus_iounmap(qp->mregs, MREGS_REG_SIZE); | 960 | sbus_iounmap(qp->mregs, MREGS_REG_SIZE); |
960 | sbus_free_consistent(&qp->qe_sdev->ofdev.dev, | 961 | dma_free_coherent(&qp->qe_sdev->ofdev.dev, |
961 | PAGE_SIZE, | 962 | PAGE_SIZE, |
962 | qp->qe_block, | 963 | qp->qe_block, |
963 | qp->qblock_dvma); | 964 | qp->qblock_dvma); |
964 | sbus_free_consistent(&qp->qe_sdev->ofdev.dev, | 965 | dma_free_coherent(&qp->qe_sdev->ofdev.dev, |
965 | sizeof(struct sunqe_buffers), | 966 | sizeof(struct sunqe_buffers), |
966 | qp->buffers, | 967 | qp->buffers, |
967 | qp->buffers_dvma); | 968 | qp->buffers_dvma); |
968 | 969 | ||
969 | free_netdev(net_dev); | 970 | free_netdev(net_dev); |
970 | 971 | ||
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index f010506af884..1559d455b2b7 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/jiffies.h> | 27 | #include <linux/jiffies.h> |
28 | #include <linux/dma-mapping.h> | ||
28 | 29 | ||
29 | #include <asm/byteorder.h> | 30 | #include <asm/byteorder.h> |
30 | 31 | ||
@@ -788,22 +789,22 @@ static int __devinit qpti_map_queues(struct qlogicpti *qpti) | |||
788 | struct sbus_dev *sdev = qpti->sdev; | 789 | struct sbus_dev *sdev = qpti->sdev; |
789 | 790 | ||
790 | #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) | 791 | #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) |
791 | qpti->res_cpu = sbus_alloc_consistent(&sdev->ofdev.dev, | 792 | qpti->res_cpu = dma_alloc_coherent(&sdev->ofdev.dev, |
792 | QSIZE(RES_QUEUE_LEN), | 793 | QSIZE(RES_QUEUE_LEN), |
793 | &qpti->res_dvma); | 794 | &qpti->res_dvma, GFP_ATOMIC); |
794 | if (qpti->res_cpu == NULL || | 795 | if (qpti->res_cpu == NULL || |
795 | qpti->res_dvma == 0) { | 796 | qpti->res_dvma == 0) { |
796 | printk("QPTI: Cannot map response queue.\n"); | 797 | printk("QPTI: Cannot map response queue.\n"); |
797 | return -1; | 798 | return -1; |
798 | } | 799 | } |
799 | 800 | ||
800 | qpti->req_cpu = sbus_alloc_consistent(&sdev->ofdev.dev, | 801 | qpti->req_cpu = dma_alloc_coherent(&sdev->ofdev.dev, |
801 | QSIZE(QLOGICPTI_REQ_QUEUE_LEN), | 802 | QSIZE(QLOGICPTI_REQ_QUEUE_LEN), |
802 | &qpti->req_dvma); | 803 | &qpti->req_dvma, GFP_ATOMIC); |
803 | if (qpti->req_cpu == NULL || | 804 | if (qpti->req_cpu == NULL || |
804 | qpti->req_dvma == 0) { | 805 | qpti->req_dvma == 0) { |
805 | sbus_free_consistent(&sdev->ofdev.dev, QSIZE(RES_QUEUE_LEN), | 806 | dma_free_coherent(&sdev->ofdev.dev, QSIZE(RES_QUEUE_LEN), |
806 | qpti->res_cpu, qpti->res_dvma); | 807 | qpti->res_cpu, qpti->res_dvma); |
807 | printk("QPTI: Cannot map request queue.\n"); | 808 | printk("QPTI: Cannot map request queue.\n"); |
808 | return -1; | 809 | return -1; |
809 | } | 810 | } |
@@ -875,9 +876,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, | |||
875 | int sg_count; | 876 | int sg_count; |
876 | 877 | ||
877 | sg = scsi_sglist(Cmnd); | 878 | sg = scsi_sglist(Cmnd); |
878 | sg_count = sbus_map_sg(&qpti->sdev->ofdev.dev, sg, | 879 | sg_count = dma_map_sg(&qpti->sdev->ofdev.dev, sg, |
879 | scsi_sg_count(Cmnd), | 880 | scsi_sg_count(Cmnd), |
880 | Cmnd->sc_data_direction); | 881 | Cmnd->sc_data_direction); |
881 | 882 | ||
882 | ds = cmd->dataseg; | 883 | ds = cmd->dataseg; |
883 | cmd->segment_cnt = sg_count; | 884 | cmd->segment_cnt = sg_count; |
@@ -1152,9 +1153,9 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) | |||
1152 | Cmnd->result = DID_ERROR << 16; | 1153 | Cmnd->result = DID_ERROR << 16; |
1153 | 1154 | ||
1154 | if (scsi_bufflen(Cmnd)) | 1155 | if (scsi_bufflen(Cmnd)) |
1155 | sbus_unmap_sg(&qpti->sdev->ofdev.dev, | 1156 | dma_unmap_sg(&qpti->sdev->ofdev.dev, |
1156 | scsi_sglist(Cmnd), scsi_sg_count(Cmnd), | 1157 | scsi_sglist(Cmnd), scsi_sg_count(Cmnd), |
1157 | Cmnd->sc_data_direction); | 1158 | Cmnd->sc_data_direction); |
1158 | 1159 | ||
1159 | qpti->cmd_count[Cmnd->device->id]--; | 1160 | qpti->cmd_count[Cmnd->device->id]--; |
1160 | sbus_writew(out_ptr, qpti->qregs + MBOX5); | 1161 | sbus_writew(out_ptr, qpti->qregs + MBOX5); |
@@ -1357,12 +1358,12 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi | |||
1357 | 1358 | ||
1358 | fail_unmap_queues: | 1359 | fail_unmap_queues: |
1359 | #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) | 1360 | #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) |
1360 | sbus_free_consistent(&qpti->sdev->ofdev.dev, | 1361 | dma_free_coherent(&qpti->sdev->ofdev.dev, |
1361 | QSIZE(RES_QUEUE_LEN), | 1362 | QSIZE(RES_QUEUE_LEN), |
1362 | qpti->res_cpu, qpti->res_dvma); | 1363 | qpti->res_cpu, qpti->res_dvma); |
1363 | sbus_free_consistent(&qpti->sdev->ofdev.dev, | 1364 | dma_free_coherent(&qpti->sdev->ofdev.dev, |
1364 | QSIZE(QLOGICPTI_REQ_QUEUE_LEN), | 1365 | QSIZE(QLOGICPTI_REQ_QUEUE_LEN), |
1365 | qpti->req_cpu, qpti->req_dvma); | 1366 | qpti->req_cpu, qpti->req_dvma); |
1366 | #undef QSIZE | 1367 | #undef QSIZE |
1367 | 1368 | ||
1368 | fail_unmap_regs: | 1369 | fail_unmap_regs: |
@@ -1395,12 +1396,12 @@ static int __devexit qpti_sbus_remove(struct of_device *dev) | |||
1395 | free_irq(qpti->irq, qpti); | 1396 | free_irq(qpti->irq, qpti); |
1396 | 1397 | ||
1397 | #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) | 1398 | #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) |
1398 | sbus_free_consistent(&qpti->sdev->ofdev.dev, | 1399 | dma_free_coherent(&qpti->sdev->ofdev.dev, |
1399 | QSIZE(RES_QUEUE_LEN), | 1400 | QSIZE(RES_QUEUE_LEN), |
1400 | qpti->res_cpu, qpti->res_dvma); | 1401 | qpti->res_cpu, qpti->res_dvma); |
1401 | sbus_free_consistent(&qpti->sdev->ofdev.dev, | 1402 | dma_free_coherent(&qpti->sdev->ofdev.dev, |
1402 | QSIZE(QLOGICPTI_REQ_QUEUE_LEN), | 1403 | QSIZE(QLOGICPTI_REQ_QUEUE_LEN), |
1403 | qpti->req_cpu, qpti->req_dvma); | 1404 | qpti->req_cpu, qpti->req_dvma); |
1404 | #undef QSIZE | 1405 | #undef QSIZE |
1405 | 1406 | ||
1406 | sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size); | 1407 | sbus_iounmap(qpti->qregs, qpti->sdev->reg_addrs[0].reg_size); |
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 35b6e2ccc394..f7508743f705 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/dma-mapping.h> | ||
12 | 13 | ||
13 | #include <asm/irq.h> | 14 | #include <asm/irq.h> |
14 | #include <asm/io.h> | 15 | #include <asm/io.h> |
@@ -101,8 +102,9 @@ static int __devinit esp_sbus_map_command_block(struct esp *esp) | |||
101 | { | 102 | { |
102 | struct sbus_dev *sdev = esp->dev; | 103 | struct sbus_dev *sdev = esp->dev; |
103 | 104 | ||
104 | esp->command_block = sbus_alloc_consistent(&sdev->ofdev.dev, 16, | 105 | esp->command_block = dma_alloc_coherent(&sdev->ofdev.dev, 16, |
105 | &esp->command_block_dma); | 106 | &esp->command_block_dma, |
107 | GFP_ATOMIC); | ||
106 | if (!esp->command_block) | 108 | if (!esp->command_block) |
107 | return -ENOMEM; | 109 | return -ENOMEM; |
108 | return 0; | 110 | return 0; |
@@ -225,7 +227,7 @@ static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf, | |||
225 | { | 227 | { |
226 | struct sbus_dev *sdev = esp->dev; | 228 | struct sbus_dev *sdev = esp->dev; |
227 | 229 | ||
228 | return sbus_map_single(&sdev->ofdev.dev, buf, sz, dir); | 230 | return dma_map_single(&sdev->ofdev.dev, buf, sz, dir); |
229 | } | 231 | } |
230 | 232 | ||
231 | static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, | 233 | static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, |
@@ -233,7 +235,7 @@ static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, | |||
233 | { | 235 | { |
234 | struct sbus_dev *sdev = esp->dev; | 236 | struct sbus_dev *sdev = esp->dev; |
235 | 237 | ||
236 | return sbus_map_sg(&sdev->ofdev.dev, sg, num_sg, dir); | 238 | return dma_map_sg(&sdev->ofdev.dev, sg, num_sg, dir); |
237 | } | 239 | } |
238 | 240 | ||
239 | static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, | 241 | static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, |
@@ -241,7 +243,7 @@ static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, | |||
241 | { | 243 | { |
242 | struct sbus_dev *sdev = esp->dev; | 244 | struct sbus_dev *sdev = esp->dev; |
243 | 245 | ||
244 | sbus_unmap_single(&sdev->ofdev.dev, addr, sz, dir); | 246 | dma_unmap_single(&sdev->ofdev.dev, addr, sz, dir); |
245 | } | 247 | } |
246 | 248 | ||
247 | static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, | 249 | static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, |
@@ -249,7 +251,7 @@ static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, | |||
249 | { | 251 | { |
250 | struct sbus_dev *sdev = esp->dev; | 252 | struct sbus_dev *sdev = esp->dev; |
251 | 253 | ||
252 | sbus_unmap_sg(&sdev->ofdev.dev, sg, num_sg, dir); | 254 | dma_unmap_sg(&sdev->ofdev.dev, sg, num_sg, dir); |
253 | } | 255 | } |
254 | 256 | ||
255 | static int sbus_esp_irq_pending(struct esp *esp) | 257 | static int sbus_esp_irq_pending(struct esp *esp) |
@@ -558,9 +560,9 @@ static int __devinit esp_sbus_probe_one(struct device *dev, | |||
558 | fail_free_irq: | 560 | fail_free_irq: |
559 | free_irq(host->irq, esp); | 561 | free_irq(host->irq, esp); |
560 | fail_unmap_command_block: | 562 | fail_unmap_command_block: |
561 | sbus_free_consistent(&esp_dev->ofdev.dev, 16, | 563 | dma_free_coherent(&esp_dev->ofdev.dev, 16, |
562 | esp->command_block, | 564 | esp->command_block, |
563 | esp->command_block_dma); | 565 | esp->command_block_dma); |
564 | fail_unmap_regs: | 566 | fail_unmap_regs: |
565 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | 567 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); |
566 | fail_unlink: | 568 | fail_unlink: |
@@ -609,9 +611,9 @@ static int __devexit esp_sbus_remove(struct of_device *dev) | |||
609 | dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); | 611 | dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); |
610 | 612 | ||
611 | free_irq(irq, esp); | 613 | free_irq(irq, esp); |
612 | sbus_free_consistent(&sdev->ofdev.dev, 16, | 614 | dma_free_coherent(&sdev->ofdev.dev, 16, |
613 | esp->command_block, | 615 | esp->command_block, |
614 | esp->command_block_dma); | 616 | esp->command_block_dma); |
615 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | 617 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); |
616 | of_iounmap(&dma_of->resource[0], esp->dma_regs, | 618 | of_iounmap(&dma_of->resource[0], esp->dma_regs, |
617 | resource_size(&dma_of->resource[0])); | 619 | resource_size(&dma_of->resource[0])); |
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index cc803972c0fb..ccaaac45fafb 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -192,8 +192,8 @@ static void *snd_malloc_sbus_pages(struct device *dev, size_t size, | |||
192 | snd_assert(size > 0, return NULL); | 192 | snd_assert(size > 0, return NULL); |
193 | snd_assert(dma_addr != NULL, return NULL); | 193 | snd_assert(dma_addr != NULL, return NULL); |
194 | pg = get_order(size); | 194 | pg = get_order(size); |
195 | res = sbus_alloc_consistent(&sdev->ofdev.dev, PAGE_SIZE * (1 << pg), | 195 | res = dma_alloc_coherent(&sdev->ofdev.dev, PAGE_SIZE * (1 << pg), |
196 | dma_addr); | 196 | dma_addr, GFP_ATOMIC); |
197 | if (res != NULL) | 197 | if (res != NULL) |
198 | inc_snd_pages(pg); | 198 | inc_snd_pages(pg); |
199 | return res; | 199 | return res; |
@@ -209,8 +209,8 @@ static void snd_free_sbus_pages(struct device *dev, size_t size, | |||
209 | return; | 209 | return; |
210 | pg = get_order(size); | 210 | pg = get_order(size); |
211 | dec_snd_pages(pg); | 211 | dec_snd_pages(pg); |
212 | sbus_free_consistent(&sdev->ofdev.dev, PAGE_SIZE * (1 << pg), | 212 | dma_free_coherent(&sdev->ofdev.dev, PAGE_SIZE * (1 << pg), |
213 | ptr, dma_addr); | 213 | ptr, dma_addr); |
214 | } | 214 | } |
215 | 215 | ||
216 | #endif /* CONFIG_SBUS */ | 216 | #endif /* CONFIG_SBUS */ |
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index a6b32ec34bde..5242ecbb91d5 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/delay.h> | 57 | #include <linux/delay.h> |
58 | #include <linux/irq.h> | 58 | #include <linux/irq.h> |
59 | #include <linux/io.h> | 59 | #include <linux/io.h> |
60 | #include <linux/dma-mapping.h> | ||
60 | 61 | ||
61 | #include <sound/core.h> | 62 | #include <sound/core.h> |
62 | #include <sound/pcm.h> | 63 | #include <sound/pcm.h> |
@@ -2093,15 +2094,15 @@ static int snd_dbri_hw_params(struct snd_pcm_substream *substream, | |||
2093 | */ | 2094 | */ |
2094 | if (info->dvma_buffer == 0) { | 2095 | if (info->dvma_buffer == 0) { |
2095 | if (DBRI_STREAMNO(substream) == DBRI_PLAY) | 2096 | if (DBRI_STREAMNO(substream) == DBRI_PLAY) |
2096 | direction = SBUS_DMA_TODEVICE; | 2097 | direction = DMA_TO_DEVICE; |
2097 | else | 2098 | else |
2098 | direction = SBUS_DMA_FROMDEVICE; | 2099 | direction = DMA_FROM_DEVICE; |
2099 | 2100 | ||
2100 | info->dvma_buffer = | 2101 | info->dvma_buffer = |
2101 | sbus_map_single(&dbri->sdev->ofdev.dev, | 2102 | dma_map_single(&dbri->sdev->ofdev.dev, |
2102 | runtime->dma_area, | 2103 | runtime->dma_area, |
2103 | params_buffer_bytes(hw_params), | 2104 | params_buffer_bytes(hw_params), |
2104 | direction); | 2105 | direction); |
2105 | } | 2106 | } |
2106 | 2107 | ||
2107 | direction = params_buffer_bytes(hw_params); | 2108 | direction = params_buffer_bytes(hw_params); |
@@ -2122,12 +2123,12 @@ static int snd_dbri_hw_free(struct snd_pcm_substream *substream) | |||
2122 | */ | 2123 | */ |
2123 | if (info->dvma_buffer) { | 2124 | if (info->dvma_buffer) { |
2124 | if (DBRI_STREAMNO(substream) == DBRI_PLAY) | 2125 | if (DBRI_STREAMNO(substream) == DBRI_PLAY) |
2125 | direction = SBUS_DMA_TODEVICE; | 2126 | direction = DMA_TO_DEVICE; |
2126 | else | 2127 | else |
2127 | direction = SBUS_DMA_FROMDEVICE; | 2128 | direction = DMA_FROM_DEVICE; |
2128 | 2129 | ||
2129 | sbus_unmap_single(&dbri->sdev->ofdev.dev, info->dvma_buffer, | 2130 | dma_unmap_single(&dbri->sdev->ofdev.dev, info->dvma_buffer, |
2130 | substream->runtime->buffer_size, direction); | 2131 | substream->runtime->buffer_size, direction); |
2131 | info->dvma_buffer = 0; | 2132 | info->dvma_buffer = 0; |
2132 | } | 2133 | } |
2133 | if (info->pipe != -1) { | 2134 | if (info->pipe != -1) { |
@@ -2525,9 +2526,9 @@ static int __devinit snd_dbri_create(struct snd_card *card, | |||
2525 | dbri->sdev = sdev; | 2526 | dbri->sdev = sdev; |
2526 | dbri->irq = irq; | 2527 | dbri->irq = irq; |
2527 | 2528 | ||
2528 | dbri->dma = sbus_alloc_consistent(&sdev->ofdev.dev, | 2529 | dbri->dma = dma_alloc_coherent(&sdev->ofdev.dev, |
2529 | sizeof(struct dbri_dma), | 2530 | sizeof(struct dbri_dma), |
2530 | &dbri->dma_dvma); | 2531 | &dbri->dma_dvma, GFP_ATOMIC); |
2531 | memset((void *)dbri->dma, 0, sizeof(struct dbri_dma)); | 2532 | memset((void *)dbri->dma, 0, sizeof(struct dbri_dma)); |
2532 | 2533 | ||
2533 | dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n", | 2534 | dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n", |
@@ -2539,8 +2540,8 @@ static int __devinit snd_dbri_create(struct snd_card *card, | |||
2539 | dbri->regs_size, "DBRI Registers"); | 2540 | dbri->regs_size, "DBRI Registers"); |
2540 | if (!dbri->regs) { | 2541 | if (!dbri->regs) { |
2541 | printk(KERN_ERR "DBRI: could not allocate registers\n"); | 2542 | printk(KERN_ERR "DBRI: could not allocate registers\n"); |
2542 | sbus_free_consistent(&sdev->ofdev.dev, sizeof(struct dbri_dma), | 2543 | dma_free_coherent(&sdev->ofdev.dev, sizeof(struct dbri_dma), |
2543 | (void *)dbri->dma, dbri->dma_dvma); | 2544 | (void *)dbri->dma, dbri->dma_dvma); |
2544 | return -EIO; | 2545 | return -EIO; |
2545 | } | 2546 | } |
2546 | 2547 | ||
@@ -2549,8 +2550,8 @@ static int __devinit snd_dbri_create(struct snd_card *card, | |||
2549 | if (err) { | 2550 | if (err) { |
2550 | printk(KERN_ERR "DBRI: Can't get irq %d\n", dbri->irq); | 2551 | printk(KERN_ERR "DBRI: Can't get irq %d\n", dbri->irq); |
2551 | sbus_iounmap(dbri->regs, dbri->regs_size); | 2552 | sbus_iounmap(dbri->regs, dbri->regs_size); |
2552 | sbus_free_consistent(&sdev->ofdev.dev, sizeof(struct dbri_dma), | 2553 | dma_free_coherent(&sdev->ofdev.dev, sizeof(struct dbri_dma), |
2553 | (void *)dbri->dma, dbri->dma_dvma); | 2554 | (void *)dbri->dma, dbri->dma_dvma); |
2554 | return err; | 2555 | return err; |
2555 | } | 2556 | } |
2556 | 2557 | ||
@@ -2577,9 +2578,9 @@ static void snd_dbri_free(struct snd_dbri *dbri) | |||
2577 | sbus_iounmap(dbri->regs, dbri->regs_size); | 2578 | sbus_iounmap(dbri->regs, dbri->regs_size); |
2578 | 2579 | ||
2579 | if (dbri->dma) | 2580 | if (dbri->dma) |
2580 | sbus_free_consistent(&dbri->sdev->ofdev.dev, | 2581 | dma_free_coherent(&dbri->sdev->ofdev.dev, |
2581 | sizeof(struct dbri_dma), | 2582 | sizeof(struct dbri_dma), |
2582 | (void *)dbri->dma, dbri->dma_dvma); | 2583 | (void *)dbri->dma, dbri->dma_dvma); |
2583 | } | 2584 | } |
2584 | 2585 | ||
2585 | static int __devinit dbri_probe(struct of_device *of_dev, | 2586 | static int __devinit dbri_probe(struct of_device *of_dev, |