author    David S. Miller <davem@davemloft.net>  2008-08-27 21:09:11 -0400
committer David S. Miller <davem@davemloft.net>  2008-08-29 05:13:15 -0400
commit    738f2b7b813913e651f39387d007dd961755dee2 (patch)
tree      022ca4d144cba51495e6f26a8f55d3046d16c2e3 /drivers/net
parent    944c67dff7a88f0a775e5b604937f9e30d2de555 (diff)
sparc: Convert all SBUS drivers to dma_*() interfaces.
And all the SBUS dma interfaces are deleted.  A private implementation
remains inside of the 32-bit sparc port which exists only for the sake
of the implementation of dma_*().

Signed-off-by: David S. Miller <davem@davemloft.net>
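The streaming-mapping side of the conversion is mechanical: each sbus_*()
call and SBUS_DMA_* direction constant maps one-to-one onto the generic DMA
API.  A minimal sketch of the pattern, with hypothetical helper names and
placeholder arguments (dev, buf, len) that are not taken from any of the
drivers below:

    #include <linux/dma-mapping.h>

    /* Hypothetical TX buffer mapping, before and after the conversion. */
    static dma_addr_t example_map_tx(struct device *dev, void *buf, size_t len)
    {
    	/* Before:  sbus_map_single(dev, buf, len, SBUS_DMA_TODEVICE);  */
    	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    }

    static void example_unmap_tx(struct device *dev, dma_addr_t mapping, size_t len)
    {
    	/* Before:  sbus_unmap_single(dev, mapping, len, SBUS_DMA_TODEVICE);  */
    	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
    }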
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/myri_sbus.c   63
-rw-r--r--  drivers/net/sunbmac.c     68
-rw-r--r--  drivers/net/sunhme.c      85
-rw-r--r--  drivers/net/sunlance.c    15
-rw-r--r--  drivers/net/sunqe.c       45
5 files changed, 132 insertions, 144 deletions
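The coherent (descriptor-block) allocations follow the same pattern, with one
visible difference in the diffs below: dma_alloc_coherent() takes an explicit
gfp_t argument, and the converted drivers pass GFP_ATOMIC.  Again a hedged
sketch with hypothetical names, not code from the patch:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Hypothetical descriptor-block allocation at probe time. */
    static void *example_alloc_block(struct device *dev, dma_addr_t *dvma)
    {
    	/* Before:  sbus_alloc_consistent(dev, PAGE_SIZE, dvma);
    	 * After: the generic API takes a gfp_t; GFP_ATOMIC matches what
    	 * this patch passes in the converted drivers. */
    	return dma_alloc_coherent(dev, PAGE_SIZE, dvma, GFP_ATOMIC);
    }

    static void example_free_block(struct device *dev, void *block, dma_addr_t dvma)
    {
    	/* Before:  sbus_free_consistent(dev, PAGE_SIZE, block, dvma);  */
    	dma_free_coherent(dev, PAGE_SIZE, block, dvma);
    }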
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index c17462159d9d..858880b619ce 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -22,6 +22,7 @@ static char version[] =
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <net/dst.h>
 #include <net/arp.h>
@@ -243,8 +244,8 @@ static void myri_clean_rings(struct myri_eth *mp)
 			u32 dma_addr;
 
 			dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
-			sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
-					  RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+			dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+					 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
 			dev_kfree_skb(mp->rx_skbs[i]);
 			mp->rx_skbs[i] = NULL;
 		}
@@ -260,9 +261,9 @@ static void myri_clean_rings(struct myri_eth *mp)
 			u32 dma_addr;
 
 			dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
-			sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
-					  (skb->len + 3) & ~3,
-					  SBUS_DMA_TODEVICE);
+			dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+					 (skb->len + 3) & ~3,
+					 DMA_TO_DEVICE);
 			dev_kfree_skb(mp->tx_skbs[i]);
 			mp->tx_skbs[i] = NULL;
 		}
@@ -291,9 +292,9 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq)
 		skb->dev = dev;
 		skb_put(skb, RX_ALLOC_SIZE);
 
-		dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev,
-					   skb->data, RX_ALLOC_SIZE,
-					   SBUS_DMA_FROMDEVICE);
+		dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev,
+					  skb->data, RX_ALLOC_SIZE,
+					  DMA_FROM_DEVICE);
 		sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
 		sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
 		sbus_writel(i, &rxd[i].ctx);
@@ -349,8 +350,8 @@ static void myri_tx(struct myri_eth *mp, struct net_device *dev)
 
 		DTX(("SKB[%d] ", entry));
 		dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
-		sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
-				  skb->len, SBUS_DMA_TODEVICE);
+		dma_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+				 skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb(skb);
 		mp->tx_skbs[entry] = NULL;
 		dev->stats.tx_packets++;
@@ -429,9 +430,9 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
 
 		/* Check for errors. */
 		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
-		sbus_dma_sync_single_for_cpu(&mp->myri_sdev->ofdev.dev,
-					     sbus_readl(&rxd->myri_scatters[0].addr),
-					     RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&mp->myri_sdev->ofdev.dev,
+					sbus_readl(&rxd->myri_scatters[0].addr),
+					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
 		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
 			DRX(("ERROR["));
 			dev->stats.rx_errors++;
@@ -448,10 +449,10 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
 			drops++;
 			DRX(("DROP "));
 			dev->stats.rx_dropped++;
-			sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
-							sbus_readl(&rxd->myri_scatters[0].addr),
-							RX_ALLOC_SIZE,
-							SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
+						   sbus_readl(&rxd->myri_scatters[0].addr),
+						   RX_ALLOC_SIZE,
+						   DMA_FROM_DEVICE);
 			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
 			sbus_writel(index, &rxd->ctx);
 			sbus_writel(1, &rxd->num_sg);
@@ -470,17 +471,17 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
 				DRX(("skb_alloc(FAILED) "));
 				goto drop_it;
 			}
-			sbus_unmap_single(&mp->myri_sdev->ofdev.dev,
-					  sbus_readl(&rxd->myri_scatters[0].addr),
-					  RX_ALLOC_SIZE,
-					  SBUS_DMA_FROMDEVICE);
+			dma_unmap_single(&mp->myri_sdev->ofdev.dev,
+					 sbus_readl(&rxd->myri_scatters[0].addr),
+					 RX_ALLOC_SIZE,
+					 DMA_FROM_DEVICE);
 			mp->rx_skbs[index] = new_skb;
 			new_skb->dev = dev;
 			skb_put(new_skb, RX_ALLOC_SIZE);
-			dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev,
-						   new_skb->data,
-						   RX_ALLOC_SIZE,
-						   SBUS_DMA_FROMDEVICE);
+			dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev,
+						  new_skb->data,
+						  RX_ALLOC_SIZE,
+						  DMA_FROM_DEVICE);
 			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
 			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
 			sbus_writel(index, &rxd->ctx);
@@ -506,10 +507,10 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
 
 			/* Reuse original ring buffer. */
 			DRX(("reuse "));
-			sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
-							sbus_readl(&rxd->myri_scatters[0].addr),
-							RX_ALLOC_SIZE,
-							SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
+						   sbus_readl(&rxd->myri_scatters[0].addr),
+						   RX_ALLOC_SIZE,
+						   DMA_FROM_DEVICE);
 			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
 			sbus_writel(index, &rxd->ctx);
 			sbus_writel(1, &rxd->num_sg);
@@ -658,8 +659,8 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
 	}
 
-	dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev, skb->data,
-				   len, SBUS_DMA_TODEVICE);
+	dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev, skb->data,
+				  len, DMA_TO_DEVICE);
 	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
 	sbus_writel(len, &txd->myri_gathers[0].len);
 	sbus_writel(1, &txd->num_sg);
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index b92218c2f76c..8fe4c49b0623 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -23,6 +23,7 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/auxio.h>
 #include <asm/byteorder.h>
@@ -239,9 +240,10 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
 		skb_reserve(skb, 34);
 
 		bb->be_rxd[i].rx_addr =
-			sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
-					RX_BUF_ALLOC_SIZE - 34,
-					SBUS_DMA_FROMDEVICE);
+			dma_map_single(&bp->bigmac_sdev->ofdev.dev,
+				       skb->data,
+				       RX_BUF_ALLOC_SIZE - 34,
+				       DMA_FROM_DEVICE);
 		bb->be_rxd[i].rx_flags =
 			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
 	}
@@ -776,9 +778,9 @@ static void bigmac_tx(struct bigmac *bp)
 		skb = bp->tx_skbs[elem];
 		bp->enet_stats.tx_packets++;
 		bp->enet_stats.tx_bytes += skb->len;
-		sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev,
-				  this->tx_addr, skb->len,
-				  SBUS_DMA_TODEVICE);
+		dma_unmap_single(&bp->bigmac_sdev->ofdev.dev,
+				 this->tx_addr, skb->len,
+				 DMA_TO_DEVICE);
 
 		DTX(("skb(%p) ", skb));
 		bp->tx_skbs[elem] = NULL;
@@ -831,19 +833,19 @@ static void bigmac_rx(struct bigmac *bp)
 				drops++;
 				goto drop_it;
 			}
-			sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev,
-					  this->rx_addr,
-					  RX_BUF_ALLOC_SIZE - 34,
-					  SBUS_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->bigmac_sdev->ofdev.dev,
+					 this->rx_addr,
+					 RX_BUF_ALLOC_SIZE - 34,
+					 DMA_FROM_DEVICE);
 			bp->rx_skbs[elem] = new_skb;
 			new_skb->dev = bp->dev;
 			skb_put(new_skb, ETH_FRAME_LEN);
 			skb_reserve(new_skb, 34);
 			this->rx_addr =
-				sbus_map_single(&bp->bigmac_sdev->ofdev.dev,
-						new_skb->data,
-						RX_BUF_ALLOC_SIZE - 34,
-						SBUS_DMA_FROMDEVICE);
+				dma_map_single(&bp->bigmac_sdev->ofdev.dev,
+					       new_skb->data,
+					       RX_BUF_ALLOC_SIZE - 34,
+					       DMA_FROM_DEVICE);
 			this->rx_flags =
 				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
 
@@ -858,13 +860,13 @@ static void bigmac_rx(struct bigmac *bp)
 			}
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
-			sbus_dma_sync_single_for_cpu(&bp->bigmac_sdev->ofdev.dev,
-						     this->rx_addr, len,
-						     SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&bp->bigmac_sdev->ofdev.dev,
+						this->rx_addr, len,
+						DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
-			sbus_dma_sync_single_for_device(&bp->bigmac_sdev->ofdev.dev,
-							this->rx_addr, len,
-							SBUS_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&bp->bigmac_sdev->ofdev.dev,
+						   this->rx_addr, len,
+						   DMA_FROM_DEVICE);
 
 			/* Reuse original ring buffer. */
 			this->rx_flags =
@@ -960,8 +962,8 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 mapping;
 
 	len = skb->len;
-	mapping = sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
-				  len, SBUS_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
+				 len, DMA_TO_DEVICE);
 
 	/* Avoid a race... */
 	spin_lock_irq(&bp->lock);
@@ -1185,9 +1187,9 @@ static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev)
 	bigmac_stop(bp);
 
 	/* Allocate transmit/receive descriptor DVMA block. */
-	bp->bmac_block = sbus_alloc_consistent(&bp->bigmac_sdev->ofdev.dev,
-					       PAGE_SIZE,
-					       &bp->bblock_dvma);
+	bp->bmac_block = dma_alloc_coherent(&bp->bigmac_sdev->ofdev.dev,
+					    PAGE_SIZE,
+					    &bp->bblock_dvma, GFP_ATOMIC);
 	if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
 		printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
 		goto fail_and_cleanup;
@@ -1247,10 +1249,10 @@ fail_and_cleanup:
 	sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
 
 	if (bp->bmac_block)
-		sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev,
-				     PAGE_SIZE,
-				     bp->bmac_block,
-				     bp->bblock_dvma);
+		dma_free_coherent(&bp->bigmac_sdev->ofdev.dev,
+				  PAGE_SIZE,
+				  bp->bmac_block,
+				  bp->bblock_dvma);
 
 	/* This also frees the co-located 'dev->priv' */
 	free_netdev(dev);
@@ -1282,10 +1284,10 @@ static int __devexit bigmac_sbus_remove(struct of_device *dev)
 	sbus_iounmap(bp->creg, CREG_REG_SIZE);
 	sbus_iounmap(bp->bregs, BMAC_REG_SIZE);
 	sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
-	sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev,
-			     PAGE_SIZE,
-			     bp->bmac_block,
-			     bp->bblock_dvma);
+	dma_free_coherent(&bp->bigmac_sdev->ofdev.dev,
+			  PAGE_SIZE,
+			  bp->bmac_block,
+			  bp->bblock_dvma);
 
 	free_netdev(net_dev);
 
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index cd93fc5e826a..69cc77192961 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -34,6 +34,7 @@
 #include <linux/skbuff.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -277,13 +278,13 @@ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
 } while(0)
 #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
 #define hme_dma_map(__hp, __ptr, __size, __dir) \
-	sbus_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
+	dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
-	sbus_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
+	dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
-	sbus_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
+	dma_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
-	sbus_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
+	dma_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
 #else
 /* PCI only compilation */
 #define hme_write32(__hp, __reg, __val) \
@@ -316,25 +317,6 @@ static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
 #endif
 
 
-#ifdef SBUS_DMA_BIDIRECTIONAL
-# define DMA_BIDIRECTIONAL	SBUS_DMA_BIDIRECTIONAL
-#else
-# define DMA_BIDIRECTIONAL	0
-#endif
-
-#ifdef SBUS_DMA_FROMDEVICE
-# define DMA_FROMDEVICE		SBUS_DMA_FROMDEVICE
-#else
-# define DMA_TODEVICE		1
-#endif
-
-#ifdef SBUS_DMA_TODEVICE
-# define DMA_TODEVICE		SBUS_DMA_TODEVICE
-#else
-# define DMA_FROMDEVICE		2
-#endif
-
-
 /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
 static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
 {
@@ -1224,7 +1206,7 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
 
 		rxd = &hp->happy_block->happy_meal_rxd[i];
 		dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
-		hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
+		hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		hp->rx_skbs[i] = NULL;
 	}
@@ -1245,7 +1227,7 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
 			hme_dma_unmap(hp, dma_addr,
 				      (hme_read_desc32(hp, &txd->tx_flags)
 				       & TXFLAG_SIZE),
-				      DMA_TODEVICE);
+				      DMA_TO_DEVICE);
 
 			if (frag != skb_shinfo(skb)->nr_frags)
 				i++;
@@ -1287,7 +1269,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
 		hme_write_rxd(hp, &hb->happy_meal_rxd[i],
 			      (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-			      hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
+			      hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE));
 		skb_reserve(skb, RX_OFFSET);
 	}
 
@@ -1966,7 +1948,7 @@ static void happy_meal_tx(struct happy_meal *hp)
 		dma_len = hme_read_desc32(hp, &this->tx_flags);
 
 		dma_len &= TXFLAG_SIZE;
-		hme_dma_unmap(hp, dma_addr, dma_len, DMA_TODEVICE);
+		hme_dma_unmap(hp, dma_addr, dma_len, DMA_TO_DEVICE);
 
 		elem = NEXT_TX(elem);
 		this = &txbase[elem];
@@ -2044,13 +2026,13 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 				drops++;
 				goto drop_it;
 			}
-			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
+			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
 			hp->rx_skbs[elem] = new_skb;
 			new_skb->dev = dev;
 			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
 			hme_write_rxd(hp, this,
 				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-				      hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
+				      hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE));
 			skb_reserve(new_skb, RX_OFFSET);
 
 			/* Trim the original skb for the netif. */
@@ -2065,9 +2047,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
-			hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE);
+			hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROM_DEVICE);
 			skb_copy_from_linear_data(skb, copy_skb->data, len);
-			hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE);
+			hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROM_DEVICE);
 
 			/* Reuse original ring buffer. */
 			hme_write_rxd(hp, this,
@@ -2300,7 +2282,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		u32 mapping, len;
 
 		len = skb->len;
-		mapping = hme_dma_map(hp, skb->data, len, DMA_TODEVICE);
+		mapping = hme_dma_map(hp, skb->data, len, DMA_TO_DEVICE);
 		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
 		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
 			      (tx_flags | (len & TXFLAG_SIZE)),
@@ -2314,7 +2296,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * Otherwise we could race with the device.
 		 */
 		first_len = skb_headlen(skb);
-		first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TODEVICE);
+		first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TO_DEVICE);
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2325,7 +2307,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			mapping = hme_dma_map(hp,
 					      ((void *) page_address(this_frag->page) +
 					       this_frag->page_offset),
-					      len, DMA_TODEVICE);
+					      len, DMA_TO_DEVICE);
 			this_txflags = tx_flags;
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				this_txflags |= TXFLAG_EOP;
@@ -2786,9 +2768,10 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
 	hp->happy_bursts = of_getintprop_default(sdev->bus->ofdev.node,
 						 "burst-sizes", 0x00);
 
-	hp->happy_block = sbus_alloc_consistent(hp->dma_dev,
-						PAGE_SIZE,
-						&hp->hblock_dvma);
+	hp->happy_block = dma_alloc_coherent(hp->dma_dev,
+					     PAGE_SIZE,
+					     &hp->hblock_dvma,
+					     GFP_ATOMIC);
 	err = -ENOMEM;
 	if (!hp->happy_block) {
 		printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
@@ -2824,12 +2807,12 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
 	hp->read_desc32 = sbus_hme_read_desc32;
 	hp->write_txd = sbus_hme_write_txd;
 	hp->write_rxd = sbus_hme_write_rxd;
-	hp->dma_map = (u32 (*)(void *, void *, long, int))sbus_map_single;
-	hp->dma_unmap = (void (*)(void *, u32, long, int))sbus_unmap_single;
+	hp->dma_map = (u32 (*)(void *, void *, long, int))dma_map_single;
+	hp->dma_unmap = (void (*)(void *, u32, long, int))dma_unmap_single;
 	hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int))
-		sbus_dma_sync_single_for_cpu;
+		dma_sync_single_for_cpu;
 	hp->dma_sync_for_device = (void (*)(void *, u32, long, int))
-		sbus_dma_sync_single_for_device;
+		dma_sync_single_for_device;
 	hp->read32 = sbus_hme_read32;
 	hp->write32 = sbus_hme_write32;
 #endif
@@ -2844,7 +2827,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
 	if (register_netdev(hp->dev)) {
 		printk(KERN_ERR "happymeal: Cannot register net device, "
 		       "aborting.\n");
-		goto err_out_free_consistent;
+		goto err_out_free_coherent;
 	}
 
 	dev_set_drvdata(&sdev->ofdev.dev, hp);
@@ -2860,11 +2843,11 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
 
 	return 0;
 
-err_out_free_consistent:
-	sbus_free_consistent(hp->dma_dev,
-			     PAGE_SIZE,
-			     hp->happy_block,
-			     hp->hblock_dvma);
+err_out_free_coherent:
+	dma_free_coherent(hp->dma_dev,
+			  PAGE_SIZE,
+			  hp->happy_block,
+			  hp->hblock_dvma);
 
 err_out_iounmap:
 	if (hp->gregs)
@@ -3308,10 +3291,10 @@ static int __devexit hme_sbus_remove(struct of_device *dev)
 	sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
 	sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
 	sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);
-	sbus_free_consistent(hp->dma_dev,
-			     PAGE_SIZE,
-			     hp->happy_block,
-			     hp->hblock_dvma);
+	dma_free_coherent(hp->dma_dev,
+			  PAGE_SIZE,
+			  hp->happy_block,
+			  hp->hblock_dvma);
 
 	free_netdev(net_dev);
 
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 4f4baf9f4ec8..65758881d7aa 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -91,6 +91,7 @@ static char lancestr[] = "LANCE";
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -1283,10 +1284,10 @@ static void lance_free_hwresources(struct lance_private *lp)
 		sbus_iounmap(lp->init_block_iomem,
 			     sizeof(struct lance_init_block));
 	} else if (lp->init_block_mem) {
-		sbus_free_consistent(&lp->sdev->ofdev.dev,
-				     sizeof(struct lance_init_block),
-				     lp->init_block_mem,
-				     lp->init_block_dvma);
+		dma_free_coherent(&lp->sdev->ofdev.dev,
+				  sizeof(struct lance_init_block),
+				  lp->init_block_mem,
+				  lp->init_block_dvma);
 	}
 }
 
@@ -1384,9 +1385,9 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev,
 		lp->tx = lance_tx_pio;
 	} else {
 		lp->init_block_mem =
-			sbus_alloc_consistent(&sdev->ofdev.dev,
-					      sizeof(struct lance_init_block),
-					      &lp->init_block_dvma);
+			dma_alloc_coherent(&sdev->ofdev.dev,
+					   sizeof(struct lance_init_block),
+					   &lp->init_block_dvma, GFP_ATOMIC);
 		if (!lp->init_block_mem || lp->init_block_dvma == 0) {
 			printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
 			goto fail;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index ac8049cab247..66f66ee8ca63 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -24,6 +24,7 @@
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -879,12 +880,12 @@ static int __devinit qec_ether_init(struct sbus_dev *sdev)
 		goto fail;
 	}
 
-	qe->qe_block = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev,
-					     PAGE_SIZE,
-					     &qe->qblock_dvma);
-	qe->buffers = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev,
-					    sizeof(struct sunqe_buffers),
-					    &qe->buffers_dvma);
+	qe->qe_block = dma_alloc_coherent(&qe->qe_sdev->ofdev.dev,
+					  PAGE_SIZE,
+					  &qe->qblock_dvma, GFP_ATOMIC);
+	qe->buffers = dma_alloc_coherent(&qe->qe_sdev->ofdev.dev,
+					 sizeof(struct sunqe_buffers),
+					 &qe->buffers_dvma, GFP_ATOMIC);
 	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
 	    qe->buffers == NULL || qe->buffers_dvma == 0)
 		goto fail;
@@ -926,15 +927,15 @@ fail:
 	if (qe->mregs)
 		sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
 	if (qe->qe_block)
-		sbus_free_consistent(&qe->qe_sdev->ofdev.dev,
-				     PAGE_SIZE,
-				     qe->qe_block,
-				     qe->qblock_dvma);
+		dma_free_coherent(&qe->qe_sdev->ofdev.dev,
+				  PAGE_SIZE,
+				  qe->qe_block,
+				  qe->qblock_dvma);
 	if (qe->buffers)
-		sbus_free_consistent(&qe->qe_sdev->ofdev.dev,
-				     sizeof(struct sunqe_buffers),
-				     qe->buffers,
-				     qe->buffers_dvma);
+		dma_free_coherent(&qe->qe_sdev->ofdev.dev,
+				  sizeof(struct sunqe_buffers),
+				  qe->buffers,
+				  qe->buffers_dvma);
 
 	free_netdev(dev);
 
@@ -957,14 +958,14 @@ static int __devexit qec_sbus_remove(struct of_device *dev)
 
 	sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
 	sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
-	sbus_free_consistent(&qp->qe_sdev->ofdev.dev,
-			     PAGE_SIZE,
-			     qp->qe_block,
-			     qp->qblock_dvma);
-	sbus_free_consistent(&qp->qe_sdev->ofdev.dev,
-			     sizeof(struct sunqe_buffers),
-			     qp->buffers,
-			     qp->buffers_dvma);
+	dma_free_coherent(&qp->qe_sdev->ofdev.dev,
+			  PAGE_SIZE,
+			  qp->qe_block,
+			  qp->qblock_dvma);
+	dma_free_coherent(&qp->qe_sdev->ofdev.dev,
+			  sizeof(struct sunqe_buffers),
+			  qp->buffers,
+			  qp->buffers_dvma);
 
 	free_netdev(net_dev);
 