author		Ralf Baechle <ralf@linux-mips.org>	2007-09-04 09:41:01 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:51:07 -0400
commit		99cd149efe824cf27c5d34506002a0fbfa831c0f (patch)
tree		8df785ae6c8b4d4416412e72f9a5c52875981c43	/drivers/net/sgiseeq.c
parent		dc67369573eee33a4b1220d416cb7dd3501dccbc (diff)
sgiseeq: replace use of dma_cache_wback_inv
The sgiseeq driver is one of the few remaining users of the ancient cache-banging DMA API. Replace it with the modern DMA API.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/sgiseeq.c')
-rw-r--r--	drivers/net/sgiseeq.c	26
1 file changed, 11 insertions(+), 15 deletions(-)
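For context, the patch converts the ring allocation to the standard coherent DMA pattern: dma_alloc_coherent() returns both a CPU pointer and a bus address, and dma_free_coherent() releases the buffer with the same size and handles, so no explicit dma_cache_wback_inv() calls are needed. The following is a minimal sketch of that pattern only; the names (struct example_ring, struct example_priv, example_probe, example_remove) are made up for illustration and are not taken from the driver.

/* Sketch of the coherent DMA allocate/free pattern; names are hypothetical. */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct example_ring {
	u32 descriptors[256];		/* placeholder descriptor storage */
};

struct example_priv {
	struct example_ring *ring;	/* CPU address of the ring */
	dma_addr_t ring_dma;		/* bus address handed to the device */
};

static int example_probe(struct platform_device *pdev, struct example_priv *ep)
{
	/* Memory coherent between CPU and device; no manual cache
	 * writeback/invalidate required before or after DMA. */
	ep->ring = dma_alloc_coherent(&pdev->dev, sizeof(*ep->ring),
				      &ep->ring_dma, GFP_KERNEL);
	if (!ep->ring)
		return -ENOMEM;
	return 0;
}

static void example_remove(struct platform_device *pdev, struct example_priv *ep)
{
	/* Free with the same device, size, CPU pointer and bus address. */
	dma_free_coherent(&pdev->dev, sizeof(*ep->ring), ep->ring,
			  ep->ring_dma);
}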
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 0fb74cb51c4b..eb67b024e413 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -75,6 +75,7 @@ struct sgiseeq_init_block {	/* Note the name ;-) */
 
 struct sgiseeq_private {
 	struct sgiseeq_init_block *srings;
+	dma_addr_t srings_dma;
 
 	/* Ptrs to the descriptors in uncached space. */
 	struct sgiseeq_rx_desc *rx_desc;
@@ -643,13 +644,20 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
 	sp = netdev_priv(dev);
 
 	/* Make private data page aligned */
-	sr = (struct sgiseeq_init_block *) get_zeroed_page(GFP_KERNEL);
+	sr = dma_alloc_coherent(&pdev->dev, sizeof(*sp->srings),
+				&sp->srings_dma, GFP_KERNEL);
 	if (!sr) {
 		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
 		err = -ENOMEM;
 		goto err_out_free_dev;
 	}
 	sp->srings = sr;
+	sp->rx_desc = sp->srings->rxvector;
+	sp->tx_desc = sp->srings->txvector;
+
+	/* A couple calculations now, saves many cycles later. */
+	setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
+	setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);
 
 	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
 
@@ -662,19 +670,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
 	sp->name = sgiseeqstr;
 	sp->mode = SEEQ_RCMD_RBCAST;
 
-	sp->rx_desc = (struct sgiseeq_rx_desc *)
-	              CKSEG1ADDR(ALIGNED(&sp->srings->rxvector[0]));
-	dma_cache_wback_inv((unsigned long)&sp->srings->rxvector,
-	                    sizeof(sp->srings->rxvector));
-	sp->tx_desc = (struct sgiseeq_tx_desc *)
-	              CKSEG1ADDR(ALIGNED(&sp->srings->txvector[0]));
-	dma_cache_wback_inv((unsigned long)&sp->srings->txvector,
-	                    sizeof(sp->srings->txvector));
-
-	/* A couple calculations now, saves many cycles later. */
-	setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
-	setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);
-
 	/* Setup PIO and DMA transfer timing */
 	sp->hregs->pconfig = 0x161;
 	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
@@ -732,7 +727,8 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
 	struct sgiseeq_private *sp = netdev_priv(dev);
 
 	unregister_netdev(dev);
-	free_page((unsigned long) sp->srings);
+	dma_free_coherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+			  sp->srings_dma);
 	free_netdev(dev);
 	platform_set_drvdata(pdev, NULL);
 