Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--  arch/sparc/kernel/ioport.c | 25
1 file changed, 13 insertions, 12 deletions
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 62182d2d7b0d..9c3ed88853f3 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>		/* struct pci_dev */
 #include <linux/proc_fs.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/vaddrs.h>
@@ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev,
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+	struct scatterlist *sg;
 	int n;
 
 	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
-	for (n = 0; n < nents; n++) {
+	for_each_sg(sgl, sg, nents, n) {
 		BUG_ON(page_address(sg->page) == NULL);
 		sg->dvma_address =
 			virt_to_phys(page_address(sg->page)) + sg->offset;
 		sg->dvma_length = sg->length;
-		sg++;
 	}
 	return nents;
 }
@@ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+	struct scatterlist *sg;
 	int n;
 
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
-		for (n = 0; n < nents; n++) {
+		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-			sg++;
 		}
 	}
 }
@@ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+	struct scatterlist *sg;
 	int n;
 
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
-		for (n = 0; n < nents; n++) {
+		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-			sg++;
 		}
 	}
 }
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+	struct scatterlist *sg;
 	int n;
 
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
-		for (n = 0; n < nents; n++) {
+		for_each_sg(sgl, sg, nents, n) {
 			BUG_ON(page_address(sg->page) == NULL);
 			mmu_inval_dma_area(
 			    (unsigned long) page_address(sg->page),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-			sg++;
 		}
 	}
 }
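
The change repeated in every hunk above is the same: drop the open-coded sg++ walk and iterate with for_each_sg(), because once scatterlists can be chained the next entry is not guaranteed to sit immediately after the current one in memory. Below is a minimal user-space sketch of that idea, assuming a simplified entry layout; sg_entry, sg_entry_next() and for_each_entry() are illustrative stand-ins, not the real <linux/scatterlist.h> definitions.

#include <stdio.h>

/* Simplified scatterlist entry: a link slot has ->chain set, and the
 * final real entry has ->is_last set (roughly what sg_mark_end() does
 * in the real API). */
struct sg_entry {
	unsigned int length;
	struct sg_entry *chain;
	int is_last;
};

/* Stand-in for sg_next(): step to the adjacent slot, and if that slot
 * is only a chain link, follow it into the next chunk. */
static struct sg_entry *sg_entry_next(struct sg_entry *sg)
{
	if (sg->is_last)
		return NULL;
	sg++;
	if (sg->chain)
		sg = sg->chain;
	return sg;
}

/* Stand-in for for_each_sg(): count entries, but let the helper above
 * decide where the next entry actually lives. */
#define for_each_entry(sgl, sg, nents, i) \
	for ((i) = 0, (sg) = (sgl); (i) < (nents); (i)++, (sg) = sg_entry_next(sg))

int main(void)
{
	/* One logical list of four segments stored in two separate chunks. */
	struct sg_entry chunk2[2] = {
		{ .length = 512 },
		{ .length = 1024, .is_last = 1 },
	};
	struct sg_entry chunk1[3] = {
		{ .length = 4096 },
		{ .length = 2048 },
		{ .chain = chunk2 },	/* link slot, not a data segment */
	};
	struct sg_entry *sg;
	unsigned int total = 0;
	int i;

	for_each_entry(chunk1, sg, 4, i)
		total += sg->length;

	printf("total length: %u bytes\n", total);	/* 4096 + 2048 + 512 + 1024 = 7680 */
	return 0;
}

In this sketch, a plain sg++ walk over chunk1 for four entries would count the link slot as a segment and then read past the end of the array; guarding against that failure mode once sg chaining exists is what the conversion to for_each_sg() in the patch is for.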