Diffstat (limited to 'arch/sparc/kernel/ioport.c')
 arch/sparc/kernel/ioport.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 9c3ed88853f3..97aa50d1e4ae 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -727,9 +727,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
-		BUG_ON(page_address(sg->page) == NULL);
-		sg->dvma_address =
-			virt_to_phys(page_address(sg->page)) + sg->offset;
+		BUG_ON(page_address(sg_page(sg)) == NULL);
+		sg->dvma_address = virt_to_phys(sg_virt(sg));
 		sg->dvma_length = sg->length;
 	}
 	return nents;
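
For context, the two helpers the hunk above switches to look roughly like this (paraphrased from include/linux/scatterlist.h of the same era; exact definitions, including the flag mask, vary between kernel versions, so treat this as a sketch rather than the authoritative header):

	static inline struct page *sg_page(struct scatterlist *sg)
	{
		/* page_link packs chain/end marker bits into the low bits of
		 * the page pointer, hence an accessor instead of a bare
		 * sg->page dereference. */
		return (struct page *)(sg->page_link & ~0x3UL);
	}

	static inline void *sg_virt(struct scatterlist *sg)
	{
		/* Kernel virtual address of the segment: the page's lowmem
		 * mapping plus the intra-page offset. */
		return page_address(sg_page(sg)) + sg->offset;
	}

Because virt_to_phys() is a simple linear translation for sparc32 lowmem, virt_to_phys(sg_virt(sg)) yields the same bus address as the old virt_to_phys(page_address(sg->page)) + sg->offset, which is why the two-line expression collapses into one.
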
@@ -748,9 +747,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
@@ -798,9 +797,9 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
@@ -814,9 +813,9 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
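
The three flush-path hunks are otherwise mechanical: each bare sg->page dereference becomes sg_page(sg), and the length handed to mmu_inval_dma_area() is still rounded up to a whole number of pages. A minimal standalone sketch of that rounding, assuming the usual 4 KiB sparc32 page size (the macros below are local stand-ins, not the kernel's):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL		/* assumed sparc32 page size */
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long length = 0x1234;	/* hypothetical sg->length */

		/* Same expression as in the diff: round up to the next page
		 * boundary so the final, partial page is also invalidated. */
		unsigned long inval_len = (length + PAGE_SIZE - 1) & PAGE_MASK;

		printf("0x%lx -> 0x%lx\n", length, inval_len);	/* 0x1234 -> 0x2000 */
		return 0;
	}
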