author	Russell King <rmk+kernel@arm.linux.org.uk>	2013-01-19 06:05:57 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-01-19 06:05:57 -0500
commit	15653371c67c3fbe359ae37b720639dd4c7b42c5 (patch)
tree	e92cba77b7796993d2cce4af22b9022196c9211a /arch/arm
parent	6f16f4998f98e42e3f2dedf663cfb691ff0324af (diff)
ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem
Subhash Jadavani reported this partial backtrace:

    Now consider this call stack from MMC block driver (this is on the
    ARMv7 based board):

    [<c001b50c>] (v7_dma_inv_range+0x30/0x48) from [<c0017b8c>] (dma_cache_maint_page+0x1c4/0x24c)
    [<c0017b8c>] (dma_cache_maint_page+0x1c4/0x24c) from [<c0017c28>] (___dma_page_cpu_to_dev+0x14/0x1c)
    [<c0017c28>] (___dma_page_cpu_to_dev+0x14/0x1c) from [<c0017ff8>] (dma_map_sg+0x3c/0x114)

This is caused by incrementing the struct page pointer, and running off
the end of the sparsemem page array.  Fix this by incrementing by pfn
instead, and convert the pfn to a struct page.

Cc: <stable@vger.kernel.org>
Suggested-by: James Bottomley <JBottomley@Parallels.com>
Tested-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
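To make the failure mode concrete, below is a minimal userspace sketch of
why `page++` is unsafe under sparsemem.  This is not kernel code: the
section size, section count, and the pfn_to_page() helper are simplified
stand-ins.  The point it illustrates is that each present section gets its
own separately allocated struct page array, so pointer arithmetic past a
section boundary walks into unrelated memory, while pfn arithmetic stays
linear and can always be translated back to a valid struct page:

	#include <stdio.h>
	#include <stdlib.h>

	#define SECTION_SHIFT		8	/* 256 pages per section, illustrative */
	#define PAGES_PER_SECTION	(1UL << SECTION_SHIFT)
	#define NR_SECTIONS		4

	struct page { unsigned long pfn; };

	/* Sparsemem in miniature: one separately allocated array per section. */
	static struct page *section_mem_map[NR_SECTIONS];

	static struct page *pfn_to_page(unsigned long pfn)
	{
		return &section_mem_map[pfn >> SECTION_SHIFT]
				       [pfn & (PAGES_PER_SECTION - 1)];
	}

	int main(void)
	{
		for (int s = 0; s < NR_SECTIONS; s++) {
			section_mem_map[s] = calloc(PAGES_PER_SECTION,
						    sizeof(struct page));
			for (unsigned long i = 0; i < PAGES_PER_SECTION; i++)
				section_mem_map[s][i].pfn =
					((unsigned long)s << SECTION_SHIFT) | i;
		}

		/*
		 * The patch's idiom: advance the pfn and re-derive the
		 * struct page on each iteration.  Going from pfn 255 to
		 * 256 crosses from section 0 into section 1; `page++` on
		 * section 0's array would instead read past the end of
		 * that allocation.
		 */
		for (unsigned long pfn = 253; pfn < 259; pfn++) {
			struct page *page = pfn_to_page(pfn);
			printf("pfn %3lu lives in section %lu\n",
			       page->pfn, pfn >> SECTION_SHIFT);
		}
		return 0;
	}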
Diffstat (limited to 'arch/arm')
 arch/arm/mm/dma-mapping.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b2fb87c8698..076c26d43864 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages. But we still need to process highmem pages individually.
 	 * If highmem is not configured then the bulk of this loop gets
 	 * optimized out.
 	 */
-	size_t left = size;
 	do {
 		size_t len = left;
 		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
 		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 		}
 		offset = 0;
-		page++;
+		pfn++;
 		left -= len;
 	} while (left);
 }
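The key to the simplification in the first hunk is the new prologue, which
folds whole pages of `offset` into the starting pfn before the loop begins.
A tiny standalone program (illustrative values only, not kernel code) shows
the arithmetic:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		/* Suppose the sg entry starts 3 pages + 100 bytes past the
		 * first struct page, which maps (say) pfn 1000. */
		unsigned long pfn = 1000;
		unsigned long offset = 3 * PAGE_SIZE + 100;

		/* The patch's prologue: fold whole pages into the pfn. */
		pfn += offset / PAGE_SIZE;	/* 1000 + 3 = 1003 */
		offset %= PAGE_SIZE;		/* 100 */

		printf("start pfn = %lu, in-page offset = %lu\n", pfn, offset);
		return 0;
	}

Because `offset` is always below PAGE_SIZE once the loop is entered, the
old in-loop `if (offset >= PAGE_SIZE)` adjustment in the highmem branch
became dead code, which is why the first hunk can delete it outright.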