diff options
| author | Richard Purdie <rpurdie@rpsys.net> | 2006-12-30 10:08:50 -0500 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2006-12-30 12:05:08 -0500 |
| commit | 1c9d3df5e88ad7db23f5b22f4341c39722a904a4 (patch) | |
| tree | dbabefd52a5f8a5f35216bda33f29e4b9b398569 | |
| parent | b0b1d60a64054697ef828e0565f006cc0f823590 (diff) | |
[ARM] 4078/1: Fix ARM copypage cache coherency problems
If PG_dcache_dirty is set for a page, we need to flush the source page
before performing any copypage operation using a different virtual address.
This fixes the copypage implementations for XScale, StrongARM and ARMv6.
This patch fixes segmentation faults seen in the dynamic linker under
the usage patterns of glibc 2.4/2.5.
Signed-off-by: Richard Purdie <rpurdie@rpsys.net>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
| -rw-r--r-- | arch/arm/mm/copypage-v4mc.c | 6 | ||||
| -rw-r--r-- | arch/arm/mm/copypage-v6.c | 4 | ||||
| -rw-r--r-- | arch/arm/mm/copypage-xscale.c | 6 | ||||
| -rw-r--r-- | include/asm-arm/cacheflush.h | 2 |
4 files changed, 18 insertions, 0 deletions
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 408b05ae6b9b..ded0e96d069d 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <asm/page.h> | 19 | #include <asm/page.h> |
| 20 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
| 21 | #include <asm/tlbflush.h> | 21 | #include <asm/tlbflush.h> |
| 22 | #include <asm/cacheflush.h> | ||
| 22 | 23 | ||
| 23 | #include "mm.h" | 24 | #include "mm.h" |
| 24 | 25 | ||
| @@ -69,6 +70,11 @@ mc_copy_user_page(void *from, void *to) | |||
| 69 | 70 | ||
| 70 | void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) | 71 | void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) |
| 71 | { | 72 | { |
| 73 | struct page *page = virt_to_page(kfrom); | ||
| 74 | |||
| 75 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) | ||
| 76 | __flush_dcache_page(page_mapping(page), page); | ||
| 77 | |||
| 72 | spin_lock(&minicache_lock); | 78 | spin_lock(&minicache_lock); |
| 73 | 79 | ||
| 74 | set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); | 80 | set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); |
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 865777dec161..3adb79257f43 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
| @@ -53,6 +53,10 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo | |||
| 53 | { | 53 | { |
| 54 | unsigned int offset = CACHE_COLOUR(vaddr); | 54 | unsigned int offset = CACHE_COLOUR(vaddr); |
| 55 | unsigned long from, to; | 55 | unsigned long from, to; |
| 56 | struct page *page = virt_to_page(kfrom); | ||
| 57 | |||
| 58 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) | ||
| 59 | __flush_dcache_page(page_mapping(page), page); | ||
| 56 | 60 | ||
| 57 | /* | 61 | /* |
| 58 | * Discard data in the kernel mapping for the new page. | 62 | * Discard data in the kernel mapping for the new page. |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index aea5da723596..2e455f82a4d5 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <asm/page.h> | 19 | #include <asm/page.h> |
| 20 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
| 21 | #include <asm/tlbflush.h> | 21 | #include <asm/tlbflush.h> |
| 22 | #include <asm/cacheflush.h> | ||
| 22 | 23 | ||
| 23 | #include "mm.h" | 24 | #include "mm.h" |
| 24 | 25 | ||
| @@ -91,6 +92,11 @@ mc_copy_user_page(void *from, void *to) | |||
| 91 | 92 | ||
| 92 | void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) | 93 | void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) |
| 93 | { | 94 | { |
| 95 | struct page *page = virt_to_page(kfrom); | ||
| 96 | |||
| 97 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) | ||
| 98 | __flush_dcache_page(page_mapping(page), page); | ||
| 99 | |||
| 94 | spin_lock(&minicache_lock); | 100 | spin_lock(&minicache_lock); |
| 95 | 101 | ||
| 96 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); | 102 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); |
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h index 378a3a2ce8d9..d51049522cd0 100644 --- a/include/asm-arm/cacheflush.h +++ b/include/asm-arm/cacheflush.h | |||
| @@ -355,6 +355,8 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
| 355 | */ | 355 | */ |
| 356 | extern void flush_dcache_page(struct page *); | 356 | extern void flush_dcache_page(struct page *); |
| 357 | 357 | ||
| 358 | extern void __flush_dcache_page(struct address_space *mapping, struct page *page); | ||
| 359 | |||
| 358 | #define flush_dcache_mmap_lock(mapping) \ | 360 | #define flush_dcache_mmap_lock(mapping) \ |
| 359 | write_lock_irq(&(mapping)->tree_lock) | 361 | write_lock_irq(&(mapping)->tree_lock) |
| 360 | #define flush_dcache_mmap_unlock(mapping) \ | 362 | #define flush_dcache_mmap_unlock(mapping) \ |
