author    Richard Purdie <rpurdie@rpsys.net>          2006-12-30 10:08:50 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2006-12-30 12:05:08 -0500
commit    1c9d3df5e88ad7db23f5b22f4341c39722a904a4 (patch)
tree      dbabefd52a5f8a5f35216bda33f29e4b9b398569 /arch/arm
parent    b0b1d60a64054697ef828e0565f006cc0f823590 (diff)
[ARM] 4078/1: Fix ARM copypage cache coherency problems
If PG_dcache_dirty is set for a page, we need to flush the source page
before performing any copypage operation using a different virtual address.

This fixes the copypage implementations for XScale, StrongARM and ARMv6.
This patch fixes segmentation faults seen in the dynamic linker under the
usage patterns in glibc 2.4/2.5.

Signed-off-by: Richard Purdie <rpurdie@rpsys.net>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
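The same guard is open-coded at the top of each affected copy_user_page
implementation below. As an illustrative sketch only (the helper name
flush_src_page_if_dirty is not part of the patch; the patch repeats the
check in each file), the shared pattern is:

	/*
	 * Illustrative sketch of the check this patch adds: if the source
	 * page still has dirty cache lines from its kernel mapping, write
	 * them back before the page is copied through a different (aliased)
	 * virtual address, so the copy does not read stale data.
	 */
	static inline void flush_src_page_if_dirty(const void *kfrom)
	{
		struct page *page = virt_to_page(kfrom);

		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_dcache_page(page_mapping(page), page);
	}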
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mm/copypage-v4mc.c   | 6 ++++++
-rw-r--r--  arch/arm/mm/copypage-v6.c     | 4 ++++
-rw-r--r--  arch/arm/mm/copypage-xscale.c | 6 ++++++
3 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 408b05ae6b9b..ded0e96d069d 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -19,6 +19,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #include "mm.h"
 
@@ -69,6 +70,11 @@ mc_copy_user_page(void *from, void *to)
 
 void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 865777dec161..3adb79257f43 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -53,6 +53,10 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long from, to;
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
 
 	/*
 	 * Discard data in the kernel mapping for the new page.
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index aea5da723596..2e455f82a4d5 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -19,6 +19,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #include "mm.h"
 
@@ -91,6 +92,11 @@ mc_copy_user_page(void *from, void *to)
 
 void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);