author		Russell King <rmk+kernel@arm.linux.org.uk>	2009-10-05 10:34:22 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-10-05 10:42:16 -0400
commit		2725898fc9bb2121ac0fb1b5e4faf4fc09014729 (patch)
tree		f2397446518cdfd8dc42a1d3808ed77005f427cc /arch/arm/mm
parent		f00a75c094c340c4e7435665816c3273c870e849 (diff)
ARM: Flush user mapping on VIVT processors when copying a page
Steven Walter <stevenrwalter@gmail.com> writes:

> I've been tracking down an instance of userspace data corruption,
> and I believe I have found a window during fork where data can be
> lost. The corruption is occurring on an ARMv5 system with VIVT
> caches. Here's the scenario in question. Thread A is forking,
> Thread B is running in userspace:
>
> Thread A: flush_cache_mm() (dup_mmap)
> Thread B: writes to a page in the above mm
> Thread A: pte_wrprotect() the above page (copy_one_pte)
> Thread B: writes to the same page again
>
> During thread B's second write, he'll take a fault and enter the
> do_wp_page() case. We'll end up calling copy_page(), which notably
> uses the kernel virtual addresses for the old and new pages. This
> means that the new page does not necessarily have the data from the
> first write. Now there are two conflicting copies of the same
> cache-line in dcache. If the userspace cache-line flushes before
> the kernel cache-line, we lose the changes made during the first
> write. do_wp_page does call flush_dcache_page on the newly-copied
> page, but there's still a window where the CPU could flush the
> userspace cache-line before then.

Resolve this by flushing the user mapping before copying the page
on processors with a writeback VIVT cache.

Note: this does have a performance impact, and so needs further
consideration before being merged - can we optimize out some of the
cache flushes if, eg, we know that the page isn't yet mapped?

Thread: <e06498070903061426o5875ad13hc6328aa0d3f08ed7@mail.gmail.com>

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
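A minimal sketch of the pattern the fix applies, for readers skimming the
hunks below. The structure mirrors the per-CPU implementations in the diff;
example_copy_user_highpage is a hypothetical stand-in, and the vaddr/vma
parameters are taken from the hunks themselves:

#include <linux/highmem.h>	/* kmap_atomic()/kunmap_atomic() */
#include <linux/mm.h>		/* struct page, page_to_pfn() */
#include <asm/cacheflush.h>	/* flush_cache_page() */

/* Hypothetical stand-in for the per-CPU routines patched below. */
void example_copy_user_highpage(struct page *to, struct page *from,
				unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);
	/*
	 * The fix: write back any dirty cache lines held by the user
	 * mapping of 'from' before reading it through the kernel alias,
	 * so the copy cannot miss thread B's first write.
	 */
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	copy_page(kto, kfrom);		/* copy via the kernel mapping */
	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}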
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/copypage-feroceon.c	1
-rw-r--r--	arch/arm/mm/copypage-v4wb.c	1
-rw-r--r--	arch/arm/mm/copypage-xsc3.c	1
3 files changed, 3 insertions, 0 deletions
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index e2e8d29c548b..5eb4fd93893d 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -74,6 +74,7 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 
 	kto = kmap_atomic(to, KM_USER0);
 	kfrom = kmap_atomic(from, KM_USER1);
+	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	feroceon_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom, KM_USER1);
 	kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index e9920f68b76b..7c2eb55cd4a9 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -54,6 +54,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 
 	kto = kmap_atomic(to, KM_USER0);
 	kfrom = kmap_atomic(from, KM_USER1);
+	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	v4wb_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom, KM_USER1);
 	kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 18ae05d5829c..747ad4140fc7 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -77,6 +77,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	kto = kmap_atomic(to, KM_USER0);
 	kfrom = kmap_atomic(from, KM_USER1);
+	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	xsc3_mc_copy_user_page(kto, kfrom);
 	kunmap_atomic(kfrom, KM_USER1);
 	kunmap_atomic(kto, KM_USER0);
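For context, these routines are not called directly: each copypage-*.c file
registers its implementation in a per-CPU function table that the generic
copy_user_highpage() dispatches through, which is why each file needs the
same one-line fix. A sketch of the feroceon wiring, modelled on this era's
arch/arm/mm/copypage-feroceon.c (an assumption for illustration, not part
of this patch):

/* Assumed wiring, modelled on arch/arm/mm/copypage-feroceon.c. */
struct cpu_user_fns feroceon_user_fns __initdata = {
	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
	.cpu_copy_user_highpage	 = feroceon_copy_user_highpage,
};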