author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-27 18:08:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-27 18:08:12 -0400
commit		ac6c9e2bed093c4b60e313674fb7aec4f264c3d4 (patch)
tree		6eccadffcc04a7b66021ab3bd114cafcb52181c6 /arch
parent		33c0022f0e687b0161a9bb84a5671df932551e3a (diff)
parent		1cf35d47712dd5dc4d62c6ce984f04ac6eab0408 (diff)
Merge branch 'safe-dirty-tlb-flush'
This merges the patch to fix possible loss of dirty bit on munmap() or
madvise(DONTNEED).

If there are concurrent writers on other CPUs that have the
unmapped/unneeded page in their TLBs, their writes to the page could
possibly get lost if a third CPU raced with the TLB flush and did a
page_mkclean() before the page was fully written.

Admittedly, if you unmap() or madvise(DONTNEED) an area _while_ another
thread is still busy writing to it, you deserve all the lost writes you
could get.  But we kernel people hold ourselves to higher quality
standards than "crazy people deserve to lose", because, well, we've
seen people do all kinds of crazy things.

So let's get it right, just because we can, and we don't have to worry
about it.

* safe-dirty-tlb-flush:
  mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts
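Every header touched below gets the same treatment: the old
tlb_flush_mmu() is split into a TLB-flushing half and a page-freeing
half, and the old name becomes a wrapper calling the two in order. A
minimal sketch of the pattern, mirroring the ARM variant in this diff
(the other architectures substitute their own flush and free
primitives):

	/* Step 1: invalidate stale TLB entries on the other CPUs first. */
	static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
	{
		tlb_flush(tlb);
	}

	/*
	 * Step 2: only after the flush is it safe to release the gathered
	 * pages; freeing them earlier would let a racing page_mkclean()
	 * miss writes still reaching the page through a stale TLB entry.
	 */
	static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
	{
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}

	/* The old entry point keeps its semantics: flush, then free. */
	static inline void tlb_flush_mmu(struct mmu_gather *tlb)
	{
		tlb_flush_mmu_tlbonly(tlb);
		tlb_flush_mmu_free(tlb);
	}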
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/tlb.h	12
-rw-r--r--	arch/ia64/include/asm/tlb.h	42
-rw-r--r--	arch/s390/include/asm/tlb.h	13
-rw-r--r--	arch/sh/include/asm/tlb.h	8
-rw-r--r--	arch/um/include/asm/tlb.h	16
5 files changed, 77 insertions(+), 14 deletions(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 0baf7f0d9394..f1a0dace3efe 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index bc5efc7c3f3f..39d64e0df1de 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val) 	((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i;
-	unsigned int nr;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 
 	if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
 	}
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c544b6f05d95..a25f09fbaf36 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
 
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 362192ed12fe..62f80d2a9df9 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 29b0301c18aa..16eb63fac57d 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -59,13 +59,25 @@ extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
 static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	init_tlb_gather(tlb);
+}
+
+static inline void
 tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
 
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-	init_tlb_gather(tlb);
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu
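The split pays off outside this arch-limited diffstat: generic code can
now flush the TLB at one point and free the gathered pages at a later
one, e.g. flushing while the page-table lock is still held and freeing
after it is dropped. A sketch of that calling pattern; the function
name and locking details are illustrative assumptions, not part of
this diff:

	/*
	 * Hypothetical caller (illustrative only): flush while the page
	 * table lock is held, so no CPU can keep writing through a stale
	 * TLB entry once the pages are about to be treated as clean or
	 * free; the actual freeing can wait until the lock is dropped.
	 */
	static void example_zap_and_free(struct mmu_gather *tlb, spinlock_t *ptl)
	{
		spin_lock(ptl);
		/* ... clear PTEs and gather the pages into tlb ... */
		tlb_flush_mmu_tlbonly(tlb);	/* flush under the lock */
		spin_unlock(ptl);
		tlb_flush_mmu_free(tlb);	/* free after unlocking */
	}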