-rw-r--r--  arch/arm/include/asm/tlb.h  | 12
-rw-r--r--  arch/ia64/include/asm/tlb.h | 42
-rw-r--r--  arch/s390/include/asm/tlb.h | 13
-rw-r--r--  arch/sh/include/asm/tlb.h   |  8
-rw-r--r--  arch/um/include/asm/tlb.h   | 16
-rw-r--r--  mm/memory.c                 | 53
6 files changed, 111 insertions(+), 33 deletions(-)
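
Every hunk in this patch implements the same refactoring: the old tlb_flush_mmu() is split into tlb_flush_mmu_tlbonly(), which only invalidates hardware TLB entries, and tlb_flush_mmu_free(), which only frees the pages batched in the mmu_gather, with tlb_flush_mmu() kept as the composition of the two. A stand-alone toy model of that contract (for illustration only; the struct fields and printf bodies are stand-ins, not kernel code):

#include <stdio.h>

/* Toy stand-in for the kernel's mmu_gather. */
struct mmu_gather {
	int need_flush;
	int nr;		/* number of batched pages */
};

static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;
	printf("invalidate stale TLB entries\n");
}

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	printf("free %d batched pages\n", tlb->nr);
	tlb->nr = 0;
}

/* The old entry point survives as the composition of the two halves. */
static void tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

int main(void)
{
	struct mmu_gather tlb = { .need_flush = 1, .nr = 3 };
	tlb_flush_mmu(&tlb);
	return 0;
}

Compiled with any C compiler, this prints the invalidate step before the free step, which is the ordering invariant the real code relies on.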
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 0baf7f0d9394..f1a0dace3efe 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
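
The arm conversion above is the template the other architectures follow. The point of the split is lock ordering: generic code can now shoot down TLB entries while still holding the page-table lock, and defer the expensive page freeing until after the lock is dropped. A schematic sketch of that calling pattern (hypothetical helper in kernel style; the zap step is elided):

static void zap_range_sketch(struct mmu_gather *tlb, spinlock_t *ptl)
{
	spin_lock(ptl);
	/* ... clear ptes, batching the backing pages into tlb ... */
	tlb_flush_mmu_tlbonly(tlb);	/* stale TLB entries die before unlock */
	spin_unlock(ptl);
	tlb_flush_mmu_free(tlb);	/* costly freeing, outside the lock */
}

The mm/memory.c hunks at the end of this patch apply exactly this pattern in zap_pte_range().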
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index bc5efc7c3f3f..39d64e0df1de 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val) 	((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i;
-	unsigned int nr;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 
 	if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
 	}
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c544b6f05d95..a25f09fbaf36 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
 
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 362192ed12fe..62f80d2a9df9 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
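
Even where the combined flush was already a no-op, as on sh above, both halves still have to exist: zap_pte_range() in mm/memory.c now calls tlb_flush_mmu_tlbonly() and tlb_flush_mmu_free() directly, so every private asm/tlb.h must define them or the build breaks. For an architecture that does real work, the shape from the arm and s390 hunks applies (a sketch; flush_hw_tlb() and free_batched_pages() are hypothetical stand-ins, not real kernel helpers):

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_hw_tlb(tlb);		/* hypothetical: invalidate entries */
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_batched_pages(tlb);	/* hypothetical: release the batch */
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}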
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 29b0301c18aa..16eb63fac57d 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -59,13 +59,25 @@ extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long end);
 
 static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	init_tlb_gather(tlb);
+}
+
+static inline void
 tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
 
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-	init_tlb_gather(tlb);
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu
diff --git a/mm/memory.c b/mm/memory.c
index 93e332d5ed77..037b812a9531 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	if (!tlb->need_flush)
+		return;
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  * Called at the end of the shootdown operation to free up any resources
  * that were required.
@@ -1127,8 +1136,10 @@ again:
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
-				if (pte_dirty(ptent))
+				if (pte_dirty(ptent)) {
+					force_flush = 1;
 					set_page_dirty(page);
+				}
 				if (pte_young(ptent) &&
 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			force_flush = !__tlb_remove_page(tlb, page);
-			if (force_flush)
+			if (unlikely(!__tlb_remove_page(tlb, page))) {
+				force_flush = 1;
 				break;
+			}
 			continue;
 		}
 		/*
@@ -1174,18 +1186,11 @@ again:
 
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(start_pte, ptl);
 
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
+	/* Do the actual TLB flush before dropping ptl */
 	if (force_flush) {
 		unsigned long old_end;
 
-		force_flush = 0;
-
 		/*
 		 * Flush the TLB just for the previous segment,
 		 * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
 		 */
 		old_end = tlb->end;
 		tlb->end = addr;
-
-		tlb_flush_mmu(tlb);
-
+		tlb_flush_mmu_tlbonly(tlb);
 		tlb->start = addr;
 		tlb->end = old_end;
+	}
+	pte_unmap_unlock(start_pte, ptl);
+
+	/*
+	 * If we forced a TLB flush (either due to running out of
+	 * batch buffers or because we needed to flush dirty TLB
+	 * entries before releasing the ptl), free the batched
+	 * memory too. Restart if we didn't do everything.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu_free(tlb);
 
 		if (addr != end)
 			goto again;
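
The zap_pte_range() rework above is the payoff of the whole patch: a dirty pte now sets force_flush, so the TLB is flushed before pte_unmap_unlock() and no other CPU can keep writing to the page through a stale TLB entry once the lock is released; only the batched freeing, and the restart when __tlb_remove_page() ran out of room, happen after the unlock. A condensed, comment-annotated restatement of the new function tail (simplified from the hunks above, not a drop-in replacement):

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();

	if (force_flush) {			/* dirty pte seen, or batch full */
		unsigned long old_end = tlb->end;

		tlb->end = addr;		/* flush only what was zapped so far */
		tlb_flush_mmu_tlbonly(tlb);	/* still holding the ptl */
		tlb->start = addr;
		tlb->end = old_end;
	}
	pte_unmap_unlock(start_pte, ptl);	/* safe: stale entries are gone */

	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu_free(tlb);	/* free the batch outside the lock */

		if (addr != end)
			goto again;		/* zap the rest of the range */
	}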