Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 19b2d44de9f0..a77fd23ee68a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -193,6 +193,83 @@ static void check_sync_rss_stat(struct task_struct *task)
 
 #endif
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely on
+	 * IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	tlb->need_flush = 1;
+
+	/*
+	 * When there's less than two users of this mm there cannot be a
+	 * concurrent page-table walk.
+	 */
+	if (atomic_read(&tlb->mm->mm_users) < 2) {
+		__tlb_remove_table(table);
+		return;
+	}
+
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_table_flush(tlb);
+}
+
+#endif
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none. Usually (but