author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-05-24 20:12:00 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-05-25 11:39:16 -0400
commit    267239116987d64850ad2037d8e0f3071dc3b5ce
tree      142595897f7fc7bb673b791891dcc2fab31f6e91    /mm/memory.c
parent    1c395176962176660bb108f90e97e1686cfe0d85
mm, powerpc: move the RCU page-table freeing into generic code
In case other architectures require RCU-freed page tables to implement
gup_fast() and software-filled hashes and similar things, provide the
means to do so by moving the logic into generic code.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Requested-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
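The hunk below uses struct mmu_table_batch, MAX_TABLE_BATCH, and a
per-architecture __tlb_remove_table() hook, all of which this commit
defines outside mm/memory.c (the diffstat below is limited to this one
file). As orientation, here is a rough sketch of the shape of those
declarations, inferred from how the code below uses them rather than
quoted from the patch:

/*
 * Sketch only, not quoted from this commit; assumes <linux/rcupdate.h>
 * for struct rcu_head and <asm/page.h> for PAGE_SIZE.
 */
struct mmu_table_batch {
	struct rcu_head	rcu;		/* deferral via call_rcu_sched() */
	unsigned int	nr;		/* used slots in tables[] */
	void		*tables[0];	/* page-table pages to free */
};

/* A batch occupies one page: the header plus as many pointers as fit. */
#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

/* Supplied by each architecture that opts into RCU table freeing. */
extern void __tlb_remove_table(void *table);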
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 19b2d44de9f0..a77fd23ee68a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -193,6 +193,83 @@ static void check_sync_rss_stat(struct task_struct *task)
 
 #endif
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely on
+	 * IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	tlb->need_flush = 1;
+
+	/*
+	 * When there's less than two users of this mm there cannot be a
+	 * concurrent page-table walk.
+	 */
+	if (atomic_read(&tlb->mm->mm_users) < 2) {
+		__tlb_remove_table(table);
+		return;
+	}
+
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_table_flush(tlb);
+}
+
+#endif
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none. Usually (but
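Why the two flavours of deferral in the hunk above are both sufficient:
a lockless software walker of the gup_fast() kind keeps IRQs disabled
for the whole walk, so it is held off either by the RCU-sched grace
period behind call_rcu_sched() or, on the allocation-failure path, by
the IPI broadcast in tlb_remove_table_one(). A minimal, hypothetical
sketch of such a reader follows; it is not part of this patch, and the
function name and body are invented for illustration:

/*
 * Hypothetical reader-side sketch, not from this commit. With IRQs off,
 * this CPU can neither take the tlb_remove_table_smp_sync() IPI nor pass
 * through an RCU-sched quiescent state, so any page table observed
 * during the walk cannot be freed until local_irq_restore() below.
 */
static int lockless_walk(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;
	int found = 0;

	local_irq_save(flags);
	/* ... dereference pgd/pud/pmd/pte entries for @addr here ... */
	local_irq_restore(flags);

	return found;
}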