author     Peter Zijlstra <peterz@infradead.org>  2014-08-06 19:08:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 21:01:22 -0400
commit     b972216e27d1c853eced33f8638926636c606341
tree       163addfd60315832e514fd7b2aa23cadd6fb6bf9 /mm
parent     1d352bfd41e8219cdf9bebe79677700bdc38b540
mmu_notifier: add call_srcu and sync function for listener to delay call and sync
When kernel device drivers or subsystems want to bind their lifespan to the lifespan of the mm_struct, they usually use one of the following methods:

1. Manually calling a function in the interested kernel module.  The function call needs to be placed in mmput.  This method was rejected by several kernel maintainers.

2. Registering to the mmu notifier release mechanism.

The problem with the latter approach is that the mmu_notifier_release callback is called from __mmu_notifier_release (called from exit_mmap).  That function iterates over the list of mmu notifiers and doesn't expect the release callback function to remove itself from the list.  Therefore, the callback function in the kernel module can't release the mmu_notifier object, which is actually the kernel module's object itself.  As a result, the destruction of the kernel module's object must be done in a delayed fashion.

This patch adds support for this delayed callback by adding a new function, mmu_notifier_call_srcu, that receives a function pointer and invokes it through call_srcu.  In that function, the kernel module releases its object.  To use mmu_notifier_call_srcu, the calling module must first call another new function, mmu_notifier_unregister_no_release, which, as its name implies, unregisters a notifier without calling its release callback.

This patch also adds a function, mmu_notifier_synchronize, that calls srcu_barrier so those kernel modules can synchronize with mmu_notifier.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
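For illustration only (not part of the patch): a minimal sketch of how a driver that embeds a struct mmu_notifier in its own per-mm object could use the new API from its ->release callback.  The names my_driver_ctx, my_ctx_free, my_release and my_ops are hypothetical; the mmu_notifier_* calls are the ones added or relied on by this patch.

#include <linux/mmu_notifier.h>
#include <linux/slab.h>

/* Hypothetical per-mm driver object whose lifetime follows the mm_struct. */
struct my_driver_ctx {
	struct mmu_notifier mn;		/* embedded notifier */
	struct rcu_head rcu;		/* handed to mmu_notifier_call_srcu() */
	/* ... driver state ... */
};

/* Runs later, from SRCU callback context: by then it is safe to free. */
static void my_ctx_free(struct rcu_head *rcu)
{
	struct my_driver_ctx *ctx = container_of(rcu, struct my_driver_ctx, rcu);

	kfree(ctx);
}

/* ->release callback, invoked from __mmu_notifier_release (exit_mmap). */
static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_driver_ctx *ctx = container_of(mn, struct my_driver_ctx, mn);

	/* Drop the notifier without re-invoking ->release or syncing SRCU. */
	mmu_notifier_unregister_no_release(&ctx->mn, mm);

	/* Defer freeing the object until all SRCU readers have finished. */
	mmu_notifier_call_srcu(&ctx->rcu, my_ctx_free);
}

static const struct mmu_notifier_ops my_ops = {
	.release = my_release,
};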
Diffstat (limited to 'mm')
-rw-r--r--  mm/mmu_notifier.c  40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 41cefdf0aadd..950813b1eb36 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -23,6 +23,25 @@
 static struct srcu_struct srcu;
 
 /*
+ * This function allows mmu_notifier::release callback to delay a call to
+ * a function that will free appropriate resources. The function must be
+ * quick and must not block.
+ */
+void mmu_notifier_call_srcu(struct rcu_head *rcu,
+			    void (*func)(struct rcu_head *rcu))
+{
+	call_srcu(&srcu, rcu, func);
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
+
+void mmu_notifier_synchronize(void)
+{
+	/* Wait for any running method to finish. */
+	srcu_barrier(&srcu);
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
+
+/*
  * This function can't run concurrently against mmu_notifier_register
  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
  * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
@@ -53,7 +72,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
 		 */
 		if (mn->ops->release)
 			mn->ops->release(mn, mm);
-	srcu_read_unlock(&srcu, id);
 
 	spin_lock(&mm->mmu_notifier_mm->lock);
 	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
@@ -69,6 +87,7 @@ void __mmu_notifier_release(struct mm_struct *mm)
 		hlist_del_init_rcu(&mn->hlist);
 	}
 	spin_unlock(&mm->mmu_notifier_mm->lock);
+	srcu_read_unlock(&srcu, id);
 
 	/*
 	 * synchronize_srcu here prevents mmu_notifier_release from returning to
@@ -325,6 +344,25 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
 
+/*
+ * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
+ */
+void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
+					struct mm_struct *mm)
+{
+	spin_lock(&mm->mmu_notifier_mm->lock);
+	/*
+	 * Can not use list_del_rcu() since __mmu_notifier_release
+	 * can delete it before we hold the lock.
+	 */
+	hlist_del_init_rcu(&mn->hlist);
+	spin_unlock(&mm->mmu_notifier_mm->lock);
+
+	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+	mmdrop(mm);
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
+
 static int __init mmu_notifier_init(void)
 {
 	return init_srcu_struct(&srcu);
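
As a usage note (again not part of this patch): a module that queues frees through mmu_notifier_call_srcu() would typically also call the new mmu_notifier_synchronize() on its unload path, so that every pending SRCU callback has run before the module text and data disappear.  The my_driver_exit name below is hypothetical.

#include <linux/module.h>
#include <linux/mmu_notifier.h>

static void __exit my_driver_exit(void)
{
	/*
	 * The srcu_barrier() inside mmu_notifier_synchronize() waits for all
	 * callbacks queued via mmu_notifier_call_srcu() to complete, so no
	 * delayed free can run after the module is gone.
	 */
	mmu_notifier_synchronize();

	/* ... remaining driver teardown ... */
}
module_exit(my_driver_exit);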