aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorGavin Shan <shangw@linux.vnet.ibm.com>2012-10-08 19:29:26 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-09 03:22:23 -0400
commite0f3c3f78da29b114e7c1c68019036559f715948 (patch)
tree0f052b6e1708bbcafc0097d328d5013b161542e2 /mm
parent21a92735f660eaecf69a6f2e777f18463760ec32 (diff)
mm/mmu_notifier: init notifier if necessary
While registering MMU notifier, new instance of MMU notifier_mm will be allocated and later free'd if current mm_struct's MMU notifier_mm has been initialized. That causes some overhead. The patch tries to eliminate that. Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com> Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Avi Kivity <avi@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Cc: Sagi Grimberg <sagig@mellanox.co.il> Cc: Haggai Eran <haggaie@mellanox.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/mmu_notifier.c22
1 files changed, 11 insertions, 11 deletions
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 35ff447d8d1..947df83dccb 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -207,22 +207,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
207 */ 207 */
208 BUG_ON(!srcu.per_cpu_ref); 208 BUG_ON(!srcu.per_cpu_ref);
209 209
210 ret = -ENOMEM;
211 mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
212 if (unlikely(!mmu_notifier_mm))
213 goto out;
214
215 if (take_mmap_sem) 210 if (take_mmap_sem)
216 down_write(&mm->mmap_sem); 211 down_write(&mm->mmap_sem);
217 ret = mm_take_all_locks(mm); 212 ret = mm_take_all_locks(mm);
218 if (unlikely(ret)) 213 if (unlikely(ret))
219 goto out_cleanup; 214 goto out;
220 215
221 if (!mm_has_notifiers(mm)) { 216 if (!mm_has_notifiers(mm)) {
217 mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
218 GFP_KERNEL);
219 if (unlikely(!mmu_notifier_mm)) {
220 ret = -ENOMEM;
221 goto out_of_mem;
222 }
222 INIT_HLIST_HEAD(&mmu_notifier_mm->list); 223 INIT_HLIST_HEAD(&mmu_notifier_mm->list);
223 spin_lock_init(&mmu_notifier_mm->lock); 224 spin_lock_init(&mmu_notifier_mm->lock);
225
224 mm->mmu_notifier_mm = mmu_notifier_mm; 226 mm->mmu_notifier_mm = mmu_notifier_mm;
225 mmu_notifier_mm = NULL;
226 } 227 }
227 atomic_inc(&mm->mm_count); 228 atomic_inc(&mm->mm_count);
228 229
@@ -238,13 +239,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
238 hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list); 239 hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
239 spin_unlock(&mm->mmu_notifier_mm->lock); 240 spin_unlock(&mm->mmu_notifier_mm->lock);
240 241
242out_of_mem:
241 mm_drop_all_locks(mm); 243 mm_drop_all_locks(mm);
242out_cleanup: 244out:
243 if (take_mmap_sem) 245 if (take_mmap_sem)
244 up_write(&mm->mmap_sem); 246 up_write(&mm->mmap_sem);
245 /* kfree() does nothing if mmu_notifier_mm is NULL */ 247
246 kfree(mmu_notifier_mm);
247out:
248 BUG_ON(atomic_read(&mm->mm_users) <= 0); 248 BUG_ON(atomic_read(&mm->mm_users) <= 0);
249 return ret; 249 return ret;
250} 250}