 arch/alpha/kernel/smp.c                  |  2 +-
 arch/arc/kernel/smp.c                    |  2 +-
 arch/arm/kernel/smp.c                    |  2 +-
 arch/arm64/kernel/smp.c                  |  2 +-
 arch/blackfin/mach-common/smp.c          |  2 +-
 arch/hexagon/kernel/smp.c                |  2 +-
 arch/ia64/kernel/setup.c                 |  2 +-
 arch/m32r/kernel/setup.c                 |  2 +-
 arch/metag/kernel/smp.c                  |  2 +-
 arch/mips/kernel/traps.c                 |  2 +-
 arch/mn10300/kernel/smp.c                |  2 +-
 arch/parisc/kernel/smp.c                 |  2 +-
 arch/powerpc/kernel/smp.c                |  2 +-
 arch/s390/kernel/processor.c             |  2 +-
 arch/score/kernel/traps.c                |  2 +-
 arch/sh/kernel/smp.c                     |  2 +-
 arch/sparc/kernel/leon_smp.c             |  2 +-
 arch/sparc/kernel/smp_64.c               |  2 +-
 arch/sparc/kernel/sun4d_smp.c            |  2 +-
 arch/sparc/kernel/sun4m_smp.c            |  2 +-
 arch/sparc/kernel/traps_32.c             |  2 +-
 arch/sparc/kernel/traps_64.c             |  2 +-
 arch/tile/kernel/smpboot.c               |  2 +-
 arch/x86/kernel/cpu/common.c             |  4 ++--
 arch/xtensa/kernel/smp.c                 |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_process.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c  |  2 +-
 drivers/infiniband/hw/hfi1/file_ops.c    |  2 +-
 fs/proc/base.c                           |  4 ++--
 fs/userfaultfd.c                         |  2 +-
 include/linux/sched.h                    | 22 ++++++++++++++++++++++
 kernel/exit.c                            |  2 +-
 kernel/futex.c                           |  2 +-
 kernel/sched/core.c                      |  4 ++--
 mm/khugepaged.c                          |  2 +-
 mm/ksm.c                                 |  2 +-
 mm/mmu_context.c                         |  2 +-
 mm/mmu_notifier.c                        |  2 +-
 mm/oom_kill.c                            |  4 ++--
 virt/kvm/kvm_main.c                      |  2 +-
 40 files changed, 65 insertions(+), 43 deletions(-)
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 46bf263c3153..acb4b146a607 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -144,7 +144,7 @@ smp_callin(void)
 	alpha_mv.smp_callin();
 
 	/* All kernel threads share the same mm context. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	/* inform the notifiers about the new cpu */
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 2afbafadb6ab..695624181682 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -140,7 +140,7 @@ void start_kernel_secondary(void)
 	setup_processor();
 
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7dd14e8395e6..c6514ce0fcbc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -371,7 +371,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * reference and switch to it.
 	 */
 	cpu = smp_processor_id();
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a8ec5da530af..827d52d78b67 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -222,7 +222,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 
 	/*
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 23c4ef5f8bdc..bc5617ef7128 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -308,7 +308,7 @@ void secondary_start_kernel(void)
 
 	/* Attach the new idle task to the global mm. */
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 
 	preempt_disable();
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 983bae7d2665..c02a6455839e 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -162,7 +162,7 @@ void start_secondary(void)
 	);
 
 	/* Set the memory struct */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	cpu = smp_processor_id();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c483ece3eb84..d68322966f33 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -994,7 +994,7 @@ cpu_init (void)
 	 */
 	ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
 			| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 136c69f1fb8a..b18bc0bd6544 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -403,7 +403,7 @@ void __init cpu_init (void)
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
 	/* Set up and load the per-CPU TSS and LDT */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	if (current->mm)
 		BUG();
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index bad13232de51..af9cff547a19 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -345,7 +345,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * reference and switch to it.
 	 */
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	enter_lazy_tlb(mm, current);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cb479be31a50..49c6df20672a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2232,7 +2232,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	if (!cpu_data[cpu].asid_cache)
 		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 426173c4b0b9..e65b5cc2fa67 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -589,7 +589,7 @@ static void __init smp_cpu_init(void)
 	}
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 75dab2871346..67b452b41ff6 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -279,7 +279,7 @@ smp_cpu_init(int cpunum)
 	set_cpu_online(cpunum, true);
 
 	/* Initialise the idle task for this CPU */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 893bd7f79be6..573fb3a461b5 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -707,7 +707,7 @@ void start_secondary(void *unused)
 	unsigned int cpu = smp_processor_id();
 	int i, base;
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	smp_store_cpu_info(cpu);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 21004aaac69b..bc2b60dcb178 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -73,7 +73,7 @@ void cpu_init(void)
 	get_cpu_id(id);
 	if (machine_has_cpu_mhz)
 		update_cpu_mhz(NULL);
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 2b22bcf02c27..569ac02f68df 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -336,7 +336,7 @@ void __init trap_init(void)
 	set_except_vector(18, handle_dbe);
 	flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	cpu_cache_init();
 }
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 38e7860845db..ee379c699c08 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -178,7 +178,7 @@ asmlinkage void start_secondary(void)
 	struct mm_struct *mm = &init_mm;
 
 	enable_mmu();
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
 #ifdef CONFIG_MMU
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 71e16f2241c2..b99d33797e1d 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -93,7 +93,7 @@ void leon_cpu_pre_online(void *arg)
93 : "memory" /* paranoid */); 93 : "memory" /* paranoid */);
94 94
95 /* Attach to the address space of init_task. */ 95 /* Attach to the address space of init_task. */
96 atomic_inc(&init_mm.mm_count); 96 mmgrab(&init_mm);
97 current->active_mm = &init_mm; 97 current->active_mm = &init_mm;
98 98
99 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) 99 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 90a02cb64e20..8e3e13924594 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -122,7 +122,7 @@ void smp_callin(void)
 	current_thread_info()->new_child = 0;
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	/* inform the notifiers about the new cpu */
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 9d98e5002a09..7b55c50eabe5 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -93,7 +93,7 @@ void sun4d_cpu_pre_online(void *arg)
 	show_leds(cpuid);
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	local_ops->cache_all();
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 278c40abce82..633c4cf6fdb0 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -59,7 +59,7 @@ void sun4m_cpu_pre_online(void *arg)
59 : "memory" /* paranoid */); 59 : "memory" /* paranoid */);
60 60
61 /* Attach to the address space of init_task. */ 61 /* Attach to the address space of init_task. */
62 atomic_inc(&init_mm.mm_count); 62 mmgrab(&init_mm);
63 current->active_mm = &init_mm; 63 current->active_mm = &init_mm;
64 64
65 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) 65 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 4f21df7d4f13..ecddac5a4c96 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -448,7 +448,7 @@ void trap_init(void)
 		thread_info_offsets_are_bolixed_pete();
 
 	/* Attach to the address space of init_task. */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 
 	/* NOTE: Other cpus have this done as they are started
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index dfc97a47c9a0..e022d7b00390 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2837,6 +2837,6 @@ void __init trap_init(void)
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 }
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 6c0abaacec33..53ce940a5016 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -160,7 +160,7 @@ static void start_secondary(void)
 	__this_cpu_write(current_asid, min_asid);
 
 	/* Set up this thread as another owner of the init_mm */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
 	if (current->mm)
 		BUG();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f07005e6f461..c64ca5929cb5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1510,7 +1510,7 @@ void cpu_init(void)
 	for (i = 0; i <= IO_BITMAP_LONGS; i++)
 		t->io_bitmap[i] = ~0UL;
 
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	me->active_mm = &init_mm;
 	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
@@ -1561,7 +1561,7 @@ void cpu_init(void)
 	/*
 	 * Set up and load the per-CPU TSS and LDT
 	 */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	curr->active_mm = &init_mm;
 	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index fc4ad21a5ed4..9bf5cea3bae4 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -136,7 +136,7 @@ void secondary_start_kernel(void)
 	/* All kernel threads share the same mm context. */
 
 	atomic_inc(&mm->mm_users);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	enter_lazy_tlb(mm, current);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ef7c8de7060e..ca5f2aa7232d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -262,7 +262,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 	 * and because the mmu_notifier_unregister function also drop
 	 * mm_count we need to take an extra count here.
 	 */
-	atomic_inc(&p->mm->mm_count);
+	mmgrab(p->mm);
 	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
 	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 6a8fa085b74e..65802d93fdc1 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -334,7 +334,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 	mm->i915 = to_i915(obj->base.dev);
 
 	mm->mm = current->mm;
-	atomic_inc(&current->mm->mm_count);
+	mmgrab(current->mm);
 
 	mm->mn = NULL;
 
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index f46033984d07..3b19c16a9e45 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -185,7 +185,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
 	if (fd) {
 		fd->rec_cpu_num = -1; /* no cpu affinity by default */
 		fd->mm = current->mm;
-		atomic_inc(&fd->mm->mm_count);
+		mmgrab(fd->mm);
 		fp->private_data = fd;
 	} else {
 		fp->private_data = NULL;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b8f06273353e..5d51a188871b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -766,7 +766,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
 
 	if (!IS_ERR_OR_NULL(mm)) {
 		/* ensure this mm_struct can't be freed */
-		atomic_inc(&mm->mm_count);
+		mmgrab(mm);
 		/* but do not pin its memory */
 		mmput(mm);
 	}
@@ -1064,7 +1064,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 	if (p) {
 		if (atomic_read(&p->mm->mm_users) > 1) {
 			mm = p->mm;
-			atomic_inc(&mm->mm_count);
+			mmgrab(mm);
 		}
 		task_unlock(p);
 	}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e6e0a619cb3a..3c421d06a18e 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1847,7 +1847,7 @@ static struct file *userfaultfd_file_create(int flags)
 	ctx->released = false;
 	ctx->mm = current->mm;
 	/* prevent the mm struct to be freed */
-	atomic_inc(&ctx->mm->mm_count);
+	mmgrab(ctx->mm);
 
 	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
 				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 451e241f32c5..7cfa5546c840 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2904,6 +2904,28 @@ static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
  */
 extern struct mm_struct * mm_alloc(void);
 
+/**
+ * mmgrab() - Pin a &struct mm_struct.
+ * @mm: The &struct mm_struct to pin.
+ *
+ * Make sure that @mm will not get freed even after the owning task
+ * exits. This doesn't guarantee that the associated address space
+ * will still exist later on and mmget_not_zero() has to be used before
+ * accessing it.
+ *
+ * This is a preferred way to pin @mm for a longer/unbounded amount
+ * of time.
+ *
+ * Use mmdrop() to release the reference acquired by mmgrab().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmgrab(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_count);
+}
+
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
 static inline void mmdrop(struct mm_struct *mm)
diff --git a/kernel/exit.c b/kernel/exit.c
index 90b09ca35c84..8a768a3672a5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -539,7 +539,7 @@ static void exit_mm(void)
 		__set_current_state(TASK_RUNNING);
 		down_read(&mm->mmap_sem);
 	}
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
 	task_lock(current);
diff --git a/kernel/futex.c b/kernel/futex.c
index cdf365036141..b687cb22301c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -338,7 +338,7 @@ static inline bool should_fail_futex(bool fshared)
 
 static inline void futex_get_mm(union futex_key *key)
 {
-	atomic_inc(&key->private.mm->mm_count);
+	mmgrab(key->private.mm);
 	/*
 	 * Ensure futex_get_mm() implies a full barrier such that
 	 * get_futex_key() implies a full barrier. This is relied upon
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e1ae6ac15eac..6ea1925ac5c0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2847,7 +2847,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 
 	if (!mm) {
 		next->active_mm = oldmm;
-		atomic_inc(&oldmm->mm_count);
+		mmgrab(oldmm);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm_irqs_off(oldmm, mm, next);
@@ -6098,7 +6098,7 @@ void __init sched_init(void)
 	/*
 	 * The boot idle thread does lazy MMU switching as well:
 	 */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	enter_lazy_tlb(&init_mm, current);
 
 	/*
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 77ae3239c3de..34bce5c308e3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -420,7 +420,7 @@ int __khugepaged_enter(struct mm_struct *mm)
 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 	spin_unlock(&khugepaged_mm_lock);
 
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	if (wakeup)
 		wake_up_interruptible(&khugepaged_wait);
 
diff --git a/mm/ksm.c b/mm/ksm.c
index cf211c01ceac..520e4c37fec7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1854,7 +1854,7 @@ int __ksm_enter(struct mm_struct *mm)
 	spin_unlock(&ksm_mmlist_lock);
 
 	set_bit(MMF_VM_MERGEABLE, &mm->flags);
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 
 	if (needs_wakeup)
 		wake_up_interruptible(&ksm_thread_wait);
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 6f4d27c5bb32..daf67bb02b4a 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -25,7 +25,7 @@ void use_mm(struct mm_struct *mm)
 	task_lock(tsk);
 	active_mm = tsk->active_mm;
 	if (active_mm != mm) {
-		atomic_inc(&mm->mm_count);
+		mmgrab(mm);
 		tsk->active_mm = mm;
 	}
 	tsk->mm = mm;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index f4259e496f83..32bc9f2ff7eb 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -275,7 +275,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 		mm->mmu_notifier_mm = mmu_notifier_mm;
 		mmu_notifier_mm = NULL;
 	}
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 
 	/*
 	 * Serialize the update against mmu_notifier_unregister. A
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 578321f1c070..51c091849dcb 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -653,7 +653,7 @@ static void mark_oom_victim(struct task_struct *tsk)
 
 	/* oom_mm is bound to the signal struct life time. */
 	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
-		atomic_inc(&tsk->signal->oom_mm->mm_count);
+		mmgrab(tsk->signal->oom_mm);
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
@@ -870,7 +870,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 
 	/* Get a reference to safely compare mm after task_unlock(victim) */
 	mm = victim->mm;
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	/*
 	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
 	 * the OOM victim from depleting the memory reserves from the user
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5b0dd4a9b2cb..35f71409d9ee 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -611,7 +611,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&kvm->mmu_lock);
-	atomic_inc(&current->mm->mm_count);
+	mmgrab(current->mm);
 	kvm->mm = current->mm;
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
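
Usage note (not part of the patch): the sketch below illustrates the pattern the new mmgrab() helper is intended for, pinning the mm_struct itself without pinning its address space, with mmdrop() releasing the reference. The deferred_work structure and its init/run/fini functions are hypothetical names chosen for illustration; mmgrab(), mmdrop(), mmget_not_zero() and mmput() are the kernel interfaces referenced by the kernel-doc added in include/linux/sched.h above, and the sketch assumes the caller has a user address space (current->mm != NULL).

#include <linux/sched.h>

/* Hypothetical consumer that holds on to an mm beyond the caller's lifetime. */
struct deferred_work {
	struct mm_struct *mm;
};

static void deferred_work_init(struct deferred_work *w)
{
	w->mm = current->mm;
	/* Keep the mm_struct allocated; its address space may still go away. */
	mmgrab(w->mm);
}

static void deferred_work_run(struct deferred_work *w)
{
	/* Pin the address space (mm_users) only for the duration of the access. */
	if (mmget_not_zero(w->mm)) {
		/* ... access w->mm's address space here ... */
		mmput(w->mm);
	}
}

static void deferred_work_fini(struct deferred_work *w)
{
	/* Drop the reference taken by mmgrab(). */
	mmdrop(w->mm);
}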