author     Vegard Nossum <vegard.nossum@oracle.com>        2017-02-27 17:30:10 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-27 21:43:48 -0500
commit     3fce371bfac2be0396ffc1e763600e6c6b1bb52a (patch)
tree       da45267ba334ada341aa13d600848299564751ce
parent     f1f1007644ffc8051a4c11427d58b1967ae7b75a (diff)
mm: add new mmget() helper
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_users);/mmget\(\1\);/'
  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_users);/mmget\(\&\1\);/'

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/20161218123229.22952-2-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
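For illustration only (this snippet is not part of the patch, and the function
name example_walk_mm is hypothetical), the mechanical conversion simply turns
the open-coded reference count increment into a call to the new helper; the
matching mmput() on the release side is unchanged:

	/* Before: open-coded increment of mm->mm_users. */
	static void example_walk_mm(struct mm_struct *mm)
	{
		atomic_inc(&mm->mm_users);	/* pin the address space */
		/* ... walk or fault in the address space ... */
		mmput(mm);			/* drop the reference */
	}

	/* After the sed scripts above: same behaviour, via the helper. */
	static void example_walk_mm(struct mm_struct *mm)
	{
		mmget(mm);			/* pin the address space */
		/* ... walk or fault in the address space ... */
		mmput(mm);
	}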
-rw-r--r--   arch/arc/kernel/smp.c              2
-rw-r--r--   arch/blackfin/mach-common/smp.c    2
-rw-r--r--   arch/frv/mm/mmu-context.c          2
-rw-r--r--   arch/metag/kernel/smp.c            2
-rw-r--r--   arch/sh/kernel/smp.c               2
-rw-r--r--   arch/xtensa/kernel/smp.c           2
-rw-r--r--   include/linux/sched.h             21
-rw-r--r--   kernel/fork.c                      4
-rw-r--r--   mm/swapfile.c                     10
-rw-r--r--   virt/kvm/async_pf.c                2
10 files changed, 35 insertions, 14 deletions
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 695624181682..b8e8d3944481 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -139,7 +139,7 @@ void start_kernel_secondary(void)
 	/* MMU, Caches, Vector Table, Interrupts etc */
 	setup_processor();
 
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index bc5617ef7128..a2e6db2ce811 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -307,7 +307,7 @@ void secondary_start_kernel(void)
 	local_irq_disable();
 
 	/* Attach the new idle task to the global mm. */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index 81757d55a5b5..3473bde77f56 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -188,7 +188,7 @@ int cxn_pin_by_pid(pid_t pid)
 	task_lock(tsk);
 	if (tsk->mm) {
 		mm = tsk->mm;
-		atomic_inc(&mm->mm_users);
+		mmget(mm);
 		ret = 0;
 	}
 	task_unlock(tsk);
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index af9cff547a19..c622293254e4 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -344,7 +344,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index ee379c699c08..edc4769b047e 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -179,7 +179,7 @@ asmlinkage void start_secondary(void)
 
 	enable_mmu();
 	mmgrab(mm);
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	current->active_mm = mm;
 #ifdef CONFIG_MMU
 	enter_lazy_tlb(mm, current);
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 9bf5cea3bae4..fcea72019df7 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -135,7 +135,7 @@ void secondary_start_kernel(void)
 
 	/* All kernel threads share the same mm context. */
 
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7cfa5546c840..4a28deb5f210 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2948,6 +2948,27 @@ static inline void mmdrop_async(struct mm_struct *mm)
 	}
 }
 
+/**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+ *
+ * Make sure that the address space of the given &struct mm_struct doesn't
+ * go away. This does not protect against parts of the address space being
+ * modified or freed, however.
+ *
+ * Never use this function to pin this address space for an
+ * unbounded/indefinite amount of time.
+ *
+ * Use mmput() to release the reference acquired by mmget().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmget(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_users);
+}
+
 static inline bool mmget_not_zero(struct mm_struct *mm)
 {
 	return atomic_inc_not_zero(&mm->mm_users);
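As a rough sketch of the distinction the kerneldoc above draws between
mm_users and mm_count (both caller functions here are hypothetical and only
for illustration): mmget()/mmput() pin the address space itself, while
mmgrab()/mmdrop() only keep the struct mm_struct allocated.

	/* Hypothetical caller that needs the address space to stay usable. */
	static void example_touch_user_memory(struct mm_struct *mm)
	{
		mmget(mm);		/* raises mm_users; page tables stay valid */
		/* ... access user mappings through mm ... */
		mmput(mm);		/* may tear down the address space */
	}

	/* Hypothetical caller that only needs the struct to stay allocated. */
	static void example_remember_mm(struct mm_struct *mm)
	{
		mmgrab(mm);		/* raises mm_count; struct mm_struct stays */
		/* ... the address space itself may already be torn down ... */
		mmdrop(mm);		/* may free the struct mm_struct */
	}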
diff --git a/kernel/fork.c b/kernel/fork.c
index 348fe73155bc..246bf9aaf9df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1000,7 +1000,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 		if (task->flags & PF_KTHREAD)
 			mm = NULL;
 		else
-			atomic_inc(&mm->mm_users);
+			mmget(mm);
 	}
 	task_unlock(task);
 	return mm;
@@ -1188,7 +1188,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	vmacache_flush(tsk);
 
 	if (clone_flags & CLONE_VM) {
-		atomic_inc(&oldmm->mm_users);
+		mmget(oldmm);
 		mm = oldmm;
 		goto good_mm;
 	}
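get_task_mm() (first hunk above) hands back the task's mm with mm_users
already raised via mmget(), so every successful call must be balanced by
mmput(). A hedged caller sketch (example_inspect_task is hypothetical, not
part of this patch):

	static int example_inspect_task(struct task_struct *task)
	{
		struct mm_struct *mm = get_task_mm(task);

		if (!mm)
			return -EINVAL;	/* kernel thread, or no mm */
		/* ... inspect the task's address space ... */
		mmput(mm);	/* balance the mmget() taken in get_task_mm() */
		return 0;
	}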
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2cac12cc9abe..7a0713b76668 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1671,7 +1671,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 	 * that.
 	 */
 	start_mm = &init_mm;
-	atomic_inc(&init_mm.mm_users);
+	mmget(&init_mm);
 
 	/*
 	 * Keep on scanning until all entries have gone. Usually,
@@ -1720,7 +1720,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 		if (atomic_read(&start_mm->mm_users) == 1) {
 			mmput(start_mm);
 			start_mm = &init_mm;
-			atomic_inc(&init_mm.mm_users);
+			mmget(&init_mm);
 		}
 
 		/*
@@ -1757,8 +1757,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
 			struct mm_struct *prev_mm = start_mm;
 			struct mm_struct *mm;
 
-			atomic_inc(&new_start_mm->mm_users);
-			atomic_inc(&prev_mm->mm_users);
+			mmget(new_start_mm);
+			mmget(prev_mm);
 			spin_lock(&mmlist_lock);
 			while (swap_count(*swap_map) && !retval &&
 					(p = p->next) != &start_mm->mmlist) {
@@ -1781,7 +1781,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 
 			if (set_start_mm && *swap_map < swcount) {
 				mmput(new_start_mm);
-				atomic_inc(&mm->mm_users);
+				mmget(mm);
 				new_start_mm = mm;
 				set_start_mm = 0;
 			}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 3815e940fbea..2366177172f6 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -204,7 +204,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
-	atomic_inc(&work->mm->mm_users);
+	mmget(work->mm);
 	kvm_get_kvm(work->vcpu->kvm);
 
 	/* this can't really happen otherwise gfn_to_pfn_async