diff options
author    : Vegard Nossum <vegard.nossum@oracle.com>  2017-02-27 17:30:07 -0500
committer : Linus Torvalds <torvalds@linux-foundation.org>  2017-02-27 21:43:48 -0500
commit    : f1f1007644ffc8051a4c11427d58b1967ae7b75a (patch)
tree      : e7e590cb1baa494b963d81b967a3f98d3c56f33e /kernel
parent    : 522b837c672eba9e9fb69f9f52bced0256dc6697 (diff)
mm: add new mmgrab() helper
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:
git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_count);/mmgrab\(\1\);/'
git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_count);/mmgrab\(\&\1\);/'
This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.
(Michal Hocko provided most of the kerneldoc comment.)
Link: http://lkml.kernel.org/r/20161218123229.22952-1-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
 kernel/exit.c       | 2 +-
 kernel/futex.c      | 2 +-
 kernel/sched/core.c | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 90b09ca35c84..8a768a3672a5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -539,7 +539,7 @@ static void exit_mm(void)
 		__set_current_state(TASK_RUNNING);
 		down_read(&mm->mmap_sem);
 	}
-	atomic_inc(&mm->mm_count);
+	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
 	task_lock(current);
diff --git a/kernel/futex.c b/kernel/futex.c
index cdf365036141..b687cb22301c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -338,7 +338,7 @@ static inline bool should_fail_futex(bool fshared)
 
 static inline void futex_get_mm(union futex_key *key)
 {
-	atomic_inc(&key->private.mm->mm_count);
+	mmgrab(key->private.mm);
 	/*
 	 * Ensure futex_get_mm() implies a full barrier such that
 	 * get_futex_key() implies a full barrier. This is relied upon
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e1ae6ac15eac..6ea1925ac5c0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2847,7 +2847,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 
 	if (!mm) {
 		next->active_mm = oldmm;
-		atomic_inc(&oldmm->mm_count);
+		mmgrab(oldmm);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm_irqs_off(oldmm, mm, next);
@@ -6098,7 +6098,7 @@ void __init sched_init(void)
 	/*
 	 * The boot idle thread does lazy MMU switching as well:
 	 */
-	atomic_inc(&init_mm.mm_count);
+	mmgrab(&init_mm);
 	enter_lazy_tlb(&init_mm, current);
 
 	/*