diff options
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c       | 2 +-
-rw-r--r--  kernel/futex.c      | 2 +-
-rw-r--r--  kernel/sched/core.c | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 90b09ca35c84..8a768a3672a5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -539,7 +539,7 @@ static void exit_mm(void)
539 | __set_current_state(TASK_RUNNING); | 539 | __set_current_state(TASK_RUNNING); |
540 | down_read(&mm->mmap_sem); | 540 | down_read(&mm->mmap_sem); |
541 | } | 541 | } |
542 | atomic_inc(&mm->mm_count); | 542 | mmgrab(mm); |
543 | BUG_ON(mm != current->active_mm); | 543 | BUG_ON(mm != current->active_mm); |
544 | /* more a memory barrier than a real lock */ | 544 | /* more a memory barrier than a real lock */ |
545 | task_lock(current); | 545 | task_lock(current); |
diff --git a/kernel/futex.c b/kernel/futex.c
index cdf365036141..b687cb22301c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -338,7 +338,7 @@ static inline bool should_fail_futex(bool fshared)
338 | 338 | ||
339 | static inline void futex_get_mm(union futex_key *key) | 339 | static inline void futex_get_mm(union futex_key *key) |
340 | { | 340 | { |
341 | atomic_inc(&key->private.mm->mm_count); | 341 | mmgrab(key->private.mm); |
342 | /* | 342 | /* |
343 | * Ensure futex_get_mm() implies a full barrier such that | 343 | * Ensure futex_get_mm() implies a full barrier such that |
344 | * get_futex_key() implies a full barrier. This is relied upon | 344 | * get_futex_key() implies a full barrier. This is relied upon |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e1ae6ac15eac..6ea1925ac5c0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2847,7 +2847,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2847 | 2847 | ||
2848 | if (!mm) { | 2848 | if (!mm) { |
2849 | next->active_mm = oldmm; | 2849 | next->active_mm = oldmm; |
2850 | atomic_inc(&oldmm->mm_count); | 2850 | mmgrab(oldmm); |
2851 | enter_lazy_tlb(oldmm, next); | 2851 | enter_lazy_tlb(oldmm, next); |
2852 | } else | 2852 | } else |
2853 | switch_mm_irqs_off(oldmm, mm, next); | 2853 | switch_mm_irqs_off(oldmm, mm, next); |
@@ -6098,7 +6098,7 @@ void __init sched_init(void)
6098 | /* | 6098 | /* |
6099 | * The boot idle thread does lazy MMU switching as well: | 6099 | * The boot idle thread does lazy MMU switching as well: |
6100 | */ | 6100 | */ |
6101 | atomic_inc(&init_mm.mm_count); | 6101 | mmgrab(&init_mm); |
6102 | enter_lazy_tlb(&init_mm, current); | 6102 | enter_lazy_tlb(&init_mm, current); |
6103 | 6103 | ||
6104 | /* | 6104 | /* |