summaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorMathieu Desnoyers <mathieu.desnoyers@efficios.com>2019-09-19 13:37:04 -0400
committerIngo Molnar <mingo@kernel.org>2019-09-25 11:42:31 -0400
commitc6d68c1c4a4d6611fc0f8145d764226571d737ca (patch)
treef808d18297778aa54b35e5cd6bf838b2636d2f8d /kernel/sched
parent19a4ff534bb09686f53800564cb977bad2177c00 (diff)
sched/membarrier: Skip IPIs when mm->mm_users == 1
If there is only a single mm_user for the mm, the private expedited membarrier command can skip the IPIs, because only a single thread is using the mm.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190919173705.2181-7-mathieu.desnoyers@efficios.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/membarrier.c9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 070cf433bb9a..fced54ad0f3d 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -145,20 +145,21 @@ static int membarrier_private_expedited(int flags)
145 int cpu; 145 int cpu;
146 bool fallback = false; 146 bool fallback = false;
147 cpumask_var_t tmpmask; 147 cpumask_var_t tmpmask;
148 struct mm_struct *mm = current->mm;
148 149
149 if (flags & MEMBARRIER_FLAG_SYNC_CORE) { 150 if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
150 if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE)) 151 if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
151 return -EINVAL; 152 return -EINVAL;
152 if (!(atomic_read(&current->mm->membarrier_state) & 153 if (!(atomic_read(&mm->membarrier_state) &
153 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY)) 154 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
154 return -EPERM; 155 return -EPERM;
155 } else { 156 } else {
156 if (!(atomic_read(&current->mm->membarrier_state) & 157 if (!(atomic_read(&mm->membarrier_state) &
157 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) 158 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
158 return -EPERM; 159 return -EPERM;
159 } 160 }
160 161
161 if (num_online_cpus() == 1) 162 if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)
162 return 0; 163 return 0;
163 164
164 /* 165 /*
@@ -194,7 +195,7 @@ static int membarrier_private_expedited(int flags)
194 continue; 195 continue;
195 rcu_read_lock(); 196 rcu_read_lock();
196 p = rcu_dereference(cpu_rq(cpu)->curr); 197 p = rcu_dereference(cpu_rq(cpu)->curr);
197 if (p && p->mm == current->mm) { 198 if (p && p->mm == mm) {
198 if (!fallback) 199 if (!fallback)
199 __cpumask_set_cpu(cpu, tmpmask); 200 __cpumask_set_cpu(cpu, tmpmask);
200 else 201 else