path: root/kernel/sched/membarrier.c
author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>  2017-07-28 16:40:40 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>       2017-08-17 10:28:05 -0400
commit     22e4ebb975822833b083533035233d128b30e98f (patch)
tree       973b3dda0e0b79af883482f66b49da2dd74225f1  /kernel/sched/membarrier.c
parent     955dbdf4ce87fd9be4bc8378e26b8c2eb8b3d184 (diff)
membarrier: Provide expedited private command
Implement MEMBARRIER_CMD_PRIVATE_EXPEDITED with IPIs using cpumask built
from all runqueues for which current thread's mm is the same as the
thread calling sys_membarrier. It executes faster than the non-expedited
variant (no blocking). It also works on NOHZ_FULL configurations.

Scheduler-wise, it requires a memory barrier before and after context
switching between processes (which have different mm). The memory
barrier before context switch is already present. For the barrier after
context switch:

* Our TSO archs can do RELEASE without being a full barrier. Look at
  x86 spin_unlock() being a regular STORE for example. But for those
  archs, all atomics imply smp_mb and all of them have atomic ops in
  switch_mm() for mm_cpumask(), and on x86 the CR3 load acts as a full
  barrier.

* From all weakly ordered machines, only ARM64 and PPC can do RELEASE,
  the rest does indeed do smp_mb(), so there the spin_unlock() is a
  full barrier and we're good.

* ARM64 has a very heavy barrier in switch_to(), which suffices.

* PPC just removed its barrier from switch_to(), but appears to be
  talking about adding something to switch_mm(). So add a
  smp_mb__after_unlock_lock() for now, until this is settled on the
  PPC side.

Changes since v3:
- Properly document the memory barriers provided by each architecture.

Changes since v2:
- Address comments from Peter Zijlstra,
- Add smp_mb__after_unlock_lock() after finish_lock_switch() in
  finish_task_switch() to add the memory barrier we need after storing
  to rq->curr. This is much simpler than the previous approach relying
  on atomic_dec_and_test() in mmdrop(), which actually added a memory
  barrier in the common case of switching between userspace processes.
- Return -EINVAL when MEMBARRIER_CMD_SHARED is used on a nohz_full
  kernel, rather than having the whole membarrier system call returning
  -ENOSYS. Indeed, CMD_PRIVATE_EXPEDITED is compatible with nohz_full.
  Adapt the CMD_QUERY mask accordingly.

Changes since v1:
- move membarrier code under kernel/sched/ because it uses the
  scheduler runqueue,
- only add the barrier when we switch from a kernel thread. The case
  where we switch from a user-space thread is already handled by the
  atomic_dec_and_test() in mmdrop().
- add a comment to mmdrop() documenting the requirement on the implicit
  memory barrier.

CC: Peter Zijlstra <peterz@infradead.org>
CC: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
CC: Boqun Feng <boqun.feng@gmail.com>
CC: Andrew Hunter <ahh@google.com>
CC: Maged Michael <maged.michael@gmail.com>
CC: gromer@google.com
CC: Avi Kivity <avi@scylladb.com>
CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: Paul Mackerras <paulus@samba.org>
CC: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Dave Watson <davejwatson@fb.com>
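For illustration, a minimal userspace sketch (a hypothetical test
program, not part of this commit) invoking the new command; it assumes
a kernel carrying this patch and the matching <linux/membarrier.h> uapi
header:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/*
	 * Orders the caller's memory accesses against those of every
	 * other thread of this process, without blocking in the kernel.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
		perror("membarrier");
	return 0;
}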
Diffstat (limited to 'kernel/sched/membarrier.c')
-rw-r--r--  kernel/sched/membarrier.c  152
1 file changed, 152 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
new file mode 100644
index 000000000000..a92fddc22747
--- /dev/null
+++ b/kernel/sched/membarrier.c
@@ -0,0 +1,152 @@
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/syscalls.h>
#include <linux/membarrier.h>
#include <linux/tick.h>
#include <linux/cpumask.h>

#include "sched.h"	/* for cpu_rq(). */

/*
 * Bitmask made from an OR of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#define MEMBARRIER_CMD_BITMASK	\
	(MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED)

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}

static void membarrier_private_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

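	/* Single CPU: all threads share its program order; no IPI needed. */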
	if (num_online_cpus() == 1)
		return;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
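		/*
		 * Read rq->curr under RCU: task_rcu_dereference()
		 * keeps the task_struct from being freed while its
		 * mm pointer is inspected below.
		 */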
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm == current->mm) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
	if (!fallback) {
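		/* wait == 1: return only after every queued ipi_mb() has run. */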
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
}

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * this system call is guaranteed to always return the same value until
 * reboot.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb()   sys_membarrier()
 *        barrier()          X           X              O
 *        smp_mb()           X           O              O
 *        sys_membarrier()   O           O              O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_SHARED;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_SHARED:
		/* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_sched();
		return 0;
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		membarrier_private_expedited();
		return 0;
	default:
		return -EINVAL;
	}
}
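
For completeness, a hypothetical userspace sketch (not part of this
commit) of the query-then-use pattern described by the kernel-doc above:
probe the command mask with MEMBARRIER_CMD_QUERY, prefer the expedited
private command, and fall back to MEMBARRIER_CMD_SHARED where the
running kernel offers it (it is masked out on nohz_full kernels):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

/* Issue a process-wide memory barrier with the best available command. */
static int do_membarrier(void)
{
	int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0)
		return -1;	/* errno is ENOSYS when membarrier is absent. */
	if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED)
		return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
	if (mask & MEMBARRIER_CMD_SHARED)
		return membarrier(MEMBARRIER_CMD_SHARED, 0);
	errno = EINVAL;
	return -1;
}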