author     Peter Zijlstra <peterz@infradead.org>  2016-07-14 14:08:46 -0400
committer  Ingo Molnar <mingo@kernel.org>         2016-08-10 08:34:01 -0400
commit     80127a39681bd68c959f0953f84a830cbd7c3b1c (patch)
tree       223bcc2a5cbec5c0873f8fae85a98797f94e6c56
parent     08be8f63c40c030b5cf95b4368e314e563a86301 (diff)
locking/percpu-rwsem: Optimize readers and reduce global impact
Currently the percpu-rwsem switches to (global) atomic ops while a
writer is waiting, which can take quite a while and slows down
releasing the readers.
This patch cures this problem by ordering the reader-state vs
reader-count (see the comments in __percpu_down_read() and
percpu_down_write()). This changes a global atomic op into a full
memory barrier, which doesn't have the global cacheline contention.
This also enables using the percpu-rwsem with rcu_sync disabled in order
to bias the implementation differently, reducing the writer latency by
adding some cost to readers.
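As an illustrative aside (not part of the original commit message): the
ordering argument above can be sketched as a userspace model in C11
atomics. This is a model only, not the kernel code; a single shared
counter stands in for the per-CPU sem->read_count, and seq_cst fences
stand in for smp_mb().

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int read_count;     /* models sem->read_count (flattened) */
static atomic_int readers_block;  /* models sem->readers_block */

/* Reader fast path: bump the count, fence, then check for a writer. */
static bool reader_enter(void)
{
        atomic_fetch_add_explicit(&read_count, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* A matches D */
        if (!atomic_load_explicit(&readers_block, memory_order_acquire))
                return true;  /* no writer: critical section starts here */
        /* Writer present: immediately undo, then take the slow path. */
        atomic_fetch_sub_explicit(&read_count, 1, memory_order_relaxed);
        return false;
}

/* Writer: raise the block flag, fence, then look for readers. */
static bool writer_sees_no_readers(void)
{
        atomic_store_explicit(&readers_block, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* D matches A */
        return atomic_load_explicit(&read_count, memory_order_relaxed) == 0;
}

This is the classic store-buffering pattern: the paired full fences make
it impossible for the reader to miss readers_block while the writer also
misses the increment, which is exactly why no global atomic op is needed
on the reader side.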
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
[ Fixed modular build. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/linux/percpu-rwsem.h  |  84
-rw-r--r--  kernel/locking/percpu-rwsem.c | 228
-rw-r--r--  kernel/rcu/sync.c             |   2
3 files changed, 208 insertions(+), 106 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index c2fa3ecb0dce..146efefde2a1 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -10,30 +10,96 @@
 
 struct percpu_rw_semaphore {
         struct rcu_sync         rss;
-        unsigned int __percpu   *fast_read_ctr;
+        unsigned int __percpu   *read_count;
         struct rw_semaphore     rw_sem;
-        atomic_t                slow_read_ctr;
-        wait_queue_head_t       write_waitq;
+        wait_queue_head_t       writer;
+        int                     readers_block;
 };
 
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern int  percpu_down_read_trylock(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+        might_sleep();
+
+        rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+        preempt_disable();
+        /*
+         * We are in an RCU-sched read-side critical section, so the writer
+         * cannot both change sem->state from readers_fast and start checking
+         * counters while we are here. So if we see !sem->state, we know that
+         * the writer won't be checking until we're past the preempt_enable()
+         * and that once the synchronize_sched() is done, the writer will see
+         * anything we did within this RCU-sched read-side critical section.
+         */
+        __this_cpu_inc(*sem->read_count);
+        if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+                __percpu_down_read(sem, false); /* Unconditional memory barrier */
+        preempt_enable();
+        /*
+         * The barrier() from preempt_enable() prevents the compiler from
+         * bleeding the critical section out.
+         */
+}
+
+static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+        int ret = 1;
+
+        preempt_disable();
+        /*
+         * Same as in percpu_down_read().
+         */
+        __this_cpu_inc(*sem->read_count);
+        if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+                ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+        preempt_enable();
+        /*
+         * The barrier() from preempt_enable() prevents the compiler from
+         * bleeding the critical section out.
+         */
+
+        if (ret)
+                rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+        return ret;
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+        /*
+         * The barrier() in preempt_disable() prevents the compiler from
+         * bleeding the critical section out.
+         */
+        preempt_disable();
+        /*
+         * Same as in percpu_down_read().
+         */
+        if (likely(rcu_sync_is_idle(&sem->rss)))
+                __this_cpu_dec(*sem->read_count);
+        else
+                __percpu_up_read(sem); /* Unconditional memory barrier */
+        preempt_enable();
+
+        rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
 
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
 
 extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
                                 const char *, struct lock_class_key *);
+
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
-#define percpu_init_rwsem(brw)                                  \
+#define percpu_init_rwsem(sem)                                  \
 ({                                                              \
         static struct lock_class_key rwsem_key;                 \
-        __percpu_init_rwsem(brw, #brw, &rwsem_key);             \
+        __percpu_init_rwsem(sem, #sem, &rwsem_key);             \
 })
 
-
 #define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
 
 static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index bec0b647f9cc..ce182599cf2e 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -8,152 +8,186 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 
-int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
                         const char *name, struct lock_class_key *rwsem_key)
 {
-        brw->fast_read_ctr = alloc_percpu(int);
-        if (unlikely(!brw->fast_read_ctr))
+        sem->read_count = alloc_percpu(int);
+        if (unlikely(!sem->read_count))
                 return -ENOMEM;
 
         /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
-        __init_rwsem(&brw->rw_sem, name, rwsem_key);
-        rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
-        atomic_set(&brw->slow_read_ctr, 0);
-        init_waitqueue_head(&brw->write_waitq);
+        rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+        __init_rwsem(&sem->rw_sem, name, rwsem_key);
+        init_waitqueue_head(&sem->writer);
+        sem->readers_block = 0;
         return 0;
 }
 EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
 
-void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
+void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 {
         /*
          * XXX: temporary kludge. The error path in alloc_super()
          * assumes that percpu_free_rwsem() is safe after kzalloc().
          */
-        if (!brw->fast_read_ctr)
+        if (!sem->read_count)
                 return;
 
-        rcu_sync_dtor(&brw->rss);
-        free_percpu(brw->fast_read_ctr);
-        brw->fast_read_ctr = NULL; /* catch use after free bugs */
+        rcu_sync_dtor(&sem->rss);
+        free_percpu(sem->read_count);
+        sem->read_count = NULL; /* catch use after free bugs */
 }
 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-/*
- * This is the fast-path for down_read/up_read. If it succeeds we rely
- * on the barriers provided by rcu_sync_enter/exit; see the comments in
- * percpu_down_write() and percpu_up_write().
- *
- * If this helper fails the callers rely on the normal rw_semaphore and
- * atomic_dec_and_test(), so in this case we have the necessary barriers.
- */
-static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
+int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 {
-        bool success;
+        /*
+         * Due to having preemption disabled the decrement happens on
+         * the same CPU as the increment, avoiding the
+         * increment-on-one-CPU-and-decrement-on-another problem.
+         *
+         * If the reader misses the writer's assignment of readers_block, then
+         * the writer is guaranteed to see the reader's increment.
+         *
+         * Conversely, any readers that increment their sem->read_count after
+         * the writer looks are guaranteed to see the readers_block value,
+         * which in turn means that they are guaranteed to immediately
+         * decrement their sem->read_count, so that it doesn't matter that the
+         * writer missed them.
+         */
 
-        preempt_disable();
-        success = rcu_sync_is_idle(&brw->rss);
-        if (likely(success))
-                __this_cpu_add(*brw->fast_read_ctr, val);
-        preempt_enable();
+        smp_mb(); /* A matches D */
 
-        return success;
-}
+        /*
+         * If !readers_block the critical section starts here, matched by the
+         * release in percpu_up_write().
+         */
+        if (likely(!smp_load_acquire(&sem->readers_block)))
+                return 1;
 
-/*
- * Like the normal down_read() this is not recursive, the writer can
- * come after the first percpu_down_read() and create the deadlock.
- *
- * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
- * percpu_up_read() does rwsem_release(). This pairs with the usage
- * of ->rw_sem in percpu_down/up_write().
- */
-void percpu_down_read(struct percpu_rw_semaphore *brw)
-{
-        might_sleep();
-        rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+        /*
+         * Per the above comment; we still have preemption disabled and
+         * will thus decrement on the same CPU as we incremented.
+         */
+        __percpu_up_read(sem);
 
-        if (likely(update_fast_ctr(brw, +1)))
-                return;
+        if (try)
+                return 0;
 
-        /* Avoid rwsem_acquire_read() and rwsem_release() */
-        __down_read(&brw->rw_sem);
-        atomic_inc(&brw->slow_read_ctr);
-        __up_read(&brw->rw_sem);
-}
-EXPORT_SYMBOL_GPL(percpu_down_read);
+        /*
+         * We either call schedule() in the wait, or we'll fall through
+         * and reschedule on the preempt_enable() in percpu_down_read().
+         */
+        preempt_enable_no_resched();
 
-int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
-{
-        if (unlikely(!update_fast_ctr(brw, +1))) {
-                if (!__down_read_trylock(&brw->rw_sem))
-                        return 0;
-                atomic_inc(&brw->slow_read_ctr);
-                __up_read(&brw->rw_sem);
-        }
-
-        rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
+        /*
+         * Avoid lockdep for the down/up_read(); we already have them.
+         */
+        __down_read(&sem->rw_sem);
+        this_cpu_inc(*sem->read_count);
+        __up_read(&sem->rw_sem);
+
+        preempt_disable();
         return 1;
 }
+EXPORT_SYMBOL_GPL(__percpu_down_read);
 
-void percpu_up_read(struct percpu_rw_semaphore *brw)
+void __percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-        rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
-
-        if (likely(update_fast_ctr(brw, -1)))
-                return;
+        smp_mb(); /* B matches C */
+        /*
+         * In other words, if they see our decrement (presumably to aggregate
+         * zero, as that is the only time it matters) they will also see our
+         * critical section.
+         */
+        __this_cpu_dec(*sem->read_count);
 
-        /* false-positive is possible but harmless */
-        if (atomic_dec_and_test(&brw->slow_read_ctr))
-                wake_up_all(&brw->write_waitq);
+        /* Prod writer to recheck readers_active */
+        wake_up(&sem->writer);
 }
-EXPORT_SYMBOL_GPL(percpu_up_read);
+EXPORT_SYMBOL_GPL(__percpu_up_read);
+
+#define per_cpu_sum(var)                                        \
+({                                                              \
+        typeof(var) __sum = 0;                                  \
+        int cpu;                                                \
+        compiletime_assert_atomic_type(__sum);                  \
+        for_each_possible_cpu(cpu)                              \
+                __sum += per_cpu(var, cpu);                     \
+        __sum;                                                  \
+})
 
-static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
+/*
+ * Return true if the modular sum of the sem->read_count per-CPU variable is
+ * zero.  If this sum is zero, then it is stable due to the fact that if any
+ * newly arriving readers increment a given counter, they will immediately
+ * decrement that same counter.
+ */
+static bool readers_active_check(struct percpu_rw_semaphore *sem)
 {
-        unsigned int sum = 0;
-        int cpu;
+        if (per_cpu_sum(*sem->read_count) != 0)
+                return false;
+
+        /*
+         * If we observed the decrement; ensure we see the entire critical
+         * section.
+         */
 
-        for_each_possible_cpu(cpu) {
-                sum += per_cpu(*brw->fast_read_ctr, cpu);
-                per_cpu(*brw->fast_read_ctr, cpu) = 0;
-        }
+        smp_mb(); /* C matches B */
 
-        return sum;
+        return true;
 }
 
-void percpu_down_write(struct percpu_rw_semaphore *brw)
+void percpu_down_write(struct percpu_rw_semaphore *sem)
 {
+        /* Notify readers to take the slow path. */
+        rcu_sync_enter(&sem->rss);
+
+        down_write(&sem->rw_sem);
+
         /*
-         * Make rcu_sync_is_idle() == F and thus disable the fast-path in
-         * percpu_down_read() and percpu_up_read(), and wait for gp pass.
-         *
-         * The latter synchronises us with the preceding readers which used
-         * the fast-past, so we can not miss the result of __this_cpu_add()
-         * or anything else inside their criticial sections.
+         * Notify new readers to block; up until now, and thus throughout the
+         * longish rcu_sync_enter() above, new readers could still come in.
         */
-        rcu_sync_enter(&brw->rss);
+        WRITE_ONCE(sem->readers_block, 1);
 
-        /* exclude other writers, and block the new readers completely */
-        down_write(&brw->rw_sem);
+        smp_mb(); /* D matches A */
 
-        /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
-        atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
+        /*
+         * If they don't see our write of readers_block, then we are
+         * guaranteed to see their sem->read_count increment, and therefore
+         * will wait for them.
+         */
 
-        /* wait for all readers to complete their percpu_up_read() */
-        wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
+        /* Wait for all now active readers to complete. */
+        wait_event(sem->writer, readers_active_check(sem));
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
 
-void percpu_up_write(struct percpu_rw_semaphore *brw)
+void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
-        /* release the lock, but the readers can't use the fast-path */
-        up_write(&brw->rw_sem);
         /*
-         * Enable the fast-path in percpu_down_read() and percpu_up_read()
-         * but only after another gp pass; this adds the necessary barrier
-         * to ensure the reader can't miss the changes done by us.
+         * Signal the writer is done, no fast path yet.
+         *
+         * One reason that we cannot just immediately flip to readers_fast is
+         * that new readers might fail to see the results of this writer's
+         * critical section.
+         *
+         * Therefore we force it through the slow path which guarantees an
+         * acquire and thereby guarantees the critical section's consistency.
+         */
+        smp_store_release(&sem->readers_block, 0);
+
+        /*
+         * Release the write lock, this will allow readers back in the game.
+         */
+        up_write(&sem->rw_sem);
+
+        /*
+         * Once this completes (at least one RCU-sched grace period hence) the
+         * reader fast path will be available again. Safe to use outside the
+         * exclusive write lock because it's counting.
         */
-        rcu_sync_exit(&brw->rss);
+        rcu_sync_exit(&sem->rss);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index be922c9f3d37..198473d90f81 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -68,6 +68,8 @@ void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
         RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
                          "suspicious rcu_sync_is_idle() usage");
 }
+
+EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
 #endif
 
 /**
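
For reference, a hypothetical caller of the reworked API could look like
the sketch below. foo_sem and the foo_*() helpers are invented names for
illustration; the percpu_*() calls themselves are the ones declared in
the header above.

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore foo_sem;      /* hypothetical example lock */

static int foo_init(void)
{
        return percpu_init_rwsem(&foo_sem);
}

static void foo_reader(void)
{
        percpu_down_read(&foo_sem);     /* fast path: per-CPU inc + barrier() */
        /* ... read-side critical section ... */
        percpu_up_read(&foo_sem);
}

static void foo_writer(void)
{
        percpu_down_write(&foo_sem);    /* slow path: grace period, then drain readers */
        /* ... write-side critical section ... */
        percpu_up_write(&foo_sem);
}

The asymmetry is the point of the patch: readers stay cheap and scalable,
while the writer absorbs the rcu_sync grace-period latency up front.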