author	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-18 11:23:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-18 11:23:42 -0400
commit	396c9df2231865ef55aa031e3f5df9d99e036869 (patch)
tree	566c2a9e9cab77720e1dc41fd16ef00fd6a662c7 /kernel
parent	8f502d5b9e3362971f58dad5d468f070340336e1 (diff)
parent	8053871d0f7f67c7efb7f226ef031f78877d6625 (diff)
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Ingo Molnar:
 "Two fixes: an smp-call fix and a lockdep fix"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Fix smp_call_function_single_async() locking
  lockdep: Make print_lock() robust against concurrent release
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/lockdep.c	16
-rw-r--r--	kernel/smp.c	78
2 files changed, 62 insertions(+), 32 deletions(-)
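
For background, the smp_call_function_single_async() fix matters to callers that provide their own call_single_data and may re-post it while a previous request is still in flight. A minimal, hypothetical usage sketch follows; my_dev, my_remote_work and my_kick are made-up names for illustration, not part of this patch:

#include <linux/smp.h>

/* Hypothetical per-device structure embedding a caller-owned csd. */
struct my_dev {
	struct call_single_data csd;
	int target_cpu;
};

/* Runs from the IPI handler on dev->target_cpu. */
static void my_remote_work(void *info)
{
	struct my_dev *dev = info;

	/* ... poke per-CPU state local to target_cpu ... */
}

/*
 * With this fix, smp_call_function_single_async() takes CSD_FLAG_LOCK
 * on the caller's csd itself and warns (then waits) if the previous
 * request has not been released yet; the IPI handler drops the flag
 * before running the callback, so re-posting the same csd is safe.
 */
static int my_kick(struct my_dev *dev)
{
	dev->csd.func = my_remote_work;
	dev->csd.info = dev;

	return smp_call_function_single_async(dev->target_cpu, &dev->csd);
}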
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ba77ab5f64dd..a0831e1b99f4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -551,7 +551,21 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 
 static void print_lock(struct held_lock *hlock)
 {
-	print_lock_name(hlock_class(hlock));
+	/*
+	 * We can be called locklessly through debug_show_all_locks() so be
+	 * extra careful, the hlock might have been released and cleared.
+	 */
+	unsigned int class_idx = hlock->class_idx;
+
+	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
+	barrier();
+
+	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
+		printk("<RELEASED>\n");
+		return;
+	}
+
+	print_lock_name(lock_classes + class_idx - 1);
 	printk(", at: ");
 	print_ip_sym(hlock->acquire_ip);
 }
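
The print_lock() change is an instance of a general snapshot-and-validate pattern for lockless readers of state that a concurrent release may clear: read the index once, stop the compiler from re-reading it (READ_ONCE() is not usable here because class_idx is a bitfield), validate the snapshot, and only then dereference. A stripped-down illustration of the same pattern, with made-up names (slot, entry, table, print_slot), not kernel API:

#define TABLE_SIZE	64		/* illustrative bound, cf. MAX_LOCKDEP_KEYS */

struct entry { const char *name; };
static struct entry table[TABLE_SIZE];

struct slot {
	unsigned int idx;		/* 0 means released, else 1-based index into table[] */
};

static void print_slot(struct slot *s)
{
	unsigned int idx = s->idx;	/* may be cleared by a concurrent release */

	/* Don't let the compiler re-read s->idx below: */
	barrier();

	if (!idx || idx - 1 >= TABLE_SIZE) {
		printk("<RELEASED>\n");
		return;
	}

	/* Only the validated local snapshot is used from here on. */
	printk("%s\n", table[idx - 1].name);
}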
diff --git a/kernel/smp.c b/kernel/smp.c
index f38a1e692259..2aaac2c47683 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -19,7 +19,7 @@
 
 enum {
 	CSD_FLAG_LOCK		= 0x01,
-	CSD_FLAG_WAIT		= 0x02,
+	CSD_FLAG_SYNCHRONOUS	= 0x02,
 };
 
 struct call_function_data {
@@ -107,7 +107,7 @@ void __init call_function_init(void)
  */
 static void csd_lock_wait(struct call_single_data *csd)
 {
-	while (csd->flags & CSD_FLAG_LOCK)
+	while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK)
 		cpu_relax();
 }
 
@@ -121,19 +121,17 @@ static void csd_lock(struct call_single_data *csd)
 	 * to ->flags with any subsequent assignments to other
 	 * fields of the specified call_single_data structure:
 	 */
-	smp_mb();
+	smp_wmb();
 }
 
 static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
+	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
 	 */
-	smp_mb();
-
-	csd->flags &= ~CSD_FLAG_LOCK;
+	smp_store_release(&csd->flags, 0);
 }
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
@@ -144,13 +142,16 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
  * ->func, ->info, and ->flags set.
  */
 static int generic_exec_single(int cpu, struct call_single_data *csd,
-			       smp_call_func_t func, void *info, int wait)
+			       smp_call_func_t func, void *info)
 {
-	struct call_single_data csd_stack = { .flags = 0 };
-	unsigned long flags;
-
-
 	if (cpu == smp_processor_id()) {
+		unsigned long flags;
+
+		/*
+		 * We can unlock early even for the synchronous on-stack case,
+		 * since we're doing this from the same CPU..
+		 */
+		csd_unlock(csd);
 		local_irq_save(flags);
 		func(info);
 		local_irq_restore(flags);
@@ -161,21 +162,9 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
 		return -ENXIO;
 
-
-	if (!csd) {
-		csd = &csd_stack;
-		if (!wait)
-			csd = this_cpu_ptr(&csd_data);
-	}
-
-	csd_lock(csd);
-
 	csd->func = func;
 	csd->info = info;
 
-	if (wait)
-		csd->flags |= CSD_FLAG_WAIT;
-
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -190,9 +179,6 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
-	if (wait)
-		csd_lock_wait(csd);
-
 	return 0;
 }
 
@@ -250,8 +236,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	}
 
 	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
-		csd->func(csd->info);
-		csd_unlock(csd);
+		smp_call_func_t func = csd->func;
+		void *info = csd->info;
+
+		/* Do we wait until *after* callback? */
+		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+			func(info);
+			csd_unlock(csd);
+		} else {
+			csd_unlock(csd);
+			func(info);
+		}
 	}
 
 	/*
@@ -274,6 +269,8 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 			     int wait)
 {
+	struct call_single_data *csd;
+	struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
 	int this_cpu;
 	int err;
 
@@ -292,7 +289,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 		     && !oops_in_progress);
 
-	err = generic_exec_single(cpu, NULL, func, info, wait);
+	csd = &csd_stack;
+	if (!wait) {
+		csd = this_cpu_ptr(&csd_data);
+		csd_lock(csd);
+	}
+
+	err = generic_exec_single(cpu, csd, func, info);
+
+	if (wait)
+		csd_lock_wait(csd);
 
 	put_cpu();
 
@@ -321,7 +327,15 @@ int smp_call_function_single_async(int cpu, struct call_single_data *csd)
 	int err = 0;
 
 	preempt_disable();
-	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
+
+	/* We could deadlock if we have to wait here with interrupts disabled! */
+	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
+		csd_lock_wait(csd);
+
+	csd->flags = CSD_FLAG_LOCK;
+	smp_wmb();
+
+	err = generic_exec_single(cpu, csd, csd->func, csd->info);
 	preempt_enable();
 
 	return err;
@@ -433,6 +447,8 @@ void smp_call_function_many(const struct cpumask *mask,
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
 
 		csd_lock(csd);
+		if (wait)
+			csd->flags |= CSD_FLAG_SYNCHRONOUS;
 		csd->func = func;
 		csd->info = info;
 		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
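
Taken together, the smp.c changes turn csd->flags into an acquire/release handshake (csd_unlock() is now an smp_store_release() and csd_lock_wait() spins with smp_load_acquire()) and let the IPI handler pick the unlock order per request. A condensed restatement of the patched flush_smp_call_function_queue() loop, with the rationale spelled out as comments; this is an illustrative paraphrase of the hunk above, not additional code:

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			/*
			 * Synchronous request: the sender spins in
			 * csd_lock_wait() on a csd that may live on its
			 * stack, so only release it (smp_store_release)
			 * after func() has finished using *info.
			 */
			func(info);
			csd_unlock(csd);
		} else {
			/*
			 * Asynchronous request: the sender owns the csd and
			 * the callback may reuse, re-post or free it, so
			 * drop CSD_FLAG_LOCK before invoking func().
			 */
			csd_unlock(csd);
			func(info);
		}
	}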