Diffstat (limited to 'kernel/rcuclassic.c')
-rw-r--r--  kernel/rcuclassic.c  80
1 file changed, 80 insertions(+), 0 deletions(-)

diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index d4271146a9bd..d7ec731de75c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -47,6 +47,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/mutex.h>
+#include <linux/time.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -286,6 +287,81 @@ static void rcu_do_batch(struct rcu_data *rdp)
  * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
  * period (if necessary).
  */
+
+#ifdef CONFIG_DEBUG_RCU_STALL
+
+static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
+{
+        rcp->gp_check = get_seconds() + 3;
+}
+static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+        int cpu;
+        long delta;
+
+        /* Only let one CPU complain about others per time interval. */
+
+        spin_lock(&rcp->lock);
+        delta = get_seconds() - rcp->gp_check;
+        if (delta < 2L ||
+            cpus_empty(rcp->cpumask)) {
+                spin_unlock(&rcp->lock);
+                return;
+        }
+        rcp->gp_check = get_seconds() + 30;
+        spin_unlock(&rcp->lock);
+
+        /* OK, time to rat on our buddy... */
+
+        printk(KERN_ERR "RCU detected CPU stalls:");
+        for_each_cpu_mask(cpu, rcp->cpumask)
+                printk(" %d", cpu);
+        printk(" (detected by %d, t=%lu/%lu)\n",
+               smp_processor_id(), get_seconds(), rcp->gp_check);
+}
+static void print_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+        printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu)\n",
+               smp_processor_id(), get_seconds(), rcp->gp_check);
+        dump_stack();
+        spin_lock(&rcp->lock);
+        if ((long)(get_seconds() - rcp->gp_check) >= 0L)
+                rcp->gp_check = get_seconds() + 30;
+        spin_unlock(&rcp->lock);
+}
+static inline void check_cpu_stall(struct rcu_ctrlblk *rcp,
+                                   struct rcu_data *rdp)
+{
+        long delta;
+
+        delta = get_seconds() - rcp->gp_check;
+        if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0L) {
+
+                /* We haven't checked in, so go dump stack. */
+
+                print_cpu_stall(rcp);
+
+        } else if (!cpus_empty(rcp->cpumask) && delta >= 2L) {
+
+                /* They had two seconds to dump stack, so complain. */
+
+                print_other_cpu_stall(rcp);
+
+        }
+}
+
+#else /* #ifdef CONFIG_DEBUG_RCU_STALL */
+
+static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
+{
+}
+static inline void check_cpu_stall(struct rcu_ctrlblk *rcp,
+                                   struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_RCU_STALL */
+
 /*
  * Register a new batch of callbacks, and start it up if there is currently no
  * active batch and the batch to be registered has not already occurred.
@@ -296,6 +372,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
         if (rcp->cur != rcp->pending &&
             rcp->completed == rcp->cur) {
                 rcp->cur++;
+                record_gp_check_time(rcp);
 
                 /*
                  * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
@@ -489,6 +566,9 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 
 static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
+        /* Check for CPU stalls, if enabled. */
+        check_cpu_stall(rcp, rdp);
+
         if (rdp->nxtlist) {
                 /*
                  * This cpu has pending rcu entries and the grace period