about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2010-02-25 17:06:47 -0500
committerIngo Molnar <mingo@elte.hu>2010-02-26 02:20:46 -0500
commitd9f1bb6ad7fc53c406706f47858dd5ff030b14a3 (patch)
tree4af519d68b396e372e6a5afbb5e968de3bacd123
parent056ba4a9bea5f32781a36b797c562fb731e5eaa6 (diff)
rcu: Make rcu_read_lock_sched_held() take boot time into account
Before the scheduler starts, all tasks are non-preemptible by definition. So, during that time, rcu_read_lock_sched_held() needs to always return "true". This patch makes that be so.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267135607-7056-2-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--include/linux/rcupdate.h4
-rw-r--r--include/linux/rcutiny.h4
-rw-r--r--include/linux/rcutree.h1
-rw-r--r--kernel/rcupdate.c18
-rw-r--r--kernel/rcutree.c19
5 files changed, 21 insertions, 25 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1a4de31bd7b4..fcea332a8424 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -62,6 +62,8 @@ extern int sched_expedited_torture_stats(char *page);
62 62
63/* Internal to kernel */ 63/* Internal to kernel */
64extern void rcu_init(void); 64extern void rcu_init(void);
65extern int rcu_scheduler_active;
66extern void rcu_scheduler_starting(void);
65 67
66#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 68#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
67#include <linux/rcutree.h> 69#include <linux/rcutree.h>
@@ -140,7 +142,7 @@ static inline int rcu_read_lock_sched_held(void)
140 142
141 if (debug_locks) 143 if (debug_locks)
142 lockdep_opinion = lock_is_held(&rcu_sched_lock_map); 144 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
143 return lockdep_opinion || preempt_count() != 0; 145 return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
144} 146}
145 147
146#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 148#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 2b70d4e37383..a5195875480a 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -105,10 +105,6 @@ static inline void rcu_exit_nohz(void)
105 105
106#endif /* #else #ifdef CONFIG_NO_HZ */ 106#endif /* #else #ifdef CONFIG_NO_HZ */
107 107
108static inline void rcu_scheduler_starting(void)
109{
110}
111
112static inline void exit_rcu(void) 108static inline void exit_rcu(void)
113{ 109{
114} 110}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 704a010f686c..42cc3a04779e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,7 +35,6 @@ struct notifier_block;
35extern void rcu_sched_qs(int cpu); 35extern void rcu_sched_qs(int cpu);
36extern void rcu_bh_qs(int cpu); 36extern void rcu_bh_qs(int cpu);
37extern int rcu_needs_cpu(int cpu); 37extern int rcu_needs_cpu(int cpu);
38extern void rcu_scheduler_starting(void);
39extern int rcu_expedited_torture_stats(char *page); 38extern int rcu_expedited_torture_stats(char *page);
40 39
41#ifdef CONFIG_TREE_PREEMPT_RCU 40#ifdef CONFIG_TREE_PREEMPT_RCU
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 033cb55c26df..7bfa004572b1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,6 +44,7 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
47 48
48#ifdef CONFIG_DEBUG_LOCK_ALLOC 49#ifdef CONFIG_DEBUG_LOCK_ALLOC
49static struct lock_class_key rcu_lock_key; 50static struct lock_class_key rcu_lock_key;
@@ -62,6 +63,23 @@ struct lockdep_map rcu_sched_lock_map =
62EXPORT_SYMBOL_GPL(rcu_sched_lock_map); 63EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
63#endif 64#endif
64 65
66int rcu_scheduler_active __read_mostly;
67
68/*
69 * This function is invoked towards the end of the scheduler's initialization
70 * process. Before this is called, the idle task might contain
71 * RCU read-side critical sections (during which time, this idle
72 * task is booting the system). After this function is called, the
73 * idle tasks are prohibited from containing RCU read-side critical
74 * sections.
75 */
76void rcu_scheduler_starting(void)
77{
78 WARN_ON(num_online_cpus() != 1);
79 WARN_ON(nr_context_switches() > 0);
80 rcu_scheduler_active = 1;
81}
82
65/* 83/*
66 * Awaken the corresponding synchronize_rcu() instance now that a 84 * Awaken the corresponding synchronize_rcu() instance now that a
67 * grace period has elapsed. 85 * grace period has elapsed.
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 525d39810616..335bfe4f0076 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,7 +46,6 @@
46#include <linux/cpu.h> 46#include <linux/cpu.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/time.h> 48#include <linux/time.h>
49#include <linux/kernel_stat.h>
50 49
51#include "rcutree.h" 50#include "rcutree.h"
52 51
@@ -81,9 +80,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 80struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 81DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
83 82
84static int rcu_scheduler_active __read_mostly;
85
86
87/* 83/*
88 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s 84 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
89 * permit this function to be invoked without holding the root rcu_node 85 * permit this function to be invoked without holding the root rcu_node
@@ -1565,21 +1561,6 @@ static int rcu_needs_cpu_quick_check(int cpu)
1565 rcu_preempt_needs_cpu(cpu); 1561 rcu_preempt_needs_cpu(cpu);
1566} 1562}
1567 1563
1568/*
1569 * This function is invoked towards the end of the scheduler's initialization
1570 * process. Before this is called, the idle task might contain
1571 * RCU read-side critical sections (during which time, this idle
1572 * task is booting the system). After this function is called, the
1573 * idle tasks are prohibited from containing RCU read-side critical
1574 * sections.
1575 */
1576void rcu_scheduler_starting(void)
1577{
1578 WARN_ON(num_online_cpus() != 1);
1579 WARN_ON(nr_context_switches() > 0);
1580 rcu_scheduler_active = 1;
1581}
1582
1583static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; 1564static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1584static atomic_t rcu_barrier_cpu_count; 1565static atomic_t rcu_barrier_cpu_count;
1585static DEFINE_MUTEX(rcu_barrier_mutex); 1566static DEFINE_MUTEX(rcu_barrier_mutex);