diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2010-02-25 17:06:47 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-02-26 02:20:46 -0500 |
commit | d9f1bb6ad7fc53c406706f47858dd5ff030b14a3 (patch) | |
tree | 4af519d68b396e372e6a5afbb5e968de3bacd123 /kernel/rcutree.c | |
parent | 056ba4a9bea5f32781a36b797c562fb731e5eaa6 (diff) |
rcu: Make rcu_read_lock_sched_held() take boot time into account
Before the scheduler starts, all tasks are non-preemptible by
definition. So, during that time, rcu_read_lock_sched_held()
needs to always return "true". This patch implements that behavior.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267135607-7056-2-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 19 |
1 files changed, 0 insertions, 19 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 525d39810616..335bfe4f0076 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | #include <linux/kernel_stat.h> | ||
50 | 49 | ||
51 | #include "rcutree.h" | 50 | #include "rcutree.h" |
52 | 51 | ||
@@ -81,9 +80,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 80 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 81 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
83 | 82 | ||
84 | static int rcu_scheduler_active __read_mostly; | ||
85 | |||
86 | |||
87 | /* | 83 | /* |
88 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 84 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
89 | * permit this function to be invoked without holding the root rcu_node | 85 | * permit this function to be invoked without holding the root rcu_node |
@@ -1565,21 +1561,6 @@ static int rcu_needs_cpu_quick_check(int cpu) | |||
1565 | rcu_preempt_needs_cpu(cpu); | 1561 | rcu_preempt_needs_cpu(cpu); |
1566 | } | 1562 | } |
1567 | 1563 | ||
1568 | /* | ||
1569 | * This function is invoked towards the end of the scheduler's initialization | ||
1570 | * process. Before this is called, the idle task might contain | ||
1571 | * RCU read-side critical sections (during which time, this idle | ||
1572 | * task is booting the system). After this function is called, the | ||
1573 | * idle tasks are prohibited from containing RCU read-side critical | ||
1574 | * sections. | ||
1575 | */ | ||
1576 | void rcu_scheduler_starting(void) | ||
1577 | { | ||
1578 | WARN_ON(num_online_cpus() != 1); | ||
1579 | WARN_ON(nr_context_switches() > 0); | ||
1580 | rcu_scheduler_active = 1; | ||
1581 | } | ||
1582 | |||
1583 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | 1564 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; |
1584 | static atomic_t rcu_barrier_cpu_count; | 1565 | static atomic_t rcu_barrier_cpu_count; |
1585 | static DEFINE_MUTEX(rcu_barrier_mutex); | 1566 | static DEFINE_MUTEX(rcu_barrier_mutex); |