Diffstat (limited to 'include')
 include/linux/cpu.h              |  17
 include/linux/hardirq.h          |   4
 include/linux/init_task.h        |  11
 include/linux/pagemap.h          |   4
 include/linux/rcuclassic.h       | 178
 include/linux/rcupdate.h         |  98
 include/linux/rcupreempt.h       | 127
 include/linux/rcupreempt_trace.h |  97
 include/linux/rcutree.h          | 262
 include/linux/sched.h            |  32
 10 files changed, 157 insertions(+), 673 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4d668e05d458..47536197ffdd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,15 @@ struct notifier_block;
 
 #ifdef CONFIG_SMP
 /* Need to know about CPUs going up/down? */
+#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+#define cpu_notifier(fn, pri) {					\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = pri };	\
+	register_cpu_notifier(&fn##_nb);			\
+}
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
 #ifdef CONFIG_HOTPLUG_CPU
 extern int register_cpu_notifier(struct notifier_block *nb);
 extern void unregister_cpu_notifier(struct notifier_block *nb);
@@ -74,6 +83,8 @@ extern void cpu_maps_update_done(void);
 
 #else	/* CONFIG_SMP */
 
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
@@ -99,11 +110,7 @@ extern struct sysdev_class cpu_sysdev_class;
 
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
-#define hotcpu_notifier(fn, pri) {				\
-	static struct notifier_block fn##_nb __cpuinitdata =	\
-		{ .notifier_call = fn, .priority = pri };	\
-	register_cpu_notifier(&fn##_nb);			\
-}
+#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
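For context, a minimal sketch of how a caller might use the new cpu_notifier() wrapper; the mydrv_* names and the callback body are invented for illustration and are not part of this patch:

static int __cpuinit mydrv_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* set up per-CPU state for the CPU identified by hcpu */
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* tear down per-CPU state */
		break;
	}
	return NOTIFY_OK;
}

static int __init mydrv_init(void)
{
	/* Declares the static notifier_block and registers it in one step. */
	cpu_notifier(mydrv_cpu_callback, 0);
	return 0;
}

In a module built without CONFIG_HOTPLUG_CPU the macro collapses to a no-op that still evaluates (void)(fn), so the callback does not trigger a defined-but-unused warning.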
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8246c697863d..330cb31bb496 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -132,7 +132,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
+#if defined(CONFIG_NO_HZ)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
@@ -142,7 +142,7 @@ extern void rcu_nmi_exit(void);
 # define rcu_irq_exit() do { } while (0)
 # define rcu_nmi_enter() do { } while (0)
 # define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
+#endif /* #if defined(CONFIG_NO_HZ) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be43..9e7f2e8fc66e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -94,6 +94,16 @@ extern struct group_info init_groups;
 # define CAP_INIT_BSET  CAP_INIT_EFF_SET
 #endif
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_PREEMPT(tsk)					\
+	.rcu_read_lock_nesting = 0,					\
+	.rcu_read_unlock_special = 0,					\
+	.rcu_blocked_node = NULL,					\
+	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+#else
+#define INIT_TASK_RCU_PREEMPT(tsk)
+#endif
+
 extern struct cred init_cred;
 
 #ifdef CONFIG_PERF_COUNTERS
@@ -173,6 +183,7 @@ extern struct cred init_cred;
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
+	INIT_TASK_RCU_PREEMPT(tsk)					\
 }
 
 
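INIT_TASK_RCU_PREEMPT() deliberately expands to a trailing run of designated initializers (or to nothing), so it can sit in the middle of the INIT_TASK(tsk) initializer list above. Roughly, with CONFIG_TREE_PREEMPT_RCU=y the relevant part of init_task ends up as the following; this is an abbreviated sketch, not the full initializer:

struct task_struct init_task = {
	/* ... existing INIT_TASK(tsk) fields ... */
	.rcu_read_lock_nesting = 0,
	.rcu_read_unlock_special = 0,
	.rcu_blocked_node = NULL,
	.rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
	/* ... remaining fields ... */
};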
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index aec3252afcf5..ed5d7501e181 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -132,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page)
 {
 	VM_BUG_ON(in_interrupt());
 
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
 # ifdef CONFIG_PREEMPT
 	VM_BUG_ON(!in_atomic());
 # endif
@@ -170,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 {
 	VM_BUG_ON(in_interrupt());
 
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
 # ifdef CONFIG_PREEMPT
 	VM_BUG_ON(!in_atomic());
 # endif
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
deleted file mode 100644
index bfd92e1e5d2c..000000000000
--- a/include/linux/rcuclassic.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (classic version)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2001
- *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUCLASSIC_H
-#define __LINUX_RCUCLASSIC_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ) /* for rcp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-	long	cur;		/* Current batch number. */
-	long	completed;	/* Number of the last completed batch */
-	long	pending;	/* Number of the last pending batch */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-	unsigned long gp_start;	/* Time at which GP started in jiffies. */
-	unsigned long jiffies_stall;
-				/* Time at which to check for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-	int	signaled;
-
-	spinlock_t	lock	____cacheline_internodealigned_in_smp;
-	DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
-					  /* current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
-	return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
-	return (a - b) > 0;
-}
-
-/* Per-CPU data for Read-Copy UPdate. */
-struct rcu_data {
-	/* 1) quiescent state handling : */
-	long		quiescbatch;	/* Batch # for grace period */
-	int		passed_quiesc;	/* User-mode/idle loop etc. */
-	int		qs_pending;	/* core waits for quiesc state */
-
-	/* 2) batch handling */
-	/*
-	 * if nxtlist is not NULL, then:
-	 * batch:
-	 *	The batch # for the last entry of nxtlist
-	 * [*nxttail[1], NULL = *nxttail[2]):
-	 *	Entries that batch # <= batch
-	 * [*nxttail[0], *nxttail[1]):
-	 *	Entries that batch # <= batch - 1
-	 * [nxtlist, *nxttail[0]):
-	 *	Entries that batch # <= batch - 2
-	 *	The grace period for these entries has completed, and
-	 *	the other grace-period-completed entries may be moved
-	 *	here temporarily in rcu_process_callbacks().
-	 */
-	long		batch;
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail[3];
-	long		qlen;		/* # of queued callbacks */
-	struct rcu_head *donelist;
-	struct rcu_head **donetail;
-	long		blimit;		/* Upper limit on a processed batch */
-	int cpu;
-	struct rcu_head barrier;
-};
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
-
-#define __rcu_read_lock() \
-	do { \
-		preempt_disable(); \
-		__acquire(RCU); \
-		rcu_read_acquire(); \
-	} while (0)
-#define __rcu_read_unlock() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU); \
-		preempt_enable(); \
-	} while (0)
-#define __rcu_read_lock_bh() \
-	do { \
-		local_bh_disable(); \
-		__acquire(RCU_BH); \
-		rcu_read_acquire(); \
-	} while (0)
-#define __rcu_read_unlock_bh() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU_BH); \
-		local_bh_enable(); \
-	} while (0)
-
-#define __synchronize_sched() synchronize_rcu()
-
-#define call_rcu_sched(head, func) call_rcu(head, func)
-
-extern void __rcu_init(void);
-#define rcu_init_sched()	do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-
-#define rcu_enter_nohz()	do { } while (0)
-#define rcu_exit_nohz()		do { } while (0)
-
-/* A context switch is a grace period for rcuclassic. */
-static inline int rcu_blocking_is_gp(void)
-{
-	return num_online_cpus() == 1;
-}
-
-#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 15fbb3ca634d..95e0615f4d75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,18 +51,26 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
-/* Internal to kernel, but needed by rcupreempt.h. */
+/* Exported common interfaces */
+extern void synchronize_rcu(void);
+extern void synchronize_rcu_bh(void);
+extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
+extern void synchronize_sched_expedited(void);
+extern int sched_expedited_torture_stats(char *page);
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
+extern int rcu_needs_cpu(int cpu);
 extern int rcu_scheduler_active;
 
-#if defined(CONFIG_CLASSIC_RCU)
-#include <linux/rcuclassic.h>
-#elif defined(CONFIG_TREE_RCU)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
-#elif defined(CONFIG_PREEMPT_RCU)
-#include <linux/rcupreempt.h>
 #else
 #error "Unknown RCU implementation specified to kernel configuration"
-#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
+#endif
 
 #define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -70,6 +78,16 @@ extern int rcu_scheduler_active;
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()	\
+		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()	do { } while (0)
+# define rcu_read_release()	do { } while (0)
+#endif
+
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
@@ -99,7 +117,12 @@ extern int rcu_scheduler_active;
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock() __rcu_read_lock()
+static inline void rcu_read_lock(void)
+{
+	__rcu_read_lock();
+	__acquire(RCU);
+	rcu_read_acquire();
+}
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
@@ -116,7 +139,12 @@ extern int rcu_scheduler_active;
  * used as well. RCU does not care how the writers keep out of each
  * others' way, as long as they do so.
  */
-#define rcu_read_unlock() __rcu_read_unlock()
+static inline void rcu_read_unlock(void)
+{
+	rcu_read_release();
+	__release(RCU);
+	__rcu_read_unlock();
+}
 
 /**
  * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -129,14 +157,24 @@ extern int rcu_scheduler_active;
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh() __rcu_read_lock_bh()
+static inline void rcu_read_lock_bh(void)
+{
+	__rcu_read_lock_bh();
+	__acquire(RCU_BH);
+	rcu_read_acquire();
+}
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
  *
  * See rcu_read_lock_bh() for more information.
  */
-#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
+static inline void rcu_read_unlock_bh(void)
+{
+	rcu_read_release();
+	__release(RCU_BH);
+	__rcu_read_unlock_bh();
+}
 
 /**
  * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
@@ -147,17 +185,34 @@ extern int rcu_scheduler_active;
  * - call_rcu_sched() and rcu_barrier_sched()
  * on the write-side to insure proper synchronization.
  */
-#define rcu_read_lock_sched() preempt_disable()
-#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
+static inline void rcu_read_lock_sched(void)
+{
+	preempt_disable();
+	__acquire(RCU_SCHED);
+	rcu_read_acquire();
+}
+static inline notrace void rcu_read_lock_sched_notrace(void)
+{
+	preempt_disable_notrace();
+	__acquire(RCU_SCHED);
+}
 
 /*
  * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
  *
  * See rcu_read_lock_sched for more information.
  */
-#define rcu_read_unlock_sched() preempt_enable()
-#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
-
+static inline void rcu_read_unlock_sched(void)
+{
+	rcu_read_release();
+	__release(RCU_SCHED);
+	preempt_enable();
+}
+static inline notrace void rcu_read_unlock_sched_notrace(void)
+{
+	__release(RCU_SCHED);
+	preempt_enable_notrace();
+}
 
 
 /**
@@ -259,15 +314,4 @@ extern void call_rcu(struct rcu_head *head,
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
 
-/* Exported common interfaces */
-extern void synchronize_rcu(void);
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_scheduler_starting(void);
-extern int rcu_needs_cpu(int cpu);
-
 #endif /* __LINUX_RCUPDATE_H */
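Turning rcu_read_lock() and friends from macros into static inline functions lets the lockdep (rcu_read_acquire()/rcu_read_release()) and sparse (__acquire()/__release()) annotations live in one place, independent of which __rcu_read_lock() implementation is configured. Reader and updater usage is unchanged; a minimal sketch follows, with the foo structure and helper names invented for illustration (error handling and update-side locking omitted):

struct foo {
	int val;
	struct rcu_head rcu;
};

static struct foo *gbl_foo;

static int foo_get_val(void)
{
	int val;

	rcu_read_lock();			/* now also checked by lockdep and sparse */
	val = rcu_dereference(gbl_foo)->val;
	rcu_read_unlock();
	return val;
}

static void foo_reclaim(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

static void foo_set_val(int new_val)		/* assumes a single updater */
{
	struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	struct foo *old_fp = gbl_foo;

	new_fp->val = new_val;
	rcu_assign_pointer(gbl_foo, new_fp);	/* publish the new version */
	call_rcu(&old_fp->rcu, foo_reclaim);	/* free the old one after a grace period */
}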
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
deleted file mode 100644
index fce522782ffa..000000000000
--- a/include/linux/rcupreempt.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author:  Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUPREEMPT_H
-#define __LINUX_RCUPREEMPT_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-extern void rcu_qsctr_inc(int cpu);
-static inline void rcu_bh_qsctr_inc(int cpu) { }
-
-/*
- * Someone might want to pass call_rcu_bh as a function pointer.
- * So this needs to just be a rename and not a macro function.
- *  (no parentheses)
- */
-#define call_rcu_bh	call_rcu
-
-/**
- * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full
- * synchronize_sched()-style grace period elapses, in other words after
- * all currently executing preempt-disabled sections of code (including
- * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
- * completed.
- */
-extern void call_rcu_sched(struct rcu_head *head,
-			   void (*func)(struct rcu_head *head));
-
-extern void __rcu_read_lock(void)	__acquires(RCU);
-extern void __rcu_read_unlock(void)	__releases(RCU);
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#define __rcu_read_lock_bh()	{ rcu_read_lock(); local_bh_disable(); }
-#define __rcu_read_unlock_bh()	{ local_bh_enable(); rcu_read_unlock(); }
-
-extern void __synchronize_sched(void);
-
-extern void __rcu_init(void);
-extern void rcu_init_sched(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-
-/*
- * Return the number of RCU batches processed thus far. Useful for debug
- * and statistic. The _bh variant is identifcal to straight RCU
- */
-static inline long rcu_batches_completed_bh(void)
-{
-	return rcu_batches_completed();
-}
-
-#ifdef CONFIG_RCU_TRACE
-struct rcupreempt_trace;
-extern long *rcupreempt_flipctr(int cpu);
-extern long rcupreempt_data_completed(void);
-extern int rcupreempt_flip_flag(int cpu);
-extern int rcupreempt_mb_flag(int cpu);
-extern char *rcupreempt_try_flip_state_name(void);
-extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
-#endif
-
-struct softirq_action;
-
-#ifdef CONFIG_NO_HZ
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-#else
-# define rcu_enter_nohz()	do { } while (0)
-# define rcu_exit_nohz()	do { } while (0)
-#endif
-
-/*
- * A context switch is a grace period for rcupreempt synchronize_rcu()
- * only during early boot, before the scheduler has been initialized.
- * So, how the heck do we get a context switch?  Well, if the caller
- * invokes synchronize_rcu(), they are willing to accept a context
- * switch, so we simply pretend that one happened.
- *
- * After boot, there might be a blocked or preempted task in an RCU
- * read-side critical section, so we cannot then take the fastpath.
- */
-static inline int rcu_blocking_is_gp(void)
-{
-	return num_online_cpus() == 1 && !rcu_scheduler_active;
-}
-
-#endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
deleted file mode 100644
index b99ae073192a..000000000000
--- a/include/linux/rcupreempt_trace.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author:  Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
- *		http://lwn.net/Articles/253651/
- */
-
-#ifndef __LINUX_RCUPREEMPT_TRACE_H
-#define __LINUX_RCUPREEMPT_TRACE_H
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <asm/atomic.h>
-
-/*
- * PREEMPT_RCU data structures.
- */
-
-struct rcupreempt_trace {
-	long		next_length;
-	long		next_add;
-	long		wait_length;
-	long		wait_add;
-	long		done_length;
-	long		done_add;
-	long		done_remove;
-	atomic_t	done_invoked;
-	long		rcu_check_callbacks;
-	atomic_t	rcu_try_flip_1;
-	atomic_t	rcu_try_flip_e1;
-	long		rcu_try_flip_i1;
-	long		rcu_try_flip_ie1;
-	long		rcu_try_flip_g1;
-	long		rcu_try_flip_a1;
-	long		rcu_try_flip_ae1;
-	long		rcu_try_flip_a2;
-	long		rcu_try_flip_z1;
-	long		rcu_try_flip_ze1;
-	long		rcu_try_flip_z2;
-	long		rcu_try_flip_m1;
-	long		rcu_try_flip_me1;
-	long		rcu_try_flip_m2;
-};
-
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(fn, arg)	fn(arg);
-#else
-#define RCU_TRACE(fn, arg)
-#endif
-
-extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
-
-#endif /* __LINUX_RCUPREEMPT_TRACE_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5a5153806c42..a89307717825 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,264 +30,57 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
 
-/*
- * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
- * In theory, it should be possible to add more levels straightforwardly.
- * In practice, this has not been tested, so there is probably some
- * bug somewhere.
- */
-#define MAX_RCU_LVLS 3
-#define RCU_FANOUT		(CONFIG_RCU_FANOUT)
-#define RCU_FANOUT_SQ		(RCU_FANOUT * RCU_FANOUT)
-#define RCU_FANOUT_CUBE		(RCU_FANOUT_SQ * RCU_FANOUT)
-
-#if NR_CPUS <= RCU_FANOUT
-# define NUM_RCU_LVLS		1
-# define NUM_RCU_LVL_0		1
-# define NUM_RCU_LVL_1		(NR_CPUS)
-# define NUM_RCU_LVL_2		0
-# define NUM_RCU_LVL_3		0
-#elif NR_CPUS <= RCU_FANOUT_SQ
-# define NUM_RCU_LVLS		2
-# define NUM_RCU_LVL_0		1
-# define NUM_RCU_LVL_1		(((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
-# define NUM_RCU_LVL_2		(NR_CPUS)
-# define NUM_RCU_LVL_3		0
-#elif NR_CPUS <= RCU_FANOUT_CUBE
-# define NUM_RCU_LVLS		3
-# define NUM_RCU_LVL_0		1
-# define NUM_RCU_LVL_1		(((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
-# define NUM_RCU_LVL_2		(((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
-# define NUM_RCU_LVL_3		NR_CPUS
-#else
-# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
-#endif /* #if (NR_CPUS) <= RCU_FANOUT */
-
-#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
-#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
-
-/*
- * Dynticks per-CPU state.
- */
-struct rcu_dynticks {
-	int dynticks_nesting;	/* Track nesting level, sort of. */
-	int dynticks;		/* Even value for dynticks-idle, else odd. */
-	int dynticks_nmi;	/* Even value for either dynticks-idle or */
-				/* not in nmi handler, else odd.  So this */
-				/* remains even for nmi from irq handler. */
-};
-
-/*
- * Definition for node within the RCU grace-period-detection hierarchy.
- */
-struct rcu_node {
-	spinlock_t lock;
-	unsigned long qsmask;	/* CPUs or groups that need to switch in */
-				/* order for current grace period to proceed.*/
-	unsigned long qsmaskinit;
-				/* Per-GP initialization for qsmask. */
-	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
-	int	grplo;		/* lowest-numbered CPU or group here. */
-	int	grphi;		/* highest-numbered CPU or group here. */
-	u8	grpnum;		/* CPU/group number for next level up. */
-	u8	level;		/* root is at level 0. */
-	struct rcu_node *parent;
-} ____cacheline_internodealigned_in_smp;
-
-/* Index values for nxttail array in struct rcu_data. */
-#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
-#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
-#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
-#define RCU_NEXT_TAIL		3
-#define RCU_NEXT_SIZE		4
-
-/* Per-CPU data for read-copy update. */
-struct rcu_data {
-	/* 1) quiescent-state and grace-period handling : */
-	long		completed;	/* Track rsp->completed gp number */
-					/* in order to detect GP end. */
-	long		gpnum;		/* Highest gp number that this CPU */
-					/* is aware of having started. */
-	long		passed_quiesc_completed;
-					/* Value of completed at time of qs. */
-	bool		passed_quiesc;	/* User-mode/idle loop etc. */
-	bool		qs_pending;	/* Core waits for quiesc state. */
-	bool		beenonline;	/* CPU online at least once. */
-	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
-	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
-
-	/* 2) batch handling */
-	/*
-	 * If nxtlist is not NULL, it is partitioned as follows.
-	 * Any of the partitions might be empty, in which case the
-	 * pointer to that partition will be equal to the pointer for
-	 * the following partition.  When the list is empty, all of
-	 * the nxttail elements point to nxtlist, which is NULL.
-	 *
-	 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
-	 *	Entries that might have arrived after current GP ended
-	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
-	 *	Entries known to have arrived before current GP ended
-	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
-	 *	Entries that batch # <= ->completed - 1: waiting for current GP
-	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
-	 *	Entries that batch # <= ->completed
-	 *	The grace period for these entries has completed, and
-	 *	the other grace-period-completed entries may be moved
-	 *	here temporarily in rcu_process_callbacks().
-	 */
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail[RCU_NEXT_SIZE];
-	long		qlen;		/* # of queued callbacks */
-	long		blimit;		/* Upper limit on a processed batch */
-
-#ifdef CONFIG_NO_HZ
-	/* 3) dynticks interface. */
-	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
-	int dynticks_snap;		/* Per-GP tracking for dynticks. */
-	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
-#endif /* #ifdef CONFIG_NO_HZ */
-
-	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-#ifdef CONFIG_NO_HZ
-	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
-#endif /* #ifdef CONFIG_NO_HZ */
-	unsigned long offline_fqs;	/* Kicked due to being offline. */
-	unsigned long resched_ipi;	/* Sent a resched IPI. */
-
-	/* 5) __rcu_pending() statistics. */
-	long n_rcu_pending;		/* rcu_pending() calls since boot. */
-	long n_rp_qs_pending;
-	long n_rp_cb_ready;
-	long n_rp_cpu_needs_gp;
-	long n_rp_gp_completed;
-	long n_rp_gp_started;
-	long n_rp_need_fqs;
-	long n_rp_need_nothing;
-
-	int cpu;
-};
-
-/* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
-#ifdef CONFIG_NO_HZ
-#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
-#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT		RCU_FORCE_QS
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-#define RCU_JIFFIES_TILL_FORCE_QS	3	/* for rsp->jiffies_force_qs */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
-						/* to take at least one */
-						/* scheduling clock irq */
-						/* before ratting on them. */
-
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/*
- * RCU global state, including node hierarchy.  This hierarchy is
- * represented in "heap" form in a dense array.  The root (first level)
- * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
- * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
- * and the third level in ->node[m+1] and following (->node[m+1] referenced
- * by ->level[2]).  The number of levels is determined by the number of
- * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
- * consisting of a single rcu_node.
- */
-struct rcu_state {
-	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
-	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
-	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
-	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
-	struct rcu_data *rda[NR_CPUS];		/* array of rdp pointers. */
-
-	/* The following fields are guarded by the root rcu_node's lock. */
-
-	u8	signaled ____cacheline_internodealigned_in_smp;
-						/* Force QS state. */
-	long	gpnum;				/* Current gp number. */
-	long	completed;			/* # of last completed gp. */
-	spinlock_t onofflock;			/* exclude on/offline and */
-						/* starting new GP. */
-	spinlock_t fqslock;			/* Only one task forcing */
-						/* quiescent states. */
-	unsigned long jiffies_force_qs;		/* Time at which to invoke */
-						/* force_quiescent_state(). */
-	unsigned long n_force_qs;		/* Number of calls to */
-						/* force_quiescent_state(). */
-	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
-						/* due to lock unavailable. */
-	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
-						/* due to no GP active. */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-	unsigned long gp_start;			/* Time at which GP started, */
-						/* but in jiffies. */
-	unsigned long jiffies_stall;		/* Time at which to check */
-						/* for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-#ifdef CONFIG_NO_HZ
-	long dynticks_completed;		/* Value of completed @ snap. */
-#endif /* #ifdef CONFIG_NO_HZ */
-};
+extern int rcu_needs_cpu(int cpu);
 
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
+#ifdef CONFIG_TREE_PREEMPT_RCU
 
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+extern void exit_rcu(void);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static inline void __rcu_read_lock(void)
 {
 	preempt_disable();
-	__acquire(RCU);
-	rcu_read_acquire();
 }
+
 static inline void __rcu_read_unlock(void)
 {
-	rcu_read_release();
-	__release(RCU);
 	preempt_enable();
 }
+
+static inline void exit_rcu(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
 static inline void __rcu_read_lock_bh(void)
 {
 	local_bh_disable();
-	__acquire(RCU_BH);
-	rcu_read_acquire();
 }
 static inline void __rcu_read_unlock_bh(void)
 {
-	rcu_read_release();
-	__release(RCU_BH);
 	local_bh_enable();
 }
 
 #define __synchronize_sched() synchronize_rcu()
 
-#define call_rcu_sched(head, func) call_rcu(head, func)
+extern void call_rcu_sched(struct rcu_head *head,
+			   void (*func)(struct rcu_head *rcu));
 
-static inline void rcu_init_sched(void)
+static inline void synchronize_rcu_expedited(void)
 {
+	synchronize_sched_expedited();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+	synchronize_sched_expedited();
 }
 
 extern void __rcu_init(void);
@@ -296,6 +89,11 @@ extern void rcu_restart_cpu(int cpu);
 
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
+extern long rcu_batches_completed_sched(void);
+
+static inline void rcu_init_sched(void)
+{
+}
 
 #ifdef CONFIG_NO_HZ
 void rcu_enter_nohz(void);
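The new expedited variants simply wrap synchronize_sched_expedited(), trading extra per-call overhead for a much shorter grace period. A hedged sketch of the kind of rare, latency-sensitive update that might use them; my_entry and the update-side locking context are invented for illustration:

static void my_remove_entry(struct my_entry *p)
{
	list_del_rcu(&p->list);		/* caller holds the update-side lock */
	synchronize_rcu_expedited();	/* wait for readers, but quickly */
	kfree(p);
}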
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9304027673b0..379531c08975 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1163,6 +1163,8 @@ struct sched_rt_entity {
 #endif
 };
 
+struct rcu_node;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1206,10 +1208,12 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 
-#ifdef CONFIG_PREEMPT_RCU
+#ifdef CONFIG_TREE_PREEMPT_RCU
 	int rcu_read_lock_nesting;
-	int rcu_flipctr_idx;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+	char rcu_read_unlock_special;
+	struct rcu_node *rcu_blocked_node;
+	struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1725,6 +1729,28 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_GOT_QS  (1 << 2) /* CPU has responded to RCU core. */
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+	p->rcu_read_lock_nesting = 0;
+	p->rcu_read_unlock_special = 0;
+	p->rcu_blocked_node = NULL;
+	INIT_LIST_HEAD(&p->rcu_node_entry);
+}
+
+#else
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+}
+
+#endif
+
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
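rcu_copy_process() gives each new task a clean preemptible-RCU state: no read-side nesting, no deferred unlock work pending, and no entry on any rcu_node blocked-tasks list. Presumably it is called from the fork path; a rough sketch of the intended call site, with the surrounding kernel/fork.c code abbreviated and the exact placement an assumption rather than something shown in this diff:

static struct task_struct *copy_process(/* ... */)
{
	struct task_struct *p;

	/* ... dup_task_struct() and other per-task setup ... */
	rcu_copy_process(p);	/* child starts outside any RCU read-side critical section */
	/* ... remainder of copy_process() ... */
	return p;
}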