Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/rcuclassic.h |  3
-rw-r--r--   include/linux/rcupdate.h   | 22
-rw-r--r--   include/linux/rcupreempt.h | 42
3 files changed, 61 insertions, 6 deletions
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index b3aa05baab8a..8c774905dcfe 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -151,7 +151,10 @@ extern struct lockdep_map rcu_lock_map;
 
 #define __synchronize_sched() synchronize_rcu()
 
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
 extern void __rcu_init(void);
+#define rcu_init_sched()	do { } while (0)
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
 
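
Under classic (non-preemptible) RCU, every RCU read-side critical section already runs with preemption disabled, so an ordinary RCU grace period is also a sched grace period; the two new #defines therefore alias call_rcu_sched() to call_rcu() and turn rcu_init_sched() into a no-op. A minimal sketch of the intent, assuming a hypothetical generic init path (rcu_init_example() is illustrative, not part of this patch):

void __init rcu_init_example(void)
{
	__rcu_init();		/* flavor-specific initialization */
	rcu_init_sched();	/* no-op under classic RCU; real work only in rcupreempt */
}

Generic code can thus call both names unconditionally and let the classic definitions compile away.
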
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ec2fc5b32646..411969cb5243 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -40,6 +40,7 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 #include <linux/lockdep.h>
+#include <linux/completion.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -168,6 +169,27 @@ struct rcu_head {
 		(p) = (v); \
 	})
 
+/* Infrastructure to implement the synchronize_() primitives. */
+
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+
+extern void wakeme_after_rcu(struct rcu_head *head);
+
+#define synchronize_rcu_xxx(name, func) \
+void name(void) \
+{ \
+	struct rcu_synchronize rcu; \
+	\
+	init_completion(&rcu.completion); \
+	/* Will wake me after RCU finished. */ \
+	func(&rcu.head, wakeme_after_rcu); \
+	/* Wait for it. */ \
+	wait_for_completion(&rcu.completion); \
+}
+
 /**
  * synchronize_sched - block until all CPUs have exited any non-preemptive
  * kernel code sequences.
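
The synchronize_rcu_xxx() macro added above builds a blocking synchronize_*() primitive out of any call_rcu()-style callback-posting function. As an illustration only (synchronize_my_flavor and call_my_flavor are placeholder names, not part of this patch), expanding

	synchronize_rcu_xxx(synchronize_my_flavor, call_my_flavor)

produces roughly the following function, which posts a callback and then sleeps on the completion until wakeme_after_rcu() runs at the end of the grace period:

void synchronize_my_flavor(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_my_flavor(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
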
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 8a05c7e20bc4..f04b64eca636 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -40,10 +40,39 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
-#define rcu_qsctr_inc(cpu)
+struct rcu_dyntick_sched {
+	int dynticks;
+	int dynticks_snap;
+	int sched_qs;
+	int sched_qs_snap;
+	int sched_dynticks_snap;
+};
+
+DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
+
+static inline void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+	rdssp->sched_qs++;
+}
 #define rcu_bh_qsctr_inc(cpu)
 #define call_rcu_bh(head, rcu) call_rcu(head, rcu)
 
+/**
+ * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full
+ * synchronize_sched()-style grace period elapses, in other words after
+ * all currently executing preempt-disabled sections of code (including
+ * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
+ * completed.
+ */
+extern void call_rcu_sched(struct rcu_head *head,
+			   void (*func)(struct rcu_head *head));
+
 extern void __rcu_read_lock(void)	__acquires(RCU);
 extern void __rcu_read_unlock(void)	__releases(RCU);
 extern int rcu_pending(int cpu);
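
For illustration, a typical call_rcu_sched() caller posts a callback that reclaims memory once all currently running preempt-disabled sections have finished. struct foo, foo_reclaim() and foo_retire() below are hypothetical names used only for this sketch (assumes the usual <linux/slab.h> helpers):

struct foo {
	struct rcu_head rcu;
	int data;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

static void foo_retire(struct foo *fp)
{
	/* fp must already be unreachable to new readers at this point. */
	call_rcu_sched(&fp->rcu, foo_reclaim);
}
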
@@ -55,6 +84,7 @@ extern int rcu_needs_cpu(int cpu);
 extern void __synchronize_sched(void);
 
 extern void __rcu_init(void);
+extern void rcu_init_sched(void);
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
 extern long rcu_batches_completed(void);
@@ -81,20 +111,20 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(long, dynticks_progress_counter);
+DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
 
 static inline void rcu_enter_nohz(void)
 {
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	__get_cpu_var(dynticks_progress_counter)++;
-	WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1);
 }
 
 static inline void rcu_exit_nohz(void)
 {
-	__get_cpu_var(dynticks_progress_counter)++;
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
-	WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1));
 }
 
 #else /* CONFIG_NO_HZ */
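
For context, the two hooks above are meant to bracket a CPU's dynticks-idle period: .dynticks is left even after rcu_enter_nohz() and odd after rcu_exit_nohz(), which is exactly what the WARN_ON()s check. A rough sketch of the expected pairing (example_idle() is hypothetical, not part of this patch):

static void example_idle(void)
{
	rcu_enter_nohz();	/* .dynticks becomes even: CPU is quiescent for RCU */
	/* ... tick stopped, CPU sleeps ... */
	rcu_exit_nohz();	/* .dynticks becomes odd: RCU must watch this CPU again */
}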