about summary refs log tree commit diff stats
path: root/kernel/rcutree.h
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2013-05-02 11:37:49 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2013-05-02 11:54:19 -0400
commitc032862fba51a3ca504752d3a25186b324c5ce83 (patch)
tree955dc2ba4ab3df76ecc2bb780ee84aca04967e8d /kernel/rcutree.h
parentfda76e074c7737fc57855dd17c762e50ed526052 (diff)
parent8700c95adb033843fc163d112b9d21d4fda78018 (diff)
Merge commit '8700c95adb03' into timers/nohz
The full dynticks tree needs the latest RCU and sched upstream updates in order
to fix some dependencies. Merge a common upstream merge point that has these
updates.

Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/rcutree.h')
-rw-r--r--kernel/rcutree.h41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 38acc49da2c6..da77a8f57ff9 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -88,18 +88,13 @@ struct rcu_dynticks {
 	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
 	atomic_t dynticks;	    /* Even value for idle, else odd. */
 #ifdef CONFIG_RCU_FAST_NO_HZ
-	int dyntick_drain;	    /* Prepare-for-idle state variable. */
-	unsigned long dyntick_holdoff;
-				    /* No retries for the jiffy of failure. */
-	struct timer_list idle_gp_timer;
-				    /* Wake up CPU sleeping with callbacks. */
-	unsigned long idle_gp_timer_expires;
-				    /* When to wake up CPU (for repost). */
-	bool idle_first_pass;	    /* First pass of attempt to go idle? */
+	bool all_lazy;		    /* Are all CPU's CBs lazy? */
 	unsigned long nonlazy_posted;
 				    /* # times non-lazy CBs posted to CPU. */
 	unsigned long nonlazy_posted_snap;
 				    /* idle-period nonlazy_posted snapshot. */
+	unsigned long last_accelerate;
+				    /* Last jiffy CBs were accelerated. */
 	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
@@ -134,9 +129,6 @@ struct rcu_node {
 				/* elements that need to drain to allow the */
 				/* current expedited grace period to */
 				/* complete (only for TREE_PREEMPT_RCU). */
-	atomic_t wakemask;	/* CPUs whose kthread needs to be awakened. */
-				/* Since this has meaning only for leaf */
-				/* rcu_node structures, 32 bits suffices. */
 	unsigned long qsmaskinit;
 				/* Per-GP initial value for qsmask & expmask. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
@@ -196,6 +188,12 @@ struct rcu_node {
 				/* Refused to boost: not sure why, though. */
 				/* This can happen due to race conditions. */
 #endif /* #ifdef CONFIG_RCU_BOOST */
+#ifdef CONFIG_RCU_NOCB_CPU
+	wait_queue_head_t nocb_gp_wq[2];
+				/* Place for rcu_nocb_kthread() to wait GP. */
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+	int need_future_gp[2];
+				/* Counts of upcoming no-CB GP requests. */
 	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 } ____cacheline_internodealigned_in_smp;
 
@@ -328,6 +326,11 @@ struct rcu_data {
 	struct task_struct *nocb_kthread;
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
+	/* 8) RCU CPU stall data. */
+#ifdef CONFIG_RCU_CPU_STALL_INFO
+	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
+
 	int cpu;
 	struct rcu_state *rsp;
 };
@@ -375,12 +378,6 @@ struct rcu_state {
 	struct rcu_data __percpu *rda;		/* pointer of percu rcu_data. */
 	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
 		     void (*func)(struct rcu_head *head));
-#ifdef CONFIG_RCU_NOCB_CPU
-	void (*call_remote)(struct rcu_head *head,
-		 void (*func)(struct rcu_head *head));
-						/* call_rcu() flavor, but for */
-						/* placing on remote CPU. */
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 	/* The following fields are guarded by the root rcu_node's lock. */
 
@@ -443,6 +440,7 @@ struct rcu_state {
 	unsigned long gp_max;			/* Maximum GP duration in */
 						/*  jiffies. */
 	char *name;				/* Name of structure. */
+	char abbr;				/* Abbreviated name. */
 	struct list_head flavors;		/* List of RCU flavors. */
 };
 
@@ -520,7 +518,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 static void __cpuinit rcu_prepare_kthreads(int cpu);
-static void rcu_prepare_for_idle_init(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
 static void rcu_idle_count_callbacks_posted(void);
@@ -529,16 +526,18 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
+static int rcu_nocb_needs_gp(struct rcu_state *rsp);
+static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
+static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
+static void rcu_init_one_nocb(struct rcu_node *rnp);
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy);
 static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 				      struct rcu_data *rdp);
-static bool nocb_cpu_expendable(int cpu);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void init_nocb_callback_list(struct rcu_data *rdp);
-static void __init rcu_init_nocb(void);
 static void rcu_kick_nohz_cpu(int cpu);
+static bool init_nocb_callback_list(struct rcu_data *rdp);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
 