about summary refs log tree commit diff stats
path: root/kernel/rcutree.h
diff options
context:
space:
mode:
author	Paul E. McKenney <paul.mckenney@linaro.org>	2012-08-20 00:35:53 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-11-16 13:05:56 -0500
commit	3fbfbf7a3b66ec424042d909f14ba2ddf4372ea8 (patch)
tree	cc364c320a6e23927ecc154a8ef8021dc7d1a9e8 /kernel/rcutree.h
parent	aac1cda34b84a9411d6b8d18c3658f094c834911 (diff)
rcu: Add callback-free CPUs
RCU callback execution can add significant OS jitter and also can degrade both scheduling latency and, in asymmetric multiprocessors, energy efficiency. This commit therefore adds the ability for selected CPUs ("rcu_nocbs=" boot parameter) to have their callbacks offloaded to kthreads. If the "rcu_nocb_poll" boot parameter is also specified, these kthreads will do polling, removing the need for the offloaded CPUs to do wakeups. At least one CPU must be doing normal callback processing: currently CPU 0 cannot be selected as a no-CBs CPU. In addition, attempts to offline the last normal-CBs CPU will fail.

This feature was inspired by Jim Houston's and Joe Korty's JRCU, and this commit includes fixes to problems located by Fengguang Wu's kbuild test robot.

[ paulmck: Added gfp.h include file as suggested by Fengguang Wu. ]

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.h')
-rw-r--r--	kernel/rcutree.h	47
1 file changed, 47 insertions, 0 deletions
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index d274af357210..488f2ec6b663 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -317,6 +317,18 @@ struct rcu_data {
317 struct rcu_head oom_head; 317 struct rcu_head oom_head;
318#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 318#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
319 319
320 /* 7) Callback offloading. */
321#ifdef CONFIG_RCU_NOCB_CPU
322 struct rcu_head *nocb_head; /* CBs waiting for kthread. */
323 struct rcu_head **nocb_tail;
324 atomic_long_t nocb_q_count; /* # CBs waiting for kthread */
325 atomic_long_t nocb_q_count_lazy; /* (approximate). */
326 int nocb_p_count; /* # CBs being invoked by kthread */
327 int nocb_p_count_lazy; /* (approximate). */
328 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
329 struct task_struct *nocb_kthread;
330#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
331
320 int cpu; 332 int cpu;
321 struct rcu_state *rsp; 333 struct rcu_state *rsp;
322}; 334};
@@ -369,6 +381,12 @@ struct rcu_state {
369 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ 381 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
370 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */ 382 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
371 void (*func)(struct rcu_head *head)); 383 void (*func)(struct rcu_head *head));
384#ifdef CONFIG_RCU_NOCB_CPU
385 void (*call_remote)(struct rcu_head *head,
386 void (*func)(struct rcu_head *head));
387 /* call_rcu() flavor, but for */
388 /* placing on remote CPU. */
389#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
372 390
373 /* The following fields are guarded by the root rcu_node's lock. */ 391 /* The following fields are guarded by the root rcu_node's lock. */
374 392
@@ -439,6 +457,8 @@ struct rcu_state {
439#define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */ 457#define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */
440 458
441extern struct list_head rcu_struct_flavors; 459extern struct list_head rcu_struct_flavors;
460
461/* Sequence through rcu_state structures for each RCU flavor. */
442#define for_each_rcu_flavor(rsp) \ 462#define for_each_rcu_flavor(rsp) \
443 list_for_each_entry((rsp), &rcu_struct_flavors, flavors) 463 list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
444 464
@@ -515,5 +535,32 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
515static void print_cpu_stall_info_end(void); 535static void print_cpu_stall_info_end(void);
516static void zero_cpu_stall_ticks(struct rcu_data *rdp); 536static void zero_cpu_stall_ticks(struct rcu_data *rdp);
517static void increment_cpu_stall_ticks(void); 537static void increment_cpu_stall_ticks(void);
538static bool is_nocb_cpu(int cpu);
539static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
540 bool lazy);
541static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
542 struct rcu_data *rdp);
543static bool nocb_cpu_expendable(int cpu);
544static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
545static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
546static void init_nocb_callback_list(struct rcu_data *rdp);
547static void __init rcu_init_nocb(void);
518 548
519#endif /* #ifndef RCU_TREE_NONCORE */ 549#endif /* #ifndef RCU_TREE_NONCORE */
550
551#ifdef CONFIG_RCU_TRACE
552#ifdef CONFIG_RCU_NOCB_CPU
553/* Sum up queue lengths for tracing. */
554static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
555{
556 *ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
557 *qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
558}
559#else /* #ifdef CONFIG_RCU_NOCB_CPU */
560static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
561{
562 *ql = 0;
563 *qll = 0;
564}
565#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
566#endif /* #ifdef CONFIG_RCU_TRACE */