Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--   kernel/rcu/tree.h   62
1 file changed, 15 insertions, 47 deletions
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8e7b1843896e..119de399eb2f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,7 +27,6 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
-#include <linux/irq_work.h>
 
 /*
  * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
                                 /* queued on this rcu_node structure that */
                                 /* are blocking the current grace period, */
                                 /* there can be no such task. */
-        struct completion boost_completion;
-                                /* Used to ensure that the rt_mutex used */
-                                /* to carry out the boosting is fully */
-                                /* released with no future boostee accesses */
-                                /* before that rt_mutex is re-initialized. */
         struct rt_mutex boost_mtx;
                                 /* Used only for the priority-boosting */
                                 /* side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
                                 /* in order to detect GP end. */
         unsigned long gpnum;            /* Highest gp number that this CPU */
                                 /* is aware of having started. */
+        unsigned long rcu_qs_ctr_snap;  /* Snapshot of rcu_qs_ctr to check */
+                                /* for rcu_all_qs() invocations. */
         bool passed_quiesce;            /* User-mode/idle loop etc. */
         bool qs_pending;                /* Core waits for quiesc state. */
         bool beenonline;                /* CPU online at least once. */
+        bool gpwrap;                    /* Possible gpnum/completed wrap. */
         struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
         unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
 #ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
 #ifdef CONFIG_RCU_NOCB_CPU
         struct rcu_head *nocb_head;     /* CBs waiting for kthread. */
         struct rcu_head **nocb_tail;
-        atomic_long_t nocb_q_count;     /* # CBs waiting for kthread */
-        atomic_long_t nocb_q_count_lazy; /* (approximate). */
+        atomic_long_t nocb_q_count;     /* # CBs waiting for nocb */
+        atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
         struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
         struct rcu_head **nocb_follower_tail;
-        atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
-        atomic_long_t nocb_follower_count_lazy; /* (approximate). */
-        int nocb_p_count;               /* # CBs being invoked by kthread */
-        int nocb_p_count_lazy;          /* (approximate). */
         wait_queue_head_t nocb_wq;      /* For nocb kthreads to sleep on. */
         struct task_struct *nocb_kthread;
         int nocb_defer_wakeup;          /* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
         struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
                                         /* CBs waiting for GP. */
         struct rcu_head **nocb_gp_tail;
-        long nocb_gp_count;
-        long nocb_gp_count_lazy;
         bool nocb_leader_sleep;         /* Is the nocb leader thread asleep? */
         struct rcu_data *nocb_next_follower;
                                         /* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
                                         /* due to no GP active. */
         unsigned long gp_start;         /* Time at which GP started, */
                                         /* but in jiffies. */
+        unsigned long gp_activity;      /* Time of last GP kthread */
+                                        /* activity in jiffies. */
         unsigned long jiffies_stall;    /* Time at which to check */
                                         /* for CPU stalls. */
         unsigned long jiffies_resched;  /* Time at which to resched */
                                         /* a reluctant CPU. */
+        unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
+                                        /* GP start. */
         unsigned long gp_max;           /* Maximum GP duration in */
                                         /* jiffies. */
         const char *name;               /* Name of structure. */
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
 #define for_each_rcu_flavor(rsp) \
         list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
 
-/* Return values for rcu_preempt_offline_tasks(). */
-
-#define RCU_OFL_TASKS_NORM_GP 0x1       /* Tasks blocking normal */
-                                        /* GP were moved to root. */
-#define RCU_OFL_TASKS_EXP_GP 0x2        /* Tasks blocking expedited */
-                                        /* GP were moved to root. */
-
 /*
  * RCU implementation internal declarations:
  */
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-long rcu_batches_completed(void);
 static void rcu_preempt_note_context_switch(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
-                                      unsigned long flags);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
-#ifdef CONFIG_HOTPLUG_CPU
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                     struct rcu_node *rnp,
-                                     struct rcu_data *rdp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-                               bool wake);
-#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
 #endif /* #ifndef RCU_TREE_NONCORE */
 
 #ifdef CONFIG_RCU_TRACE
-#ifdef CONFIG_RCU_NOCB_CPU
-/* Sum up queue lengths for tracing. */
+/* Read out queue lengths for tracing. */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 {
-        *ql = atomic_long_read(&rdp->nocb_q_count) +
-              rdp->nocb_p_count +
-              atomic_long_read(&rdp->nocb_follower_count) +
-              rdp->nocb_p_count + rdp->nocb_gp_count;
-        *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
-               rdp->nocb_p_count_lazy +
-               atomic_long_read(&rdp->nocb_follower_count_lazy) +
-               rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
-}
+#ifdef CONFIG_RCU_NOCB_CPU
+        *ql = atomic_long_read(&rdp->nocb_q_count);
+        *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
         *ql = 0;
         *qll = 0;
-}
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+}
 #endif /* #ifdef CONFIG_RCU_TRACE */
