author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-08-04 20:43:50 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-09-07 19:27:30 -0400
commit     176f8f7a52cc6d09d686f0d900abda6942a52fbb
tree       bd3d6a8f21e576a94d1e1812c5c5317f62d1c96d
parent     84a8f446ffd70c2799a96268aaa4d47c22a83ff0
rcu: Make TASKS_RCU handle nohz_full= CPUs
Currently, TASKS_RCU ignores a CPU running a task in nohz_full=
usermode execution: there is neither a context switch nor a
scheduling-clock interrupt to tell TASKS_RCU that the task in question
has passed through a quiescent state, so the grace period would
extend indefinitely. This commit therefore makes RCU's dyntick-idle
subsystem record the task_struct structure of the task that is running
in dyntick-idle mode on each CPU. The TASKS_RCU grace period can
then access this information and record a quiescent state on
behalf of any CPU running in dyntick-idle usermode.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
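
As a minimal user-space sketch of the mechanism, under assumed names
(struct task, model_dynticks_task_enter(), and so on, none of which are
kernel APIs): each task records the CPU on which it entered dyntick-idle
mode, and a nonnegative value lets the grace-period machinery count that
task as quiescent. The kernel's real hooks are rcu_dynticks_task_enter()
and rcu_dynticks_task_exit(), added in the diff below.

	/*
	 * Illustrative model only, not kernel code.  A task's
	 * rcu_tasks_idle_cpu is -1 unless it is running in
	 * dyntick-idle mode (e.g. nohz_full usermode execution).
	 */
	#include <stdbool.h>

	struct task {
		int rcu_tasks_idle_cpu;	/* -1 unless in dyntick-idle. */
	};

	/* Model of dyntick-idle entry: record the CPU. */
	static void model_dynticks_task_enter(struct task *t, int cpu)
	{
		t->rcu_tasks_idle_cpu = cpu;
	}

	/* Model of dyntick-idle exit: record "no CPU". */
	static void model_dynticks_task_exit(struct task *t)
	{
		t->rcu_tasks_idle_cpu = -1;
	}

	/*
	 * A task observed with rcu_tasks_idle_cpu >= 0 is running in
	 * dyntick-idle usermode, which the TASKS_RCU grace period may
	 * count as a quiescent state on that task's behalf.
	 */
	static bool model_tasks_rcu_quiescent(const struct task *t)
	{
		return t->rcu_tasks_idle_cpu >= 0;
	}
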
 include/linux/init_task.h |  3 ++-
 include/linux/sched.h     |  2 ++
 kernel/rcu/tree.c         |  2 ++
 kernel/rcu/tree.h         |  2 ++
 kernel/rcu/tree_plugin.h  | 16 ++++++++++++++++
 kernel/rcu/update.c       |  4 +++-
 6 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dffd9258ee60..03b274873b06 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -121,7 +121,8 @@ extern struct group_info init_groups;
 #define INIT_TASK_RCU_TASKS(tsk)				\
 	.rcu_tasks_holdout = false,				\
 	.rcu_tasks_holdout_list =				\
-		LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list),
+		LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list),	\
+	.rcu_tasks_idle_cpu = -1,
 #else
 #define INIT_TASK_RCU_TASKS(tsk)
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eaacac4ae77d..ec8b34722bcc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1274,6 +1274,7 @@ struct task_struct {
 	unsigned long rcu_tasks_nvcsw;
 	bool rcu_tasks_holdout;
 	struct list_head rcu_tasks_holdout_list;
+	int rcu_tasks_idle_cpu;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2020,6 +2021,7 @@ static inline void rcu_copy_process(struct task_struct *p)
 #ifdef CONFIG_TASKS_RCU
 	p->rcu_tasks_holdout = false;
 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+	p->rcu_tasks_idle_cpu = -1;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e23dad0661e2..c880f5387b1f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -526,6 +526,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 	atomic_inc(&rdtp->dynticks);
 	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+	rcu_dynticks_task_enter();
 
 	/*
 	 * It is illegal to enter an extended quiescent state while
@@ -642,6 +643,7 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 				int user)
 {
+	rcu_dynticks_task_exit();
 	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 6a86eb7bac45..3a92000c354f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -605,6 +605,8 @@ static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
 static void rcu_bind_gp_kthread(void);
 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
 static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static void rcu_dynticks_task_enter(void);
+static void rcu_dynticks_task_exit(void);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 7672586d3920..e466b40052a7 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -3036,3 +3036,19 @@ static void rcu_bind_gp_kthread(void)
 	housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
+
+/* Record the current task on dyntick-idle entry. */
+static void rcu_dynticks_task_enter(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
+
+/* Record no current task on dyntick-idle exit. */
+static void rcu_dynticks_task_exit(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
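
The stores above use ACCESS_ONCE() so that the compiler emits a single,
untorn write to current->rcu_tasks_idle_cpu, which the grace-period
kthread reads concurrently; later kernels spell such marked stores
WRITE_ONCE(). Note also that the hooks compile to nothing unless both
CONFIG_TASKS_RCU and CONFIG_NO_HZ_FULL are enabled.
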
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index e1d71741958f..2658de4a5975 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -463,7 +463,9 @@ static void check_holdout_task(struct task_struct *t,
 {
 	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
 	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
-	    !ACCESS_ONCE(t->on_rq)) {
+	    !ACCESS_ONCE(t->on_rq) ||
+	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
+	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
 		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
 		list_del_rcu(&t->rcu_tasks_holdout_list);
 		put_task_struct(t);
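
The enlarged condition can be read as a standalone predicate. Below is
a simplified restatement, with hypothetical stand-in types and names
(holdout_snap, release_holdout()) rather than kernel APIs: a holdout
task is released once any one of four conditions holds.

	/*
	 * Simplified restatement of check_holdout_task()'s release
	 * condition; illustrative types only, not kernel code.
	 */
	#include <stdbool.h>

	struct holdout_snap {
		bool rcu_tasks_holdout;		/* still marked as a holdout?	 */
		unsigned long nvcsw_at_gp;	/* voluntary switches at GP start */
		unsigned long nvcsw_now;	/* voluntary switches now	 */
		bool on_rq;			/* still on a runqueue?		 */
		bool is_idle;			/* a per-CPU idle task?		 */
		int rcu_tasks_idle_cpu;		/* >= 0 iff in dyntick-idle	 */
	};

	static bool release_holdout(const struct holdout_snap *t, bool nohz_full)
	{
		return !t->rcu_tasks_holdout ||		 /* already released	  */
		       t->nvcsw_at_gp != t->nvcsw_now || /* voluntarily switched  */
		       !t->on_rq ||			 /* blocked or dequeued	  */
		       (nohz_full && !t->is_idle &&	 /* new in this commit:	  */
			t->rcu_tasks_idle_cpu >= 0);	 /* dyntick-idle usermode */
	}

The !is_idle_task(t) test keeps a per-CPU idle task, which also runs in
dyntick-idle mode, from being treated as a user task executing on a
nohz_full CPU.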