Diffstat (limited to 'kernel/sched/psi.c')
 kernel/sched/psi.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7acc632c3b82..6e52b67b420e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1051,7 +1051,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
 
 	if (!rcu_access_pointer(group->poll_kworker)) {
 		struct sched_param param = {
-			.sched_priority = MAX_RT_PRIO - 1,
+			.sched_priority = 1,
 		};
 		struct kthread_worker *kworker;
 
@@ -1061,7 +1061,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
 			mutex_unlock(&group->trigger_lock);
 			return ERR_CAST(kworker);
 		}
-		sched_setscheduler(kworker->task, SCHED_FIFO, &param);
+		sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
 		kthread_init_delayed_work(&group->poll_work,
 				psi_poll_work);
 		rcu_assign_pointer(group->poll_kworker, kworker);
@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
 	 */
 	if (kworker_to_destroy) {
+		/*
+		 * After the RCU grace period has expired, the worker
+		 * can no longer be found through group->poll_kworker.
+		 * But it might have been already scheduled before
+		 * that - deschedule it cleanly before destroying it.
+		 */
 		kthread_cancel_delayed_work_sync(&group->poll_work);
+		atomic_set(&group->poll_scheduled, 0);
+
 		kthread_destroy_worker(kworker_to_destroy);
 	}
 	kfree(t);
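
For context, both functions touched above are driven from userspace through the documented PSI trigger interface: writing a trigger string to one of the /proc/pressure files reaches psi_trigger_create() (which sets up the SCHED_FIFO poll kworker patched in the first two hunks), and closing the file descriptor eventually reaches psi_trigger_destroy(). A minimal sketch of that flow, assuming the /proc/pressure/memory file and purely illustrative threshold/window values:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "some 150000 1000000": notify when some tasks are stalled on
	 * memory for 150ms within any 1s window (illustrative values). */
	const char trig[] = "some 150000 1000000";
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0)
		return 1;

	/* Registering the trigger ends up in psi_trigger_create(). */
	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
		close(fds.fd);
		return 1;
	}

	fds.events = POLLPRI;
	for (;;) {
		if (poll(&fds, 1, -1) < 0)
			break;
		if (fds.revents & POLLERR)
			break;	/* monitored interface went away */
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}

	/* close() drops the trigger; destroying the last trigger of the
	 * group tears down the poll kworker via psi_trigger_destroy(). */
	close(fds.fd);
	return 0;
}

With the hunks above applied, the kworker created on the first trigger write runs at FIFO priority 1 and is set up with sched_setscheduler_nocheck(), so no RT privilege is required of the writer; on the last close, any pending poll work is cancelled and poll_scheduled is reset before the worker is destroyed.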