commit     03b7fad167efca3b7abbbb39733933f9df56e79c
tree       504ea9f63d5badf1559e6d430f69e56259c32dfe
parent     10e7071b2f491b0fb981717ea0a585c441906ede
author     Peter Zijlstra <peterz@infradead.org>	2019-05-29 16:36:41 -0400
committer  Peter Zijlstra <peterz@infradead.org>	2019-08-08 03:09:31 -0400
sched: Add task_struct pointer to sched_class::set_curr_task
In preparation for further separating pick_next_task() and
set_curr_task(), we have to pass the actual task into the latter; while
there, rename it to better pair with put_prev_task().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/a96d1bcdd716db4a4c5da2fece647a1456c0ed78.1559129225.git.vpillai@digitalocean.com
 kernel/sched/core.c      | 12
 kernel/sched/deadline.c  |  7
 kernel/sched/fair.c      | 17
 kernel/sched/idle.c      | 27
 kernel/sched/rt.c        |  7
 kernel/sched/sched.h     |  7
 kernel/sched/stop_task.c | 17
 7 files changed, 48 insertions(+), 46 deletions(-)
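
At its core the patch is an interface change to struct sched_class: the
set_curr_task() hook gains the task pointer and is renamed set_next_task()
to mirror put_prev_task(). Condensed from the sched.h hunks below, with the
surrounding members elided:

	/* Before: the hook had to fetch the task from the runqueue itself. */
	void (*set_curr_task)(struct rq *rq);

	/* After: the caller passes the task, pairing with put_prev_task(). */
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p);

	/* Generic wrapper: for now, only the current task may be installed. */
	static inline void set_next_task(struct rq *rq, struct task_struct *next)
	{
		WARN_ON_ONCE(rq->curr != next);
		next->sched_class->set_next_task(rq, next);
	}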
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 364b6d7da2be..0c4220789092 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 }
 
 /*
@@ -4325,7 +4325,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
@@ -4392,7 +4392,7 @@ void set_user_nice(struct task_struct *p, long nice)
 			resched_curr(rq);
 	}
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 out_unlock:
 	task_rq_unlock(rq, p, &rf);
 }
@@ -4840,7 +4840,7 @@ change:
 		enqueue_task(rq, p, queue_flags);
 	}
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 
@@ -6042,7 +6042,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
-		set_curr_task(rq, p);
+		set_next_task(rq, p);
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
@@ -6919,7 +6919,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (queued)
 		enqueue_task(rq, tsk, queue_flags);
 	if (running)
-		set_curr_task(rq, tsk);
+		set_next_task(rq, tsk);
 
 	task_rq_unlock(rq, tsk, &rf);
 }
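
All six call sites above are instances of the scheduler's change-attributes
idiom, of which the hunks show only the tail. A paraphrased sketch of the
full idiom follows; the dequeue/put_prev half is reconstructed here, not
part of the hunks, and the exact flags vary per call site:

	int queued = task_on_rq_queued(p);
	int running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	/* ... change prio / nice / policy / cgroup / NUMA node ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);	/* was set_curr_task(rq, p) */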
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2dc2784b196c..6eae79350303 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1844,11 +1844,6 @@ static void task_fork_dl(struct task_struct *p)
 	 */
 }
 
-static void set_curr_task_dl(struct rq *rq)
-{
-	set_next_task_dl(rq, rq->curr);
-}
-
 #ifdef CONFIG_SMP
 
 /* Only try algorithms three times */
@@ -2466,6 +2461,7 @@ const struct sched_class dl_sched_class = {
 
 	.pick_next_task		= pick_next_task_dl,
 	.put_prev_task		= put_prev_task_dl,
+	.set_next_task		= set_next_task_dl,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_dl,
@@ -2476,7 +2472,6 @@ const struct sched_class dl_sched_class = {
 	.task_woken		= task_woken_dl,
 #endif
 
-	.set_curr_task		= set_curr_task_dl,
 	.task_tick		= task_tick_dl,
 	.task_fork		= task_fork_dl,
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7d8043fc8317..8ce1b8893947 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10150,9 +10150,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
  * This routine is mostly called to set cfs_rq->curr field when a task
  * migrates between groups/classes.
  */
-static void set_curr_task_fair(struct rq *rq)
+static void set_next_task_fair(struct rq *rq, struct task_struct *p)
 {
-	struct sched_entity *se = &rq->curr->se;
+	struct sched_entity *se = &p->se;
+
+#ifdef CONFIG_SMP
+	if (task_on_rq_queued(p)) {
+		/*
+		 * Move the next running task to the front of the list, so our
+		 * cfs_tasks list becomes MRU one.
+		 */
+		list_move(&se->group_node, &rq->cfs_tasks);
+	}
+#endif
 
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -10423,7 +10433,9 @@ const struct sched_class fair_sched_class = {
 	.check_preempt_curr	= check_preempt_wakeup,
 
 	.pick_next_task		= pick_next_task_fair,
+
 	.put_prev_task		= put_prev_task_fair,
+	.set_next_task		= set_next_task_fair,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
@@ -10436,7 +10448,6 @@ const struct sched_class fair_sched_class = {
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_fork		= task_fork_fair,
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 80940939b733..54194d41035c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 	resched_curr(rq);
 }
 
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void set_next_task_idle(struct rq *rq, struct task_struct *next)
+{
+	update_idle_core(rq);
+	schedstat_inc(rq->sched_goidle);
+}
+
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
+	struct task_struct *next = rq->idle;
+
 	put_prev_task(rq, prev);
-	update_idle_core(rq);
-	schedstat_inc(rq->sched_goidle);
+	set_next_task_idle(rq, next);
 
-	return rq->idle;
+	return next;
 }
 
 /*
@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 	raw_spin_lock_irq(&rq->lock);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
 static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
 	BUG();
@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = {
 
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
+	.set_next_task		= set_next_task_idle,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 
 	.get_rr_interval	= get_rr_interval_idle,
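
Note that pick_next_task_idle() is now pure composition: retire prev,
install next. The changelog frames this patch as preparation for lifting
that composition out of the individual classes; a hypothetical sketch of
the resulting shape (not code from this patch — it would have to call the
class hook directly, since the generic set_next_task() wrapper in the
sched.h hunk below warns unless the task is already rq->curr):

	static struct task_struct *
	pick_next_task_example(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
	{
		struct task_struct *next = pick_task_example(rq); /* hypothetical class-specific pick */

		put_prev_task(rq, prev);			/* retire the outgoing task */
		next->sched_class->set_next_task(rq, next);	/* install the incoming one */

		return next;
	}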
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 40bb71004325..f71bcbe1a00c 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2354,11 +2354,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	}
 }
 
-static void set_curr_task_rt(struct rq *rq)
-{
-	set_next_task_rt(rq, rq->curr);
-}
-
 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
@@ -2380,6 +2375,7 @@ const struct sched_class rt_sched_class = {
 
 	.pick_next_task		= pick_next_task_rt,
 	.put_prev_task		= put_prev_task_rt,
+	.set_next_task		= set_next_task_rt,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
@@ -2391,7 +2387,6 @@ const struct sched_class rt_sched_class = {
 	.switched_from		= switched_from_rt,
 #endif
 
-	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
 
 	.get_rr_interval	= get_rr_interval_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b3449d0dd7f0..f3c50445bf22 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1707,6 +1707,7 @@ struct sched_class {
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
@@ -1721,7 +1722,6 @@ struct sched_class {
 	void (*rq_offline)(struct rq *rq);
 #endif
 
-	void (*set_curr_task)(struct rq *rq);
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
 	void (*task_dead)(struct task_struct *p);
@@ -1755,9 +1755,10 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	curr->sched_class->set_curr_task(rq);
+	WARN_ON_ONCE(rq->curr != next);
+	next->sched_class->set_next_task(rq, next);
 }
 
 #ifdef CONFIG_SMP
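
For a writer of a new scheduling class, the renamed hook pairs with
put_prev_task() as mirror images: one closes the outgoing task's
accounting window, the other opens the incoming one's. A minimal sketch —
every foo_* name here is invented for illustration, loosely modeled on the
stop class below:

	static void put_prev_task_foo(struct rq *rq, struct task_struct *prev)
	{
		update_curr_foo(rq);	/* hypothetical: fold prev's runtime into its stats */
	}

	static void set_next_task_foo(struct rq *rq, struct task_struct *p)
	{
		p->se.exec_start = rq_clock_task(rq);	/* start p's new accounting window */
	}

	const struct sched_class foo_sched_class = {
		/* ... */
		.pick_next_task	= pick_next_task_foo,
		.put_prev_task	= put_prev_task_foo,
		.set_next_task	= set_next_task_foo,
		/* ... */
	};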
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index c183b790ca54..47a3d2a18a9a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 	/* we're never preempted */
 }
 
+static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
+{
+	stop->se.exec_start = rq_clock_task(rq);
+}
+
 static struct task_struct *
 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		return NULL;
 
 	put_prev_task(rq, prev);
-
-	stop->se.exec_start = rq_clock_task(rq);
+	set_next_task_stop(rq, stop);
 
 	return stop;
 }
@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
-static void set_curr_task_stop(struct rq *rq)
-{
-	struct task_struct *stop = rq->stop;
-
-	stop->se.exec_start = rq_clock_task(rq);
-}
-
 static void switched_to_stop(struct rq *rq, struct task_struct *p)
 {
 	BUG(); /* its impossible to change to this class */
@@ -128,13 +125,13 @@ const struct sched_class stop_sched_class = {
 
 	.pick_next_task		= pick_next_task_stop,
 	.put_prev_task		= put_prev_task_stop,
+	.set_next_task		= set_next_task_stop,
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_stop,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
-	.set_curr_task		= set_curr_task_stop,
 	.task_tick		= task_tick_stop,
 
 	.get_rr_interval	= get_rr_interval_stop,