author    Ingo Molnar <mingo@kernel.org>  2018-03-03 09:44:39 -0500
committer Ingo Molnar <mingo@kernel.org>  2018-03-04 06:39:33 -0500
commit    a92057e14beb233e8c891f4de075f2a468c71f15 (patch)
tree      44b4519c69a3bffba7a8f31437fe356b48783e3e /kernel/sched
parent    325ea10c0809406ce23f038602abbc454f3f761d (diff)
sched/idle: Merge kernel/sched/idle.c and kernel/sched/idle_task.c
Merge these two small .c modules as they implement two aspects of idle
task handling.

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/Makefile        5
-rw-r--r--  kernel/sched/idle.c        123
-rw-r--r--  kernel/sched/idle_task.c   117
3 files changed, 125 insertions(+), 120 deletions(-)
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index e2f9d4feff40..d9a02b318108 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,8 +17,9 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o
-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+obj-y += idle.o fair.o rt.o deadline.o
+obj-y += wait.o wait_bit.o swait.o completion.o
+
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2760e0357271..2975f195e1c4 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -1,5 +1,9 @@
 /*
- * Generic entry points for the idle threads
+ * Generic entry points for the idle threads and
+ * implementation of the idle task scheduling class.
+ *
+ * (NOTE: these are not related to SCHED_IDLE batch scheduled
+ * tasks which are handled in sched/fair.c )
  */
 #include "sched.h"
 
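The NOTE added to the header comment is worth unpacking: SCHED_IDLE is a scheduling policy that ordinary tasks opt into, and it is served by the fair class in kernel/sched/fair.c; the code in this file runs the per-CPU idle thread instead. A minimal userspace sketch of that opt-in, assuming Linux with glibc (the program is illustrative and not part of this patch):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* SCHED_IDLE requires a static priority of 0. */
	struct sched_param sp = { .sched_priority = 0 };

	/* Demote the calling task to SCHED_IDLE; it remains an ordinary
	 * task in the fair class, not the per-CPU idle thread. */
	if (sched_setscheduler(0, SCHED_IDLE, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	puts("now running under SCHED_IDLE (fair class)");
	return 0;
}

A task demoted this way still flows through the fair class's pick path; the idle_sched_class added at the bottom of this patch only ever returns rq->idle.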
@@ -33,6 +37,7 @@ void cpu_idle_poll_ctrl(bool enable)
 static int __init cpu_idle_poll_setup(char *__unused)
 {
 	cpu_idle_force_poll = 1;
+
 	return 1;
 }
 __setup("nohlt", cpu_idle_poll_setup);
@@ -40,6 +45,7 @@ __setup("nohlt", cpu_idle_poll_setup);
 static int __init cpu_idle_nopoll_setup(char *__unused)
 {
 	cpu_idle_force_poll = 0;
+
 	return 1;
 }
 __setup("hlt", cpu_idle_nopoll_setup);
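For context, the two handlers touched above back the "nohlt" and "hlt" boot parameters: "nohlt" forces the polling idle loop, "hlt" allows the CPU to halt. A hypothetical stand-alone rendering of that toggle (parse_idle_bootarg() is an invented name, not kernel API):

#include <string.h>

static int cpu_idle_force_poll;	/* 1: spin in cpu_idle_poll(), 0: allow halt */

/* Userspace stand-in for the two __setup() callbacks above. */
static void parse_idle_bootarg(const char *arg)
{
	if (strcmp(arg, "nohlt") == 0)
		cpu_idle_force_poll = 1;	/* mirrors cpu_idle_poll_setup() */
	else if (strcmp(arg, "hlt") == 0)
		cpu_idle_force_poll = 0;	/* mirrors cpu_idle_nopoll_setup() */
}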
@@ -51,12 +57,14 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
 	stop_critical_timings();
+
 	while (!tif_need_resched() &&
 	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
 	start_critical_timings();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	rcu_idle_exit();
+
 	return 1;
 }
 
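The loop at the heart of cpu_idle_poll() spins with cpu_relax() until a reschedule is pending or forced polling ends. A self-contained sketch of the same shape, assuming GCC on x86 (need_resched here is a stand-in flag, and __builtin_ia32_pause() stands in for what cpu_relax() expands to on x86):

#include <stdatomic.h>

static atomic_int need_resched;	/* stand-in for TIF_NEED_RESCHED */

static void poll_idle_loop(void)
{
	/* Spin politely until someone asks for a reschedule. */
	while (!atomic_load_explicit(&need_resched, memory_order_relaxed))
#if defined(__x86_64__) || defined(__i386__)
		__builtin_ia32_pause();	/* PAUSE: hint to the core we are spinning */
#else
		;			/* plain busy-wait elsewhere */
#endif
}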
@@ -337,3 +345,116 @@ void cpu_startup_entry(enum cpuhp_state state)
 	while (1)
 		do_idle();
 }
+
+/*
+ * idle-task scheduling class.
+ */
+
+#ifdef CONFIG_SMP
+static int
+select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+	return task_cpu(p); /* IDLE tasks as never migrated */
+}
+#endif
+
+/*
+ * Idle tasks are unconditionally rescheduled:
+ */
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+	resched_curr(rq);
+}
+
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	put_prev_task(rq, prev);
+	update_idle_core(rq);
+	schedstat_inc(rq->sched_goidle);
+
+	return rq->idle;
+}
+
+/*
+ * It is not legal to sleep in the idle task - print a warning
+ * message if some code attempts to do it:
+ */
+static void
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+	raw_spin_unlock_irq(&rq->lock);
+	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+	dump_stack();
+	raw_spin_lock_irq(&rq->lock);
+}
+
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+/*
+ * scheduler tick hitting a task of our scheduling class.
+ *
+ * NOTE: This function can be called remotely by the tick offload that
+ * goes along full dynticks. Therefore no local assumption can be made
+ * and everything must be accessed through the @rq and @curr passed in
+ * parameters.
+ */
+static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
+{
+}
+
+static void set_curr_task_idle(struct rq *rq)
+{
+}
+
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
+{
+	BUG();
+}
+
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
+{
+	BUG();
+}
+
+static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+{
+	return 0;
+}
+
+static void update_curr_idle(struct rq *rq)
+{
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+const struct sched_class idle_sched_class = {
+	/* .next is NULL */
+	/* no enqueue/yield_task for idle tasks */
+
+	/* dequeue is not valid, we print a debug message there: */
+	.dequeue_task		= dequeue_task_idle,
+
+	.check_preempt_curr	= check_preempt_curr_idle,
+
+	.pick_next_task		= pick_next_task_idle,
+	.put_prev_task		= put_prev_task_idle,
+
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_idle,
+	.set_cpus_allowed	= set_cpus_allowed_common,
+#endif
+
+	.set_curr_task		= set_curr_task_idle,
+	.task_tick		= task_tick_idle,
+
+	.get_rr_interval	= get_rr_interval_idle,
+
+	.prio_changed		= prio_changed_idle,
+	.switched_to		= switched_to_idle,
+	.update_curr		= update_curr_idle,
+};
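The struct at the end of the appended code is the whole point of the idle class: core.c dispatches through a table of function pointers and never needs to know it is dealing with the idle task. A compilable toy version of that ops-table pattern follows; every name in it (toy_sched_class, toy_pick_next_idle, and so on) is illustrative rather than kernel API:

#include <stdio.h>

struct toy_task { const char *name; };

/* A cut-down class vtable in the style of struct sched_class. */
struct toy_sched_class {
	struct toy_task *(*pick_next_task)(void);
	void (*task_tick)(struct toy_task *curr);
};

static struct toy_task the_idle_task = { .name = "swapper" };

static struct toy_task *toy_pick_next_idle(void)
{
	return &the_idle_task;	/* stand-in for returning rq->idle */
}

static void toy_task_tick_idle(struct toy_task *curr)
{
	(void)curr;		/* the idle class ignores the tick */
}

static const struct toy_sched_class toy_idle_class = {
	.pick_next_task	= toy_pick_next_idle,
	.task_tick	= toy_task_tick_idle,
};

int main(void)
{
	/* The "core" sees only the ops table, never the class internals. */
	struct toy_task *next = toy_idle_class.pick_next_task();
	printf("picked: %s\n", next->name);
	toy_idle_class.task_tick(next);
	return 0;
}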
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
deleted file mode 100644
index 488222ac4651..000000000000
--- a/kernel/sched/idle_task.c
+++ /dev/null
@@ -1,117 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * idle-task scheduling class.
- *
- * (NOTE: these are not related to SCHED_IDLE batch scheduling tasks which are
- * handled in sched/fair.c)
- */
-#include "sched.h"
-
-#ifdef CONFIG_SMP
-static int
-select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
-{
-	return task_cpu(p); /* IDLE tasks as never migrated */
-}
-#endif
-
-/*
- * Idle tasks are unconditionally rescheduled:
- */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
-{
-	resched_curr(rq);
-}
-
-static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-{
-	put_prev_task(rq, prev);
-	update_idle_core(rq);
-	schedstat_inc(rq->sched_goidle);
-
-	return rq->idle;
-}
-
-/*
- * It is not legal to sleep in the idle task - print a warning
- * message if some code attempts to do it:
- */
-static void
-dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
-{
-	raw_spin_unlock_irq(&rq->lock);
-	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
-	dump_stack();
-	raw_spin_lock_irq(&rq->lock);
-}
-
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
-/*
- * scheduler tick hitting a task of our scheduling class.
- *
- * NOTE: This function can be called remotely by the tick offload that
- * goes along full dynticks. Therefore no local assumption can be made
- * and everything must be accessed through the @rq and @curr passed in
- * parameters.
- */
-static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
-{
-}
-
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
-static void switched_to_idle(struct rq *rq, struct task_struct *p)
-{
-	BUG();
-}
-
-static void
-prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
-{
-	BUG();
-}
-
-static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
-{
-	return 0;
-}
-
-static void update_curr_idle(struct rq *rq)
-{
-}
-
-/*
- * Simple, special scheduling class for the per-CPU idle tasks:
- */
-const struct sched_class idle_sched_class = {
-	/* .next is NULL */
-	/* no enqueue/yield_task for idle tasks */
-
-	/* dequeue is not valid, we print a debug message there: */
-	.dequeue_task		= dequeue_task_idle,
-
-	.check_preempt_curr	= check_preempt_curr_idle,
-
-	.pick_next_task		= pick_next_task_idle,
-	.put_prev_task		= put_prev_task_idle,
-
-#ifdef CONFIG_SMP
-	.select_task_rq		= select_task_rq_idle,
-	.set_cpus_allowed	= set_cpus_allowed_common,
-#endif
-
-	.set_curr_task		= set_curr_task_idle,
-	.task_tick		= task_tick_idle,
-
-	.get_rr_interval	= get_rr_interval_idle,
-
-	.prio_changed		= prio_changed_idle,
-	.switched_to		= switched_to_idle,
-	.update_curr		= update_curr_idle,
-};