diff options
Diffstat (limited to 'kernel/sched/idle_task.c')
-rw-r--r-- | kernel/sched/idle_task.c | 99 |
1 file changed, 99 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c new file mode 100644 index 000000000000..91b4c957f289 --- /dev/null +++ b/kernel/sched/idle_task.c | |||
@@ -0,0 +1,99 @@ | |||
1 | #include "sched.h" | ||
2 | |||
3 | /* | ||
4 | * idle-task scheduling class. | ||
5 | * | ||
6 | * (NOTE: these are not related to SCHED_IDLE tasks which are | ||
7 | * handled in sched_fair.c) | ||
8 | */ | ||
9 | |||
#ifdef CONFIG_SMP
/*
 * Wake-up CPU selection for the idle task: always report the CPU the
 * task is already on, so the idle task never moves between CPUs.
 */
static int
select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * Any task @p that becomes runnable should preempt the idle
	 * task, so mark rq->idle for rescheduling without any checks.
	 */
	resched_task(rq->idle);
}
24 | |||
/*
 * Pick the per-CPU idle task as the next task to run: called when no
 * other scheduling class has a runnable task on this runqueue.
 */
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	/* Count this transition to idle in the schedstats. */
	schedstat_inc(rq, sched_goidle);
	/* Update load-average accounting for this CPU going idle. */
	calc_load_account_idle(rq);
	return rq->idle;
}
31 | |||
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * Drop rq->lock around the printk()/dump_stack() so the
	 * (potentially slow) console output is not done while holding
	 * the runqueue lock, then reacquire it before returning.
	 */
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
44 | |||
/* Nothing to do when the idle task is switched out. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
48 | |||
/* No per-tick bookkeeping is needed for the idle task. */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
52 | |||
/* Nothing to do when the idle task becomes rq's current task. */
static void set_curr_task_idle(struct rq *rq)
{
}
56 | |||
/*
 * A task must never be switched into the idle scheduling class;
 * treat any such attempt as a fatal kernel bug.
 */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
61 | |||
/*
 * The idle task's priority must never change; treat any attempt
 * as a fatal kernel bug.
 */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
67 | |||
/* The idle task has no round-robin timeslice: report 0. */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
72 | |||
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 *
 * Only the hooks that can legitimately be called for the idle task are
 * filled in; the rest are intentionally left NULL (idle tasks are never
 * enqueued, never yield, never fork, etc.).
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task = dequeue_task_idle,

	.check_preempt_curr = check_preempt_curr_idle,

	.pick_next_task = pick_next_task_idle,
	.put_prev_task = put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq = select_task_rq_idle,
#endif

	.set_curr_task = set_curr_task_idle,
	.task_tick = task_tick_idle,

	.get_rr_interval = get_rr_interval_idle,

	.prio_changed = prio_changed_idle,
	.switched_to = switched_to_idle,
};