Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--	kernel/sched/deadline.c	46
1 file changed, 40 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 7f6de4316990..802188fb6338 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -16,6 +16,8 @@
  */
 #include "sched.h"
 
+struct dl_bandwidth def_dl_bandwidth;
+
 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
 {
 	return container_of(dl_se, struct task_struct, dl);
@@ -46,6 +48,27 @@ static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 	return dl_rq->rb_leftmost == &dl_se->rb_node;
 }
 
+void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
+{
+	raw_spin_lock_init(&dl_b->dl_runtime_lock);
+	dl_b->dl_period = period;
+	dl_b->dl_runtime = runtime;
+}
+
+extern unsigned long to_ratio(u64 period, u64 runtime);
+
+void init_dl_bw(struct dl_bw *dl_b)
+{
+	raw_spin_lock_init(&dl_b->lock);
+	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
+	if (global_dl_runtime() == RUNTIME_INF)
+		dl_b->bw = -1;
+	else
+		dl_b->bw = to_ratio(global_dl_period(), global_dl_runtime());
+	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
+	dl_b->total_bw = 0;
+}
+
 void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
 {
 	dl_rq->rb_root = RB_ROOT;
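The extern declaration in the hunk above pulls in to_ratio() from kernel/sched/core.c, which init_dl_bw() uses to turn the global runtime/period pair into a fixed-point utilization. For reference (not part of this diff), a sketch of that helper as it reads in this era:

/*
 * Sketch of to_ratio() from kernel/sched/core.c of this era: the
 * bandwidth runtime/period as a fixed-point fraction scaled by 2^20,
 * so dl_b->bw and dl_b->total_bw can be compared with integer
 * arithmetic only.
 */
unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	/* Returning 0 for a zero period saves checks in the callers. */
	if (period == 0)
		return 0;

	return div64_u64(runtime << 20, period);
}

Note that init_dl_bw() stores -1 (all bits set) in dl_b->bw when the global runtime is RUNTIME_INF, which the admission test treats as "no limit".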
@@ -57,6 +80,8 @@ void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
 	dl_rq->dl_nr_migratory = 0;
 	dl_rq->overloaded = 0;
 	dl_rq->pushable_dl_tasks_root = RB_ROOT;
+#else
+	init_dl_bw(&dl_rq->dl_bw);
 #endif
 }
 
@@ -359,8 +384,9 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 	 * of anything below microseconds resolution is actually fiction
 	 * (but still we want to give the user that illusion >;).
 	 */
-	left = (pi_se->dl_period >> 10) * (dl_se->runtime >> 10);
-	right = ((dl_se->deadline - t) >> 10) * (pi_se->dl_runtime >> 10);
+	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+	right = ((dl_se->deadline - t) >> DL_SCALE) *
+		(pi_se->dl_runtime >> DL_SCALE);
 
 	return dl_time_before(right, left);
 }
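This hunk only replaces the magic shift with DL_SCALE (presumably defined as 10 in sched.h by this series), but the check is worth unpacking: the entity overflows when its remaining runtime over the time left to its deadline exceeds the reserved bandwidth, i.e. runtime / (deadline - t) > dl_runtime / dl_period, which cross-multiplies to dl_period * runtime > (deadline - t) * dl_runtime with no division needed. Shifting each factor right by DL_SCALE keeps the 64-bit products from overflowing, at the cost of roughly microsecond resolution. A standalone sketch with hypothetical names:

/*
 * Standalone sketch (hypothetical helper, not kernel code): returns
 * true when consuming 'runtime' before 'deadline' would exceed the
 * reserved bandwidth dl_runtime/dl_period.
 */
#include <stdbool.h>
#include <stdint.h>

#define DL_SCALE 10

static bool dl_overflow_sketch(uint64_t dl_period, uint64_t dl_runtime,
			       uint64_t runtime, uint64_t deadline,
			       uint64_t now)
{
	/* dl_period * runtime vs. (deadline - now) * dl_runtime,
	 * each factor pre-shifted so the products fit in 64 bits. */
	uint64_t left  = (dl_period >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = ((deadline - now) >> DL_SCALE) *
			 (dl_runtime >> DL_SCALE);

	/* As dl_time_before(right, left), ignoring its wraparound
	 * handling for this sketch. */
	return right < left;
}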
@@ -911,8 +937,8 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 	 * In the unlikely case current and p have the same deadline
 	 * let us try to decide what's the best thing to do...
 	 */
-	if ((s64)(p->dl.deadline - rq->curr->dl.deadline) == 0 &&
-	    !need_resched())
+	if ((p->dl.deadline == rq->curr->dl.deadline) &&
+	    !test_tsk_need_resched(rq->curr))
 		check_preempt_equal_dl(rq, p);
 #endif /* CONFIG_SMP */
 }
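Two fixes in one hunk: the (s64) subtraction trick is only needed for ordering comparisons, so plain equality is clearer, and need_resched() tests the resched flag of the task running on the *local* CPU, while check_preempt_curr_dl() can be invoked for a remote runqueue (e.g. on a cross-CPU wakeup), so the flag has to be tested on that runqueue's current task. For reference, test_tsk_need_resched() as it reads in include/linux/sched.h of this era (paraphrased):

/* Tests the flag on the given task, not on whoever runs locally. */
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}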
@@ -1000,6 +1026,14 @@ static void task_fork_dl(struct task_struct *p)
 static void task_dead_dl(struct task_struct *p)
 {
 	struct hrtimer *timer = &p->dl.dl_timer;
+	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+
+	/*
+	 * Since we are TASK_DEAD we won't slip out of the domain!
+	 */
+	raw_spin_lock_irq(&dl_b->lock);
+	dl_b->total_bw -= p->dl.dl_bw;
+	raw_spin_unlock_irq(&dl_b->lock);
 
 	hrtimer_cancel(timer);
 }
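This is the reclaim side of admission control: a dying task's bandwidth must be handed back to the pool it was charged against, which task_dead_dl() does inline under dl_b->lock. A sketch of the admission-side helpers this accounting pairs with (names assumed from the companion kernel/sched/sched.h changes, not shown in this diff):

/* Charge/uncharge a task's bandwidth against the pool. */
static inline void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

/* Would swapping old_bw for new_bw exceed the cap?  bw == -1 means
 * "no limit"; otherwise total usage across the domain's CPUs may not
 * exceed bw * cpus. */
static inline bool __dl_overflow(struct dl_bw *dl_b, int cpus,
				 u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}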
@@ -1226,7 +1260,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->se.on_rq);
+	BUG_ON(!p->on_rq);
 	BUG_ON(!dl_task(p));
 
 	return p;
@@ -1373,7 +1407,7 @@ static int pull_dl_task(struct rq *this_rq)
 		     dl_time_before(p->dl.deadline,
 				    this_rq->dl.earliest_dl.curr))) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->se.on_rq);
+			WARN_ON(!p->on_rq);
 
 			/*
 			 * Then we pull iff p has actually an earlier