author     Tommaso Cucinotta <tommaso.cucinotta@sssup.it>  2016-08-14 10:27:08 -0400
committer  Ingo Molnar <mingo@kernel.org>                  2016-09-05 07:29:43 -0400
commit     d8206bb3ffe0eaee03abfad46fd44d8b17142e88
tree       8002a43ab30ac2e28d8662ce4f6f9ae6cc61fcb5
parent     8e1bc301aaf9f9a2d731bf8d50d549ac2dcfdab2
sched/deadline: Split cpudl_set() into cpudl_set() and cpudl_clear()
These two helpers exercise independent code paths and need different arguments.
After this change, you call:
cpudl_clear(cp, cpu);
cpudl_set(cp, cpu, dl);
instead of:
cpudl_set(cp, cpu, 0 /* dl */, 0 /* is_valid */);
cpudl_set(cp, cpu, dl, 1 /* is_valid */);
Signed-off-by: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Luca Abeni <luca.abeni@unitn.it>
Reviewed-by: Juri Lelli <juri.lelli@arm.com>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-dl@retis.sssup.it
Link: http://lkml.kernel.org/r/1471184828-12644-4-git-send-email-tommaso.cucinotta@sssup.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/sched/cpudeadline.c | 49
 kernel/sched/cpudeadline.h |  3
 kernel/sched/deadline.c    | 10
 3 files changed, 40 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 0ace75a7a87b..e73119013c53 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -145,16 +145,15 @@ out:
 }
 
 /*
- * cpudl_set - update the cpudl max-heap
+ * cpudl_clear - remove a cpu from the cpudl max-heap
  * @cp: the cpudl max-heap context
  * @cpu: the target cpu
- * @dl: the new earliest deadline for this cpu
  *
  * Notes: assumes cpu_rq(cpu)->lock is locked
  *
  * Returns: (void)
  */
-void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
+void cpudl_clear(struct cpudl *cp, int cpu)
 {
 	int old_idx, new_cpu;
 	unsigned long flags;
@@ -162,17 +161,15 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
+
 	old_idx = cp->elements[cpu].idx;
-	if (!is_valid) {
-		/* remove item */
-		if (old_idx == IDX_INVALID) {
-			/*
-			 * Nothing to remove if old_idx was invalid.
-			 * This could happen if a rq_offline_dl is
-			 * called for a CPU without -dl tasks running.
-			 */
-			goto out;
-		}
+	if (old_idx == IDX_INVALID) {
+		/*
+		 * Nothing to remove if old_idx was invalid.
+		 * This could happen if a rq_offline_dl is
+		 * called for a CPU without -dl tasks running.
+		 */
+	} else {
 		new_cpu = cp->elements[cp->size - 1].cpu;
 		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
 		cp->elements[old_idx].cpu = new_cpu;
@@ -180,11 +177,32 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->elements[new_cpu].idx = old_idx;
 		cp->elements[cpu].idx = IDX_INVALID;
 		cpudl_heapify(cp, old_idx);
-		cpumask_set_cpu(cpu, cp->free_cpus);
 
-		goto out;
+		cpumask_set_cpu(cpu, cp->free_cpus);
 	}
+	raw_spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/*
+ * cpudl_set - update the cpudl max-heap
+ * @cp: the cpudl max-heap context
+ * @cpu: the target cpu
+ * @dl: the new earliest deadline for this cpu
+ *
+ * Notes: assumes cpu_rq(cpu)->lock is locked
+ *
+ * Returns: (void)
+ */
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
+{
+	int old_idx;
+	unsigned long flags;
+
+	WARN_ON(!cpu_present(cpu));
 
+	raw_spin_lock_irqsave(&cp->lock, flags);
+
+	old_idx = cp->elements[cpu].idx;
 	if (old_idx == IDX_INVALID) {
 		int new_idx = cp->size++;
 		cp->elements[new_idx].dl = dl;
@@ -197,7 +215,6 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cpudl_heapify(cp, old_idx);
 	}
 
-out:
 	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
 
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index fcbdf83fed7e..f7da8c55bba0 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -23,7 +23,8 @@ struct cpudl {
 #ifdef CONFIG_SMP
 int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	       struct cpumask *later_mask);
-void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
+void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
+void cpudl_clear(struct cpudl *cp, int cpu);
 int cpudl_init(struct cpudl *cp);
 void cpudl_set_freecpu(struct cpudl *cp, int cpu);
 void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d091f4a95416..18fb0b8fc911 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -798,7 +798,7 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (dl_rq->earliest_dl.curr == 0 ||
 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
 		dl_rq->earliest_dl.curr = deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
 	}
 }
 
@@ -813,14 +813,14 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	if (!dl_rq->dl_nr_running) {
 		dl_rq->earliest_dl.curr = 0;
 		dl_rq->earliest_dl.next = 0;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	} else {
 		struct rb_node *leftmost = dl_rq->rb_leftmost;
 		struct sched_dl_entity *entry;
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
 		dl_rq->earliest_dl.curr = entry->deadline;
-		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
 	}
 }
 
@@ -1671,7 +1671,7 @@ static void rq_online_dl(struct rq *rq)
 
 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
 	if (rq->dl.dl_nr_running > 0)
-		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
+		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1680,7 +1680,7 @@ static void rq_offline_dl(struct rq *rq)
 	if (rq->dl.overloaded)
 		dl_clear_overload(rq);
 
-	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+	cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
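For readers outside the kernel tree, the following is a minimal stand-alone C sketch of the split calling convention described in the changelog. It is not the kernel implementation: the max-heap is reduced to a flat per-CPU table, and the toy_* names, the fixed NR_CPUS, and the absence of locking and heapify logic are simplifications introduced only to illustrate why the set and clear paths take different arguments.

/*
 * Toy model of the split cpudl interface (user space, simplified).
 * toy_cpudl_set() records/refreshes a CPU's earliest deadline;
 * toy_cpudl_clear() removes the CPU and needs no deadline at all.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS     4
#define IDX_INVALID (-1)

struct toy_cpudl {
	uint64_t dl[NR_CPUS];   /* earliest deadline, valid only if idx != IDX_INVALID */
	int idx[NR_CPUS];       /* IDX_INVALID when the CPU has no -dl tasks */
};

/* Insert or update the earliest deadline for @cpu. */
static void toy_cpudl_set(struct toy_cpudl *cp, int cpu, uint64_t dl)
{
	cp->dl[cpu] = dl;
	cp->idx[cpu] = cpu;        /* "present" marker; the real code heapifies here */
}

/* Remove @cpu entirely; no deadline argument is needed on this path. */
static void toy_cpudl_clear(struct toy_cpudl *cp, int cpu)
{
	cp->idx[cpu] = IDX_INVALID; /* idempotent: clearing an absent CPU is a no-op */
}

int main(void)
{
	struct toy_cpudl cp;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		toy_cpudl_clear(&cp, cpu);

	toy_cpudl_set(&cp, 1, 1000);   /* new style: cpudl_set(cp, cpu, dl)  */
	toy_cpudl_set(&cp, 2, 500);
	toy_cpudl_clear(&cp, 1);       /* new style: cpudl_clear(cp, cpu)    */

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cp.idx[cpu] == IDX_INVALID)
			printf("cpu%d: no -dl tasks\n", cpu);
		else
			printf("cpu%d: earliest deadline %llu\n",
			       cpu, (unsigned long long)cp.dl[cpu]);
	}
	return 0;
}

Even in this reduced form the motivation for the split is visible: the removal path has no use for a deadline value, so dropping the is_valid flag removes a meaningless argument from every clear site.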