author     Ingo Molnar <mingo@kernel.org>  2014-05-22 04:55:03 -0400
committer  Ingo Molnar <mingo@kernel.org>  2014-05-22 04:55:03 -0400
commit     6669dc89078da020241e78e5589489e494203332 (patch)
tree       c897152d44348dc4571f228baea283b8f48b137c /kernel/sched
parent     ec6e7f4082aa0c6c4334149e74673b6ed736fb63 (diff)
parent     6acbfb96976fc3350e30d964acb1dbbdf876d55e (diff)
Merge branch 'sched/urgent' into sched/core to avoid conflicts with upcoming changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c         | 55
-rw-r--r--  kernel/sched/cpudeadline.c  | 33
-rw-r--r--  kernel/sched/cpudeadline.h  |  6
-rw-r--r--  kernel/sched/cpupri.c       |  7
-rw-r--r--  kernel/sched/cpupri.h       |  2
5 files changed, 74 insertions, 29 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 092e511605ec..2551b6db470e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3226,17 +3226,40 @@ __getparam_dl(struct task_struct *p, struct sched_attr *attr)
  * We ask for the deadline not being zero, and greater or equal
  * than the runtime, as well as the period of being zero or
  * greater than deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution (1us); we
- * check sched_runtime only since it is always the smaller one.
+ * user parameters are above the internal resolution of 1us (we
+ * check sched_runtime only since it is always the smaller one) and
+ * below 2^63 ns (we have to check both sched_deadline and
+ * sched_period, as the latter can be zero).
  */
 static bool
 __checkparam_dl(const struct sched_attr *attr)
 {
-	return attr && attr->sched_deadline != 0 &&
-		(attr->sched_period == 0 ||
-		(s64)(attr->sched_period - attr->sched_deadline) >= 0) &&
-		(s64)(attr->sched_deadline - attr->sched_runtime ) >= 0 &&
-		attr->sched_runtime >= (2 << (DL_SCALE - 1));
+	/* deadline != 0 */
+	if (attr->sched_deadline == 0)
+		return false;
+
+	/*
+	 * Since we truncate DL_SCALE bits, make sure we're at least
+	 * that big.
+	 */
+	if (attr->sched_runtime < (1ULL << DL_SCALE))
+		return false;
+
+	/*
+	 * Since we use the MSB for wrap-around and sign issues, make
+	 * sure it's not set (mind that period can be equal to zero).
+	 */
+	if (attr->sched_deadline & (1ULL << 63) ||
+	    attr->sched_period & (1ULL << 63))
+		return false;
+
+	/* runtime <= deadline <= period (if period != 0) */
+	if ((attr->sched_period != 0 &&
+	     attr->sched_period < attr->sched_deadline) ||
+	    attr->sched_deadline < attr->sched_runtime)
+		return false;
+
+	return true;
 }
 
 /*
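To make the new rules concrete: with DL_SCALE = 10, as defined in kernel/sched/sched.h, a runtime below 1024ns is rejected, as is any deadline or period with bit 63 set. Note that the old floor, (2 << (DL_SCALE - 1)), equals 1 << DL_SCALE, so the runtime minimum is unchanged; the 2^63 checks are the behavioral fix. A minimal user-space sketch of the same checks (struct and function names here are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    #define DL_SCALE 10    /* matches kernel/sched/sched.h */

    /* Illustrative mirror of the sched_attr fields checked above (ns). */
    struct dl_params { uint64_t runtime, deadline, period; };

    static bool dl_params_valid(const struct dl_params *p)
    {
            if (p->deadline == 0)
                    return false;
            if (p->runtime < (1ULL << DL_SCALE))            /* below 1us resolution */
                    return false;
            if ((p->deadline | p->period) & (1ULL << 63))   /* MSB is reserved */
                    return false;
            if ((p->period != 0 && p->period < p->deadline) ||
                p->deadline < p->runtime)           /* runtime <= deadline <= period */
                    return false;
            return true;
    }

For example, { .runtime = 10000, .deadline = 30000, .period = 100000 } passes, while { .runtime = 500, ... } fails the resolution check.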
@@ -3689,8 +3712,12 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
-	if (sched_copy_attr(uattr, &attr))
-		return -EFAULT;
+	retval = sched_copy_attr(uattr, &attr);
+	if (retval)
+		return retval;
+
+	if (attr.sched_policy < 0)
+		return -EINVAL;
 
 	rcu_read_lock();
 	retval = -ESRCH;
@@ -3740,7 +3767,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-	struct sched_param lp;
+	struct sched_param lp = { .sched_priority = 0 };
 	struct task_struct *p;
 	int retval;
 
@@ -3757,11 +3784,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 	if (retval)
 		goto out_unlock;
 
-	if (task_has_dl_policy(p)) {
-		retval = -EINVAL;
-		goto out_unlock;
-	}
-	lp.sched_priority = p->rt_priority;
+	if (task_has_rt_policy(p))
+		lp.sched_priority = p->rt_priority;
 	rcu_read_unlock();
 
 	/*
@@ -5083,7 +5107,6 @@ static int sched_cpu_active(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
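For context, the sched_setattr() path patched above is reached from user space via syscall(2); glibc provides no wrapper, so struct sched_attr is declared by hand to mirror include/uapi/linux/sched.h. A sketch of a caller (assumes kernel headers that define __NR_sched_setattr, i.e. v3.14 or later; this program is an illustration, not part of the patch):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define SCHED_DEADLINE 6

    struct sched_attr {                     /* mirrors include/uapi/linux/sched.h */
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;         /* ns */
            uint64_t sched_deadline;        /* ns */
            uint64_t sched_period;          /* ns */
    };

    int main(void)
    {
            struct sched_attr attr = {
                    .size           = sizeof(attr),
                    .sched_policy   = SCHED_DEADLINE,
                    .sched_runtime  = 10 * 1000 * 1000,     /* 10ms */
                    .sched_deadline = 30 * 1000 * 1000,     /* 30ms */
                    .sched_period   = 100 * 1000 * 1000,    /* 100ms */
            };

            /* pid 0 = current task; flags must be 0; SCHED_DEADLINE
             * requires CAP_SYS_NICE */
            if (syscall(__NR_sched_setattr, 0, &attr, 0))
                    perror("sched_setattr");
            return 0;
    }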
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index ab001b5d5048..bd95963dae80 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -13,6 +13,7 @@
 
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include "cpudeadline.h"
 
 static inline int parent(int i)
@@ -39,8 +40,10 @@ static void cpudl_exchange(struct cpudl *cp, int a, int b)
 {
 	int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
 
-	swap(cp->elements[a], cp->elements[b]);
-	swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
+	swap(cp->elements[a].cpu, cp->elements[b].cpu);
+	swap(cp->elements[a].dl , cp->elements[b].dl );
+
+	swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
 }
 
 static void cpudl_heapify(struct cpudl *cp, int idx)
@@ -140,7 +143,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
-	old_idx = cp->cpu_to_idx[cpu];
+	old_idx = cp->elements[cpu].idx;
 	if (!is_valid) {
 		/* remove item */
 		if (old_idx == IDX_INVALID) {
@@ -155,8 +158,8 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
 		cp->elements[old_idx].cpu = new_cpu;
 		cp->size--;
-		cp->cpu_to_idx[new_cpu] = old_idx;
-		cp->cpu_to_idx[cpu] = IDX_INVALID;
+		cp->elements[new_cpu].idx = old_idx;
+		cp->elements[cpu].idx = IDX_INVALID;
 		while (old_idx > 0 && dl_time_before(
 				cp->elements[parent(old_idx)].dl,
 				cp->elements[old_idx].dl)) {
@@ -173,7 +176,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->size++;
 		cp->elements[cp->size - 1].dl = 0;
 		cp->elements[cp->size - 1].cpu = cpu;
-		cp->cpu_to_idx[cpu] = cp->size - 1;
+		cp->elements[cpu].idx = cp->size - 1;
 		cpudl_change_key(cp, cp->size - 1, dl);
 		cpumask_clear_cpu(cpu, cp->free_cpus);
 	} else {
@@ -195,10 +198,21 @@ int cpudl_init(struct cpudl *cp)
 	memset(cp, 0, sizeof(*cp));
 	raw_spin_lock_init(&cp->lock);
 	cp->size = 0;
-	for (i = 0; i < NR_CPUS; i++)
-		cp->cpu_to_idx[i] = IDX_INVALID;
-	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
+
+	cp->elements = kcalloc(nr_cpu_ids,
+			       sizeof(struct cpudl_item),
+			       GFP_KERNEL);
+	if (!cp->elements)
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+		kfree(cp->elements);
 		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(i)
+		cp->elements[i].idx = IDX_INVALID;
+
 	cpumask_setall(cp->free_cpus);
 
 	return 0;
@@ -211,4 +225,5 @@ int cpudl_init(struct cpudl *cp)
 void cpudl_cleanup(struct cpudl *cp)
 {
 	free_cpumask_var(cp->free_cpus);
+	kfree(cp->elements);
 }
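A note on the cpudl_exchange() hunk above: with the idx back-pointer folded into struct cpudl_item, a whole-struct swap of elements[a] and elements[b] would drag the back-pointers along, and those are indexed by CPU number rather than by heap slot. Hence the .cpu and .dl fields are swapped individually and the two reverse-map entries are fixed up on their own. A stand-alone sketch of the invariant (plain C, illustrative names):

    /* Heap slot i carries { dl, cpu }; e[cpu].idx maps a CPU back to its
     * current heap slot. Both mappings share the one array. */
    struct item { unsigned long long dl; int cpu; int idx; };

    static void heap_swap(struct item *e, int a, int b)
    {
            int cpu_a = e[a].cpu, cpu_b = e[b].cpu;
            unsigned long long dl = e[a].dl;
            int cpu = e[a].cpu;

            /* swap only the heap payload; .idx fields stay put */
            e[a].dl = e[b].dl;   e[a].cpu = e[b].cpu;
            e[b].dl = dl;        e[b].cpu = cpu;

            /* fix the reverse map: these entries are indexed by CPU number,
             * so the payload swap above cannot have corrupted them even if
             * a or b happens to equal cpu_a or cpu_b */
            e[cpu_a].idx = b;
            e[cpu_b].idx = a;
    }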
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index a202789a412c..538c9796ad4a 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -5,17 +5,17 @@
 
 #define IDX_INVALID -1
 
-struct array_item {
+struct cpudl_item {
 	u64 dl;
 	int cpu;
+	int idx;
 };
 
 struct cpudl {
 	raw_spinlock_t lock;
 	int size;
-	int cpu_to_idx[NR_CPUS];
-	struct array_item elements[NR_CPUS];
 	cpumask_var_t free_cpus;
+	struct cpudl_item *elements;
 };
 
 
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 3031bac8aa3e..8834243abee2 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/slab.h>
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
@@ -218,8 +219,13 @@ int cpupri_init(struct cpupri *cp)
 			goto cleanup;
 	}
 
+	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
+	if (!cp->cpu_to_pri)
+		goto cleanup;
+
 	for_each_possible_cpu(i)
 		cp->cpu_to_pri[i] = CPUPRI_INVALID;
+
 	return 0;
 
 cleanup:
@@ -236,6 +242,7 @@ void cpupri_cleanup(struct cpupri *cp)
 {
 	int i;
 
+	kfree(cp->cpu_to_pri);
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
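The kcalloc() conversions here and in cpudeadline.c share one motivation: NR_CPUS is a compile-time ceiling (often configured in the thousands on distro kernels), while nr_cpu_ids reflects the CPUs actually possible on the booted machine, so sizing per-CPU tables at init time avoids wasting memory on CPUs that can never appear. A condensed kernel-style sketch of the pattern (helper names are illustrative):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Size a per-CPU table by nr_cpu_ids rather than NR_CPUS. */
    static int per_cpu_table_init(int **table)
    {
            int i;

            *table = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
            if (!*table)
                    return -ENOMEM;

            for_each_possible_cpu(i)
                    (*table)[i] = -1;       /* e.g. CPUPRI_INVALID */

            return 0;
    }

    static void per_cpu_table_cleanup(int **table)
    {
            kfree(*table);                  /* kfree(NULL) is a no-op */
            *table = NULL;
    }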
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index f6d756173491..6b033347fdfd 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -17,7 +17,7 @@ struct cpupri_vec {
 
 struct cpupri {
 	struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
-	int cpu_to_pri[NR_CPUS];
+	int *cpu_to_pri;
 };
 
 #ifdef CONFIG_SMP