diff options
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..f5c6635b806c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
 {
 
 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-	u64 period = attr->sched_period;
+	u64 period = attr->sched_period ?: attr->sched_deadline;
 	u64 runtime = attr->sched_runtime;
 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
 	int cpus, err = -1;
@@ -3338,6 +3338,15 @@ recheck:
 			return -EPERM;
 	}
 
+	/*
+	 * Can't set/change SCHED_DEADLINE policy at all for now
+	 * (safest behavior); in the future we would like to allow
+	 * unprivileged DL tasks to increase their relative deadline
+	 * or reduce their runtime (both ways reducing utilization)
+	 */
+	if (dl_policy(policy))
+		return -EPERM;
+
 	/*
 	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
 	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
@@ -3661,13 +3670,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  */
-SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+		unsigned int, flags)
 {
 	struct sched_attr attr;
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0)
+	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
 	if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3796,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
 		attr->size = usize;
 	}
 
-	ret = copy_to_user(uattr, attr, usize);
+	ret = copy_to_user(uattr, attr, attr->size);
 	if (ret)
 		return -EFAULT;
 
@@ -3804,8 +3814,8 @@ err_size:
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
  */
-SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-		unsigned int, size)
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+		unsigned int, size, unsigned int, flags)
 {
 	struct sched_attr attr = {
 		.size = sizeof(struct sched_attr),
@@ -3814,7 +3824,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	int retval;
 
 	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0)
+	    size < SCHED_ATTR_SIZE_VER0 || flags)
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -7422,6 +7432,7 @@ static int sched_dl_global_constraints(void)
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
 	int cpu, ret = 0;
+	unsigned long flags;
 
 	/*
 	 * Here we want to check the bandwidth not being set to some
@@ -7435,10 +7446,10 @@ static int sched_dl_global_constraints(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
 		if (ret)
 			break;
@@ -7451,6 +7462,7 @@ static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
 	int cpu;
+	unsigned long flags;
 
 	def_dl_bandwidth.dl_period = global_rt_period();
 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7476,9 @@ static void sched_dl_do_global(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
 }
 
@@ -7475,7 +7487,8 @@ static int sched_rt_global_validate(void)
 	if (sysctl_sched_rt_period <= 0)
 		return -EINVAL;
 
-	if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+	    (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
 		return -EINVAL;
 
 	return 0;