Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c            15
-rw-r--r--   kernel/sched_clock.c      30
-rw-r--r--   kernel/sys.c              31
-rw-r--r--   kernel/user.c             18
-rw-r--r--   kernel/user_namespace.c   21
5 files changed, 84 insertions(+), 31 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8e63ffb6ed05..dfae1bf6d5b2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
         ktime_t now;
 
-        if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                 return;
 
         if (hrtimer_active(&rt_b->rt_period_timer))
@@ -9319,6 +9319,16 @@ static int sched_rt_global_constraints(void)
 
         return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+        /* Don't accept realtime tasks when there is no way for them to run */
+        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+                return 0;
+
+        return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
@@ -9412,8 +9422,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                       struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-        /* Don't accept realtime tasks when there is no way for them to run */
-        if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+        if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
                 return -EINVAL;
 #else
         /* We don't support RT-tasks being in separate groups */
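The sched.c change above factors the RT-bandwidth admission test out of cpu_cgroup_can_attach() into sched_rt_can_attach(), so the same policy can be reused outside the cgroup path (see kernel/user.c later in this series). Below is a minimal standalone sketch of that policy, not kernel code: the struct definitions and rt_task() test are simplified stand-ins for the real ones.

/* sketch_rt_attach.c - build with: cc -Wall sketch_rt_attach.c */
#include <stdio.h>
#include <stdbool.h>

struct task { int rt_priority; };                 /* >0 means "realtime" here */
struct task_group { unsigned long rt_runtime; };  /* RT budget; 0 = none */

static bool rt_task(const struct task *t)
{
        return t->rt_priority > 0;
}

/* the check sched_rt_can_attach() centralizes in the patch */
static int can_attach(const struct task_group *tg, const struct task *tsk)
{
        /* don't accept realtime tasks when there is no way for them to run */
        if (rt_task(tsk) && tg->rt_runtime == 0)
                return 0;
        return 1;
}

int main(void)
{
        struct task_group no_budget = { .rt_runtime = 0 };
        struct task rt = { .rt_priority = 50 };
        struct task normal = { .rt_priority = 0 };

        printf("rt task, no budget:     %d\n", can_attach(&no_budget, &rt));     /* 0 */
        printf("normal task, no budget: %d\n", can_attach(&no_budget, &normal)); /* 1 */
        return 0;
}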
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852414cc..390f33234bd0 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
 
 struct sched_clock_data {
         /*
@@ -87,7 +88,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
         s64 delta = now - scd->tick_raw;
         u64 clock, min_clock, max_clock;
 
-        WARN_ON_ONCE(!irqs_disabled());
-
         if (unlikely(delta < 0))
                 delta = 0;
 
         /*
          * scd->clock = clamp(scd->tick_gtod + delta,
          *                    max(scd->tick_gtod, scd->clock),
          *                    scd->tick_gtod + TICK_NSEC);
          */
 
         clock = scd->tick_gtod + delta;
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-        struct sched_clock_data *scd = cpu_sdc(cpu);
         u64 now, clock, this_clock, remote_clock;
+        struct sched_clock_data *scd;
 
-        if (unlikely(!sched_clock_running))
-                return 0ull;
+        if (sched_clock_stable)
+                return sched_clock();
 
+        scd = cpu_sdc(cpu);
         WARN_ON_ONCE(!irqs_disabled());
         now = sched_clock();
 
@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)
 
 void sched_clock_tick(void)
 {
-        struct sched_clock_data *scd = this_scd();
+        struct sched_clock_data *scd;
         u64 now, now_gtod;
 
+        if (sched_clock_stable)
+                return;
+
         if (unlikely(!sched_clock_running))
                 return;
 
         WARN_ON_ONCE(!irqs_disabled());
 
+        scd = this_scd();
         now_gtod = ktime_to_ns(ktime_get());
         now = sched_clock();
 
@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
         return sched_clock();
 }
 
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
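The sched_clock.c changes introduce a sched_clock_stable flag: when an architecture declares its raw sched_clock() trustworthy, sched_clock_cpu() returns it directly and the per-cpu filtering (and the tick hook) is skipped entirely. Below is a standalone sketch of the two paths, not kernel code: clock_gettime(CLOCK_MONOTONIC) stands in for the architecture's sched_clock(), and a single-cpu clamp stands in for __update_sched_clock().

/* sketch_stable_clock.c - build with: cc -Wall sketch_stable_clock.c */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define TICK_NSEC 1000000ULL            /* pretend ticks are 1 ms apart */

static int sched_clock_stable;          /* set once by "architecture" code */
static uint64_t last_clock;             /* per-cpu state; one cpu here */

static uint64_t raw_clock(void)         /* stands in for sched_clock() */
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* min/max that stay correct across u64 wrap, as in sched_clock.c */
static uint64_t wrap_min(uint64_t x, uint64_t y)
{
        return (int64_t)(x - y) < 0 ? x : y;
}

static uint64_t wrap_max(uint64_t x, uint64_t y)
{
        return (int64_t)(x - y) > 0 ? x : y;
}

static uint64_t sched_clock_cpu(void)
{
        uint64_t clock;

        if (sched_clock_stable)         /* the new fast path */
                return raw_clock();

        /* unstable path: clamp the raw sample into [last, last + TICK_NSEC] */
        clock = wrap_max(raw_clock(), last_clock);
        clock = wrap_min(clock, last_clock + TICK_NSEC);
        last_clock = clock;
        return clock;
}

int main(void)
{
        last_clock = raw_clock();
        printf("filtered:  %llu\n", (unsigned long long)sched_clock_cpu());
        sched_clock_stable = 1;
        printf("fast path: %llu\n", (unsigned long long)sched_clock_cpu());
        return 0;
}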
diff --git a/kernel/sys.c b/kernel/sys.c
index f145c415bc16..37f458e6882a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -559,7 +559,7 @@ error:
         abort_creds(new);
         return retval;
 }
- 
+
 /*
  * change the user struct in a credentials set to match the new UID
  */
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
         if (!new_user)
                 return -EAGAIN;
 
+        if (!task_can_switch_user(new_user, current)) {
+                free_uid(new_user);
+                return -EINVAL;
+        }
+
         if (atomic_read(&new_user->processes) >=
                         current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
                         new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
                 goto error;
         }
 
-        retval = -EAGAIN;
-        if (new->uid != old->uid && set_user(new) < 0)
-                goto error;
-
+        if (new->uid != old->uid) {
+                retval = set_user(new);
+                if (retval < 0)
+                        goto error;
+        }
         if (ruid != (uid_t) -1 ||
             (euid != (uid_t) -1 && euid != old->uid))
                 new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
         retval = -EPERM;
         if (capable(CAP_SETUID)) {
                 new->suid = new->uid = uid;
-                if (uid != old->uid && set_user(new) < 0) {
-                        retval = -EAGAIN;
-                        goto error;
+                if (uid != old->uid) {
+                        retval = set_user(new);
+                        if (retval < 0)
+                                goto error;
                 }
         } else if (uid != old->uid && uid != new->suid) {
                 goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
                 goto error;
         }
 
-        retval = -EAGAIN;
         if (ruid != (uid_t) -1) {
                 new->uid = ruid;
-                if (ruid != old->uid && set_user(new) < 0)
-                        goto error;
+                if (ruid != old->uid) {
+                        retval = set_user(new);
+                        if (retval < 0)
+                                goto error;
+                }
         }
         if (euid != (uid_t) -1)
                 new->euid = euid;
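The sys.c hunks all follow one pattern: set_user() can now fail with -EINVAL (task_can_switch_user() refused the switch) as well as -EAGAIN (RLIMIT_NPROC would be exceeded), so setreuid/setuid/setresuid propagate set_user()'s return value instead of hard-coding -EAGAIN. A standalone sketch of why the hard-coded error became wrong; the two failure flags are invented stand-ins for the real checks.

/* sketch_set_user_errors.c - build with: cc -Wall sketch_set_user_errors.c */
#include <stdio.h>
#include <errno.h>

static int rt_budget_missing;   /* stand-in: task_can_switch_user() fails */
static int nproc_limit_hit;     /* stand-in: RLIMIT_NPROC exceeded */

static int set_user(void)
{
        if (rt_budget_missing)
                return -EINVAL;
        if (nproc_limit_hit)
                return -EAGAIN;
        return 0;
}

/* old style: the caller overwrote the reason with a hard-coded -EAGAIN */
static int setuid_old(void)
{
        if (set_user() < 0)
                return -EAGAIN;
        return 0;
}

/* new style: whatever set_user() reports is what the syscall returns */
static int setuid_new(void)
{
        int retval = set_user();

        if (retval < 0)
                return retval;
        return 0;
}

int main(void)
{
        rt_budget_missing = 1;
        printf("old: %d (always -EAGAIN)\n", setuid_old());
        printf("new: %d (here -EINVAL)\n", setuid_new());
        return 0;
}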
diff --git a/kernel/user.c b/kernel/user.c
index 3551ac742395..6a9b696128c8 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
 
 #endif
 
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+
+        return sched_rt_can_attach(up->tg, tsk);
+
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+        return 1;
+}
+#endif
+
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
  * caller must undo that ref with free_uid().
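kernel/user.c supplies task_can_switch_user() in two flavours: a real check wrapping sched_rt_can_attach() when both CONFIG_RT_GROUP_SCHED and CONFIG_USER_SCHED are set, and a stub returning 1 otherwise, so callers like set_user() need no #ifdefs of their own. A standalone sketch of that compile-time stub pattern; the config macro and stub function are invented for illustration.

/* sketch_config_stub.c - build with: cc -Wall sketch_config_stub.c
 * and again with:        cc -Wall -DCONFIG_RT_USER_SCHED sketch_config_stub.c
 */
#include <stdio.h>

#ifdef CONFIG_RT_USER_SCHED     /* invented stand-in for the two Kconfig symbols */
static int sched_rt_can_attach_stub(void)
{
        return 0;               /* pretend: the user's group has no RT budget */
}

static int task_can_switch_user(void)
{
        return sched_rt_can_attach_stub();
}
#else
static int task_can_switch_user(void)
{
        return 1;               /* feature off: switching is always allowed */
}
#endif

int main(void)
{
        printf("can switch user: %d\n", task_can_switch_user());
        return 0;
}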
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 79084311ee57..076c7c8215b0 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -60,12 +60,25 @@ int create_user_ns(struct cred *new)
         return 0;
 }
 
-void free_user_ns(struct kref *kref)
+/*
+ * Deferred destructor for a user namespace. This is required because
+ * free_user_ns() may be called with uidhash_lock held, but we need to call
+ * back to free_uid() which will want to take the lock again.
+ */
+static void free_user_ns_work(struct work_struct *work)
 {
-        struct user_namespace *ns;
-
-        ns = container_of(kref, struct user_namespace, kref);
+        struct user_namespace *ns =
+                container_of(work, struct user_namespace, destroyer);
         free_uid(ns->creator);
         kfree(ns);
 }
+
+void free_user_ns(struct kref *kref)
+{
+        struct user_namespace *ns =
+                container_of(kref, struct user_namespace, kref);
+
+        INIT_WORK(&ns->destroyer, free_user_ns_work);
+        schedule_work(&ns->destroyer);
+}
 EXPORT_SYMBOL(free_user_ns);
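The user_namespace.c change defers the actual teardown to a workqueue: free_user_ns() may run with uidhash_lock held, and free_uid() wants that same lock, so freeing in place could deadlock. A standalone sketch of the defer-to-lock-free-context pattern, with a hand-drained one-slot queue standing in for schedule_work() (not kernel code).

/* sketch_deferred_free.c - build with: cc -Wall sketch_deferred_free.c */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct work_struct {
        void (*func)(struct work_struct *);
};

struct user_namespace {
        struct work_struct destroyer;
        int creator_uid;                /* stand-in for the creator user_struct */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct work_struct *pending;     /* one-slot "workqueue" */

static void schedule_work_stub(struct work_struct *w)
{
        pending = w;
}

static void free_user_ns_work(struct work_struct *work)
{
        struct user_namespace *ns =
                container_of(work, struct user_namespace, destroyer);
        /* safe here: whoever runs the work item holds no locks */
        printf("freeing namespace created by uid %d\n", ns->creator_uid);
        free(ns);
}

static void free_user_ns(struct user_namespace *ns)
{
        /* may be called with the hash lock held, so defer instead of freeing */
        ns->destroyer.func = free_user_ns_work;
        schedule_work_stub(&ns->destroyer);
}

int main(void)
{
        struct user_namespace *ns = malloc(sizeof(*ns));

        if (!ns)
                return 1;
        ns->creator_uid = 1000;
        free_user_ns(ns);               /* only queues the teardown */
        if (pending)                    /* later, from a lock-free context */
                pending->func(pending);
        return 0;
}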