Diffstat (limited to 'kernel')
 kernel/sched.c | 115
 1 file changed, 59 insertions(+), 56 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index edd5a54b95da..8f351c56567f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -307,52 +307,6 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD;
  */
 struct task_group init_task_group;
 
-/* return group to which a task belongs */
-static inline struct task_group *task_group(struct task_struct *p)
-{
-        struct task_group *tg;
-
-#ifdef CONFIG_CGROUP_SCHED
-        tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
-                          struct task_group, css);
-#else
-        tg = &init_task_group;
-#endif
-        return tg;
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-        /*
-         * Strictly speaking this rcu_read_lock() is not needed since the
-         * task_group is tied to the cgroup, which in turn can never go away
-         * as long as there are tasks attached to it.
-         *
-         * However since task_group() uses task_subsys_state() which is an
-         * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
-         */
-        rcu_read_lock();
-#ifdef CONFIG_FAIR_GROUP_SCHED
-        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-        p->se.parent = task_group(p)->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-        p->rt.rt_rq = task_group(p)->rt_rq[cpu];
-        p->rt.parent = task_group(p)->rt_se[cpu];
-#endif
-        rcu_read_unlock();
-}
-
-#else
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
-        return NULL;
-}
-
 #endif /* CONFIG_CGROUP_SCHED */
 
 /* CFS-related fields in a runqueue */
@@ -645,6 +599,49 @@ static inline int cpu_of(struct rq *rq)
 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 #define raw_rq()                (&__raw_get_cpu_var(runqueues))
 
+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+ * Return the group to which this task belongs.
+ *
+ * We use task_subsys_state_check() and extend the RCU verification
+ * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
+ * holds that lock for each task it moves into the cgroup. Therefore
+ * by holding that lock, we pin the task to the current cgroup.
+ */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+        struct cgroup_subsys_state *css;
+
+        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+                        lockdep_is_held(&task_rq(p)->lock));
+        return container_of(css, struct task_group, css);
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+        p->se.parent = task_group(p)->se[cpu];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+        p->rt.rt_rq = task_group(p)->rt_rq[cpu];
+        p->rt.parent = task_group(p)->rt_se[cpu];
+#endif
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+        return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
 inline void update_rq_clock(struct rq *rq)
 {
         if (!rq->skip_clock_update)
@@ -4529,16 +4526,6 @@ recheck:
         }
 
         if (user) {
-#ifdef CONFIG_RT_GROUP_SCHED
-                /*
-                 * Do not allow realtime tasks into groups that have no runtime
-                 * assigned.
-                 */
-                if (rt_bandwidth_enabled() && rt_policy(policy) &&
-                                task_group(p)->rt_bandwidth.rt_runtime == 0)
-                        return -EPERM;
-#endif
-
                 retval = security_task_setscheduler(p, policy, param);
                 if (retval)
                         return retval;
@@ -4554,6 +4541,22 @@ recheck:
          * runqueue lock must be held.
          */
         rq = __task_rq_lock(p);
+
+#ifdef CONFIG_RT_GROUP_SCHED
+        if (user) {
+                /*
+                 * Do not allow realtime tasks into groups that have no runtime
+                 * assigned.
+                 */
+                if (rt_bandwidth_enabled() && rt_policy(policy) &&
+                                task_group(p)->rt_bandwidth.rt_runtime == 0) {
+                        __task_rq_unlock(rq);
+                        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+                        return -EPERM;
+                }
+        }
+#endif
+
         /* recheck policy now with rq lock held */
         if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                 policy = oldpolicy = -1;
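
Note on the pattern the new task_group() relies on: task_subsys_state_check() is an rcu_dereference_check()-style accessor, meaning the usual RCU lockdep verification is extended with an extra condition (here lockdep_is_held(&task_rq(p)->lock)), so holding the runqueue lock is accepted as an alternative to rcu_read_lock(). A minimal, self-contained sketch of that general pattern follows; my_lock, my_ptr, my_data and get_data() are illustrative placeholders, not symbols from this patch or from the scheduler.

/*
 * Sketch only (placeholder names): rcu_dereference_check() accepts an
 * extra lockdep expression, so a pointer that is normally RCU-protected
 * may also be read by a caller holding a lock that pins the pointed-to
 * object, without triggering a CONFIG_PROVE_RCU warning.
 */
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/lockdep.h>

struct my_data {
        int val;
};

static DEFINE_SPINLOCK(my_lock);
static struct my_data __rcu *my_ptr;

static struct my_data *get_data(void)
{
        /* Legal under rcu_read_lock() OR with my_lock held. */
        return rcu_dereference_check(my_ptr, lockdep_is_held(&my_lock));
}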