Diffstat (limited to 'kernel/sched_autogroup.c')

 kernel/sched_autogroup.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 938d52f80a2..9fb65628315 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -27,6 +27,11 @@ static inline void autogroup_destroy(struct kref *kref)
 {
 	struct autogroup *ag = container_of(kref, struct autogroup, kref);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/* We've redirected RT tasks to the root task group... */
+	ag->tg->rt_se = NULL;
+	ag->tg->rt_rq = NULL;
+#endif
 	sched_destroy_group(ag->tg);
 }
 
@@ -55,6 +60,10 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
 	return ag;
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg);
+#endif
+
 static inline struct autogroup *autogroup_create(void)
 {
 	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -72,6 +81,19 @@ static inline struct autogroup *autogroup_create(void)
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
 	ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Autogroup RT tasks are redirected to the root task group
+	 * so we don't have to move tasks around upon policy change,
+	 * or flail around trying to allocate bandwidth on the fly.
+	 * A bandwidth exception in __sched_setscheduler() allows
+	 * the policy change to proceed.  Thereafter, task_group()
+	 * returns &root_task_group, so zero bandwidth is required.
+	 */
+	free_rt_sched_group(tg);
+	tg->rt_se = root_task_group.rt_se;
+	tg->rt_rq = root_task_group.rt_rq;
+#endif
 	tg->autogroup = ag;
 
 	return ag;
@@ -106,6 +128,11 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	return true;
 }
 
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return tg != &root_task_group && tg->autogroup;
+}
+
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
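
The comment added in autogroup_create() points at a companion change outside this file: the zero-RT-runtime rejection in __sched_setscheduler() (kernel/sched.c) is taught to let autogroup tasks through, which is what the task_group_is_autogroup() helper introduced above exists for. Roughly, as a sketch of the logic rather than the verbatim kernel/sched.c hunk:

#ifdef CONFIG_RT_GROUP_SCHED
	if (user) {
		/*
		 * Do not allow real-time tasks into groups that have no
		 * runtime assigned -- except an autogroup, whose RT tasks
		 * actually run in root_task_group (see the comment in
		 * autogroup_create() above).
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
		    task_group(p)->rt_bandwidth.rt_runtime == 0 &&
		    !task_group_is_autogroup(task_group(p))) {
			task_rq_unlock(rq, &flags);
			return -EPERM;
		}
	}
#endif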
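The other half of the patch is pointer ownership: autogroup_create() frees the group's own RT structures and borrows root_task_group's, so autogroup_destroy() must NULL those borrowed pointers before sched_destroy_group() reaches free_rt_sched_group() and frees memory the group never owned. A minimal, self-contained userspace analogue of that borrow-then-NULL-before-free pattern (all names here are illustrative, none are kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel's task_group and rt_rq. */
struct rt_runqueue { int budget; };

struct group {
	struct rt_runqueue *rt_rq;	/* owned, or borrowed from root */
};

static struct rt_runqueue root_rt_rq = { .budget = 100 };
static struct group root_group = { .rt_rq = &root_rt_rq };

/*
 * Generic destroy path: frees whatever rt_rq points at, the way
 * sched_destroy_group() -> free_rt_sched_group() would.
 */
static void destroy_group(struct group *g)
{
	free(g->rt_rq);		/* free(NULL) is a harmless no-op */
	free(g);
}

int main(void)
{
	struct group *ag = calloc(1, sizeof(*ag));
	ag->rt_rq = calloc(1, sizeof(*ag->rt_rq));	/* group's own RT state */

	/*
	 * Creation drops the group's own RT state and borrows the root's,
	 * mirroring free_rt_sched_group(tg) followed by the rt_se/rt_rq
	 * assignments in autogroup_create().
	 */
	free(ag->rt_rq);
	ag->rt_rq = root_group.rt_rq;

	printf("autogroup sees root budget: %d\n", ag->rt_rq->budget);

	/*
	 * Destruction NULLs the borrowed pointer first, exactly like the
	 * #ifdef CONFIG_RT_GROUP_SCHED block in autogroup_destroy();
	 * otherwise destroy_group() would free root_group's rt_rq.
	 */
	ag->rt_rq = NULL;
	destroy_group(ag);

	return 0;
}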