Diffstat (limited to 'kernel/sched/core.c')
 -rw-r--r--  kernel/sched/core.c | 156
 1 file changed, 77 insertions(+), 79 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 05c39f030314..5ac63c9a995a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -978,13 +978,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq->skip_clock_update = 1;
 }
 
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -1015,18 +1008,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
-		struct task_migration_notifier tmn;
-
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
-
-		tmn.task = p;
-		tmn.from_cpu = task_cpu(p);
-		tmn.to_cpu = new_cpu;
-
-		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
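The two hunks above delete the task-migration notifier chain wholesale. For reference, a subscriber looked roughly like the sketch below; the callback and notifier-block names here are invented for illustration, while the fields of struct task_migration_notifier (task, from_cpu, to_cpu) are taken from the deleted code itself:

	static int my_migration_cb(struct notifier_block *nb,
				   unsigned long action, void *data)
	{
		struct task_migration_notifier *tmn = data;

		pr_debug("task %d: CPU %d -> %d\n",
			 task_pid_nr(tmn->task), tmn->from_cpu, tmn->to_cpu);
		return NOTIFY_OK;
	}

	static struct notifier_block my_migration_nb = {
		.notifier_call = my_migration_cb,
	};

	/* once, at init time: */
	register_task_migration_notifier(&my_migration_nb);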
@@ -2527,13 +2512,11 @@ void __sched schedule_preempt_disabled(void)
  */
 asmlinkage void __sched notrace preempt_schedule(void)
 {
-	struct thread_info *ti = current_thread_info();
-
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
 	 */
-	if (likely(ti->preempt_count || irqs_disabled()))
+	if (likely(!preemptible()))
 		return;
 
 	do {
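The open-coded test is replaced by the preemptible() helper, which also drops the cached thread_info pointer. On CONFIG_PREEMPT_COUNT kernels of this vintage the helper expands to essentially the same check (quoted from include/linux/preempt.h from memory, so treat as approximate; on !CONFIG_PREEMPT_COUNT builds it is defined to 0):

	#define preemptible()	(preempt_count() == 0 && !irqs_disabled())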
@@ -2677,7 +2660,7 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
 	if (unlikely(!q))
 		return;
 
-	if (unlikely(!nr_exclusive))
+	if (unlikely(nr_exclusive != 1))
 		wake_flags = 0;
 
 	spin_lock_irqsave(&q->lock, flags);
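The sync hint (WF_SYNC) tells the scheduler the waker is about to sleep, so the woken task may be placed on the waker's CPU. That hand-off reasoning is only sound when exactly one exclusive waiter is woken; the old guard dropped the hint only for nr_exclusive == 0 and kept it for multi-task exclusive wakeups. In miniature (names as in the function above):

	/* old: keep WF_SYNC for any nr_exclusive != 0, including > 1 */
	if (unlikely(!nr_exclusive))
		wake_flags = 0;

	/* new: keep WF_SYNC only for the single-wakeup case */
	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;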
@@ -4964,7 +4947,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_FORK |
 				SD_BALANCE_EXEC |
 				SD_SHARE_CPUPOWER |
-				SD_SHARE_PKG_RESOURCES);
+				SD_SHARE_PKG_RESOURCES |
+				SD_PREFER_SIBLING);
 		if (nr_node_ids == 1)
 			pflags &= ~SD_SERIALIZE;
 	}
@@ -5133,18 +5117,23 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
 
 static void update_top_cache_domain(int cpu)
 {
 	struct sched_domain *sd;
 	int id = cpu;
+	int size = 1;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd)
+	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
+		size = cpumask_weight(sched_domain_span(sd));
+	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+	per_cpu(sd_llc_size, cpu) = size;
 	per_cpu(sd_llc_id, cpu) = id;
 }
 
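sd_llc_size records how many CPUs share a last-level cache with the given CPU (1 when no SD_SHARE_PKG_RESOURCES domain exists). A hypothetical reader, modeled on how cpus_share_cache() consumes the neighbouring sd_llc_id, would look like this sketch (the function name is invented):

	static int llc_size(int cpu)
	{
		/*
		 * Plain per-cpu int: written under the domain-rebuild
		 * path, read locklessly just like sd_llc_id.
		 */
		return per_cpu(sd_llc_size, cpu);
	}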
@@ -5168,6 +5157,13 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
+			/*
+			 * Transfer SD_PREFER_SIBLING down in case of a
+			 * degenerate parent; the spans match for this
+			 * so the property transfers.
+			 */
+			if (parent->flags & SD_PREFER_SIBLING)
+				tmp->flags |= SD_PREFER_SIBLING;
 			destroy_sched_domain(parent, cpu);
 		} else
 			tmp = tmp->parent;
@@ -6234,8 +6230,9 @@ match1:
 		;
 	}
 
+	n = ndoms_cur;
 	if (doms_new == NULL) {
-		ndoms_cur = 0;
+		n = 0;
 		doms_new = &fallback_doms;
 		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
@@ -6243,7 +6240,7 @@ match1:
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
-		for (j = 0; j < ndoms_cur && !new_topology; j++) {
+		for (j = 0; j < n && !new_topology; j++) {
 			if (cpumask_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
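These two hunks snapshot ndoms_cur into a local before doms_new may be redirected to fallback_doms, and bound the match loop by that snapshot. The fallback (NULL doms_new) path now zeroes only the local instead of clobbering ndoms_cur itself, so the count describing doms_cur stays valid for the rest of the function. The pattern, in miniature (variables as in partition_sched_domains()):

	n = ndoms_cur;		/* how many domains doms_cur really holds */
	if (doms_new == NULL)
		n = 0;		/* fallback set: nothing can match */
	/* ... */
	for (j = 0; j < n && !new_topology; j++)
		/* compare doms_new[i] against doms_cur[j] */;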
@@ -6815,7 +6812,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
 			  lockdep_is_held(&tsk->sighand->siglock)),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
@@ -7137,23 +7134,22 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 #ifdef CONFIG_CGROUP_SCHED
 
-/* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
-	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
-			    struct task_group, css);
+	return css ? container_of(css, struct task_group, css) : NULL;
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-	struct task_group *tg, *parent;
+	struct task_group *parent = css_tg(parent_css);
+	struct task_group *tg;
 
-	if (!cgrp->parent) {
+	if (!parent) {
 		/* This is early initialization for the top cgroup */
 		return &root_task_group.css;
 	}
 
-	parent = cgroup_tg(cgrp->parent);
 	tg = sched_create_group(parent);
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
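From here down, the patch converts every cpu-cgroup handler from taking struct cgroup * to taking struct cgroup_subsys_state * directly, with css_tg() reduced to a container_of(). The same shape applies to any controller; a hypothetical minimal subsystem (all names invented) illustrates the before/after pattern:

	struct my_group {
		struct cgroup_subsys_state css;
		u64 setting;
	};

	static inline struct my_group *css_my(struct cgroup_subsys_state *css)
	{
		return css ? container_of(css, struct my_group, css) : NULL;
	}

	static u64 my_setting_read_u64(struct cgroup_subsys_state *css,
				       struct cftype *cft)
	{
		/* the css is handed in directly; no cgroup -> css lookup */
		return css_my(css)->setting;
	}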
@@ -7161,41 +7157,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup *cgrp)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
-	struct task_group *parent;
+	struct task_group *tg = css_tg(css);
+	struct task_group *parent = css_tg(css_parent(css));
 
-	if (!cgrp->parent)
-		return 0;
-
-	parent = cgroup_tg(cgrp->parent);
-	sched_online_group(tg, parent);
+	if (parent)
+		sched_online_group(tg, parent);
 	return 0;
 }
 
-static void cpu_cgroup_css_free(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	sched_destroy_group(tg);
 }
 
-static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	sched_offline_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 				 struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, cgrp, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
-		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
 #else
 		/* We don't support RT-tasks being in separate groups */
@@ -7206,18 +7199,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 	return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 			      struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, cgrp, tset)
+	cgroup_taskset_for_each(task, css, tset)
 		sched_move_task(task);
 }
 
-static void
-cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
+			    struct cgroup_subsys_state *old_css,
 		struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
@@ -7231,15 +7224,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-				u64 shareval)
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cftype, u64 shareval)
 {
-	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
+	return sched_group_set_shares(css_tg(css), scale_load(shareval));
 }
 
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+			       struct cftype *cft)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	return (u64) scale_load_down(tg->shares);
 }
@@ -7361,26 +7355,28 @@ long tg_get_cfs_period(struct task_group *tg)
 	return cfs_period_us;
 }
 
-static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
+				  struct cftype *cft)
 {
-	return tg_get_cfs_quota(cgroup_tg(cgrp));
+	return tg_get_cfs_quota(css_tg(css));
 }
 
-static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
-				   s64 cfs_quota_us)
+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
+				   struct cftype *cftype, s64 cfs_quota_us)
 {
-	return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
+	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
 }
 
-static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
+				   struct cftype *cft)
 {
-	return tg_get_cfs_period(cgroup_tg(cgrp));
+	return tg_get_cfs_period(css_tg(css));
 }
 
-static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-				    u64 cfs_period_us)
+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
+				    struct cftype *cftype, u64 cfs_period_us)
 {
-	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+	return tg_set_cfs_period(css_tg(css), cfs_period_us);
 }
 
 struct cfs_schedulable_data {
@@ -7461,10 +7457,10 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 	return ret;
 }
 
-static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
 			  struct cgroup_map_cb *cb)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 
 	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
@@ -7477,26 +7473,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
-				s64 val)
+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
+				struct cftype *cft, s64 val)
 {
-	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+	return sched_group_set_rt_runtime(css_tg(css), val);
 }
 
-static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
+			       struct cftype *cft)
 {
-	return sched_group_rt_runtime(cgroup_tg(cgrp));
+	return sched_group_rt_runtime(css_tg(css));
 }
 
-static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
-				    u64 rt_period_us)
+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
+				    struct cftype *cftype, u64 rt_period_us)
 {
-	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+	return sched_group_set_rt_period(css_tg(css), rt_period_us);
 }
 
-static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
+				   struct cftype *cft)
 {
-	return sched_group_rt_period(cgroup_tg(cgrp));
+	return sched_group_rt_period(css_tg(css));
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 