Diffstat (limited to 'kernel/sched.c')

 kernel/sched.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 526d237b8ce5..eecf070ffd1a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
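The hunk above renames the per-cpu variable, presumably to avoid a clash with the init_rt_rq() helper of the same name in kernel/sched.c now that per-cpu variables live in the ordinary symbol namespace. Below is a minimal userspace sketch (plain C, not kernel code, names chosen for illustration) of that kind of collision:

/* Stand-in for DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var):
 * one slot per CPU, indexed like per_cpu(init_rt_rq_var, cpu). */
#include <stdio.h>

#define NR_CPUS 4

struct rt_rq { int rt_nr_running; };

static struct rt_rq init_rt_rq_var[NR_CPUS];

/* A helper function of the old base name; keeping the variable named
 * "init_rt_rq" as well would redeclare the identifier and fail to compile,
 * which is what the "_var" suffix sidesteps. */
static void init_rt_rq(struct rt_rq *rt_rq)
{
	rt_rq->rt_nr_running = 0;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		init_rt_rq(&init_rt_rq_var[cpu]);
	printf("%d\n", init_rt_rq_var[0].rt_nr_running);
	return 0;
}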
@@ -780,7 +780,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
 	return single_open(filp, sched_feat_show, NULL);
 }
 
-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
 	.open		= sched_feat_open,
 	.write		= sched_feat_write,
 	.read		= seq_read,
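Constifying the ops table lets the structure of function pointers be placed in read-only data, so it cannot be overwritten at run time. A minimal sketch of the idea in plain C (not the kernel file_operations API):

#include <stdio.h>

struct demo_ops {
	int (*open)(void);
	int (*read)(void);
};

static int demo_open(void) { return 0; }
static int demo_read(void) { return 42; }

/* const table of function pointers: writes such as demo_fops.open = NULL
 * are rejected at compile time, and the object can live in .rodata. */
static const struct demo_ops demo_fops = {
	.open = demo_open,
	.read = demo_read,
};

int main(void)
{
	printf("%d\n", demo_fops.open() + demo_fops.read());
	return 0;
}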
@@ -8195,14 +8195,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
 		 struct sched_group **sg, struct cpumask *unused)
 {
 	if (sg)
-		*sg = &per_cpu(sched_group_cpus, cpu).sg;
+		*sg = &per_cpu(sched_groups, cpu).sg;
 	return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
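cpu_to_cpu_group() looks up the calling CPU's per-cpu container and hands back a pointer to the sched_group embedded in it. A userspace analogue of that access pattern, with an array standing in for the real per_cpu machinery (an assumed simplification, not the kernel implementation):

#include <stdio.h>

#define NR_CPUS 4

struct sched_group_like { int id; };
struct static_sched_group_like { struct sched_group_like sg; };

/* stand-in for DEFINE_PER_CPU(struct static_sched_group, sched_groups) */
static struct static_sched_group_like sched_groups[NR_CPUS];

static int cpu_to_cpu_group(int cpu, struct sched_group_like **sg)
{
	if (sg)
		*sg = &sched_groups[cpu].sg;	/* like &per_cpu(sched_groups, cpu).sg */
	return cpu;
}

int main(void)
{
	struct sched_group_like *sg;
	int cpu = cpu_to_cpu_group(2, &sg);

	sg->id = cpu;
	printf("group for cpu %d has id %d\n", cpu, sched_groups[2].sg.id);
	return 0;
}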
@@ -9470,7 +9470,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
 		init_tg_rt_entry(&init_task_group,
-				&per_cpu(init_rt_rq, i),
+				&per_cpu(init_rt_rq_var, i),
 				&per_cpu(init_sched_rt_entity, i), i, 1,
 				root_task_group.rt_se[i]);
 #endif