Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 77
 1 file changed, 75 insertions(+), 2 deletions(-)
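
This patch adds an optional "book" scheduling-domain level (CONFIG_SCHED_BOOK) between the physical-package (CPU) and multi-core (MC) levels. A book groups several multi-core chips that share a common cache or interconnect, a topology level found on s390 machines; the hunks below wire the new level into the scheduler's domain setup symmetrically with the existing SMT, MC, and CPU levels.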
diff --git a/kernel/sched.c b/kernel/sched.c
index 1a0c084b1cf9..26f83e2f1534 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6506,6 +6506,7 @@ struct s_data {
 	cpumask_var_t		nodemask;
 	cpumask_var_t		this_sibling_map;
 	cpumask_var_t		this_core_map;
+	cpumask_var_t		this_book_map;
 	cpumask_var_t		send_covered;
 	cpumask_var_t		tmpmask;
 	struct sched_group	**sched_group_nodes;
@@ -6517,6 +6518,7 @@ enum s_alloc {
 	sa_rootdomain,
 	sa_tmpmask,
 	sa_send_covered,
+	sa_this_book_map,
 	sa_this_core_map,
 	sa_this_sibling_map,
 	sa_nodemask,
@@ -6570,6 +6572,31 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 }
 #endif /* CONFIG_SCHED_MC */
 
+/*
+ * book sched-domains:
+ */
+#ifdef CONFIG_SCHED_BOOK
+static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
+
+static int
+cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
+		  struct sched_group **sg, struct cpumask *mask)
+{
+	int group = cpu;
+#ifdef CONFIG_SCHED_MC
+	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
+	group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_SMT)
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
+	group = cpumask_first(mask);
+#endif
+	if (sg)
+		*sg = &per_cpu(sched_group_book, group).sg;
+	return group;
+}
+#endif /* CONFIG_SCHED_BOOK */
+
 static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
 
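The helper above follows the same convention as the existing cpu_to_core_group()/cpu_to_phys_group() helpers: a group is identified by the first online CPU of the next-lower topology mask intersected with cpu_map, and the per-CPU sched_group of that representative is handed back. A minimal userspace sketch of that rule, using plain 64-bit masks and toy types in place of struct cpumask and the per-CPU machinery (all names here are hypothetical):

#include <stdio.h>
#include <stdint.h>

struct toy_group { int id; };

/* Stand-in for the per-CPU sched_group_book array. */
static struct toy_group sched_group_book[64];

/* Model of cpu_to_book_group(): the book-level group of @cpu is the
 * one owned by the first online CPU of @cpu's core (MC) mask. */
static int toy_cpu_to_book_group(int cpu, uint64_t cpu_map,
				 uint64_t core_mask, struct toy_group **sg)
{
	uint64_t m = core_mask & cpu_map;
	int group = m ? __builtin_ctzll(m) : cpu;	/* GCC builtin */

	if (sg)
		*sg = &sched_group_book[group];
	return group;
}

int main(void)
{
	uint64_t cpu_map = 0x0f;			/* CPUs 0-3 online */
	uint64_t cores[4] = { 0x3, 0x3, 0xc, 0xc };	/* cores {0,1},{2,3} */
	struct toy_group *sg;

	for (int cpu = 0; cpu < 4; cpu++) {
		int g = toy_cpu_to_book_group(cpu, cpu_map, cores[cpu], &sg);
		sched_group_book[g].id = g;
		printf("cpu %d -> group %d (sg=%p)\n", cpu, g, (void *)sg);
	}
	return 0;
}

All CPUs that share a lower-level mask resolve to the same representative, so they also share one group object.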
@@ -6578,7 +6605,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 		  struct sched_group **sg, struct cpumask *mask)
 {
 	int group;
-#ifdef CONFIG_SCHED_MC
+#ifdef CONFIG_SCHED_BOOK
+	cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
+	group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_MC)
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
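With CONFIG_SCHED_BOOK set, the physical level's groups widen from one-per-core to one-per-book, since each phys-level group should now match the span of the new child level. A toy comparison under assumed masks (pairs of CPUs per core, quads per book; not the kernel API):

#include <stdio.h>
#include <stdint.h>

static int first_cpu(uint64_t mask, uint64_t cpu_map)
{
	uint64_t m = mask & cpu_map;
	return m ? __builtin_ctzll(m) : -1;
}

int main(void)
{
	uint64_t cpu_map = 0xff;	/* CPUs 0-7 online */
	uint64_t core_of_6 = 0xc0;	/* core {6,7} */
	uint64_t book_of_6 = 0xf0;	/* book {4..7} */

	/* Old behaviour (MC): phys group of CPU 6 is led by CPU 6. */
	printf("MC grouping:   cpu 6 -> %d\n", first_cpu(core_of_6, cpu_map));
	/* New behaviour (BOOK): phys group of CPU 6 is led by CPU 4. */
	printf("BOOK grouping: cpu 6 -> %d\n", first_cpu(book_of_6, cpu_map));
	return 0;
}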
@@ -6839,6 +6869,9 @@ SD_INIT_FUNC(CPU)
 #ifdef CONFIG_SCHED_MC
  SD_INIT_FUNC(MC)
 #endif
+#ifdef CONFIG_SCHED_BOOK
+ SD_INIT_FUNC(BOOK)
+#endif
 
 static int default_relax_domain_level = -1;
 
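SD_INIT_FUNC() is a token-pasting macro that stamps out one sd_init_<type>() initializer per domain level, so adding BOOK here generates the initializer behind the SD_INIT(sd, BOOK) call used later in the patch. A self-contained sketch of the same generator pattern (toy types, not the kernel macro body):

#include <stdio.h>
#include <string.h>

struct toy_domain { const char *name; int level; };

enum { LV_SMT, LV_MC, LV_BOOK, LV_CPU };

/* Token-pasting generator in the spirit of SD_INIT_FUNC(): one
 * init_<type>() function per domain level. */
#define TOY_INIT_FUNC(type, lv)					\
static void init_##type(struct toy_domain *d)			\
{								\
	memset(d, 0, sizeof(*d));				\
	d->name = #type;					\
	d->level = lv;						\
}

TOY_INIT_FUNC(MC, LV_MC)
TOY_INIT_FUNC(BOOK, LV_BOOK)

#define TOY_INIT(d, type) init_##type(d)

int main(void)
{
	struct toy_domain d;

	TOY_INIT(&d, MC);
	printf("%s level=%d\n", d.name, d.level);
	TOY_INIT(&d, BOOK);
	printf("%s level=%d\n", d.name, d.level);
	return 0;
}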
@@ -6888,6 +6921,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		free_cpumask_var(d->tmpmask); /* fall through */
 	case sa_send_covered:
 		free_cpumask_var(d->send_covered); /* fall through */
+	case sa_this_book_map:
+		free_cpumask_var(d->this_book_map); /* fall through */
 	case sa_this_core_map:
 		free_cpumask_var(d->this_core_map); /* fall through */
 	case sa_this_sibling_map:
@@ -6934,8 +6969,10 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_nodemask;
 	if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
 		return sa_this_sibling_map;
-	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
 		return sa_this_core_map;
+	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+		return sa_this_book_map;
 	if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
 		return sa_send_covered;
 	d->rd = alloc_rootdomain();
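These two hunks keep the staged-allocation contract intact: __visit_domain_allocation_hell() returns the name of the last stage that fully succeeded, and __free_domain_allocs() enters its fall-through switch at exactly that case, so sa_this_book_map has to be added to both sides in matching positions. A compact userspace model of the idiom (hypothetical three-stage setup):

#include <stdio.h>
#include <stdlib.h>

/* Stages ordered like enum s_alloc: each value names the deepest
 * stage that succeeded, and teardown falls through from there. */
enum stage { st_none, st_a, st_b, st_c };

struct data { void *a, *b, *c; };

static void teardown(struct data *d, enum stage reached)
{
	switch (reached) {
	case st_c:
		free(d->c);	/* fall through */
	case st_b:
		free(d->b);	/* fall through */
	case st_a:
		free(d->a);	/* fall through */
	case st_none:
		break;
	}
}

static enum stage setup(struct data *d)
{
	if (!(d->a = malloc(16)))
		return st_none;
	if (!(d->b = malloc(16)))
		return st_a;	/* only 'a' needs freeing */
	if (!(d->c = malloc(16)))
		return st_b;	/* 'a' and 'b' need freeing */
	return st_c;
}

int main(void)
{
	struct data d = { 0 };
	enum stage got = setup(&d);

	teardown(&d, got);
	printf("reached stage %d\n", got);
	return 0;
}

Inserting a stage only requires adding one case to the switch and shifting the failure return of the following allocation, which is precisely the shape of the kernel hunks above.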
@@ -6993,6 +7030,23 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
 	return sd;
 }
 
+static struct sched_domain *__build_book_sched_domain(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+	struct sched_domain *parent, int i)
+{
+	struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_BOOK
+	sd = &per_cpu(book_domains, i).sd;
+	SD_INIT(sd, BOOK);
+	set_domain_attribute(sd, attr);
+	cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
+	sd->parent = parent;
+	parent->child = sd;
+	cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+	return sd;
+}
+
 static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
 	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 	struct sched_domain *parent, int i)
@@ -7050,6 +7104,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 						d->send_covered, d->tmpmask);
 		break;
 #endif
+#ifdef CONFIG_SCHED_BOOK
+	case SD_LV_BOOK: /* set up book groups */
+		cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
+		if (cpu == cpumask_first(d->this_book_map))
+			init_sched_build_groups(d->this_book_map, cpu_map,
+						&cpu_to_book_group,
+						d->send_covered, d->tmpmask);
+		break;
+#endif
 	case SD_LV_CPU: /* set up physical groups */
 		cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
 		if (!cpumask_empty(d->nodemask))
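As with the sibling and MC cases above this one, every CPU computes its book span, but only the first CPU of each span calls init_sched_build_groups(), so each book's group list is built exactly once. The same dedup rule in miniature (assumed masks):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cpu_map = 0x0f;	/* CPUs 0-3 online */
	uint64_t books[4] = { 0x3, 0x3, 0xc, 0xc };	/* books {0,1},{2,3} */

	/* Mirror the SD_LV_BOOK case: all CPUs compute their span, but
	 * only the span's first CPU does the one-time group build. */
	for (int cpu = 0; cpu < 4; cpu++) {
		uint64_t span = books[cpu] & cpu_map;
		if (cpu == __builtin_ctzll(span))
			printf("cpu %d builds groups for span %#llx\n",
			       cpu, (unsigned long long)span);
	}
	return 0;
}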
@@ -7097,12 +7160,14 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
 		sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
+		sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 	}
 
 	for_each_cpu(i, cpu_map) {
 		build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+		build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
 		build_sched_groups(&d, SD_LV_MC, cpu_map, i);
 	}
 
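The builder chain runs top-down, and each __build_*_sched_domain() helper hangs its level under the parent and returns the new bottom, or returns the parent unchanged when its config option is off; that is what lets the BOOK call slot in between CPU and MC without disturbing either neighbour. A stripped-down model of the chaining (toy struct, NUMA level omitted):

#include <stdio.h>

struct dom { const char *name; struct dom *parent, *child; };

/* Hang @level under @parent and return the new bottom of the chain. */
static struct dom *build(struct dom *level, struct dom *parent)
{
	level->parent = parent;
	if (parent)
		parent->child = level;
	return level;
}

int main(void)
{
	static struct dom cpu_d = { "CPU" }, book_d = { "BOOK" },
			  mc_d = { "MC" }, smt_d = { "SMT" };
	struct dom *sd = NULL;

	sd = build(&cpu_d, sd);
	sd = build(&book_d, sd);	/* the new level slots in here */
	sd = build(&mc_d, sd);
	sd = build(&smt_d, sd);

	/* Walk bottom-up: prints SMT -> MC -> BOOK -> CPU. */
	for (struct dom *d = sd; d; d = d->parent)
		printf("%s%s", d->name, d->parent ? " -> " : "\n");
	return 0;
}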
@@ -7133,6 +7198,12 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		init_sched_groups_power(i, sd);
 	}
 #endif
+#ifdef CONFIG_SCHED_BOOK
+	for_each_cpu(i, cpu_map) {
+		sd = &per_cpu(book_domains, i).sd;
+		init_sched_groups_power(i, sd);
+	}
+#endif
 
 	for_each_cpu(i, cpu_map) {
 		sd = &per_cpu(phys_domains, i).sd;
@@ -7158,6 +7229,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = &per_cpu(cpu_domains, i).sd;
 #elif defined(CONFIG_SCHED_MC)
 		sd = &per_cpu(core_domains, i).sd;
+#elif defined(CONFIG_SCHED_BOOK)
+		sd = &per_cpu(book_domains, i).sd;
 #else
 		sd = &per_cpu(phys_domains, i).sd;
 #endif
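
This last hunk extends the #elif ladder that picks the lowest configured level as the per-CPU base domain to attach: SMT if present, else MC, else now BOOK, else the physical domain. The selection is purely compile-time, as a small demo shows (hypothetical names; build with e.g. -DCONFIG_SCHED_BOOK):

#include <stdio.h>

static const char *base_domain(void)
{
#ifdef CONFIG_SCHED_SMT
	return "cpu_domains (SMT)";
#elif defined(CONFIG_SCHED_MC)
	return "core_domains (MC)";
#elif defined(CONFIG_SCHED_BOOK)
	return "book_domains (BOOK)";
#else
	return "phys_domains";
#endif
}

int main(void)
{
	printf("attaching per-cpu base domain: %s\n", base_domain());
	return 0;
}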