Diffstat (limited to 'arch/s390/kernel/topology.c')
 arch/s390/kernel/topology.c | 150 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 93 insertions(+), 57 deletions(-)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bcef00766a64..13559c993847 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -57,8 +57,8 @@ struct tl_info {
 	union tl_entry tle[0];
 };
 
-struct core_info {
-	struct core_info *next;
+struct mask_info {
+	struct mask_info *next;
 	unsigned char id;
 	cpumask_t mask;
 };
@@ -66,7 +66,6 @@ struct core_info {
 static int topology_enabled;
 static void topology_work_fn(struct work_struct *work);
 static struct tl_info *tl_info;
-static struct core_info core_info;
 static int machine_has_topology;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
@@ -74,38 +73,37 @@ static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
 static DEFINE_SPINLOCK(topology_lock);
 
+static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+#ifdef CONFIG_SCHED_BOOK
+static struct mask_info book_info;
+cpumask_t cpu_book_map[NR_CPUS];
+unsigned char cpu_book_id[NR_CPUS];
+#endif
+
+static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
-	struct core_info *core = &core_info;
-	unsigned long flags;
 	cpumask_t mask;
 
 	cpus_clear(mask);
 	if (!topology_enabled || !machine_has_topology)
 		return cpu_possible_map;
-	spin_lock_irqsave(&topology_lock, flags);
-	while (core) {
-		if (cpu_isset(cpu, core->mask)) {
-			mask = core->mask;
+	while (info) {
+		if (cpu_isset(cpu, info->mask)) {
+			mask = info->mask;
 			break;
 		}
-		core = core->next;
+		info = info->next;
 	}
-	spin_unlock_irqrestore(&topology_lock, flags);
 	if (cpus_empty(mask))
 		mask = cpumask_of_cpu(cpu);
 	return mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
-	return &cpu_core_map[cpu];
-}
-
-static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
+			     struct mask_info *core)
 {
 	unsigned int cpu;
 
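After this hunk one helper serves both levels of the hierarchy: cpu_group_map() walks whichever mask_info list it is handed (core_info, or book_info under CONFIG_SCHED_BOOK) and returns the mask of the group containing the CPU. The spinlock also moves out of the lookup; the caller now takes topology_lock around the whole loop instead (see update_cpu_core_map() below). A minimal userspace sketch of the lookup, using a hypothetical 64-bit bitmask in place of cpumask_t:

	#include <stdio.h>

	/* Hypothetical stand-in for cpumask_t: one bit per CPU, up to 64 CPUs. */
	typedef unsigned long long mask_t;

	struct mask_info {
		struct mask_info *next;
		unsigned char id;
		mask_t mask;
	};

	/*
	 * Walk one level's list and return the mask of the group holding cpu,
	 * falling back to a singleton mask, as cpu_group_map() does.
	 */
	static mask_t group_map(struct mask_info *info, unsigned int cpu)
	{
		while (info) {
			if (info->mask & (1ULL << cpu))
				return info->mask;
			info = info->next;
		}
		return 1ULL << cpu;
	}

	int main(void)
	{
		struct mask_info core1 = { NULL, 1, 0xf0 };   /* CPUs 4-7 */
		struct mask_info core0 = { &core1, 0, 0x0f }; /* CPUs 0-3 */

		printf("cpu 5 -> group mask %#llx\n", group_map(&core0, 5));
		printf("cpu 9 -> group mask %#llx\n", group_map(&core0, 9));
		return 0;
	}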
@@ -117,23 +115,35 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 
 		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
 		for_each_present_cpu(lcpu) {
-			if (cpu_logical_map(lcpu) == rcpu) {
-				cpu_set(lcpu, core->mask);
-				cpu_core_id[lcpu] = core->id;
-				smp_cpu_polarization[lcpu] = tl_cpu->pp;
-			}
+			if (cpu_logical_map(lcpu) != rcpu)
+				continue;
+#ifdef CONFIG_SCHED_BOOK
+			cpu_set(lcpu, book->mask);
+			cpu_book_id[lcpu] = book->id;
+#endif
+			cpu_set(lcpu, core->mask);
+			cpu_core_id[lcpu] = core->id;
+			smp_cpu_polarization[lcpu] = tl_cpu->pp;
 		}
 	}
 }
 
-static void clear_cores(void)
+static void clear_masks(void)
 {
-	struct core_info *core = &core_info;
+	struct mask_info *info;
 
-	while (core) {
-		cpus_clear(core->mask);
-		core = core->next;
+	info = &core_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
+	}
+#ifdef CONFIG_SCHED_BOOK
+	info = &book_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
 	}
+#endif
 }
 
 static union tl_entry *next_tle(union tl_entry *tle)
@@ -146,29 +156,36 @@ static union tl_entry *next_tle(union tl_entry *tle)
 
 static void tl_to_cores(struct tl_info *info)
 {
+#ifdef CONFIG_SCHED_BOOK
+	struct mask_info *book = &book_info;
+#else
+	struct mask_info *book = NULL;
+#endif
+	struct mask_info *core = &core_info;
 	union tl_entry *tle, *end;
-	struct core_info *core = &core_info;
+
 
 	spin_lock_irq(&topology_lock);
-	clear_cores();
+	clear_masks();
 	tle = info->tle;
 	end = (union tl_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
 		switch (tle->nl) {
-		case 5:
-		case 4:
-		case 3:
+#ifdef CONFIG_SCHED_BOOK
 		case 2:
+			book = book->next;
+			book->id = tle->container.id;
 			break;
+#endif
 		case 1:
 			core = core->next;
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_core(&tle->cpu, core);
+			add_cpus_to_mask(&tle->cpu, book, core);
 			break;
 		default:
-			clear_cores();
+			clear_masks();
 			machine_has_topology = 0;
 			goto out;
 		}
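tl_to_cores() consumes the topology entry stream top-down: with CONFIG_SCHED_BOOK, a level-2 entry advances the book list, a level-1 entry advances the core list, and a level-0 entry attaches its CPUs to the current book and core; before this patch, levels 2 through 5 were simply ignored. A standalone sketch of that state machine over a made-up entry stream (the real union tl_entry layout comes from the hardware and is not reproduced here):

	#include <stdio.h>

	/* Hypothetical flattened entry: nesting level plus container (or CPU) id. */
	struct entry {
		int nl;
		int id;
	};

	int main(void)
	{
		/* One book holding two cores with two CPUs each. */
		struct entry stream[] = {
			{ 2, 0 }, { 1, 0 }, { 0, 0 }, { 0, 1 },
			{ 1, 1 }, { 0, 2 }, { 0, 3 },
		};
		int book = -1, core = -1;
		size_t i;

		for (i = 0; i < sizeof(stream) / sizeof(stream[0]); i++) {
			switch (stream[i].nl) {
			case 2:		/* book container: advance the book list */
				book = stream[i].id;
				break;
			case 1:		/* core container: advance the core list */
				core = stream[i].id;
				break;
			case 0:		/* CPU entry: attach to current book and core */
				printf("cpu %d -> core %d, book %d\n",
				       stream[i].id, core, book);
				break;
			}
		}
		return 0;
	}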
@@ -221,10 +238,29 @@ int topology_set_cpu_management(int fc)
 
 static void update_cpu_core_map(void)
 {
+	unsigned long flags;
 	int cpu;
 
-	for_each_possible_cpu(cpu)
-		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+	spin_lock_irqsave(&topology_lock, flags);
+	for_each_possible_cpu(cpu) {
+		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
+#ifdef CONFIG_SCHED_BOOK
+		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+#endif
+	}
+	spin_unlock_irqrestore(&topology_lock, flags);
+}
+
+static void store_topology(struct tl_info *info)
+{
+#ifdef CONFIG_SCHED_BOOK
+	int rc;
+
+	rc = stsi(info, 15, 1, 3);
+	if (rc != -ENOSYS)
+		return;
+#endif
+	stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
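The new store_topology() helper prefers STSI 15.1.3, which reports one more nesting level (books) than 15.1.2, and falls back to 15.1.2 when the machine lacks it; in this kernel, stsi() signals an unsupported query with -ENOSYS. A hedged standalone model of the try-then-fall-back pattern (query() is a made-up stand-in for stsi()):

	#include <errno.h>
	#include <stdio.h>

	/* Made-up stand-in for stsi(): pretend only selector 2 is implemented. */
	static int query(int sel2)
	{
		return (sel2 == 2) ? 0 : -ENOSYS;
	}

	static void store_topology_model(void)
	{
		if (query(3) != -ENOSYS)	/* prefer the deeper 15.1.3 data */
			return;
		query(2);			/* otherwise settle for 15.1.2 */
	}

	int main(void)
	{
		store_topology_model();
		puts("selector 3 unsupported, fell back to selector 2");
		return 0;
	}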
@@ -238,7 +274,7 @@ int arch_update_cpu_topology(void)
 		topology_update_polarization_simple();
 		return 0;
 	}
-	stsi(info, 15, 1, 2);
+	store_topology(info);
 	tl_to_cores(info);
 	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
@@ -299,12 +335,24 @@ out:
 }
 __initcall(init_topology_update);
 
+static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+{
+	int i, nr_masks;
+
+	nr_masks = info->mag[NR_MAG - offset];
+	for (i = 0; i < info->mnest - offset; i++)
+		nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+	nr_masks = max(nr_masks, 1);
+	for (i = 0; i < nr_masks; i++) {
+		mask->next = alloc_bootmem(sizeof(struct mask_info));
+		mask = mask->next;
+	}
+}
+
 void __init s390_init_cpu_topology(void)
 {
 	unsigned long long facility_bits;
 	struct tl_info *info;
-	struct core_info *core;
-	int nr_cores;
 	int i;
 
 	if (stfle(&facility_bits, 1) <= 0)
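alloc_masks() sizes one list of mask_info nodes per nesting level: it starts from the magnitude at the requested level (offset 2 for cores, offset 3 for books) and multiplies in the magnitude of every level above it up to mnest, clamping the result to at least one node. Assuming, hypothetically, NR_MAG = 6, mnest = 3 and mag = {0, 0, 0, 4, 8, 4} (4 books of 8 cores of 4 CPUs each), offset 2 yields 8 * 4 = 32 core masks and offset 3 yields 4 book masks; on an mnest = 2 machine the book product is 0 and the clamp keeps one node. The arithmetic in isolation:

	#include <stdio.h>

	#define NR_MAG 6

	/* Same product as alloc_masks(), minus the bootmem allocation. */
	static int nr_masks(const int *mag, int mnest, int offset)
	{
		int i, n;

		n = mag[NR_MAG - offset];
		for (i = 0; i < mnest - offset; i++)
			n *= mag[NR_MAG - offset - 1 - i];
		return n > 1 ? n : 1;	/* clamp, as max(nr_masks, 1) does */
	}

	int main(void)
	{
		int mag[NR_MAG] = { 0, 0, 0, 4, 8, 4 }; /* hypothetical machine */

		printf("core masks: %d\n", nr_masks(mag, 3, 2)); /* 8 * 4 = 32 */
		printf("book masks: %d\n", nr_masks(mag, 3, 3)); /* 4 */
		return 0;
	}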
@@ -315,25 +363,13 @@ void __init s390_init_cpu_topology(void)
 
 	tl_info = alloc_bootmem_pages(PAGE_SIZE);
 	info = tl_info;
-	stsi(info, 15, 1, 2);
-
-	nr_cores = info->mag[NR_MAG - 2];
-	for (i = 0; i < info->mnest - 2; i++)
-		nr_cores *= info->mag[NR_MAG - 3 - i];
-
+	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
 	for (i = 0; i < NR_MAG; i++)
 		printk(" %d", info->mag[i]);
 	printk(" / %d\n", info->mnest);
-
-	core = &core_info;
-	for (i = 0; i < nr_cores; i++) {
-		core->next = alloc_bootmem(sizeof(struct core_info));
-		core = core->next;
-		if (!core)
-			goto error;
-	}
-	return;
-error:
-	machine_has_topology = 0;
+	alloc_masks(info, &core_info, 2);
+#ifdef CONFIG_SCHED_BOOK
+	alloc_masks(info, &book_info, 3);
+#endif
 }
