author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 15:55:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-21 15:55:43 -0400
commit		bc4016f48161454a9a8e5eb209b0693c6cde9f62 (patch)
tree		f470f5d711e975b152eec90282f5dd30a1d5dba5 /arch/s390
parent		5d70f79b5ef6ea2de4f72a37b2d96e2601e40a22 (diff)
parent		b7dadc38797584f6203386da1947ed5edf516646 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (29 commits)
sched: Export account_system_vtime()
sched: Call tick_check_idle before __irq_enter
sched: Remove irq time from available CPU power
sched: Do not account irq time to current task
x86: Add IRQ_TIME_ACCOUNTING
sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time
sched: Add a PF flag for ksoftirqd identification
sched: Consolidate account_system_vtime extern declaration
sched: Fix softirq time accounting
sched: Drop group_capacity to 1 only if local group has extra capacity
sched: Force balancing on newidle balance if local group has capacity
sched: Set group_imb only if a task can be pulled from the busiest cpu
sched: Do not consider SCHED_IDLE tasks to be cache hot
sched: Drop all load weight manipulation for RT tasks
sched: Create special class for stop/migrate work
sched: Unindent labels
sched: Comment updates: fix default latency and granularity numbers
tracing/sched: Add sched_pi_setprio tracepoint
sched: Give CPU bound RT tasks preference
sched: Try not to migrate higher priority RT tasks
...
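
Several of the entries above form one series: irq time is sampled via account_system_vtime() at hard-irq entry and exit, kept out of the interrupted task's accounting, and subtracted from the CPU power seen by the load balancer. A minimal sketch of the entry/exit pairing those commits rely on (illustrative only, not the literal kernel/softirq.c code):

	#include <linux/hardirq.h>
	#include <linux/sched.h>

	/* Sketch: account_system_vtime(current) timestamps the transition
	 * in and out of hard-irq context; with IRQ_TIME_ACCOUNTING the
	 * delta is accumulated as per-cpu irq time. */
	static void irq_enter_sketch(void)
	{
		account_system_vtime(current);	/* irq window opens */
		/* preempt count then gains HARDIRQ_OFFSET, etc. */
	}

	static void irq_exit_sketch(void)
	{
		account_system_vtime(current);	/* irq window closes */
		/* pending softirqs may then run */
	}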
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/Kconfig			  7
-rw-r--r--	arch/s390/include/asm/system.h		  1
-rw-r--r--	arch/s390/include/asm/topology.h	 27
-rw-r--r--	arch/s390/kernel/topology.c		150
4 files changed, 123 insertions(+), 62 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 958f0dadeadf..75976a141947 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -199,6 +199,13 @@ config HOTPLUG_CPU
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
+config SCHED_BOOK
+	bool "Book scheduler support"
+	depends on SMP
+	help
+	  Book scheduler support improves the CPU scheduler's decision making
+	  when dealing with machines that have several books.
+
 config MATHEMU
 	bool "IEEE FPU emulation"
 	depends on MARCH_G5
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index cef66210c846..38ddd8a9a9e8 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 extern void account_vtime(struct task_struct *, struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
-extern void account_system_vtime(struct task_struct *);
 
 #ifdef CONFIG_PFAULT
 extern void pfault_irq_init(void);
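
The deleted extern is the s390 half of "sched: Consolidate account_system_vtime extern declaration" in the log above: the per-arch copies of the prototype give way to a single declaration in a common header (linux/hardirq.h in this series). The prototype itself is unchanged:

	/* Now declared once in a shared header rather than in each
	 * architecture's <asm/system.h>: */
	extern void account_system_vtime(struct task_struct *tsk);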
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 831bd033ea77..051107a2c5e2 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -3,15 +3,32 @@
 
 #include <linux/cpumask.h>
 
-#define mc_capable()	(1)
-
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
-
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
+static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
 #define topology_core_id(cpu)		(cpu_core_id[cpu])
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
+#define mc_capable()			(1)
+
+#ifdef CONFIG_SCHED_BOOK
+
+extern unsigned char cpu_book_id[NR_CPUS];
+extern cpumask_t cpu_book_map[NR_CPUS];
+
+static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+{
+	return &cpu_book_map[cpu];
+}
+
+#define topology_book_id(cpu)		(cpu_book_id[cpu])
+#define topology_book_cpumask(cpu)	(&cpu_book_map[cpu])
+
+#endif /* CONFIG_SCHED_BOOK */
 
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -30,6 +47,8 @@ static inline void s390_init_cpu_topology(void)
 };
 #endif
 
+#define SD_BOOK_INIT	SD_CPU_INIT
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
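
cpu_coregroup_mask() becomes a trivial inline over cpu_core_map[], and the same accessor pattern is duplicated for the new book level; SD_BOOK_INIT reuses SD_CPU_INIT, so the book-level sched domain starts from the same balancing parameters as the CPU level. A hypothetical consumer of the new accessors (report_topology() is invented here, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <asm/topology.h>

	/* Hypothetical helper: log each topology level the header exposes. */
	static void report_topology(unsigned int cpu)
	{
		pr_info("CPU%u: core %d, %u core siblings\n", cpu,
			topology_core_id(cpu),
			cpumask_weight(cpu_coregroup_mask(cpu)));
	#ifdef CONFIG_SCHED_BOOK
		pr_info("CPU%u: book %d, %u book siblings\n", cpu,
			topology_book_id(cpu),
			cpumask_weight(cpu_book_mask(cpu)));
	#endif
	}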
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bcef00766a64..13559c993847 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -57,8 +57,8 @@ struct tl_info {
 	union tl_entry tle[0];
 };
 
-struct core_info {
-	struct core_info *next;
+struct mask_info {
+	struct mask_info *next;
 	unsigned char id;
 	cpumask_t mask;
 };
@@ -66,7 +66,6 @@ struct core_info {
 static int topology_enabled;
 static void topology_work_fn(struct work_struct *work);
 static struct tl_info *tl_info;
-static struct core_info core_info;
 static int machine_has_topology;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
@@ -74,38 +73,37 @@ static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
 static DEFINE_SPINLOCK(topology_lock);
 
+static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+#ifdef CONFIG_SCHED_BOOK
+static struct mask_info book_info;
+cpumask_t cpu_book_map[NR_CPUS];
+unsigned char cpu_book_id[NR_CPUS];
+#endif
+
+static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
-	struct core_info *core = &core_info;
-	unsigned long flags;
 	cpumask_t mask;
 
 	cpus_clear(mask);
 	if (!topology_enabled || !machine_has_topology)
 		return cpu_possible_map;
-	spin_lock_irqsave(&topology_lock, flags);
-	while (core) {
-		if (cpu_isset(cpu, core->mask)) {
-			mask = core->mask;
+	while (info) {
+		if (cpu_isset(cpu, info->mask)) {
+			mask = info->mask;
 			break;
 		}
-		core = core->next;
+		info = info->next;
 	}
-	spin_unlock_irqrestore(&topology_lock, flags);
 	if (cpus_empty(mask))
 		mask = cpumask_of_cpu(cpu);
 	return mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
-	return &cpu_core_map[cpu];
-}
-
-static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
+			     struct mask_info *core)
 {
 	unsigned int cpu;
 
@@ -117,23 +115,35 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 
 		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
 		for_each_present_cpu(lcpu) {
-			if (cpu_logical_map(lcpu) == rcpu) {
-				cpu_set(lcpu, core->mask);
-				cpu_core_id[lcpu] = core->id;
-				smp_cpu_polarization[lcpu] = tl_cpu->pp;
-			}
+			if (cpu_logical_map(lcpu) != rcpu)
+				continue;
+#ifdef CONFIG_SCHED_BOOK
+			cpu_set(lcpu, book->mask);
+			cpu_book_id[lcpu] = book->id;
+#endif
+			cpu_set(lcpu, core->mask);
+			cpu_core_id[lcpu] = core->id;
+			smp_cpu_polarization[lcpu] = tl_cpu->pp;
 		}
 	}
 }
 
-static void clear_cores(void)
+static void clear_masks(void)
 {
-	struct core_info *core = &core_info;
+	struct mask_info *info;
 
-	while (core) {
-		cpus_clear(core->mask);
-		core = core->next;
+	info = &core_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
+	}
+#ifdef CONFIG_SCHED_BOOK
+	info = &book_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
 	}
+#endif
 }
 
 static union tl_entry *next_tle(union tl_entry *tle)
@@ -146,29 +156,36 @@ static union tl_entry *next_tle(union tl_entry *tle)
 
 static void tl_to_cores(struct tl_info *info)
 {
+#ifdef CONFIG_SCHED_BOOK
+	struct mask_info *book = &book_info;
+#else
+	struct mask_info *book = NULL;
+#endif
+	struct mask_info *core = &core_info;
 	union tl_entry *tle, *end;
-	struct core_info *core = &core_info;
+
 
 	spin_lock_irq(&topology_lock);
-	clear_cores();
+	clear_masks();
 	tle = info->tle;
 	end = (union tl_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
 		switch (tle->nl) {
-		case 5:
-		case 4:
-		case 3:
+#ifdef CONFIG_SCHED_BOOK
 		case 2:
+			book = book->next;
+			book->id = tle->container.id;
 			break;
+#endif
 		case 1:
 			core = core->next;
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_core(&tle->cpu, core);
+			add_cpus_to_mask(&tle->cpu, book, core);
 			break;
 		default:
-			clear_cores();
+			clear_masks();
 			machine_has_topology = 0;
 			goto out;
 		}
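
tl_to_cores() consumes the STSI topology list in order, keyed by each entry's nesting level (nl): with CONFIG_SCHED_BOOK, nl == 2 opens a new book container, nl == 1 a new core container, and nl == 0 attaches CPUs to whichever book and core are currently open; anything deeper now lands in the default case and disables topology support instead of being silently skipped. An invented entry stream makes the walk concrete:

	/*
	 * Illustrative STSI entry stream (values invented):
	 *   nl=2 id=0     -> open book 0
	 *   nl=1 id=0     -> open core 0 inside book 0
	 *   nl=0 cpus=... -> CPUs join book 0 / core 0
	 *   nl=1 id=1     -> open core 1, still book 0
	 *   nl=0 cpus=... -> CPUs join book 0 / core 1
	 *   nl=2 id=1     -> open book 1, and so on
	 */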
@@ -221,10 +238,29 @@ int topology_set_cpu_management(int fc)
 
 static void update_cpu_core_map(void)
 {
+	unsigned long flags;
 	int cpu;
 
-	for_each_possible_cpu(cpu)
-		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+	spin_lock_irqsave(&topology_lock, flags);
+	for_each_possible_cpu(cpu) {
+		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
+#ifdef CONFIG_SCHED_BOOK
+		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+#endif
+	}
+	spin_unlock_irqrestore(&topology_lock, flags);
+}
+
+static void store_topology(struct tl_info *info)
+{
+#ifdef CONFIG_SCHED_BOOK
+	int rc;
+
+	rc = stsi(info, 15, 1, 3);
+	if (rc != -ENOSYS)
+		return;
+#endif
+	stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
@@ -238,7 +274,7 @@ int arch_update_cpu_topology(void)
 		topology_update_polarization_simple();
 		return 0;
 	}
-	stsi(info, 15, 1, 2);
+	store_topology(info);
 	tl_to_cores(info);
 	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
@@ -299,12 +335,24 @@ out:
 }
 __initcall(init_topology_update);
 
+static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+{
+	int i, nr_masks;
+
+	nr_masks = info->mag[NR_MAG - offset];
+	for (i = 0; i < info->mnest - offset; i++)
+		nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+	nr_masks = max(nr_masks, 1);
+	for (i = 0; i < nr_masks; i++) {
+		mask->next = alloc_bootmem(sizeof(struct mask_info));
+		mask = mask->next;
+	}
+}
+
 void __init s390_init_cpu_topology(void)
 {
 	unsigned long long facility_bits;
 	struct tl_info *info;
-	struct core_info *core;
-	int nr_cores;
 	int i;
 
 	if (stfle(&facility_bits, 1) <= 0)
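
alloc_masks() sizes each linked list from the machine's magnitude vector: it starts at the requested nesting level (mag[NR_MAG - offset]) and multiplies in every populated level above it, so offset 2 yields an upper bound on cores and offset 3 on books. A worked example with invented numbers, assuming NR_MAG == 6 as defined earlier in this file:

	/*
	 * Suppose mnest == 3 and mag[] == { 0, 0, 0, 2, 8, 4 }:
	 * 2 books, 8 cores per book, 4 CPUs per core.
	 *
	 * alloc_masks(info, &core_info, 2):
	 *   nr_masks = mag[4] = 8, then 8 * mag[3] = 16 core masks
	 * alloc_masks(info, &book_info, 3):
	 *   nr_masks = mag[3] = 2, loop runs mnest - 3 == 0 times
	 *   -> 2 book masks
	 */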
@@ -315,25 +363,13 @@ void __init s390_init_cpu_topology(void)
 
 	tl_info = alloc_bootmem_pages(PAGE_SIZE);
 	info = tl_info;
-	stsi(info, 15, 1, 2);
-
-	nr_cores = info->mag[NR_MAG - 2];
-	for (i = 0; i < info->mnest - 2; i++)
-		nr_cores *= info->mag[NR_MAG - 3 - i];
-
+	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
 	for (i = 0; i < NR_MAG; i++)
 		printk(" %d", info->mag[i]);
 	printk(" / %d\n", info->mnest);
-
-	core = &core_info;
-	for (i = 0; i < nr_cores; i++) {
-		core->next = alloc_bootmem(sizeof(struct core_info));
-		core = core->next;
-		if (!core)
-			goto error;
-	}
-	return;
-error:
-	machine_has_topology = 0;
+	alloc_masks(info, &core_info, 2);
+#ifdef CONFIG_SCHED_BOOK
+	alloc_masks(info, &book_info, 3);
+#endif
 }