author    Peter Zijlstra <peterz@infradead.org>    2017-08-10 11:10:26 -0400
committer Ingo Molnar <mingo@kernel.org>           2017-08-25 05:12:20 -0400
commit    bbdacdfed2f5fa50a2cc9f500a36e05990a0837d (patch)
tree      0ada5cb03b7133b9a42a456cede81e8e1b7a2ba9 /kernel/sched/debug.c
parent    09e0dd8e0f2e197690d34fed8cb4737114d3dd5f (diff)
sched/debug: Optimize sched_domain sysctl generation
Currently we unconditionally destroy all sysctl bits and regenerate
them after we've rebuilt the domains (even if that rebuild is a
no-op).
And since we unconditionally (re)build the sysctl tables for all
possible CPUs, onlining all CPUs takes O(n^2) time: each of the n
online operations regenerates the entries for all n possible CPUs.
Instead, change this to rebuild the entries only for CPUs on which
we've actually installed new domains.
Reported-by: Ofer Levi(SW) <oferle@mellanox.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--    kernel/sched/debug.c | 68
1 file changed, 54 insertions(+), 14 deletions(-)
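The heart of the change, before the full diff below: the per-CPU ctl_table allocations become persistent, and a cpumask records which CPUs' entries have gone stale, so each rebuild touches only those. A minimal sketch of that pattern follows; the names mark_dirty, regenerate_entries, and rebuild_entry are hypothetical, and only the cpumask calls are the real kernel API:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static cpumask_var_t dirty_cpus;	/* CPUs whose sysctl entries are stale */

/* One-time setup; zalloc starts with no CPU marked dirty. */
static int dirty_mask_init(void)
{
	return zalloc_cpumask_var(&dirty_cpus, GFP_KERNEL) ? 0 : -ENOMEM;
}

/* Cheap: called from the domain-rebuild path for each affected CPU. */
static void mark_dirty(int cpu)
{
	if (cpumask_available(dirty_cpus))
		__cpumask_set_cpu(cpu, dirty_cpus);
}

/* Regenerate stale entries only: O(dirty CPUs), not O(possible CPUs). */
static void regenerate_entries(void)
{
	int cpu;

	for_each_cpu(cpu, dirty_cpus) {
		rebuild_entry(cpu);		/* hypothetical per-CPU rebuild */
		__cpumask_clear_cpu(cpu, dirty_cpus);
	}
}

In the patch itself, mark_dirty corresponds to dirty_sched_domain_sysctl() and regenerate_entries to the for_each_cpu() loop inside register_sched_domain_sysctl().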
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index cfd84f79e075..4a23bbc3111b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -327,38 +327,78 @@ static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 	return table;
 }
 
+static cpumask_var_t sd_sysctl_cpus;
 static struct ctl_table_header *sd_sysctl_header;
+
 void register_sched_domain_sysctl(void)
 {
-	int i, cpu_num = num_possible_cpus();
-	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+	static struct ctl_table *cpu_entries;
+	static struct ctl_table **cpu_idx;
 	char buf[32];
+	int i;
 
-	WARN_ON(sd_ctl_dir[0].child);
-	sd_ctl_dir[0].child = entry;
+	if (!cpu_entries) {
+		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
+		if (!cpu_entries)
+			return;
 
-	if (entry == NULL)
-		return;
+		WARN_ON(sd_ctl_dir[0].child);
+		sd_ctl_dir[0].child = cpu_entries;
+	}
 
-	for_each_possible_cpu(i) {
-		snprintf(buf, 32, "cpu%d", i);
-		entry->procname = kstrdup(buf, GFP_KERNEL);
-		entry->mode = 0555;
-		entry->child = sd_alloc_ctl_cpu_table(i);
-		entry++;
+	if (!cpu_idx) {
+		struct ctl_table *e = cpu_entries;
+
+		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
+		if (!cpu_idx)
+			return;
+
+		/* deal with sparse possible map */
+		for_each_possible_cpu(i) {
+			cpu_idx[i] = e;
+			e++;
+		}
+	}
+
+	if (!cpumask_available(sd_sysctl_cpus)) {
+		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
+			return;
+
+		/* init to possible to not have holes in @cpu_entries */
+		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
+	}
+
+	for_each_cpu(i, sd_sysctl_cpus) {
+		struct ctl_table *e = cpu_idx[i];
+
+		if (e->child)
+			sd_free_ctl_entry(&e->child);
+
+		if (!e->procname) {
+			snprintf(buf, 32, "cpu%d", i);
+			e->procname = kstrdup(buf, GFP_KERNEL);
+		}
+		e->mode = 0555;
+		e->child = sd_alloc_ctl_cpu_table(i);
+
+		__cpumask_clear_cpu(i, sd_sysctl_cpus);
 	}
 
 	WARN_ON(sd_sysctl_header);
 	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 }
 
+void dirty_sched_domain_sysctl(int cpu)
+{
+	if (cpumask_available(sd_sysctl_cpus))
+		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
+}
+
 /* may be called multiple times per register */
 void unregister_sched_domain_sysctl(void)
 {
 	unregister_sysctl_table(sd_sysctl_header);
 	sd_sysctl_header = NULL;
-	if (sd_ctl_dir[0].child)
-		sd_free_ctl_entry(&sd_ctl_dir[0].child);
 }
 #endif /* CONFIG_SYSCTL */
 #endif /* CONFIG_SMP */
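For context: this view is limited to kernel/sched/debug.c, so the caller side of dirty_sched_domain_sysctl() is not shown; in the full commit it is hooked into the domain-attach path in kernel/sched/topology.c. An illustrative (not verbatim) sketch of the intended calling sequence, with a hypothetical rebuild helper:

/* Hypothetical rebuild path: mark changed CPUs, then re-register. */
static void rebuild_domains_and_sysctl(const struct cpumask *changed)
{
	int cpu;

	for_each_cpu(cpu, changed)
		dirty_sched_domain_sysctl(cpu);	/* flag stale entries */

	register_sched_domain_sysctl();		/* regenerates only flagged CPUs */
}

Because register_sched_domain_sysctl() clears each CPU from sd_sysctl_cpus as it regenerates it, repeated rebuilds that touch few CPUs do proportionally little work, which is exactly the O(n^2) fix described in the commit message.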