diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-22 21:27:32 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-22 21:27:32 -0400 |
commit | d79ee93de909dfb252279b9a95978bbda9a814a9 (patch) | |
tree | bfccca60fd36259ff4bcc5e78a2c272fbd680065 /arch/x86/mm | |
parent | 2ff2b289a695807e291e1ed9f639d8a3ba5f4254 (diff) | |
parent | 1c2927f18576d65631d8e0ddd19e1d023183222e (diff) |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"The biggest change is the cleanup/simplification of the load-balancer:
instead of the current practice of architectures twiddling scheduler
internal data structures and providing the scheduler domains in
colorfully inconsistent ways, we now have generic scheduler code in
kernel/sched/core.c:sched_init_numa() that looks at the architecture's
node_distance() parameters and (while not fully trusting it) deduces a
NUMA topology from it.
This inevitably changes balancing behavior - hopefully for the better.
There are various smaller optimizations, cleanups and fixlets as well"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Taint kernel with TAINT_WARN after sleep-in-atomic bug
sched: Remove stale power aware scheduling remnants and dysfunctional knobs
sched/debug: Fix printing large integers on 32-bit platforms
sched/fair: Improve the ->group_imb logic
sched/nohz: Fix rq->cpu_load[] calculations
sched/numa: Don't scale the imbalance
sched/fair: Revert sched-domain iteration breakage
sched/x86: Rewrite set_cpu_sibling_map()
sched/numa: Fix the new NUMA topology bits
sched/numa: Rewrite the CONFIG_NUMA sched domain support
sched/fair: Propagate 'struct lb_env' usage into find_busiest_group
sched/fair: Add some serialization to the sched_domain load-balance walk
sched/fair: Let minimally loaded cpu balance the group
sched: Change rq->nr_running to unsigned int
x86/numa: Check for nonsensical topologies on real hw as well
x86/numa: Hard partition cpu topology masks on node boundaries
x86/numa: Allow specifying node_distance() for numa=fake
x86/sched: Make mwait_usable() heed to "idle=" kernel parameters properly
sched: Update documentation and comments
sched_rt: Avoid unnecessary dequeue and enqueue of pushable tasks in set_cpus_allowed_rt()
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/numa_emulation.c | 8 |
1 file changed, 6 insertions, 2 deletions
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 53489ff6bf82..871dd8868170 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c | |||
@@ -339,9 +339,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) | |||
339 | } else { | 339 | } else { |
340 | unsigned long n; | 340 | unsigned long n; |
341 | 341 | ||
342 | n = simple_strtoul(emu_cmdline, NULL, 0); | 342 | n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); |
343 | ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); | 343 | ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); |
344 | } | 344 | } |
345 | if (*emu_cmdline == ':') | ||
346 | emu_cmdline++; | ||
345 | 347 | ||
346 | if (ret < 0) | 348 | if (ret < 0) |
347 | goto no_emu; | 349 | goto no_emu; |
@@ -418,7 +420,9 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) | |||
418 | int physj = emu_nid_to_phys[j]; | 420 | int physj = emu_nid_to_phys[j]; |
419 | int dist; | 421 | int dist; |
420 | 422 | ||
421 | if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) | 423 | if (get_option(&emu_cmdline, &dist) == 2) |
424 | ; | ||
425 | else if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) | ||
422 | dist = physi == physj ? | 426 | dist = physi == physj ? |
423 | LOCAL_DISTANCE : REMOTE_DISTANCE; | 427 | LOCAL_DISTANCE : REMOTE_DISTANCE; |
424 | else | 428 | else |