author    Mike Travis <travis@sgi.com>  2008-05-12 15:21:12 -0400
committer Ingo Molnar <mingo@elte.hu>   2008-07-08 05:31:20 -0400
commit    23ca4bba3e20c6c3cb11c1bb0ab4770b724d39ac (patch)
tree      39ba5f7705e48717d7a6f2621b8ca7e7015c9802 /arch/x86/mm/numa_64.c
parent    1184dc2ffe2c8fb9afb766d870850f2c3165ef25 (diff)
x86: cleanup early per cpu variables/accesses v4
* Introduce a new PER_CPU macro called "EARLY_PER_CPU". This is
used by per_cpu variables that are initialized and accessed
before the per_cpu areas are allocated.
["Early" with respect to per_cpu variables means "before the
per_cpu areas have been set up".]
This patchset adds these new macros:
	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)
	EXPORT_EARLY_PER_CPU_SYMBOL(_name)
	DECLARE_EARLY_PER_CPU(_type, _name)

	early_per_cpu_ptr(_name)
	early_per_cpu_map(_name, _idx)
	early_per_cpu(_name, _cpu)
The DEFINE macro defines the per_cpu variable as well as the early
map and pointer. It also initializes the per_cpu variable and map
elements to "_initvalue". The early_* macros provide access to
the initial map (usually set up during system init) and the early
pointer. This pointer is initialized to point to the early map
but is NULL'ed once the actual per_cpu areas are set up; from then
on, the per_cpu variable is the correct way to access the value.
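As a rough illustration, the sketch below shows how such macros could
be layered on the existing DEFINE_PER_CPU()/DECLARE_PER_CPU()
machinery. This is a hedged reconstruction from the description
above, not the patch's exact definitions (the section annotations in
particular are guesses):

	/*
	 * Hedged sketch of the EARLY_PER_CPU machinery.  Alongside the
	 * normal per_cpu variable, define a boot-time array indexed by
	 * cpu number plus a pointer to it; the pointer is cleared once
	 * the real per_cpu areas exist.
	 */
	#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
		DEFINE_PER_CPU(_type, _name) = _initvalue;		\
		__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
				{ [0 ... NR_CPUS-1] = _initvalue };	\
		__typeof__(_type) *_name##_early_ptr = _name##_early_map

	#define DECLARE_EARLY_PER_CPU(_type, _name)			\
		DECLARE_PER_CPU(_type, _name);				\
		extern __typeof__(_type) *_name##_early_ptr;		\
		extern __typeof__(_type)  _name##_early_map[]

	#define early_per_cpu_ptr(_name)	(_name##_early_ptr)
	#define early_per_cpu_map(_name, _idx)	(_name##_early_map[_idx])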
The early_per_cpu() macro is not very efficient, but it does show how
to access the variable from a function that can be called both
"early" and "late". It tests whether the early pointer is still
non-NULL; if so, the early map is still valid and is indexed
directly. Otherwise, the per_cpu variable is used instead:
	#define early_per_cpu(_name, _cpu)			\
		(early_per_cpu_ptr(_name) ?			\
			early_per_cpu_ptr(_name)[_cpu] :	\
			per_cpu(_name, _cpu))
A better method is to check the early pointer manually. In the
example below, numa_set_node() can be called both "early" and "late":
	void __cpuinit numa_set_node(int cpu, int node)
	{
		int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

		if (cpu_to_node_map)
			cpu_to_node_map[cpu] = node;
		else
			per_cpu(x86_cpu_to_node_map, cpu) = node;
	}
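The handoff itself, where the early pointer is NULL'ed once the real
per_cpu areas exist, would look roughly like the sketch below. This
is an illustrative reconstruction; the helper name is made up, and in
the patch the copy happens as part of the per_cpu area setup:

	/*
	 * Hypothetical helper: copy each early map entry into the
	 * corresponding per_cpu variable, then clear the early pointer
	 * so that later lookups fall through to per_cpu().
	 */
	static void __init copy_early_cpu_to_node_map(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);

		/* From here on, early_per_cpu_ptr() yields NULL. */
		early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
	}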
* Add a flag "arch_provides_topology_pointers" that indicates that
pointers to topology cpumask_t maps are available. Otherwise, use
the function returning the cpumask_t value. This is useful when the
cpumask_t set size is very large, to avoid copying data onto and off
of the stack (a standalone sketch follows this list).
* The coverage of CONFIG_DEBUG_PER_CPU_MAPS has been increased while
the non-debug case has been optimized a bit.
* Remove a compiler warning about an unreferenced variable in
drivers/base/topology.c
* Clean up #ifdef in setup.c
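To make the stack-copy concern behind "arch_provides_topology_pointers"
concrete, here is a standalone, hedged sketch in plain userspace C (all
names are stand-ins, not the kernel's) contrasting a by-value cpumask
accessor with a pointer accessor:

	#include <stdio.h>

	#define NR_CPUS 4096	/* a large-config example */

	/* Stand-in for the kernel's cpumask_t: one bit per cpu. */
	typedef struct {
		unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
	} cpumask_t;

	static cpumask_t cpu_core_map[2];	/* stand-in topology map */

	/* By-value accessor: every call copies the whole mask. */
	static cpumask_t coregroup_map_by_value(int cpu)
	{
		return cpu_core_map[cpu];
	}

	/* Pointer accessor: no copy, usable when the map is exported. */
	static const cpumask_t *coregroup_map_by_ptr(int cpu)
	{
		return &cpu_core_map[cpu];
	}

	int main(void)
	{
		cpumask_t v = coregroup_map_by_value(0);
		const cpumask_t *p = coregroup_map_by_ptr(0);

		/* On a typical 64-bit build: 512 bytes copied by value
		 * versus an 8-byte pointer. */
		printf("mask: %zu bytes, pointer: %zu bytes\n",
		       sizeof(v), sizeof(p));
		return 0;
	}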
For inclusion in the sched-devel/latest tree.
Based on:
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
+ sched-devel/latest .../mingo/linux-2.6-sched-devel.git
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/numa_64.c')
-rw-r--r--  arch/x86/mm/numa_64.c  43
1 file changed, 11 insertions, 32 deletions
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index c5066d519e5d..970f86775c41 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -31,16 +31,6 @@ bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
 struct memnode memnode;
 
-#ifdef CONFIG_SMP
-int x86_cpu_to_node_map_init[NR_CPUS] = {
-	[0 ... NR_CPUS-1] = NUMA_NO_NODE
-};
-void *x86_cpu_to_node_map_early_ptr;
-EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
-#endif
-DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
-EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
 s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
@@ -577,24 +567,6 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
 }
 
-__cpuinit void numa_add_cpu(int cpu)
-{
-	set_bit(cpu,
-		(unsigned long *)&node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-void __cpuinit numa_set_node(int cpu, int node)
-{
-	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
-
-	if(cpu_to_node_map)
-		cpu_to_node_map[cpu] = node;
-	else if(per_cpu_offset(cpu))
-		per_cpu(x86_cpu_to_node_map, cpu) = node;
-	else
-		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
-}
-
 unsigned long __init numa_free_all_bootmem(void)
 {
 	unsigned long pages = 0;
@@ -641,6 +613,7 @@ static __init int numa_setup(char *opt)
 }
 early_param("numa", numa_setup);
 
+#ifdef CONFIG_NUMA
 /*
  * Setup early cpu_to_node.
  *
@@ -652,14 +625,19 @@ early_param("numa", numa_setup);
  * is already initialized in a round robin manner at numa_init_array,
  * prior to this call, and this initialization is good enough
  * for the fake NUMA cases.
+ *
+ * Called before the per_cpu areas are setup.
  */
 void __init init_cpu_to_node(void)
 {
-	int i;
+	int cpu;
+	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	BUG_ON(cpu_to_apicid == NULL);
+
+	for_each_possible_cpu(cpu) {
 		int node;
-		u16 apicid = x86_cpu_to_apicid_init[i];
+		u16 apicid = cpu_to_apicid[cpu];
 
 		if (apicid == BAD_APICID)
 			continue;
@@ -668,8 +646,9 @@ void __init init_cpu_to_node(void)
 			continue;
 		if (!node_online(node))
 			continue;
-		numa_set_node(i, node);
+		numa_set_node(cpu, node);
 	}
 }
+#endif
 
 