Diffstat (limited to 'arch')

 arch/ia64/kernel/Makefile  |  1 +
 arch/ia64/kernel/acpi.c    |  4 ++-
 arch/ia64/kernel/numa.c    | 57 +++++++++++
 arch/ia64/kernel/smpboot.c | 41 ----------
 arch/ia64/mm/discontig.c   | 72 ++++++------
 5 files changed, 110 insertions(+), 65 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b2e2f6509eb0..e1fb68ddec26 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o
 obj-$(CONFIG_IOSAPIC) += iosapic.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
+obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index cda06f88c66e..542256e98e60 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -640,9 +640,11 @@ acpi_boot_init (void)
 		if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
 			node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
 	}
-	build_cpu_to_node_map();
 # endif
 #endif
+#ifdef CONFIG_ACPI_NUMA
+	build_cpu_to_node_map();
+#endif
 	/* Make boot-up look pretty */
 	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
 	return 0;
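Note on the hunk above: the old call site was nested under both CONFIG_SMP and CONFIG_ACPI_NUMA, so a uniprocessor NUMA kernel never built the CPU-to-node map. Hoisting the call into its own #ifdef CONFIG_ACPI_NUMA block makes it run for UP and SMP builds alike. A standalone sketch of that effect in plain C follows; the CONFIG_* macros here are hand-toggled stand-ins for illustration, not real Kconfig output.

/* Sketch: with the old nesting, a UP NUMA configuration (ACPI_NUMA=y,
 * SMP=n) never reached the map setup; the hoisted call always does. */
#include <stdio.h>

#define CONFIG_ACPI_NUMA 1
/* #define CONFIG_SMP 1 */		/* uniprocessor NUMA build */

int main(void)
{
	int built_old = 0, built_new = 0;

#if defined(CONFIG_SMP) && defined(CONFIG_ACPI_NUMA)
	built_old = 1;			/* old call site (nested)  */
#endif
#ifdef CONFIG_ACPI_NUMA
	built_new = 1;			/* new call site (hoisted) */
#endif
	printf("old placement builds the map: %d, new placement: %d\n",
	       built_old, built_new);	/* prints 0, 1 on UP NUMA  */
	return 0;
}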
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
new file mode 100644
index 000000000000..a68ce6678092
--- /dev/null
+++ b/arch/ia64/kernel/numa.c
@@ -0,0 +1,57 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ia64 kernel NUMA specific stuff
+ *
+ * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ *   Jesse Barnes <jbarnes@sgi.com>
+ */
+#include <linux/config.h>
+#include <linux/topology.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <asm/smp.h>
+
+u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_to_node_map);
+
+cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+
+/**
+ * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
+ *
+ * Build cpu to node mapping and initialize the per node cpu masks using
+ * info from the node_cpuid array handed to us by ACPI.
+ */
+void __init build_cpu_to_node_map(void)
+{
+	int cpu, i, node;
+
+	for(node=0; node < MAX_NUMNODES; node++)
+		cpus_clear(node_to_cpu_mask[node]);
+
+	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+		node = -1;
+		for (i = 0; i < NR_CPUS; ++i)
+			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
+				node = node_cpuid[i].nid;
+				break;
+			}
+		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
+		if (node >= 0)
+			cpu_set(cpu, node_to_cpu_mask[node]);
+	}
+}
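For orientation, not part of the patch: these two arrays are what the ia64 topology macros read. A hedged sketch of a consumer follows, assuming the definitions in include/asm-ia64/topology.h of the same era, where cpu_to_node(cpu) expands to cpu_to_node_map[cpu] and node_to_cpumask(nid) to node_to_cpu_mask[nid]; cpus_sharing_node() itself is a hypothetical helper.

/* Hedged sketch of a consumer of the arrays built above, under the
 * assumptions stated in the lead-in (era topology.h macro expansions). */
#include <linux/topology.h>
#include <linux/cpumask.h>

static int cpus_sharing_node(int cpu)
{
	int nid = cpu_to_node(cpu);		/* cpu_to_node_map[cpu]  */
	cpumask_t peers = node_to_cpumask(nid);	/* node_to_cpu_mask[nid] */

	return cpus_weight(peers);		/* CPUs on that node     */
}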
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 623b0a546709..7d72c0d872b3 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -525,47 +525,6 @@ smp_build_cpu_map (void)
 	}
 }
 
-#ifdef CONFIG_NUMA
-
-/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
-u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_to_node_map);
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-
-/*
- * Build cpu to node mapping and initialize the per node cpu masks.
- */
-void __init
-build_cpu_to_node_map (void)
-{
-	int cpu, i, node;
-
-	for(node=0; node<MAX_NUMNODES; node++)
-		cpus_clear(node_to_cpu_mask[node]);
-	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
-		/*
-		 * All Itanium NUMA platforms I know use ACPI, so maybe we
-		 * can drop this ifdef completely. [EF]
-		 */
-#ifdef CONFIG_ACPI_NUMA
-		node = -1;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
-				node = node_cpuid[i].nid;
-				break;
-			}
-#else
-# error Fixme: Dunno how to build CPU-to-node map.
-#endif
-		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
-		if (node >= 0)
-			cpu_set(cpu, node_to_cpu_mask[node]);
-	}
-}
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Cycle through the APs sending Wakeup IPIs to boot each.
  */
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 54136fd00202..b5c90e548195 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -126,6 +126,33 @@ static unsigned long __init compute_pernodesize(int node)
 }
 
 /**
+ * per_cpu_node_setup - setup per-cpu areas on each node
+ * @cpu_data: per-cpu area on this node
+ * @node: node to setup
+ *
+ * Copy the static per-cpu data into the region we just set aside and then
+ * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
+ * the end of the area.
+ */
+static void *per_cpu_node_setup(void *cpu_data, int node)
+{
+#ifdef CONFIG_SMP
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (node == node_cpuid[cpu].nid) {
+			memcpy(__va(cpu_data), __phys_per_cpu_start,
+			       __per_cpu_end - __per_cpu_start);
+			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
+				__per_cpu_start;
+			cpu_data += PERCPU_PAGE_SIZE;
+		}
+	}
+#endif
+	return cpu_data;
+}
+
+/**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
  * @pernode: physical address of pernode data
@@ -135,7 +162,7 @@ static void __init fill_pernode(int node, unsigned long pernode,
 			       unsigned long pernodesize)
 {
 	void *cpu_data;
-	int cpus = early_nr_cpus_node(node), cpu;
+	int cpus = early_nr_cpus_node(node);
 	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
 
 	mem_data[node].pernode_addr = pernode;
@@ -155,23 +182,11 @@ static void __init fill_pernode(int node, unsigned long pernode,
 	mem_data[node].pgdat->bdata = bdp;
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
-	/*
-	 * Copy the static per-cpu data into the region we
-	 * just set aside and then setup __per_cpu_offset
-	 * for each CPU on this node.
-	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
-	}
+	cpu_data = per_cpu_node_setup(cpu_data, node);
 
 	return;
 }
+
 /**
  * find_pernode_space - allocate memory for memory map and per-node structures
  * @start: physical start of range
@@ -300,8 +315,8 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-	int cpu, node;
 	pg_data_t *pgdat_list[MAX_NUMNODES];
+	int cpu, node;
 
 	for_each_online_node(node)
 		pgdat_list[node] = mem_data[node].pgdat;
@@ -311,12 +326,22 @@ static void __init initialize_pernode_data(void)
 		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
 		       sizeof(pgdat_list));
 	}
-
+#ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
+#else
+	{
+		struct cpuinfo_ia64 *cpu0_cpu_info;
+		cpu = 0;
+		node = node_cpuid[cpu].nid;
+		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
+			((char *)&per_cpu__cpu_info - __per_cpu_start));
+		cpu0_cpu_info->node_data = mem_data[node].node_data;
+	}
+#endif /* CONFIG_SMP */
 }
 
 /**
@@ -461,6 +486,7 @@ void __init find_memory(void)
 	find_initrd();
 }
 
+#ifdef CONFIG_SMP
 /**
  * per_cpu_init - setup per-cpu variables
 *
@@ -471,15 +497,15 @@ void *per_cpu_init(void)
 {
 	int cpu;
 
-	if (smp_processor_id() == 0) {
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			per_cpu(local_per_cpu_offset, cpu) =
-				__per_cpu_offset[cpu];
-		}
-	}
+	if (smp_processor_id() != 0)
+		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
+#endif /* CONFIG_SMP */
 
 /**
  * show_mem - give short summary of memory stats
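Closing note on the offset arithmetic that per_cpu_node_setup() establishes above: __per_cpu_offset[cpu] records the delta from the linked per-cpu template (__per_cpu_start) to that CPU's node-local copy, and a per_cpu() access resolves by adding the delta to the template address. A standalone userspace illustration of the same trick follows; every name in it is hypothetical.

/* Userspace illustration of the offset scheme used by per_cpu_node_setup():
 * copy a template once per consumer and record base deltas so one symbolic
 * address reaches every copy.  The cross-object pointer subtraction mirrors
 * the kernel's trick; it works in practice but is not strict ISO C. */
#include <stdio.h>
#include <string.h>

#define NCPU 2

static char template_area[64];		/* stands in for __per_cpu_start  */
static char node_local[NCPU][64];	/* stands in for per-node memory  */
static long per_cpu_offset[NCPU];	/* stands in for __per_cpu_offset */

int main(void)
{
	strcpy(template_area, "template");

	for (int cpu = 0; cpu < NCPU; cpu++) {
		/* per_cpu_node_setup(): copy the template, record the delta */
		memcpy(node_local[cpu], template_area, sizeof(template_area));
		per_cpu_offset[cpu] = node_local[cpu] - template_area;
		snprintf(node_local[cpu], sizeof(node_local[cpu]),
			 "cpu %d copy", cpu);
	}
	/* per_cpu(var, cpu): template address + that cpu's recorded delta */
	for (int cpu = 0; cpu < NCPU; cpu++)
		printf("%s\n", template_area + per_cpu_offset[cpu]);
	return 0;
}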