author	James Hogan <james.hogan@imgtec.com>	2012-10-05 11:56:56 -0400
committer	James Hogan <james.hogan@imgtec.com>	2013-03-02 15:09:51 -0500
commit	42682c6c42a5765b2c7cccfca170368fef6191ef (patch)
tree	adcfa5e96cd98527ee75fc541efc279357bbe6a2
parent	fdabf525b4b7aab3945c19eac39d3a65b68d0c4f (diff)
metag: SMP support
Add SMP support for metag. This allows Linux to take control of multiple
hardware threads on a single Meta core, treating them as separate Linux
CPUs.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
-rw-r--r--	arch/metag/include/asm/cachepart.h	|  42
-rw-r--r--	arch/metag/include/asm/core_reg.h	|  35
-rw-r--r--	arch/metag/include/asm/smp.h		|  29
-rw-r--r--	arch/metag/include/asm/topology.h	|  53
-rw-r--r--	arch/metag/kernel/cachepart.c		| 124
-rw-r--r--	arch/metag/kernel/core_reg.c		| 117
-rw-r--r--	arch/metag/kernel/head.S		|  12
-rw-r--r--	arch/metag/kernel/smp.c			| 575
-rw-r--r--	arch/metag/kernel/topology.c		|  77
9 files changed, 1064 insertions(+), 0 deletions(-)
diff --git a/arch/metag/include/asm/cachepart.h b/arch/metag/include/asm/cachepart.h
new file mode 100644
index 000000000000..cf6b44e916b5
--- /dev/null
+++ b/arch/metag/include/asm/cachepart.h
@@ -0,0 +1,42 @@
+/*
+ * Meta cache partition manipulation.
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ */
+
+#ifndef _METAG_CACHEPART_H_
+#define _METAG_CACHEPART_H_
+
+/**
+ * get_dcache_size() - Get size of data cache.
+ */
+unsigned int get_dcache_size(void);
+
+/**
+ * get_icache_size() - Get size of code cache.
+ */
+unsigned int get_icache_size(void);
+
+/**
+ * get_global_dcache_size() - Get the thread's global dcache.
+ *
+ * Returns the size of the current thread's global dcache partition.
+ */
+unsigned int get_global_dcache_size(void);
+
+/**
+ * get_global_icache_size() - Get the thread's global icache.
+ *
+ * Returns the size of the current thread's global icache partition.
+ */
+unsigned int get_global_icache_size(void);
+
+/**
+ * check_for_cache_aliasing() - Ensure that the bootloader has configured the
+ * dcache and icache properly to avoid aliasing
+ * @thread_id: Hardware thread ID
+ *
+ */
+void check_for_cache_aliasing(int thread_id);
+
+#endif
diff --git a/arch/metag/include/asm/core_reg.h b/arch/metag/include/asm/core_reg.h
new file mode 100644
index 000000000000..bdbc3a51f31c
--- /dev/null
+++ b/arch/metag/include/asm/core_reg.h
@@ -0,0 +1,35 @@
+#ifndef __ASM_METAG_CORE_REG_H_
+#define __ASM_METAG_CORE_REG_H_
+
+#include <asm/metag_regs.h>
+
+extern void core_reg_write(int unit, int reg, int thread, unsigned int val);
+extern unsigned int core_reg_read(int unit, int reg, int thread);
+
+/*
+ * These macros allow direct access from C to any register known to the
+ * assembler. Example candidates are TXTACTCYC, TXIDLECYC, and TXPRIVEXT.
+ */
+
+#define __core_reg_get(reg) ({						\
+	unsigned int __grvalue;						\
+	asm volatile("MOV %0," #reg					\
+		     : "=r" (__grvalue));				\
+	__grvalue;							\
+})
+
+#define __core_reg_set(reg, value) do {					\
+	unsigned int __srvalue = (value);				\
+	asm volatile("MOV " #reg ",%0"					\
+		     :							\
+		     : "r" (__srvalue));				\
+} while (0)
+
+#define __core_reg_swap(reg, value) do {				\
+	unsigned int __srvalue = (value);				\
+	asm volatile("SWAP " #reg ",%0"					\
+		     : "+r" (__srvalue));				\
+	(value) = __srvalue;						\
+} while (0)
+
+#endif
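
The __core_reg_* macros above compile down to a single MOV or SWAP instruction, so a caller can touch one of its own thread's control registers without the global locking that core_reg_read()/core_reg_write() need. A minimal usage sketch, assuming only what the header comment itself names (TXTACTCYC, the active-cycle counter, as an example candidate):

#include <asm/core_reg.h>

/* Sketch only: measure active cycles across a region of code using the
 * TXTACTCYC register listed above as an example candidate. */
static unsigned int count_active_cycles(void (*fn)(void))
{
	unsigned int start, end;

	start = __core_reg_get(TXTACTCYC);
	fn();
	end = __core_reg_get(TXTACTCYC);

	return end - start;
}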
diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h
new file mode 100644
index 000000000000..e0373f81a117
--- /dev/null
+++ b/arch/metag/include/asm/smp.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/cpumask.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+enum ipi_msg_type {
+	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
+	IPI_RESCHEDULE,
+};
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
+
+asmlinkage void secondary_start_kernel(void);
+
+extern void secondary_startup(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void __cpu_die(unsigned int cpu);
+extern int __cpu_disable(void);
+extern void cpu_die(void);
+#endif
+
+extern void smp_init_cpus(void);
+#endif /* __ASM_SMP_H */
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h
new file mode 100644
index 000000000000..23f5118f58db
--- /dev/null
+++ b/arch/metag/include/asm/topology.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_METAG_TOPOLOGY_H
+#define _ASM_METAG_TOPOLOGY_H
+
+#ifdef CONFIG_NUMA
+
+/* sched_domains SD_NODE_INIT for Meta machines */
+#define SD_NODE_INIT (struct sched_domain) {		\
+	.parent			= NULL,			\
+	.child			= NULL,			\
+	.groups			= NULL,			\
+	.min_interval		= 8,			\
+	.max_interval		= 32,			\
+	.busy_factor		= 32,			\
+	.imbalance_pct		= 125,			\
+	.cache_nice_tries	= 2,			\
+	.busy_idx		= 3,			\
+	.idle_idx		= 2,			\
+	.newidle_idx		= 0,			\
+	.wake_idx		= 0,			\
+	.forkexec_idx		= 0,			\
+	.flags			= SD_LOAD_BALANCE	\
+				| SD_BALANCE_FORK	\
+				| SD_BALANCE_EXEC	\
+				| SD_BALANCE_NEWIDLE	\
+				| SD_SERIALIZE,		\
+	.last_balance		= jiffies,		\
+	.balance_interval	= 1,			\
+	.nr_balance_failed	= 0,			\
+}
+
+#define cpu_to_node(cpu)	((void)(cpu), 0)
+#define parent_node(node)	((void)(node), 0)
+
+#define cpumask_of_node(node)	((void)node, cpu_online_mask)
+
+#define pcibus_to_node(bus)	((void)(bus), -1)
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ? \
+					cpu_all_mask : \
+					cpumask_of_node(pcibus_to_node(bus)))
+
+#endif
+
+#define mc_capable()	(1)
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+extern cpumask_t cpu_core_map[NR_CPUS];
+
+#define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_METAG_TOPOLOGY_H */
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
new file mode 100644
index 000000000000..3a589dfb966b
--- /dev/null
+++ b/arch/metag/kernel/cachepart.c
@@ -0,0 +1,124 @@
+/*
+ * Meta cache partition manipulation.
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/cachepart.h>
+#include <asm/metag_isa.h>
+#include <asm/metag_mem.h>
+
+#define SYSC_DCPART(n)	(SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
+#define SYSC_ICPART(n)	(SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
+
+#define CACHE_ASSOCIATIVITY 4	/* 4-way set-associative */
+#define ICACHE 0
+#define DCACHE 1
+
+/* The CORE_CONFIG2 register is not available on Meta 1 */
+#ifdef CONFIG_METAG_META21
+unsigned int get_dcache_size(void)
+{
+	unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
+	return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
+			  >> METAC_CORECFG2_DCSZ_S);
+}
+
+unsigned int get_icache_size(void)
+{
+	unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
+	return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
+			  >> METAC_CORE_C2ICSZ_S);
+}
+
+unsigned int get_global_dcache_size(void)
+{
+	unsigned int cpart = metag_in32(SYSC_DCPART(hard_processor_id()));
+	unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;
+	return (get_dcache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;
+}
+
+unsigned int get_global_icache_size(void)
+{
+	unsigned int cpart = metag_in32(SYSC_ICPART(hard_processor_id()));
+	unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;
+	return (get_icache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;
+}
+
+static int get_thread_cache_size(unsigned int cache, int thread_id)
+{
+	unsigned int cache_size;
+	unsigned int t_cache_part;
+	unsigned int isEnabled;
+	unsigned int offset = 0;
+	isEnabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 :
+		     metag_in32(MMCU_ICACHE_CTRL_ADDR) & 0x1);
+	if (!isEnabled)
+		return 0;
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+	/* Checking for global cache */
+	cache_size = (cache == DCACHE ? get_global_dcache_size() :
+		      get_global_icache_size());
+	offset = 8;
+#else
+	cache_size = (cache == DCACHE ? get_dcache_size() :
+		      get_icache_size());
+#endif
+	t_cache_part = (cache == DCACHE ?
+			(metag_in32(SYSC_DCPART(thread_id)) >> offset) & 0xF :
+			(metag_in32(SYSC_ICPART(thread_id)) >> offset) & 0xF);
+	switch (t_cache_part) {
+	case 0xF:
+		return cache_size;
+	case 0x7:
+		return cache_size / 2;
+	case 0x3:
+		return cache_size / 4;
+	case 0x1:
+		return cache_size / 8;
+	case 0:
+		return cache_size / 16;
+	}
+	return -1;
+}
+
+void check_for_cache_aliasing(int thread_id)
+{
+	int thread_cache_size;
+	unsigned int cache_type;
+	for (cache_type = ICACHE; cache_type <= DCACHE; cache_type++) {
+		thread_cache_size =
+				get_thread_cache_size(cache_type, thread_id);
+		if (thread_cache_size < 0)
+			pr_emerg("Can't read %s cache size\n",
+				 cache_type ? "DCACHE" : "ICACHE");
+		else if (thread_cache_size == 0)
+			/* Cache is off. No need to check for aliasing */
+			continue;
+		if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) {
+			pr_emerg("Cache aliasing detected in %s on Thread %d\n",
+				 cache_type ? "DCACHE" : "ICACHE", thread_id);
+			pr_warn("Total %s size: %u bytes\n",
+				cache_type ? "DCACHE" : "ICACHE",
+				cache_type ? get_dcache_size()
+					   : get_icache_size());
+			pr_warn("Thread %s size: %d bytes\n",
+				cache_type ? "DCACHE" : "ICACHE",
+				thread_cache_size);
+			pr_warn("Page Size: %lu bytes\n", PAGE_SIZE);
+		}
+	}
+}
+
+#else
+
+void check_for_cache_aliasing(int thread_id)
+{
+	return;
+}
+
+#endif
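
The switch in get_thread_cache_size() above decodes a 4-bit partition field in which each additional low bit doubles the thread's share of the cache, from 0x0 (1/16) up to 0xF (the whole cache); check_for_cache_aliasing() then warns when one cache way (the thread's share divided by CACHE_ASSOCIATIVITY) spans more than a page. A standalone sketch of that decoding, using a hypothetical 16KiB cache purely for illustration:

#include <stdio.h>

/* Closed form of the switch above; valid for the five contiguous-mask
 * values 0x0, 0x1, 0x3, 0x7 and 0xF that the hardware field can take. */
static unsigned int partition_bytes(unsigned int cache_size,
				    unsigned int field)
{
	return cache_size >> (4 - __builtin_popcount(field & 0xF));
}

int main(void)
{
	static const unsigned int fields[] = { 0x0, 0x1, 0x3, 0x7, 0xF };
	unsigned int i;

	for (i = 0; i < 5; i++)
		printf("field 0x%X -> %u bytes of a 16KiB cache\n",
		       fields[i], partition_bytes(16384, fields[i]));
	return 0;
}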
diff --git a/arch/metag/kernel/core_reg.c b/arch/metag/kernel/core_reg.c
new file mode 100644
index 000000000000..671cce8c34f2
--- /dev/null
+++ b/arch/metag/kernel/core_reg.c
@@ -0,0 +1,117 @@
+/*
+ * Support for reading and writing Meta core internal registers.
+ *
+ * Copyright (C) 2011 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+
+#include <asm/core_reg.h>
+#include <asm/global_lock.h>
+#include <asm/hwthread.h>
+#include <asm/io.h>
+#include <asm/metag_mem.h>
+#include <asm/metag_regs.h>
+
+#define UNIT_BIT_MASK		TXUXXRXRQ_UXX_BITS
+#define REG_BIT_MASK		TXUXXRXRQ_RX_BITS
+#define THREAD_BIT_MASK		TXUXXRXRQ_TX_BITS
+
+#define UNIT_SHIFTS		TXUXXRXRQ_UXX_S
+#define REG_SHIFTS		TXUXXRXRQ_RX_S
+#define THREAD_SHIFTS		TXUXXRXRQ_TX_S
+
+#define UNIT_VAL(x)		(((x) << UNIT_SHIFTS) & UNIT_BIT_MASK)
+#define REG_VAL(x)		(((x) << REG_SHIFTS) & REG_BIT_MASK)
+#define THREAD_VAL(x)		(((x) << THREAD_SHIFTS) & THREAD_BIT_MASK)
+
+/*
+ * core_reg_write() - modify the content of a register in a core unit.
+ * @unit: The unit to be modified.
+ * @reg: Register number within the unit.
+ * @thread: The thread we want to access.
+ * @val: The new value to write.
+ *
+ * Check asm/metag_regs.h for a list/defines of supported units (e.g. TXUPC_ID,
+ * TXUTR_ID, etc), and regnums within the units (e.g. TXMASKI_REGNUM,
+ * TXPOLLI_REGNUM, etc).
+ */
+void core_reg_write(int unit, int reg, int thread, unsigned int val)
+{
+	unsigned long flags;
+
+	/* TXUCT_ID has its own memory mapped registers */
+	if (unit == TXUCT_ID) {
+		void __iomem *cu_reg = __CU_addr(thread, reg);
+		metag_out32(val, cu_reg);
+		return;
+	}
+
+	__global_lock2(flags);
+
+	/* wait for ready */
+	while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+		udelay(10);
+
+	/* set the value to write */
+	metag_out32(val, TXUXXRXDT);
+
+	/* set the register to write */
+	val = UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread);
+	metag_out32(val, TXUXXRXRQ);
+
+	/* wait for finish */
+	while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+		udelay(10);
+
+	__global_unlock2(flags);
+}
+EXPORT_SYMBOL(core_reg_write);
+
+/*
+ * core_reg_read() - read the content of a register in a core unit.
+ * @unit: The unit to be read.
+ * @reg: Register number within the unit.
+ * @thread: The thread we want to access.
+ *
+ * Check asm/metag_regs.h for a list/defines of supported units (e.g. TXUPC_ID,
+ * TXUTR_ID, etc), and regnums within the units (e.g. TXMASKI_REGNUM,
+ * TXPOLLI_REGNUM, etc).
+ */
+unsigned int core_reg_read(int unit, int reg, int thread)
+{
+	unsigned long flags;
+	unsigned int val;
+
+	/* TXUCT_ID has its own memory mapped registers */
+	if (unit == TXUCT_ID) {
+		void __iomem *cu_reg = __CU_addr(thread, reg);
+		val = metag_in32(cu_reg);
+		return val;
+	}
+
+	__global_lock2(flags);
+
+	/* wait for ready */
+	while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+		udelay(10);
+
+	/* set the register to read */
+	val = (UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread) |
+	       TXUXXRXRQ_RDnWR_BIT);
+	metag_out32(val, TXUXXRXRQ);
+
+	/* wait for finish */
+	while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+		udelay(10);
+
+	/* read the register value */
+	val = metag_in32(TXUXXRXDT);
+
+	__global_unlock2(flags);
+
+	return val;
+}
+EXPORT_SYMBOL(core_reg_read);
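
core_reg_read() and core_reg_write() are the lock-protected path for reaching another hardware thread's registers. A condensed sketch of the read-modify-write pattern, taken from how boot_secondary() in smp.c below uses these functions (the helper name is illustrative, not part of the patch):

#include <asm/core_reg.h>
#include <asm/metag_regs.h>

/* Set the ThreadEnable bit (0x1) of another hardware thread's TXENABLE
 * control register, as boot_secondary() does to start a secondary CPU. */
static void enable_hwthread(int thread)
{
	unsigned int val;

	val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread);
	core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1);
}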
diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S
index 8b1388663892..969dffabc03a 100644
--- a/arch/metag/kernel/head.S
+++ b/arch/metag/kernel/head.S
@@ -43,3 +43,15 @@ __start:
 __exit:
 	XOR	TXENABLE,D0Re0,D0Re0
 	.size	__exit,.-__exit
+
+#ifdef CONFIG_SMP
+	.global	_secondary_startup
+	.type	_secondary_startup,function
+_secondary_startup:
+	MOVT	A0StP,#HI(_secondary_data_stack)
+	ADD	A0StP,A0StP,#LO(_secondary_data_stack)
+	GETD	A0StP,[A0StP]
+	ADD	A0StP,A0StP,#THREAD_INFO_SIZE
+	B	_secondary_start_kernel
+	.size	_secondary_startup,.-_secondary_startup
+#endif
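
_secondary_startup pairs with __cpu_up() in smp.c below, which stores the idle task's stack page in secondary_data_stack before enabling the thread. A hedged C analogue of the assembly above (not real kernel code: set_stack_pointer() is a hypothetical stand-in for the A0StP loads):

/* Hedged C rendering of _secondary_startup, for illustration only. */
extern void *secondary_data_stack;	/* written by __cpu_up() in smp.c */

static void secondary_startup_c(void)
{
	/* GETD A0StP,[A0StP]; ADD A0StP,A0StP,#THREAD_INFO_SIZE */
	set_stack_pointer((char *)secondary_data_stack + THREAD_INFO_SIZE);
	secondary_start_kernel();	/* B _secondary_start_kernel */
}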
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
new file mode 100644
index 000000000000..d1163127eb68
--- /dev/null
+++ b/arch/metag/kernel/smp.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2009,2010,2011 Imagination Technologies Ltd.
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cachepart.h>
+#include <asm/core_reg.h>
+#include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/tlbflush.h>
+#include <asm/hwthread.h>
+#include <asm/traps.h>
+
+DECLARE_PER_CPU(PTBI, pTBI);
+
+void *secondary_data_stack;
+
+/*
+ * structures for inter-processor calls
+ * - A collection of single bit ipi messages.
+ */
+struct ipi_data {
+	spinlock_t lock;
+	unsigned long ipi_count;
+	unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
+	.lock	= __SPIN_LOCK_UNLOCKED(ipi_data.lock),
+};
+
+static DEFINE_SPINLOCK(boot_lock);
+
+/*
+ * "thread" is assumed to be a valid Meta hardware thread ID.
+ */
+int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
+{
+	u32 val;
+
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	spin_lock(&boot_lock);
+
+	core_reg_write(TXUPC_ID, 0, thread, (unsigned int)secondary_startup);
+	core_reg_write(TXUPC_ID, 1, thread, 0);
+
+	/*
+	 * Give the thread privilege (PSTAT) and clear potentially problematic
+	 * bits in the process (namely ISTAT, CBMarker, CBMarkerI, LSM_STEP).
+	 */
+	core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT);
+
+	/* Clear the minim enable bit. */
+	val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread);
+	core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80);
+
+	/*
+	 * set the ThreadEnable bit (0x1) in the TXENABLE register
+	 * for the specified thread - off it goes!
+	 */
+	val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread);
+	core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1);
+
+	/*
+	 * now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	spin_unlock(&boot_lock);
+
+	return 0;
+}
+
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned int thread = cpu_2_hwthread_id[cpu];
+	int ret;
+
+	load_pgd(swapper_pg_dir, thread);
+
+	flush_tlb_all();
+
+	/*
+	 * Tell the secondary CPU where to find its idle thread's stack.
+	 */
+	secondary_data_stack = task_stack_page(idle);
+
+	wmb();
+
+	/*
+	 * Now bring the CPU into our world.
+	 */
+	ret = boot_secondary(thread, idle);
+	if (ret == 0) {
+		unsigned long timeout;
+
+		/*
+		 * CPU was successfully started, wait for it
+		 * to come online or time out.
+		 */
+		timeout = jiffies + HZ;
+		while (time_before(jiffies, timeout)) {
+			if (cpu_online(cpu))
+				break;
+
+			udelay(10);
+			barrier();
+		}
+
+		if (!cpu_online(cpu))
+			ret = -EIO;
+	}
+
+	secondary_data_stack = NULL;
+
+	if (ret) {
+		pr_crit("CPU%u: processor failed to boot\n", cpu);
+
+		/*
+		 * FIXME: We need to clean up the new idle thread. --rmk
+		 */
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DECLARE_COMPLETION(cpu_killed);
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpuexit __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (p->mm)
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+	}
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpuexit __cpu_die(unsigned int cpu)
+{
+	if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
+		pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we do not return from this function. If this cpu is
+ * brought online again it will need to run secondary_startup().
+ */
+void __cpuexit cpu_die(void)
+{
+	local_irq_disable();
+	idle_task_exit();
+
+	complete(&cpu_killed);
+
+	asm ("XOR	TXENABLE, D0Re0,D0Re0\n");
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+	struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
+
+	cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
+/*
+ * This is the secondary CPU boot entry. We're using this CPU's
+ * idle thread stack and the global page tables.
+ */
+asmlinkage void secondary_start_kernel(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+
+	/*
+	 * All kernel threads share the same mm context; grab a
+	 * reference and switch to it.
+	 */
+	atomic_inc(&mm->mm_users);
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
+
+	/*
+	 * TODO: Some day it might be useful for each Linux CPU to
+	 * have its own TBI structure. That would allow each Linux CPU
+	 * to run different interrupt handlers for the same IRQ
+	 * number.
+	 *
+	 * For now, simply copying the pointer to the boot CPU's TBI
+	 * structure is sufficient because we always want to run the
+	 * same interrupt handler whatever CPU takes the interrupt.
+	 */
+	per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);
+
+	if (!per_cpu(pTBI, cpu))
+		panic("No TBI found!");
+
+	per_cpu_trap_init(cpu);
+
+	preempt_disable();
+
+	setup_txprivext();
+
+	/*
+	 * Enable local interrupts.
+	 */
+	tbi_startup_interrupt(TBID_SIGNUM_TRT);
+	notify_cpu_starting(cpu);
+	local_irq_enable();
+
+	pr_info("CPU%u (thread %u): Booted secondary processor\n",
+		cpu, cpu_2_hwthread_id[cpu]);
+
+	calibrate_delay();
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * OK, now it's safe to let the boot CPU continue
+	 */
+	set_cpu_online(cpu, true);
+
+	/*
+	 * Check for cache aliasing.
+	 * Preemption is disabled
+	 */
+	check_for_cache_aliasing(cpu);
+
+	/*
+	 * OK, it's off to the idle thread for us
+	 */
+	cpu_idle();
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	int cpu;
+	unsigned long bogosum = 0;
+
+	for_each_online_cpu(cpu)
+		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+		num_online_cpus(),
+		bogosum / (500000/HZ),
+		(bogosum / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int cpu = smp_processor_id();
+
+	init_new_context(current, &init_mm);
+	current_thread_info()->cpu = cpu;
+
+	smp_store_cpu_info(cpu);
+	init_cpu_present(cpu_possible_mask);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);
+
+	if (!per_cpu(pTBI, cpu))
+		panic("No TBI found!");
+}
+
+static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg);
+
+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+{
+	unsigned long flags;
+	unsigned int cpu;
+	cpumask_t map;
+
+	cpumask_clear(&map);
+	local_irq_save(flags);
+
+	for_each_cpu(cpu, mask) {
+		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+
+		spin_lock(&ipi->lock);
+
+		/*
+		 * KICK interrupts are queued in hardware so we'll get
+		 * multiple interrupts if we call smp_cross_call()
+		 * multiple times for one msg. The problem is that we
+		 * only have one bit for each message - we can't queue
+		 * them in software.
+		 *
+		 * The first time through ipi_handler() we'll clear
+		 * the msg bit, having done all the work. But when we
+		 * return we'll get _another_ interrupt (and another,
+		 * and another until we've handled all the queued
+		 * KICKs). Running ipi_handler() when there's no work
+		 * to do is bad because that's how kick handler
+		 * chaining detects who the KICK was intended for.
+		 * See arch/metag/kernel/kick.c for more details.
+		 *
+		 * So only add 'cpu' to 'map' if we haven't already
+		 * queued a KICK interrupt for 'msg'.
+		 */
+		if (!(ipi->bits & (1 << msg))) {
+			ipi->bits |= 1 << msg;
+			cpumask_set_cpu(cpu, &map);
+		}
+
+		spin_unlock(&ipi->lock);
+	}
+
+	/*
+	 * Call the platform specific cross-CPU call function.
+	 */
+	smp_cross_call(map, msg);
+
+	local_irq_restore(flags);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void show_ipi_list(struct seq_file *p)
+{
+	unsigned int cpu;
+
+	seq_puts(p, "IPI:");
+
+	for_each_present_cpu(cpu)
+		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+
+	seq_putc(p, '\n');
+}
+
+static DEFINE_SPINLOCK(stop_lock);
+
+/*
+ * Main handler for inter-processor interrupts
+ *
+ * For Meta, the ipimask now only identifies a single
+ * category of IPI (Bit 1 IPIs have been replaced by a
+ * different mechanism):
+ *
+ *  Bit 0 - Inter-processor function call
+ */
+static int do_IPI(struct pt_regs *regs)
+{
+	unsigned int cpu = smp_processor_id();
+	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned long msgs, nextmsg;
+	int handled = 0;
+
+	ipi->ipi_count++;
+
+	spin_lock(&ipi->lock);
+	msgs = ipi->bits;
+	nextmsg = msgs & -msgs;
+	ipi->bits &= ~nextmsg;
+	spin_unlock(&ipi->lock);
+
+	if (nextmsg) {
+		handled = 1;
+
+		nextmsg = ffz(~nextmsg);
+		switch (nextmsg) {
+		case IPI_RESCHEDULE:
+			scheduler_ipi();
+			break;
+
+		case IPI_CALL_FUNC:
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
+			break;
+
+		default:
+			pr_crit("CPU%u: Unknown IPI message 0x%lx\n",
+				cpu, nextmsg);
+			break;
+		}
+	}
+
+	set_irq_regs(old_regs);
+
+	return handled;
+}
+
+void smp_send_reschedule(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+static void stop_this_cpu(void *data)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (system_state == SYSTEM_BOOTING ||
+	    system_state == SYSTEM_RUNNING) {
+		spin_lock(&stop_lock);
+		pr_crit("CPU%u: stopping\n", cpu);
+		dump_stack();
+		spin_unlock(&stop_lock);
+	}
+
+	set_cpu_online(cpu, false);
+
+	local_irq_disable();
+
+	hard_processor_halt(HALT_OK);
+}
+
+void smp_send_stop(void)
+{
+	smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+/*
+ * not supported here
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+
+/*
+ * We use KICKs for inter-processor interrupts.
+ *
+ * For every CPU in "callmap" the IPI data must already have been
+ * stored in that CPU's "ipi_data" member prior to calling this
+ * function.
+ */
+static void kick_raise_softirq(cpumask_t callmap, unsigned int irq)
+{
+	int cpu;
+
+	for_each_cpu(cpu, &callmap) {
+		unsigned int thread;
+
+		thread = cpu_2_hwthread_id[cpu];
+
+		BUG_ON(thread == BAD_HWTHREAD_ID);
+
+		metag_out32(1, T0KICKI + (thread * TnXKICK_STRIDE));
+	}
+}
+
+static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers,
+			  int Inst, PTBI pTBI, int *handled)
+{
+	*handled = do_IPI((struct pt_regs *)State.Sig.pCtx);
+
+	return State;
+}
+
+static struct kick_irq_handler ipi_irq = {
+	.func = ipi_handler,
+};
+
+static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg)
+{
+	kick_raise_softirq(callmap, 1);
+}
+
+static inline unsigned int get_core_count(void)
+{
+	int i;
+	unsigned int ret = 0;
+
+	for (i = 0; i < CONFIG_NR_CPUS; i++) {
+		if (core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i))
+			ret++;
+	}
+
+	return ret;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+	unsigned int i, ncores = get_core_count();
+
+	/* If no hwthread_map early param was set use default mapping */
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_2_hwthread_id[i] == BAD_HWTHREAD_ID) {
+			cpu_2_hwthread_id[i] = i;
+			hwthread_id_2_cpu[i] = i;
+		}
+
+	for (i = 0; i < ncores; i++)
+		set_cpu_possible(i, true);
+
+	kick_register_func(&ipi_irq);
+}
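
do_IPI() above drains one queued message per KICK by isolating the lowest set bit of the pending mask with msgs & -msgs and converting the one-hot result back to a message number with ffz(~nextmsg). A standalone userspace illustration of that bit trick (plain C; __builtin_ctzl stands in for the kernel's ffz(~x)):

#include <stdio.h>

int main(void)
{
	/* Pretend IPI_CALL_FUNC (bit 0) and IPI_RESCHEDULE (bit 2) are queued. */
	unsigned long bits = (1UL << 0) | (1UL << 2);

	while (bits) {
		unsigned long nextmsg = bits & -bits;	/* lowest set bit */

		bits &= ~nextmsg;			/* dequeue it */
		printf("handle IPI message %d\n", __builtin_ctzl(nextmsg));
	}
	return 0;
}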
diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c
new file mode 100644
index 000000000000..bec3dec4922e
--- /dev/null
+++ b/arch/metag/kernel/topology.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2007  Paul Mundt
+ * Copyright (C) 2010  Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/topology.h>
+
+#include <asm/cpu.h>
+
+DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data);
+
+cpumask_t cpu_core_map[NR_CPUS];
+
+static cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+	return *cpu_possible_mask;
+}
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
+int arch_update_cpu_topology(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+
+	return 0;
+}
+
+static int __init topology_init(void)
+{
+	int i, ret;
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	for_each_online_node(i)
+		register_one_node(i);
+#endif
+
+	for_each_present_cpu(i) {
+		struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i);
+#ifdef CONFIG_HOTPLUG_CPU
+		cpuinfo->cpu.hotpluggable = 1;
+#endif
+		ret = register_cpu(&cpuinfo->cpu, i);
+		if (unlikely(ret))
+			pr_warn("%s: register_cpu %d failed (%d)\n",
+				__func__, i, ret);
+	}
+
+#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
+	/*
+	 * In the UP case, make sure the CPU association is still
+	 * registered under each node. Without this, sysfs fails
+	 * to make the connection between nodes other than node0
+	 * and cpu0.
+	 */
+	for_each_online_node(i)
+		if (i != numa_node_id())
+			register_cpu_under_node(raw_smp_processor_id(), i);
+#endif
+
+	return 0;
+}
+subsys_initcall(topology_init);