diff options
Diffstat (limited to 'arch/x86/kernel/genx2apic_uv_x.c')
-rw-r--r-- | arch/x86/kernel/genx2apic_uv_x.c | 245 |
1 files changed, 245 insertions, 0 deletions
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c new file mode 100644 index 000000000000..5d77c9cd8e15 --- /dev/null +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -0,0 +1,245 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV APIC functions (note: not an Intel compatible APIC) | ||
7 | * | ||
8 | * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #include <linux/threads.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/ctype.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <asm/smp.h> | ||
21 | #include <asm/ipi.h> | ||
22 | #include <asm/genapic.h> | ||
23 | #include <asm/uv/uv_mmrs.h> | ||
24 | #include <asm/uv/uv_hub.h> | ||
25 | |||
/* Per-cpu copy of this cpu's UV hub addressing/geometry information. */
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

/* One entry per blade; allocated and filled in uv_system_init(). */
struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

/* NUMA node id -> blade number (entries 255-filled until initialized). */
short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

/* cpu number -> blade number (entries 255-filled until initialized). */
short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

/* Number of distinct blades found while scanning possible cpus' nasids. */
short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);
40 | |||
/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */

/*
 * Default destination set for newly allocated IRQ vectors: cpu 0 only.
 */
static cpumask_t uv_target_cpus(void)
{
	return cpumask_of_cpu(0);
}
47 | |||
48 | static cpumask_t uv_vector_allocation_domain(int cpu) | ||
49 | { | ||
50 | cpumask_t domain = CPU_MASK_NONE; | ||
51 | cpu_set(cpu, domain); | ||
52 | return domain; | ||
53 | } | ||
54 | |||
/*
 * Wake a secondary cpu: write the hub's IPI_INT MMR on the target's
 * nasid with the target's physical APIC id and the real-mode start
 * address folded into the vector field (start_rip >> 12, done via the
 * combined shift below so the high bits land in VECTOR's position).
 * Always returns 0.
 */
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
{
	unsigned long val;
	int nasid;

	nasid = uv_apicid_to_nasid(phys_apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    (6 << UVH_IPI_INT_DELIVERY_MODE_SHFT);	/* 6: presumably APIC
							 * Start-Up (110b) —
							 * TODO confirm */
	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
	return 0;
}
68 | |||
69 | static void uv_send_IPI_one(int cpu, int vector) | ||
70 | { | ||
71 | unsigned long val, apicid; | ||
72 | int nasid; | ||
73 | |||
74 | apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */ | ||
75 | nasid = uv_apicid_to_nasid(apicid); | ||
76 | val = | ||
77 | (1UL << UVH_IPI_INT_SEND_SHFT) | (apicid << | ||
78 | UVH_IPI_INT_APIC_ID_SHFT) | | ||
79 | (vector << UVH_IPI_INT_VECTOR_SHFT); | ||
80 | uv_write_global_mmr64(nasid, UVH_IPI_INT, val); | ||
81 | printk(KERN_DEBUG | ||
82 | "UV: IPI to cpu %d, apicid 0x%lx, vec %d, nasid%d, val 0x%lx\n", | ||
83 | cpu, apicid, vector, nasid, val); | ||
84 | } | ||
85 | |||
86 | static void uv_send_IPI_mask(cpumask_t mask, int vector) | ||
87 | { | ||
88 | unsigned int cpu; | ||
89 | |||
90 | for (cpu = 0; cpu < NR_CPUS; ++cpu) | ||
91 | if (cpu_isset(cpu, mask)) | ||
92 | uv_send_IPI_one(cpu, vector); | ||
93 | } | ||
94 | |||
95 | static void uv_send_IPI_allbutself(int vector) | ||
96 | { | ||
97 | cpumask_t mask = cpu_online_map; | ||
98 | |||
99 | cpu_clear(smp_processor_id(), mask); | ||
100 | |||
101 | if (!cpus_empty(mask)) | ||
102 | uv_send_IPI_mask(mask, vector); | ||
103 | } | ||
104 | |||
/* Send 'vector' to every online cpu, including the sender. */
static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_map, vector);
}
109 | |||
/* UV APIC ids are always usable; report "registered" unconditionally. */
static int uv_apic_id_registered(void)
{
	return 1;
}
114 | |||
115 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | ||
116 | { | ||
117 | int cpu; | ||
118 | |||
119 | /* | ||
120 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
121 | * May as well be the first. | ||
122 | */ | ||
123 | cpu = first_cpu(cpumask); | ||
124 | if ((unsigned)cpu < NR_CPUS) | ||
125 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
126 | else | ||
127 | return BAD_APICID; | ||
128 | } | ||
129 | |||
/*
 * Physical package id of the executing cpu: local APIC id shifted down
 * by 'index_msb' (bits below index_msb distinguish cores/threads).
 */
static unsigned int phys_pkg_id(int index_msb)
{
	return GET_APIC_ID(read_apic_id()) >> index_msb;
}
134 | |||
#ifdef ZZZ /* Needs x2apic patch */
/* Self-IPI via the x2APIC SELF_IPI register; compiled out until the
 * x2apic support this depends on is merged. */
static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
#endif
141 | |||
/*
 * genapic operations table for SGI UV: fixed delivery mode, physical
 * destination mode, per-cpu vector domains, MMR-based IPIs.
 */
struct genapic apic_x2apic_uv_x = {
	.name = "UV large system",
	.int_delivery_mode = dest_Fixed,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.target_cpus = uv_target_cpus,
	.vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */
	.apic_id_registered = uv_apic_id_registered,
	.send_IPI_all = uv_send_IPI_all,
	.send_IPI_allbutself = uv_send_IPI_allbutself,
	.send_IPI_mask = uv_send_IPI_mask,
	/* ZZZ.send_IPI_self = uv_send_IPI_self, */
	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,	/* Fixme ZZZ */
};
156 | |||
/*
 * Record the extra APIC id bits this cpu contributes when composing
 * x2apic ids.  (nasid >> 1) << 6: presumably nasids come in steps of 2
 * and occupy the id bits above the 6 low per-hub bits — TODO confirm
 * against the UV hub spec.
 */
static __cpuinit void set_x2apic_extra_bits(int nasid)
{
	__get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
}
161 | |||
/*
 * Called on boot cpu.
 *
 * One-time UV topology setup:
 *   1. read the M/N address-map geometry and the global MMR base from
 *      local hub MMRs;
 *   2. first pass over possible cpus: count distinct nasids to size the
 *      blade table;
 *   3. allocate the blade/node/cpu mapping tables (255-filled so an
 *      unset entry is recognizable);
 *   4. second pass: assign blade numbers and fill each possible cpu's
 *      per-cpu hub info.
 *
 * NOTE(review): both passes count a new blade whenever the nasid
 * differs from the previous cpu's, so cpus of one blade are assumed to
 * be enumerated contiguously — a nasid recurring after a different one
 * would be counted twice.  Confirm the enumeration order guarantees
 * this.
 */
static __init void uv_system_init(void)
{
	union uvh_si_addr_map_config_u m_n_config;
	int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
	unsigned long mmr_base;

	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
	/* strip the enable bit; the rest is the global MMR base address */
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	/* pass 1: count blades (nasid transitions) */
	last_nasid = -1;
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
		if (nasid != last_nasid)
			uv_possible_blades++;
		last_nasid = nasid;
	}
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = alloc_bootmem_pages(bytes);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = alloc_bootmem_pages(bytes);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
	memset(uv_cpu_to_blade, 255, bytes);

	/* pass 2: assign blade numbers and fill per-cpu hub info */
	last_nasid = -1;
	blade = -1;
	lcpu = -1;
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
		if (nasid != last_nasid) {
			/* first cpu of a new blade */
			blade++;
			lcpu = -1;
			/* "nr_posible_cpus" spelling comes from the
			 * struct declaration in uv_hub.h — keep as-is */
			uv_blade_info[blade].nr_posible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
		}
		last_nasid = nasid;
		lcpu++;	/* cpu's ordinal within its blade */

		uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
		uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->local_nasid = nasid;
		/* nasid with the low n_val bits cleared */
		uv_cpu_hub_info(cpu)->gnode_upper =
		    nasid & ~((1 << uv_hub_info->n_val) - 1);
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
		uv_blade_info[blade].nasid = nasid;
		uv_blade_info[blade].nr_posible_cpus++;
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;

		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
		       cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
		printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade);
	}
}
232 | |||
/*
 * Called on each cpu to initialize the per_cpu UV data area.
 *
 * The first caller (boot cpu: uv_node_to_blade still NULL) triggers the
 * one-time uv_system_init() topology setup; every caller then bumps its
 * blade's online-cpu count and, on non-unique-APIC systems, records the
 * cpu's x2apic extra id bits.
 */
void __cpuinit uv_cpu_init(void)
{
	if (!uv_node_to_blade)
		uv_system_init();

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->local_nasid);
}