aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/apic
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-02-17 14:35:16 -0500
committerIngo Molnar <mingo@elte.hu>2009-02-17 14:35:47 -0500
commit2a05180fe2e5b414f0cb2ccfc80e6c90563e3c67 (patch)
tree5c14afab81ee44b44ec7ff7faf08fee2c165bf50 /arch/x86/kernel/apic
parentf62bae5009c1ba596cd475cafbc83e0570a36e26 (diff)
x86, apic: move remaining APIC drivers to arch/x86/kernel/apic/*
Move the 32-bit extended-arch APIC drivers to arch/x86/kernel/apic/ too, and rename apic_64.c to probe_64.c. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r--arch/x86/kernel/apic/Makefile9
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c274
-rw-r--r--arch/x86/kernel/apic/es7000_32.c757
-rw-r--r--arch/x86/kernel/apic/numaq_32.c565
-rw-r--r--arch/x86/kernel/apic/probe_32.c424
-rw-r--r--arch/x86/kernel/apic/probe_64.c (renamed from arch/x86/kernel/apic/apic_64.c)0
-rw-r--r--arch/x86/kernel/apic/summit_32.c601
7 files changed, 2628 insertions, 2 deletions
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index da20b70c4000..97f558db5c31 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -2,14 +2,19 @@
2# Makefile for local APIC drivers and for the IO-APIC code 2# Makefile for local APIC drivers and for the IO-APIC code
3# 3#
4 4
5obj-y := apic.o ipi.o nmi.o 5obj-y := apic.o probe_$(BITS).o ipi.o nmi.o
6obj-$(CONFIG_X86_IO_APIC) += io_apic.o 6obj-$(CONFIG_X86_IO_APIC) += io_apic.o
7obj-$(CONFIG_SMP) += ipi.o 7obj-$(CONFIG_SMP) += ipi.o
8obj-$
8 9
9ifeq ($(CONFIG_X86_64),y) 10ifeq ($(CONFIG_X86_64),y)
10obj-y += apic_64.o apic_flat_64.o 11obj-y += apic_flat_64.o
11obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o 12obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o
12obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o 13obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o
13obj-$(CONFIG_X86_UV) += x2apic_uv_x.o 14obj-$(CONFIG_X86_UV) += x2apic_uv_x.o
14endif 15endif
15 16
17obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o
18obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
19obj-$(CONFIG_X86_ES7000) += es7000_32.o
20obj-$(CONFIG_X86_SUMMIT) += summit_32.o
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
new file mode 100644
index 000000000000..0b1093394fdf
--- /dev/null
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -0,0 +1,274 @@
1/*
2 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
3 *
4 * Drives the local APIC in "clustered mode".
5 */
6#include <linux/threads.h>
7#include <linux/cpumask.h>
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/dmi.h>
11#include <linux/smp.h>
12
13#include <asm/apicdef.h>
14#include <asm/fixmap.h>
15#include <asm/mpspec.h>
16#include <asm/apic.h>
17#include <asm/ipi.h>
18
/* Extract the 8-bit physical APIC ID from bits 31:24 of an ID register value. */
static inline unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x & 0xFF000000UL) >> 24;
}
23
/* bigsmp treats the local APIC as always registered - no probing needed. */
static inline int bigsmp_apic_id_registered(void)
{
	return 1;
}
28
/*
 * Default IRQ destination mask for this driver: every online CPU on
 * SMP kernels, just CPU 0 on UP builds.
 */
static inline const cpumask_t *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
	return &cpu_online_map;
#else
	return &cpumask_of_cpu(0);
#endif
}
37
/*
 * Physical-delivery driver: an APIC ID is never reported as "used"
 * in @bitmap, so IO-APIC ID allocation never has to avoid one.
 */
static inline unsigned long
bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
43
/* Every queried APIC ID is assumed present on bigsmp hardware. */
static inline unsigned long bigsmp_check_apicid_present(int bit)
{
	return 1;
}
48
/*
 * Build the APIC_LDR value for @cpu: preserve the reserved bits of the
 * current LDR and insert the CPU's BIOS-reported APIC ID as the logical
 * destination ID.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}
59
60/*
61 * Set up the logical destination ID.
62 *
63 * Intel recommends to set DFR, LDR and TPR before enabling
64 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
65 * document number 292116). So here it goes...
66 */
static inline void bigsmp_init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	/* Flat destination format first, then this CPU's logical ID: */
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
76
/* Announce the (physflat) routing mode this driver uses - no setup needed. */
static inline void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode: Physflat.  Using %d I/O APICs\n",
		nr_ioapics);
}
83
/*
 * Map an APIC ID to a NUMA node. Note the argument is ignored: the
 * node of the currently executing CPU is returned instead.
 */
static inline int bigsmp_apicid_to_node(int logical_apicid)
{
	return apicid_2_node[hard_smp_processor_id()];
}
88
/* Translate an MP-table CPU number to its BIOS APIC ID, or BAD_APICID. */
static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}
96
/* Return a physid mask with only @phys_apicid's bit set. */
static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
{
	return physid_mask_of_physid(phys_apicid);
}
101
/*
 * Mapping from cpu number to logical apicid. In phys mode the
 * "logical" ID is simply the physical APIC ID of the CPU.
 */
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
{
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_physical_id(cpu);
}
109
static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	/* Claim IDs 0-7 as present regardless of the real map: */
	return physids_promote(0xFFL);
}
115
/* No port-I/O remapping is needed on bigsmp systems. */
static inline void bigsmp_setup_portio_remap(void)
{
}
119
/* The boot CPU's physical APIC ID is always considered present. */
static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
124
/* As we are using single CPU as destination, pick only one CPU here */
static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
}
130
/*
 * Pick the APIC ID of the first *online* CPU in the intersection of
 * @cpumask and @andmask; BAD_APICID if that intersection has none.
 */
static inline unsigned int
bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	/* Loop exhaustion leaves cpu >= nr_cpu_ids, so this means "found": */
	if (cpu < nr_cpu_ids)
		return bigsmp_cpu_to_logical_apicid(cpu);

	return BAD_APICID;
}
150
/* Physical package ID = APIC ID with the low (SMT/core) bits shifted out. */
static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
155
/* Send @vector to each CPU in @mask, one physical-mode IPI at a time. */
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}
160
/* IPI every online CPU except the sender. */
static inline void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
165
/* IPI every online CPU, including the sender. */
static inline void bigsmp_send_IPI_all(int vector)
{
	bigsmp_send_IPI_mask(cpu_online_mask, vector);
}
170
static int dmi_bigsmp; /* can be set by dmi scanners */

/* DMI callback: a matching HP system forces bigsmp mode. */
static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}
180
181
/* HP systems whose BIOS identifies them as needing bigsmp: */
static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};
196
/*
 * Physical (fixed) delivery targets a single CPU, so each vector
 * domain contains exactly the CPU it was allocated for.
 */
static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	cpus_clear(*retmask);
	cpu_set(cpu, *retmask);
}
202
/*
 * Probe: bigsmp is selected either explicitly (def_to_bigsmp, e.g. the
 * apic=bigsmp option / >8 CPUs) or by a DMI match against known boxes.
 */
static int probe_bigsmp(void)
{
	if (def_to_bigsmp)
		dmi_bigsmp = 1;
	else
		dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}
212
/* Method table for the bigsmp APIC driver (physical, fixed delivery): */
struct apic apic_bigsmp = {

	.name				= "bigsmp",
	.probe				= probe_bigsmp,
	.acpi_madt_oem_check		= NULL,
	.apic_id_registered		= bigsmp_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode			= 0,

	.target_cpus			= bigsmp_target_cpus,
	.disable_esr			= 1,
	.dest_logical			= 0,
	.check_apicid_used		= bigsmp_check_apicid_used,
	.check_apicid_present		= bigsmp_check_apicid_present,

	.vector_allocation_domain	= bigsmp_vector_allocation_domain,
	.init_apic_ldr			= bigsmp_init_apic_ldr,

	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
	.setup_apic_routing		= bigsmp_setup_apic_routing,
	.multi_timer_check		= NULL,
	.apicid_to_node			= bigsmp_apicid_to_node,
	.cpu_to_logical_apicid		= bigsmp_cpu_to_logical_apicid,
	.cpu_present_to_apicid		= bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present		= bigsmp_apicid_to_cpu_present,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= bigsmp_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= bigsmp_phys_pkg_id,
	.mps_oem_check			= NULL,

	/* APIC ID lives in bits 31:24 of the ID register: */
	.get_apic_id			= bigsmp_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0xFF << 24,

	.cpu_mask_to_apicid		= bigsmp_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= bigsmp_cpu_mask_to_apicid_and,

	.send_IPI_mask			= bigsmp_send_IPI_mask,
	.send_IPI_mask_allbutself	= NULL,
	.send_IPI_allbutself		= bigsmp_send_IPI_allbutself,
	.send_IPI_all			= bigsmp_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.wakeup_cpu			= NULL,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert		= default_wait_for_init_deassert,

	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
new file mode 100644
index 000000000000..320f2d2e4e54
--- /dev/null
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -0,0 +1,757 @@
1/*
2 * Written by: Garry Forsgren, Unisys Corporation
3 * Natalie Protasevich, Unisys Corporation
4 *
5 * This file contains the code to configure and interface
6 * with Unisys ES7000 series hardware system manager.
7 *
8 * Copyright (c) 2003 Unisys Corporation.
9 * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
10 *
11 * All Rights Reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it would be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write the Free Software Foundation, Inc., 59
23 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
24 *
25 * Contact information: Unisys Corporation, Township Line & Union Meeting
26 * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
27 *
28 * http://www.unisys.com
29 */
30#include <linux/notifier.h>
31#include <linux/spinlock.h>
32#include <linux/cpumask.h>
33#include <linux/threads.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/reboot.h>
37#include <linux/string.h>
38#include <linux/types.h>
39#include <linux/errno.h>
40#include <linux/acpi.h>
41#include <linux/init.h>
42#include <linux/nmi.h>
43#include <linux/smp.h>
44#include <linux/io.h>
45
46#include <asm/apicdef.h>
47#include <asm/atomic.h>
48#include <asm/fixmap.h>
49#include <asm/mpspec.h>
50#include <asm/setup.h>
51#include <asm/apic.h>
52#include <asm/ipi.h>
53
54/*
55 * ES7000 chipsets
56 */
57
58#define NON_UNISYS 0
59#define ES7000_CLASSIC 1
60#define ES7000_ZORRO 2
61
62#define MIP_REG 1
63#define MIP_PSAI_REG 4
64
65#define MIP_BUSY 1
66#define MIP_SPIN 0xf0000
67#define MIP_VALID 0x0100000000000000ULL
68#define MIP_SW_APIC 0x1020b
69
70#define MIP_PORT(val) ((val >> 32) & 0xffff)
71
72#define MIP_RD_LO(val) (val & 0xffffffff)
73
74struct mip_reg {
75 unsigned long long off_0x00;
76 unsigned long long off_0x08;
77 unsigned long long off_0x10;
78 unsigned long long off_0x18;
79 unsigned long long off_0x20;
80 unsigned long long off_0x28;
81 unsigned long long off_0x30;
82 unsigned long long off_0x38;
83};
84
85struct mip_reg_info {
86 unsigned long long mip_info;
87 unsigned long long delivery_info;
88 unsigned long long host_reg;
89 unsigned long long mip_reg;
90};
91
92struct psai {
93 unsigned long long entry_type;
94 unsigned long long addr;
95 unsigned long long bep_addr;
96};
97
98#ifdef CONFIG_ACPI
99
100struct es7000_oem_table {
101 struct acpi_table_header Header;
102 u32 OEMTableAddr;
103 u32 OEMTableSize;
104};
105
106static unsigned long oem_addrX;
107static unsigned long oem_size;
108
109#endif
110
111/*
112 * ES7000 Globals
113 */
114
115static volatile unsigned long *psai;
116static struct mip_reg *mip_reg;
117static struct mip_reg *host_reg;
118static int mip_port;
119static unsigned long mip_addr;
120static unsigned long host_addr;
121
122int es7000_plat;
123
124/*
125 * GSI override for ES7000 platforms.
126 */
127
128static unsigned int base;
129
/*
 * Remap a GSI on legacy (non-ZORRO) ES7000 boxes: the first 16 GSIs of
 * IO-APIC 0 are pushed above the sum of all IO-APIC pins.  The base is
 * computed lazily on first use and cached in 'base'.
 */
static int
es7000_rename_gsi(int ioapic, int gsi)
{
	if (es7000_plat == ES7000_ZORRO)
		return gsi;

	if (!base) {
		int i;
		for (i = 0; i < nr_ioapics; i++)
			base += nr_ioapic_registers[i];
	}

	if (!ioapic && (gsi < 16))
		gsi += base;

	return gsi;
}
147
/*
 * Wake a secondary CPU through the ES7000 PSAI mailbox instead of the
 * standard INIT/SIPI sequence: encode the startup EIP page and target
 * CPU into a PSAI command word, busy-wait for the mailbox to go idle
 * (bit 24 clear), then post the command.
 *
 * Returns 0 on success, -1 if no PSAI mailbox was discovered.
 */
static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
{
	unsigned long vect = 0, psaival = 0;

	if (psai == NULL)
		return -1;

	vect = ((unsigned long)__pa(eip)/0x1000) << 16;
	psaival = (0x1000000 | vect | cpu);

	/* Spin until the PSAI busy bit clears - no timeout: */
	while (*psai & 0x1000000)
		;

	*psai = psaival;

	return 0;
}
165
166static int __init es7000_update_apic(void)
167{
168 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
169
170 /* MPENTIUMIII */
171 if (boot_cpu_data.x86 == 6 &&
172 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
173 es7000_update_apic_to_cluster();
174 apic->wait_for_init_deassert = NULL;
175 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
176 }
177
178 return 0;
179}
180
static void __init setup_unisys(void)
{
	/*
	 * Determine the generation of the ES7000 currently running.
	 *
	 * es7000_plat = 1 if the machine is a 5xx ES7000 box
	 * es7000_plat = 2 if the machine is a x86_64 ES7000 box
	 *
	 */
	if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
		es7000_plat = ES7000_ZORRO;
	else
		es7000_plat = ES7000_CLASSIC;

	/* Install the ES7000 GSI remapping and APIC-update quirks: */
	ioapic_renumber_irq = es7000_rename_gsi;

	x86_quirks->update_apic = es7000_update_apic;
}
198
199/*
200 * Parse the OEM Table:
201 */
/*
 * Parse the Unisys OEM table: walk up to 7 records after the 8-byte
 * header and latch the MIP/host register addresses (MIP_REG records)
 * and the PSAI mailbox address (MIP_PSAI_REG records).
 *
 * Returns the resulting es7000_plat value; NON_UNISYS if fewer than
 * two records were recognized.
 */
static int __init parse_unisys_oem(char *oemptr)
{
	int			i;
	int			success = 0;
	unsigned char		type, size;
	unsigned long		val;
	char			*tp = NULL;
	struct psai		*psaip = NULL;
	struct mip_reg_info	*mi;
	struct mip_reg		*host, *mip;

	tp = oemptr;

	/* Skip the 8-byte table header: */
	tp += 8;

	for (i = 0; i <= 6; i++) {
		type = *tp++;
		size = *tp++;
		/* Rewind: the record structs overlay the type/size bytes too */
		tp -= 2;
		switch (type) {
		case MIP_REG:
			mi = (struct mip_reg_info *)tp;
			val = MIP_RD_LO(mi->host_reg);
			host_addr = val;
			host = (struct mip_reg *)val;
			host_reg = __va(host);
			val = MIP_RD_LO(mi->mip_reg);
			mip_port = MIP_PORT(mi->mip_info);
			mip_addr = val;
			mip = (struct mip_reg *)val;
			mip_reg = __va(mip);
			pr_debug("es7000_mipcfg: host_reg = 0x%lx \n",
				 (unsigned long)host_reg);
			pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n",
				 (unsigned long)mip_reg);
			success++;
			break;
		case MIP_PSAI_REG:
			psaip = (struct psai *)tp;
			/*
			 * NOTE(review): 'tp' cannot be NULL at this point;
			 * this was presumably meant to check 'psaip' or
			 * 'psaip->addr' - behavior kept as-is.
			 */
			if (tp != NULL) {
				if (psaip->addr)
					psai = __va(psaip->addr);
				else
					psai = NULL;
				success++;
			}
			break;
		default:
			break;
		}
		/* Advance past this record ('size' includes the header bytes): */
		tp += size;
	}

	if (success < 2)
		es7000_plat = NON_UNISYS;
	else
		setup_unisys();

	return es7000_plat;
}
262
263#ifdef CONFIG_ACPI
264static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
265{
266 struct acpi_table_header *header = NULL;
267 struct es7000_oem_table *table;
268 acpi_size tbl_size;
269 acpi_status ret;
270 int i = 0;
271
272 for (;;) {
273 ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size);
274 if (!ACPI_SUCCESS(ret))
275 return -1;
276
277 if (!memcmp((char *) &header->oem_id, "UNISYS", 6))
278 break;
279
280 early_acpi_os_unmap_memory(header, tbl_size);
281 }
282
283 table = (void *)header;
284
285 oem_addrX = table->OEMTableAddr;
286 oem_size = table->OEMTableSize;
287
288 early_acpi_os_unmap_memory(header, tbl_size);
289
290 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size);
291
292 return 0;
293}
294
295static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
296{
297 if (!oem_addr)
298 return;
299
300 __acpi_unmap_table((char *)oem_addr, oem_size);
301}
302
303static int es7000_check_dsdt(void)
304{
305 struct acpi_table_header header;
306
307 if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
308 !strncmp(header.oem_id, "UNISYS", 6))
309 return 1;
310 return 0;
311}
312
313/* Hook from generic ACPI tables.c */
314static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
315{
316 unsigned long oem_addr = 0;
317 int check_dsdt;
318 int ret = 0;
319
320 /* check dsdt at first to avoid clear fix_map for oem_addr */
321 check_dsdt = es7000_check_dsdt();
322
323 if (!find_unisys_acpi_oem_table(&oem_addr)) {
324 if (check_dsdt) {
325 ret = parse_unisys_oem((char *)oem_addr);
326 } else {
327 setup_unisys();
328 ret = 1;
329 }
330 /*
331 * we need to unmap it
332 */
333 unmap_unisys_acpi_oem_table(oem_addr);
334 }
335 return ret;
336}
337#else /* !CONFIG_ACPI: */
338static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
339{
340 return 0;
341}
342#endif /* !CONFIG_ACPI */
343
344static void es7000_spin(int n)
345{
346 int i = 0;
347
348 while (i++ < n)
349 rep_nop();
350}
351
/*
 * Post one command to the ES7000 system-management (MIP) interface:
 * wait for the host register to drain, copy the command in, kick the
 * MIP port, then wait for and consume the reply in @mip_reg.
 *
 * Note: the @mip_reg parameter (the caller's command/reply buffer)
 * shadows the file-scope 'mip_reg' pointer inside this function.
 *
 * Returns the 16-bit status from the reply, or -1 on timeout.
 */
static int __init
es7000_mip_write(struct mip_reg *mip_reg)
{
	int status = 0;
	int spin;

	/* Wait for any previous command to be consumed by the host: */
	spin = MIP_SPIN;
	while ((host_reg->off_0x38 & MIP_VALID) != 0) {
		if (--spin <= 0) {
			WARN(1, "Timeout waiting for Host Valid Flag\n");
			return -1;
		}
		es7000_spin(MIP_SPIN);
	}

	memcpy(host_reg, mip_reg, sizeof(struct mip_reg));
	outb(1, mip_port);

	/* Now wait for the reply to become valid: */
	spin = MIP_SPIN;

	while ((mip_reg->off_0x38 & MIP_VALID) == 0) {
		if (--spin <= 0) {
			WARN(1, "Timeout waiting for MIP Valid Flag\n");
			return -1;
		}
		es7000_spin(MIP_SPIN);
	}

	/* Extract the status field and acknowledge the reply: */
	status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48;
	mip_reg->off_0x38 &= ~MIP_VALID;

	return status;
}
385
/*
 * Ask the ES7000 system manager to switch the platform into APIC mode
 * via a MIP_SW_APIC command.
 *
 * NOTE(review): on persistent failure (es7000_mip_write() returning -1)
 * this loop retries and warns indefinitely - presumably intentional,
 * since the box is unusable without APIC mode; verify before changing.
 */
static void __init es7000_enable_apic_mode(void)
{
	struct mip_reg es7000_mip_reg;
	int mip_status;

	if (!es7000_plat)
		return;

	printk(KERN_INFO "ES7000: Enabling APIC mode.\n");
	memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
	es7000_mip_reg.off_0x00 = MIP_SW_APIC;
	es7000_mip_reg.off_0x38 = MIP_VALID;

	while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
		WARN(1, "Command failed, status = %x\n", mip_status);
}
402
403static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
404{
405 /* Careful. Some cpus do not strictly honor the set of cpus
406 * specified in the interrupt destination when using lowest
407 * priority interrupt delivery mode.
408 *
409 * In particular there was a hyperthreading cpu observed to
410 * deliver interrupts to the wrong hyperthread when only one
411 * hyperthread was specified in the interrupt desitination.
412 */
413 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
414}
415
416
417static void es7000_wait_for_init_deassert(atomic_t *deassert)
418{
419#ifndef CONFIG_ES7000_CLUSTERED_APIC
420 while (!atomic_read(deassert))
421 cpu_relax();
422#endif
423 return;
424}
425
/* Extract the 8-bit physical APIC ID from bits 31:24 of an ID register value. */
static unsigned int es7000_get_apic_id(unsigned long x)
{
	return (x & 0xFF000000UL) >> 24;
}
430
431static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
432{
433 default_send_IPI_mask_sequence_phys(mask, vector);
434}
435
436static void es7000_send_IPI_allbutself(int vector)
437{
438 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
439}
440
441static void es7000_send_IPI_all(int vector)
442{
443 es7000_send_IPI_mask(cpu_online_mask, vector);
444}
445
446static int es7000_apic_id_registered(void)
447{
448 return 1;
449}
450
451static const cpumask_t *target_cpus_cluster(void)
452{
453 return &CPU_MASK_ALL;
454}
455
456static const cpumask_t *es7000_target_cpus(void)
457{
458 return &cpumask_of_cpu(smp_processor_id());
459}
460
461static unsigned long
462es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
463{
464 return 0;
465}
466static unsigned long es7000_check_apicid_present(int bit)
467{
468 return physid_isset(bit, phys_cpu_present_map);
469}
470
471static unsigned long calculate_ldr(int cpu)
472{
473 unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
474
475 return SET_APIC_LOGICAL_ID(id);
476}
477
478/*
479 * Set up the logical destination ID.
480 *
481 * Intel recommends to set DFR, LdR and TPR before enabling
482 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
483 * document number 292116). So here it goes...
484 */
485static void es7000_init_apic_ldr_cluster(void)
486{
487 unsigned long val;
488 int cpu = smp_processor_id();
489
490 apic_write(APIC_DFR, APIC_DFR_CLUSTER);
491 val = calculate_ldr(cpu);
492 apic_write(APIC_LDR, val);
493}
494
495static void es7000_init_apic_ldr(void)
496{
497 unsigned long val;
498 int cpu = smp_processor_id();
499
500 apic_write(APIC_DFR, APIC_DFR_FLAT);
501 val = calculate_ldr(cpu);
502 apic_write(APIC_LDR, val);
503}
504
505static void es7000_setup_apic_routing(void)
506{
507 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
508
509 printk(KERN_INFO
510 "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
511 (apic_version[apic] == 0x14) ?
512 "Physical Cluster" : "Logical Cluster",
513 nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
514}
515
516static int es7000_apicid_to_node(int logical_apicid)
517{
518 return 0;
519}
520
521
522static int es7000_cpu_present_to_apicid(int mps_cpu)
523{
524 if (!mps_cpu)
525 return boot_cpu_physical_apicid;
526 else if (mps_cpu < nr_cpu_ids)
527 return per_cpu(x86_bios_cpu_apicid, mps_cpu);
528 else
529 return BAD_APICID;
530}
531
/* Monotonic counter: each call hands out the next physical ID slot. */
static int cpu_id;

/*
 * Note: the @phys_apicid argument is ignored - CPUs are assigned
 * sequential present-map bits in call order via 'cpu_id'.
 */
static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
{
	physid_mask_t mask;

	mask = physid_mask_of_physid(cpu_id);
	++cpu_id;

	return mask;
}
543
544/* Mapping from cpu number to logical apicid */
545static int es7000_cpu_to_logical_apicid(int cpu)
546{
547#ifdef CONFIG_SMP
548 if (cpu >= nr_cpu_ids)
549 return BAD_APICID;
550 return cpu_2_logical_apicid[cpu];
551#else
552 return logical_smp_processor_id();
553#endif
554}
555
556static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
557{
558 /* For clustered we don't have a good way to do this yet - hack */
559 return physids_promote(0xff);
560}
561
562static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
563{
564 boot_cpu_physical_apicid = read_apic_id();
565 return 1;
566}
567
/*
 * Clustered-mode variant: compute the destination APIC ID for @cpumask.
 * All CPUs in the mask must live in the same APIC cluster; if not (or
 * if the mask covers all CPUs), broadcast ID 0xFF is returned.
 */
static unsigned int
es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
	int cpus_found = 0;
	int num_bits_set;
	int apicid;
	int cpu;

	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return 0xFF;
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = cpumask_first(cpumask);
	apicid = es7000_cpu_to_logical_apicid(cpu);

	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = es7000_cpu_to_logical_apicid(cpu);

			if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
				WARN(1, "Not a valid mask!");

				return 0xFF;
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
603
/*
 * Flat-mode variant of the above: same single-cluster constraint, but
 * errors fall back to CPU 0's logical APIC ID instead of 0xFF.
 */
static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int cpus_found = 0;
	int num_bits_set;
	int apicid;
	int cpu;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return es7000_cpu_to_logical_apicid(0);
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = first_cpu(*cpumask);
	apicid = es7000_cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = es7000_cpu_to_logical_apicid(cpu);

			if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
				printk("%s: Not a valid mask!\n", __func__);

				return es7000_cpu_to_logical_apicid(0);
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
637
/*
 * Destination APIC ID for the online CPUs in @inmask AND @andmask.
 * Falls back to CPU 0's logical APIC ID if the temporary cpumask
 * cannot be allocated.
 */
static unsigned int
es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
			      const struct cpumask *andmask)
{
	int apicid = es7000_cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = es7000_cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);

	return apicid;
}
656
657static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
658{
659 return cpuid_apic >> index_msb;
660}
661
/*
 * Patch the installed apic driver over to clustered logical mode:
 * lowest-priority delivery, logical destinations, and the cluster
 * variants of the LDR-init and mask-to-apicid methods.
 */
void __init es7000_update_apic_to_cluster(void)
{
	apic->target_cpus = target_cpus_cluster;
	apic->irq_delivery_mode = dest_LowestPrio;
	/* logical delivery broadcast to all procs: */
	apic->irq_dest_mode = 1;

	apic->init_apic_ldr = es7000_init_apic_ldr_cluster;

	apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
}
673
674static int probe_es7000(void)
675{
676 /* probed later in mptable/ACPI hooks */
677 return 0;
678}
679
680static __init int
681es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
682{
683 if (mpc->oemptr) {
684 struct mpc_oemtable *oem_table =
685 (struct mpc_oemtable *)mpc->oemptr;
686
687 if (!strncmp(oem, "UNISYS", 6))
688 return parse_unisys_oem((char *)oem_table);
689 }
690 return 0;
691}
692
693
694struct apic apic_es7000 = {
695
696 .name = "es7000",
697 .probe = probe_es7000,
698 .acpi_madt_oem_check = es7000_acpi_madt_oem_check,
699 .apic_id_registered = es7000_apic_id_registered,
700
701 .irq_delivery_mode = dest_Fixed,
702 /* phys delivery to target CPUs: */
703 .irq_dest_mode = 0,
704
705 .target_cpus = es7000_target_cpus,
706 .disable_esr = 1,
707 .dest_logical = 0,
708 .check_apicid_used = es7000_check_apicid_used,
709 .check_apicid_present = es7000_check_apicid_present,
710
711 .vector_allocation_domain = es7000_vector_allocation_domain,
712 .init_apic_ldr = es7000_init_apic_ldr,
713
714 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
715 .setup_apic_routing = es7000_setup_apic_routing,
716 .multi_timer_check = NULL,
717 .apicid_to_node = es7000_apicid_to_node,
718 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
719 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
720 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
721 .setup_portio_remap = NULL,
722 .check_phys_apicid_present = es7000_check_phys_apicid_present,
723 .enable_apic_mode = es7000_enable_apic_mode,
724 .phys_pkg_id = es7000_phys_pkg_id,
725 .mps_oem_check = es7000_mps_oem_check,
726
727 .get_apic_id = es7000_get_apic_id,
728 .set_apic_id = NULL,
729 .apic_id_mask = 0xFF << 24,
730
731 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
732 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
733
734 .send_IPI_mask = es7000_send_IPI_mask,
735 .send_IPI_mask_allbutself = NULL,
736 .send_IPI_allbutself = es7000_send_IPI_allbutself,
737 .send_IPI_all = es7000_send_IPI_all,
738 .send_IPI_self = default_send_IPI_self,
739
740 .wakeup_cpu = NULL,
741
742 .trampoline_phys_low = 0x467,
743 .trampoline_phys_high = 0x469,
744
745 .wait_for_init_deassert = es7000_wait_for_init_deassert,
746
747 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
748 .smp_callin_clear_local_apic = NULL,
749 .inquire_remote_apic = default_inquire_remote_apic,
750
751 .read = native_apic_mem_read,
752 .write = native_apic_mem_write,
753 .icr_read = native_apic_icr_read,
754 .icr_write = native_apic_icr_write,
755 .wait_icr_idle = native_apic_wait_icr_idle,
756 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
757};
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
new file mode 100644
index 000000000000..d9d6d61eed82
--- /dev/null
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -0,0 +1,565 @@
1/*
2 * Written by: Patricia Gaughen, IBM Corporation
3 *
4 * Copyright (C) 2002, IBM Corp.
5 * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
6 *
7 * All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
17 * NON INFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Send feedback to <gone@us.ibm.com>
25 */
26#include <linux/nodemask.h>
27#include <linux/topology.h>
28#include <linux/bootmem.h>
29#include <linux/threads.h>
30#include <linux/cpumask.h>
31#include <linux/kernel.h>
32#include <linux/mmzone.h>
33#include <linux/module.h>
34#include <linux/string.h>
35#include <linux/init.h>
36#include <linux/numa.h>
37#include <linux/smp.h>
38#include <linux/io.h>
39#include <linux/mm.h>
40
41#include <asm/processor.h>
42#include <asm/fixmap.h>
43#include <asm/mpspec.h>
44#include <asm/numaq.h>
45#include <asm/setup.h>
46#include <asm/apic.h>
47#include <asm/e820.h>
48#include <asm/ipi.h>
49
50#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
51
52int found_numaq;
53
54/*
55 * Have to match translation table entries to main table entries by counter
56 * hence the mpc_record variable .... can't see a less disgusting way of
57 * doing this ....
58 */
59struct mpc_trans {
60 unsigned char mpc_type;
61 unsigned char trans_len;
62 unsigned char trans_type;
63 unsigned char trans_quad;
64 unsigned char trans_global;
65 unsigned char trans_local;
66 unsigned short trans_reserved;
67};
68
69/* x86_quirks member */
70static int mpc_record;
71
72static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY];
73
74int mp_bus_id_to_node[MAX_MP_BUSSES];
75int mp_bus_id_to_local[MAX_MP_BUSSES];
76int quad_local_to_mp_bus_id[NR_CPUS/4][4];
77
78
79static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
80{
81 struct eachquadmem *eq = scd->eq + node;
82
83 node_set_online(node);
84
85 /* Convert to pages */
86 node_start_pfn[node] =
87 MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size);
88
89 node_end_pfn[node] =
90 MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
91
92 e820_register_active_regions(node, node_start_pfn[node],
93 node_end_pfn[node]);
94
95 memory_present(node, node_start_pfn[node], node_end_pfn[node]);
96
97 node_remap_size[node] = node_memmap_size_bytes(node,
98 node_start_pfn[node],
99 node_end_pfn[node]);
100}
101
102/*
103 * Function: smp_dump_qct()
104 *
105 * Description: gets memory layout from the quad config table. This
106 * function also updates node_online_map with the nodes (quads) present.
107 */
108static void __init smp_dump_qct(void)
109{
110 struct sys_cfg_data *scd;
111 int node;
112
113 scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR);
114
115 nodes_clear(node_online_map);
116 for_each_node(node) {
117 if (scd->quads_present31_0 & (1 << node))
118 numaq_register_node(node, scd);
119 }
120}
121
122void __cpuinit numaq_tsc_disable(void)
123{
124 if (!found_numaq)
125 return;
126
127 if (num_online_nodes() > 1) {
128 printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
129 setup_clear_cpu_cap(X86_FEATURE_TSC);
130 }
131}
132
133static int __init numaq_pre_time_init(void)
134{
135 numaq_tsc_disable();
136 return 0;
137}
138
139static inline int generate_logical_apicid(int quad, int phys_apicid)
140{
141 return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
142}
143
144/* x86_quirks member */
145static int mpc_apic_id(struct mpc_cpu *m)
146{
147 int quad = translation_table[mpc_record]->trans_quad;
148 int logical_apicid = generate_logical_apicid(quad, m->apicid);
149
150 printk(KERN_DEBUG
151 "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
152 m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
153 (m->cpufeature & CPU_MODEL_MASK) >> 4,
154 m->apicver, quad, logical_apicid);
155
156 return logical_apicid;
157}
158
159/* x86_quirks member */
160static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
161{
162 int quad = translation_table[mpc_record]->trans_quad;
163 int local = translation_table[mpc_record]->trans_local;
164
165 mp_bus_id_to_node[m->busid] = quad;
166 mp_bus_id_to_local[m->busid] = local;
167
168 printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad);
169}
170
171/* x86_quirks member */
172static void mpc_oem_pci_bus(struct mpc_bus *m)
173{
174 int quad = translation_table[mpc_record]->trans_quad;
175 int local = translation_table[mpc_record]->trans_local;
176
177 quad_local_to_mp_bus_id[quad][local] = m->busid;
178}
179
180static void __init MP_translation_info(struct mpc_trans *m)
181{
182 printk(KERN_INFO
183 "Translation: record %d, type %d, quad %d, global %d, local %d\n",
184 mpc_record, m->trans_type, m->trans_quad, m->trans_global,
185 m->trans_local);
186
187 if (mpc_record >= MAX_MPC_ENTRY)
188 printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
189 else
190 translation_table[mpc_record] = m; /* stash this for later */
191
192 if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
193 node_set_online(m->trans_quad);
194}
195
196static int __init mpf_checksum(unsigned char *mp, int len)
197{
198 int sum = 0;
199
200 while (len--)
201 sum += *mp++;
202
203 return sum & 0xFF;
204}
205
206/*
207 * Read/parse the MPC oem tables
208 */
209static void __init
210 smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize)
211{
212 int count = sizeof(*oemtable); /* the header size */
213 unsigned char *oemptr = ((unsigned char *)oemtable) + count;
214
215 mpc_record = 0;
216 printk(KERN_INFO
217 "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
218
219 if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
220 printk(KERN_WARNING
221 "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
222 oemtable->signature[0], oemtable->signature[1],
223 oemtable->signature[2], oemtable->signature[3]);
224 return;
225 }
226
227 if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
228 printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
229 return;
230 }
231
232 while (count < oemtable->length) {
233 switch (*oemptr) {
234 case MP_TRANSLATION:
235 {
236 struct mpc_trans *m = (void *)oemptr;
237
238 MP_translation_info(m);
239 oemptr += sizeof(*m);
240 count += sizeof(*m);
241 ++mpc_record;
242 break;
243 }
244 default:
245 printk(KERN_WARNING
246 "Unrecognised OEM table entry type! - %d\n",
247 (int)*oemptr);
248 return;
249 }
250 }
251}
252
253static int __init numaq_setup_ioapic_ids(void)
254{
255 /* so can skip it */
256 return 1;
257}
258
259static int __init numaq_update_apic(void)
260{
261 apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
262
263 return 0;
264}
265
266static struct x86_quirks numaq_x86_quirks __initdata = {
267 .arch_pre_time_init = numaq_pre_time_init,
268 .arch_time_init = NULL,
269 .arch_pre_intr_init = NULL,
270 .arch_memory_setup = NULL,
271 .arch_intr_init = NULL,
272 .arch_trap_init = NULL,
273 .mach_get_smp_config = NULL,
274 .mach_find_smp_config = NULL,
275 .mpc_record = &mpc_record,
276 .mpc_apic_id = mpc_apic_id,
277 .mpc_oem_bus_info = mpc_oem_bus_info,
278 .mpc_oem_pci_bus = mpc_oem_pci_bus,
279 .smp_read_mpc_oem = smp_read_mpc_oem,
280 .setup_ioapic_ids = numaq_setup_ioapic_ids,
281 .update_apic = numaq_update_apic,
282};
283
284static __init void early_check_numaq(void)
285{
286 /*
287 * Find possible boot-time SMP configuration:
288 */
289 early_find_smp_config();
290
291 /*
292 * get boot-time SMP configuration:
293 */
294 if (smp_found_config)
295 early_get_smp_config();
296
297 if (found_numaq)
298 x86_quirks = &numaq_x86_quirks;
299}
300
301int __init get_memcfg_numaq(void)
302{
303 early_check_numaq();
304 if (!found_numaq)
305 return 0;
306 smp_dump_qct();
307
308 return 1;
309}
310
311#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
312
313static inline unsigned int numaq_get_apic_id(unsigned long x)
314{
315 return (x >> 24) & 0x0F;
316}
317
318static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
319{
320 default_send_IPI_mask_sequence_logical(mask, vector);
321}
322
323static inline void numaq_send_IPI_allbutself(int vector)
324{
325 default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
326}
327
328static inline void numaq_send_IPI_all(int vector)
329{
330 numaq_send_IPI_mask(cpu_online_mask, vector);
331}
332
333#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
334#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
335
336/*
337 * Because we use NMIs rather than the INIT-STARTUP sequence to
338 * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
339 */
340static inline void numaq_smp_callin_clear_local_apic(void)
341{
342 clear_local_APIC();
343}
344
345static inline const cpumask_t *numaq_target_cpus(void)
346{
347 return &CPU_MASK_ALL;
348}
349
350static inline unsigned long
351numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
352{
353 return physid_isset(apicid, bitmap);
354}
355
356static inline unsigned long numaq_check_apicid_present(int bit)
357{
358 return physid_isset(bit, phys_cpu_present_map);
359}
360
361static inline int numaq_apic_id_registered(void)
362{
363 return 1;
364}
365
366static inline void numaq_init_apic_ldr(void)
367{
368 /* Already done in NUMA-Q firmware */
369}
370
371static inline void numaq_setup_apic_routing(void)
372{
373 printk(KERN_INFO
374 "Enabling APIC mode: NUMA-Q. Using %d I/O APICs\n",
375 nr_ioapics);
376}
377
378/*
379 * Skip adding the timer int on secondary nodes, which causes
380 * a small but painful rift in the time-space continuum.
381 */
382static inline int numaq_multi_timer_check(int apic, int irq)
383{
384 return apic != 0 && irq == 0;
385}
386
387static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
388{
389 /* We don't have a good way to do this yet - hack */
390 return physids_promote(0xFUL);
391}
392
393static inline int numaq_cpu_to_logical_apicid(int cpu)
394{
395 if (cpu >= nr_cpu_ids)
396 return BAD_APICID;
397 return cpu_2_logical_apicid[cpu];
398}
399
400/*
401 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
402 * cpu to APIC ID relation to properly interact with the intelligent
403 * mode of the cluster controller.
404 */
405static inline int numaq_cpu_present_to_apicid(int mps_cpu)
406{
407 if (mps_cpu < 60)
408 return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
409 else
410 return BAD_APICID;
411}
412
413static inline int numaq_apicid_to_node(int logical_apicid)
414{
415 return logical_apicid >> 4;
416}
417
418static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
419{
420 int node = numaq_apicid_to_node(logical_apicid);
421 int cpu = __ffs(logical_apicid & 0xf);
422
423 return physid_mask_of_physid(cpu + 4*node);
424}
425
426/* Where the IO area was mapped on multiquad, always 0 otherwise */
427void *xquad_portio;
428
429static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
430{
431 return 1;
432}
433
434/*
435 * We use physical apicids here, not logical, so just return the default
436 * physical broadcast to stop people from breaking us
437 */
438static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
439{
440 return 0x0F;
441}
442
443static inline unsigned int
444numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
445 const struct cpumask *andmask)
446{
447 return 0x0F;
448}
449
450/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
451static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
452{
453 return cpuid_apic >> index_msb;
454}
455
456static int
457numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
458{
459 if (strncmp(oem, "IBM NUMA", 8))
460 printk(KERN_ERR "Warning! Not a NUMA-Q system!\n");
461 else
462 found_numaq = 1;
463
464 return found_numaq;
465}
466
467static int probe_numaq(void)
468{
469 /* already know from get_memcfg_numaq() */
470 return found_numaq;
471}
472
473static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
474{
475 /* Careful. Some cpus do not strictly honor the set of cpus
476 * specified in the interrupt destination when using lowest
477 * priority interrupt delivery mode.
478 *
479 * In particular there was a hyperthreading cpu observed to
480 * deliver interrupts to the wrong hyperthread when only one
481 * hyperthread was specified in the interrupt destination.
482 */
483 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
484}
485
486static void numaq_setup_portio_remap(void)
487{
488 int num_quads = num_online_nodes();
489
490 if (num_quads <= 1)
491 return;
492
493 printk(KERN_INFO
494 "Remapping cross-quad port I/O for %d quads\n", num_quads);
495
496 xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
497
498 printk(KERN_INFO
499 "xquad_portio vaddr 0x%08lx, len %08lx\n",
500 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
501}
502
503struct apic apic_numaq = {
504
505 .name = "NUMAQ",
506 .probe = probe_numaq,
507 .acpi_madt_oem_check = NULL,
508 .apic_id_registered = numaq_apic_id_registered,
509
510 .irq_delivery_mode = dest_LowestPrio,
511 /* physical delivery on LOCAL quad: */
512 .irq_dest_mode = 0,
513
514 .target_cpus = numaq_target_cpus,
515 .disable_esr = 1,
516 .dest_logical = APIC_DEST_LOGICAL,
517 .check_apicid_used = numaq_check_apicid_used,
518 .check_apicid_present = numaq_check_apicid_present,
519
520 .vector_allocation_domain = numaq_vector_allocation_domain,
521 .init_apic_ldr = numaq_init_apic_ldr,
522
523 .ioapic_phys_id_map = numaq_ioapic_phys_id_map,
524 .setup_apic_routing = numaq_setup_apic_routing,
525 .multi_timer_check = numaq_multi_timer_check,
526 .apicid_to_node = numaq_apicid_to_node,
527 .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
528 .cpu_present_to_apicid = numaq_cpu_present_to_apicid,
529 .apicid_to_cpu_present = numaq_apicid_to_cpu_present,
530 .setup_portio_remap = numaq_setup_portio_remap,
531 .check_phys_apicid_present = numaq_check_phys_apicid_present,
532 .enable_apic_mode = NULL,
533 .phys_pkg_id = numaq_phys_pkg_id,
534 .mps_oem_check = numaq_mps_oem_check,
535
536 .get_apic_id = numaq_get_apic_id,
537 .set_apic_id = NULL,
538 .apic_id_mask = 0x0F << 24,
539
540 .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
541 .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
542
543 .send_IPI_mask = numaq_send_IPI_mask,
544 .send_IPI_mask_allbutself = NULL,
545 .send_IPI_allbutself = numaq_send_IPI_allbutself,
546 .send_IPI_all = numaq_send_IPI_all,
547 .send_IPI_self = default_send_IPI_self,
548
549 .wakeup_cpu = NULL,
550 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
551 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
552
553 /* We don't do anything here because we use NMI's to boot instead */
554 .wait_for_init_deassert = NULL,
555
556 .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic,
557 .inquire_remote_apic = NULL,
558
559 .read = native_apic_mem_read,
560 .write = native_apic_mem_write,
561 .icr_read = native_apic_icr_read,
562 .icr_write = native_apic_icr_write,
563 .wait_icr_idle = native_apic_wait_icr_idle,
564 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
565};
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
new file mode 100644
index 000000000000..5fa48332c5c8
--- /dev/null
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -0,0 +1,424 @@
1/*
2 * Default generic APIC driver. This handles up to 8 CPUs.
3 *
4 * Copyright 2003 Andi Kleen, SuSE Labs.
5 * Subject to the GNU Public License, v.2
6 *
7 * Generic x86 APIC driver probe layer.
8 */
9#include <linux/threads.h>
10#include <linux/cpumask.h>
11#include <linux/module.h>
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/ctype.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <asm/fixmap.h>
18#include <asm/mpspec.h>
19#include <asm/apicdef.h>
20#include <asm/apic.h>
21#include <asm/setup.h>
22
23#include <linux/threads.h>
24#include <linux/cpumask.h>
25#include <asm/mpspec.h>
26#include <asm/fixmap.h>
27#include <asm/apicdef.h>
28#include <linux/kernel.h>
29#include <linux/string.h>
30#include <linux/smp.h>
31#include <linux/init.h>
32#include <asm/ipi.h>
33
34#include <linux/smp.h>
35#include <linux/init.h>
36#include <linux/interrupt.h>
37#include <asm/acpi.h>
38#include <asm/arch_hooks.h>
39#include <asm/e820.h>
40#include <asm/setup.h>
41
42#ifdef CONFIG_HOTPLUG_CPU
43#define DEFAULT_SEND_IPI (1)
44#else
45#define DEFAULT_SEND_IPI (0)
46#endif
47
48int no_broadcast = DEFAULT_SEND_IPI;
49
50#ifdef CONFIG_X86_LOCAL_APIC
51
52void default_setup_apic_routing(void)
53{
54#ifdef CONFIG_X86_IO_APIC
55 printk(KERN_INFO
56 "Enabling APIC mode: Flat. Using %d I/O APICs\n",
57 nr_ioapics);
58#endif
59}
60
61static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
62{
63 /*
64 * Careful. Some cpus do not strictly honor the set of cpus
65 * specified in the interrupt destination when using lowest
66 * priority interrupt delivery mode.
67 *
68 * In particular there was a hyperthreading cpu observed to
69 * deliver interrupts to the wrong hyperthread when only one
70 * hyperthread was specified in the interrupt destination.
71 */
72 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
73}
74
75/* should be called last. */
76static int probe_default(void)
77{
78 return 1;
79}
80
81struct apic apic_default = {
82
83 .name = "default",
84 .probe = probe_default,
85 .acpi_madt_oem_check = NULL,
86 .apic_id_registered = default_apic_id_registered,
87
88 .irq_delivery_mode = dest_LowestPrio,
89 /* logical delivery broadcast to all CPUs: */
90 .irq_dest_mode = 1,
91
92 .target_cpus = default_target_cpus,
93 .disable_esr = 0,
94 .dest_logical = APIC_DEST_LOGICAL,
95 .check_apicid_used = default_check_apicid_used,
96 .check_apicid_present = default_check_apicid_present,
97
98 .vector_allocation_domain = default_vector_allocation_domain,
99 .init_apic_ldr = default_init_apic_ldr,
100
101 .ioapic_phys_id_map = default_ioapic_phys_id_map,
102 .setup_apic_routing = default_setup_apic_routing,
103 .multi_timer_check = NULL,
104 .apicid_to_node = default_apicid_to_node,
105 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
106 .cpu_present_to_apicid = default_cpu_present_to_apicid,
107 .apicid_to_cpu_present = default_apicid_to_cpu_present,
108 .setup_portio_remap = NULL,
109 .check_phys_apicid_present = default_check_phys_apicid_present,
110 .enable_apic_mode = NULL,
111 .phys_pkg_id = default_phys_pkg_id,
112 .mps_oem_check = NULL,
113
114 .get_apic_id = default_get_apic_id,
115 .set_apic_id = NULL,
116 .apic_id_mask = 0x0F << 24,
117
118 .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
119 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
120
121 .send_IPI_mask = default_send_IPI_mask_logical,
122 .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
123 .send_IPI_allbutself = default_send_IPI_allbutself,
124 .send_IPI_all = default_send_IPI_all,
125 .send_IPI_self = default_send_IPI_self,
126
127 .wakeup_cpu = NULL,
128 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
129 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
130
131 .wait_for_init_deassert = default_wait_for_init_deassert,
132
133 .smp_callin_clear_local_apic = NULL,
134 .inquire_remote_apic = default_inquire_remote_apic,
135
136 .read = native_apic_mem_read,
137 .write = native_apic_mem_write,
138 .icr_read = native_apic_icr_read,
139 .icr_write = native_apic_icr_write,
140 .wait_icr_idle = native_apic_wait_icr_idle,
141 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
142};
143
144extern struct apic apic_numaq;
145extern struct apic apic_summit;
146extern struct apic apic_bigsmp;
147extern struct apic apic_es7000;
148extern struct apic apic_default;
149
150struct apic *apic = &apic_default;
151EXPORT_SYMBOL_GPL(apic);
152
153static struct apic *apic_probe[] __initdata = {
154#ifdef CONFIG_X86_NUMAQ
155 &apic_numaq,
156#endif
157#ifdef CONFIG_X86_SUMMIT
158 &apic_summit,
159#endif
160#ifdef CONFIG_X86_BIGSMP
161 &apic_bigsmp,
162#endif
163#ifdef CONFIG_X86_ES7000
164 &apic_es7000,
165#endif
166 &apic_default, /* must be last */
167 NULL,
168};
169
170static int cmdline_apic __initdata;
171static int __init parse_apic(char *arg)
172{
173 int i;
174
175 if (!arg)
176 return -EINVAL;
177
178 for (i = 0; apic_probe[i]; i++) {
179 if (!strcmp(apic_probe[i]->name, arg)) {
180 apic = apic_probe[i];
181 cmdline_apic = 1;
182 return 0;
183 }
184 }
185
186 if (x86_quirks->update_apic)
187 x86_quirks->update_apic();
188
189 /* Parsed again by __setup for debug/verbose */
190 return 0;
191}
192early_param("apic", parse_apic);
193
194void __init generic_bigsmp_probe(void)
195{
196#ifdef CONFIG_X86_BIGSMP
197 /*
198 * This routine is used to switch to bigsmp mode when
199 * - There is no apic= option specified by the user
200 * - generic_apic_probe() has chosen apic_default as the sub_arch
201 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
202 */
203
204 if (!cmdline_apic && apic == &apic_default) {
205 if (apic_bigsmp.probe()) {
206 apic = &apic_bigsmp;
207 if (x86_quirks->update_apic)
208 x86_quirks->update_apic();
209 printk(KERN_INFO "Overriding APIC driver with %s\n",
210 apic->name);
211 }
212 }
213#endif
214}
215
216void __init generic_apic_probe(void)
217{
218 if (!cmdline_apic) {
219 int i;
220 for (i = 0; apic_probe[i]; i++) {
221 if (apic_probe[i]->probe()) {
222 apic = apic_probe[i];
223 break;
224 }
225 }
226 /* Not visible without early console */
227 if (!apic_probe[i])
228 panic("Didn't find an APIC driver");
229
230 if (x86_quirks->update_apic)
231 x86_quirks->update_apic();
232 }
233 printk(KERN_INFO "Using APIC driver %s\n", apic->name);
234}
235
236/* These functions can switch the APIC even after the initial ->probe() */
237
238int __init
239generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
240{
241 int i;
242
243 for (i = 0; apic_probe[i]; ++i) {
244 if (!apic_probe[i]->mps_oem_check)
245 continue;
246 if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
247 continue;
248
249 if (!cmdline_apic) {
250 apic = apic_probe[i];
251 if (x86_quirks->update_apic)
252 x86_quirks->update_apic();
253 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
254 apic->name);
255 }
256 return 1;
257 }
258 return 0;
259}
260
261int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
262{
263 int i;
264
265 for (i = 0; apic_probe[i]; ++i) {
266 if (!apic_probe[i]->acpi_madt_oem_check)
267 continue;
268 if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
269 continue;
270
271 if (!cmdline_apic) {
272 apic = apic_probe[i];
273 if (x86_quirks->update_apic)
274 x86_quirks->update_apic();
275 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
276 apic->name);
277 }
278 return 1;
279 }
280 return 0;
281}
282
283#endif /* CONFIG_X86_LOCAL_APIC */
284
285/**
286 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
287 *
288 * Description:
289 * Perform any necessary interrupt initialisation prior to setting up
290 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
291 * interrupts should be initialised here if the machine emulates a PC
292 * in any way.
293 **/
294void __init pre_intr_init_hook(void)
295{
296 if (x86_quirks->arch_pre_intr_init) {
297 if (x86_quirks->arch_pre_intr_init())
298 return;
299 }
300 init_ISA_irqs();
301}
302
303/**
304 * intr_init_hook - post gate setup interrupt initialisation
305 *
306 * Description:
307 * Fill in any interrupts that may have been left out by the general
308 * init_IRQ() routine. interrupts having to do with the machine rather
309 * than the devices on the I/O bus (like APIC interrupts in intel MP
310 * systems) are started here.
311 **/
312void __init intr_init_hook(void)
313{
314 if (x86_quirks->arch_intr_init) {
315 if (x86_quirks->arch_intr_init())
316 return;
317 }
318}
319
320/**
321 * pre_setup_arch_hook - hook called prior to any setup_arch() execution
322 *
323 * Description:
324 * generally used to activate any machine specific identification
325 * routines that may be needed before setup_arch() runs. On Voyager
326 * this is used to get the board revision and type.
327 **/
328void __init pre_setup_arch_hook(void)
329{
330}
331
332/**
333 * trap_init_hook - initialise system specific traps
334 *
335 * Description:
336 * Called as the final act of trap_init(). Used in VISWS to initialise
337 * the various board specific APIC traps.
338 **/
339void __init trap_init_hook(void)
340{
341 if (x86_quirks->arch_trap_init) {
342 if (x86_quirks->arch_trap_init())
343 return;
344 }
345}
346
347static struct irqaction irq0 = {
348 .handler = timer_interrupt,
349 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
350 .mask = CPU_MASK_NONE,
351 .name = "timer"
352};
353
354/**
355 * pre_time_init_hook - do any specific initialisations before.
356 *
357 **/
358void __init pre_time_init_hook(void)
359{
360 if (x86_quirks->arch_pre_time_init)
361 x86_quirks->arch_pre_time_init();
362}
363
364/**
365 * time_init_hook - do any specific initialisations for the system timer.
366 *
367 * Description:
368 * Must plug the system timer interrupt source at HZ into the IRQ listed
369 * in irq_vectors.h:TIMER_IRQ
370 **/
371void __init time_init_hook(void)
372{
373 if (x86_quirks->arch_time_init) {
374 /*
375 * A nonzero return code does not mean failure, it means
376 * that the architecture quirk does not want any
377 * generic (timer) setup to be performed after this:
378 */
379 if (x86_quirks->arch_time_init())
380 return;
381 }
382
383 irq0.mask = cpumask_of_cpu(0);
384 setup_irq(0, &irq0);
385}
386
387#ifdef CONFIG_MCA
388/**
389 * mca_nmi_hook - hook into MCA specific NMI chain
390 *
391 * Description:
392 * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
393 * along the MCA bus. Use this to hook into that chain if you will need
394 * it.
395 **/
396void mca_nmi_hook(void)
397{
398 /*
399 * If I recall correctly, there's a whole bunch of other things that
400 * we can do to check for NMI problems, but that's all I know about
401 * at the moment.
402 */
403 pr_warning("NMI generated from unknown source!\n");
404}
405#endif
406
407static __init int no_ipi_broadcast(char *str)
408{
409 get_option(&str, &no_broadcast);
410 pr_info("Using %s mode\n",
411 no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
412 return 1;
413}
414__setup("no_ipi_broadcast=", no_ipi_broadcast);
415
416static int __init print_ipi_mode(void)
417{
418 pr_info("Using IPI %s mode\n",
419 no_broadcast ? "No-Shortcut" : "Shortcut");
420 return 0;
421}
422
423late_initcall(print_ipi_mode);
424
diff --git a/arch/x86/kernel/apic/apic_64.c b/arch/x86/kernel/apic/probe_64.c
index 70935dd904db..70935dd904db 100644
--- a/arch/x86/kernel/apic/apic_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
new file mode 100644
index 000000000000..cfe7b09015d8
--- /dev/null
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -0,0 +1,601 @@
1/*
2 * IBM Summit-Specific Code
3 *
4 * Written By: Matthew Dobson, IBM Corporation
5 *
6 * Copyright (c) 2003 IBM Corp.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Send feedback to <colpatch@us.ibm.com>
26 *
27 */
28
29#include <linux/mm.h>
30#include <linux/init.h>
31#include <asm/io.h>
32#include <asm/bios_ebda.h>
33
34/*
35 * APIC driver for the IBM "Summit" chipset.
36 */
37#include <linux/threads.h>
38#include <linux/cpumask.h>
39#include <asm/mpspec.h>
40#include <asm/apic.h>
41#include <asm/smp.h>
42#include <asm/fixmap.h>
43#include <asm/apicdef.h>
44#include <asm/ipi.h>
45#include <linux/kernel.h>
46#include <linux/string.h>
47#include <linux/init.h>
48#include <linux/gfp.h>
49#include <linux/smp.h>
50
/* Extract the 8-bit physical APIC ID (bits 31:24 of the ID register). */
static inline unsigned summit_get_apic_id(unsigned long x)
{
	return (x & 0xFF000000UL) >> 24;
}
55
/* Deliver @vector to each CPU in @mask with one logical-mode IPI apiece. */
static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
{
	default_send_IPI_mask_sequence_logical(mask, vector);
}
60
/* Deliver @vector to every online CPU except the sender. */
static inline void summit_send_IPI_allbutself(int vector)
{
	/* Work on a local copy so cpu_online_map itself is not modified: */
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);

	if (!cpus_empty(mask))
		summit_send_IPI_mask(&mask, vector);
}
69
/* Deliver @vector to every online CPU, including the sender. */
static inline void summit_send_IPI_all(int vector)
{
	summit_send_IPI_mask(&cpu_online_map, vector);
}
74
75#include <asm/tsc.h>
76
77extern int use_cyclone;
78
#ifdef CONFIG_X86_SUMMIT_NUMA
extern void setup_summit(void);
#else
/*
 * No-op stand-in.  Use do { } while (0) rather than a bare "{}" so the
 * macro is a single statement that expands safely in every context,
 * e.g. as the body of an un-braced if/else.
 */
#define setup_summit() do { } while (0)
#endif
84
85static inline int
86summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
87{
88 if (!strncmp(oem, "IBM ENSW", 8) &&
89 (!strncmp(productid, "VIGIL SMP", 9)
90 || !strncmp(productid, "EXA", 3)
91 || !strncmp(productid, "RUTHLESS SMP", 12))){
92 mark_tsc_unstable("Summit based system");
93 use_cyclone = 1; /*enable cyclone-timer*/
94 setup_summit();
95 return 1;
96 }
97 return 0;
98}
99
100/* Hook from generic ACPI tables.c */
101static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
102{
103 if (!strncmp(oem_id, "IBM", 3) &&
104 (!strncmp(oem_table_id, "SERVIGIL", 8)
105 || !strncmp(oem_table_id, "EXA", 3))){
106 mark_tsc_unstable("Summit based system");
107 use_cyclone = 1; /*enable cyclone-timer*/
108 setup_summit();
109 return 1;
110 }
111 return 0;
112}
113
/* Header of the BIOS "Rio Grande" topology table located in the EBDA. */
struct rio_table_hdr {
	unsigned char version;      /* Version number of this data structure */
	                            /* Version 3 adds chassis_num & WP_index */
	unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
	unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs) */
} __attribute__((packed));
120
/* Per-Scalability-device (Twister) record in the Rio Grande table. */
struct scal_detail {
	unsigned char node_id;      /* Scalability Node ID */
	unsigned long CBAR;         /* Address of 1MB register space */
	unsigned char port0node;    /* Node ID port connected to: 0xFF=None */
	unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char port1node;    /* Node ID port connected to: 0xFF = None */
	unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char port2node;    /* Node ID port connected to: 0xFF = None */
	unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node) */
} __attribute__((packed));
132
/* Per-RIO-I/O-device (Cyclone/Winnipeg) record in the Rio Grande table. */
struct rio_detail {
	unsigned char node_id;      /* RIO Node ID */
	unsigned long BBAR;         /* Address of 1MB register space */
	unsigned char type;         /* Type of device */
	unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
	                            /* For CYC: Node ID of Twister that owns this CYC */
	unsigned char port0node;    /* Node ID port connected to: 0xFF=None */
	unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char port1node;    /* Node ID port connected to: 0xFF=None */
	unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
	unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG */
	                            /* For CYC:  0 */
	unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used */
	                            /*          = 0 : the XAPIC is not used, ie:*/
	                            /*            ints fwded to another XAPIC */
	                            /*          Bits1:7 Reserved */
	                            /* For CYC:  Bits0:7 Reserved */
	unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
	                            /*           lower slot numbers/PCI bus numbers */
	                            /* For CYC:  No meaning */
	unsigned char chassis_num;  /* 1 based Chassis number */
	                            /* For LookOut WPEGs this field indicates the */
	                            /* Expansion Chassis #, enumerated from Boot */
	                            /* Node WPEG external port, then Boot Node CYC */
	                            /* external port, then Next Vigil chassis WPEG */
	                            /* external port, etc. */
	                            /* Shared Lookouts have only 1 chassis number (the */
	                            /* first one assigned) */
} __attribute__((packed));
162
163
/* Device types appearing in struct rio_detail::type: */
typedef enum {
	CompatTwister = 0,  /* Compatibility Twister */
	AltTwister    = 1,  /* Alternate Twister of internal 8-way */
	CompatCyclone = 2,  /* Compatibility Cyclone */
	AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
	CompatWPEG    = 4,  /* Compatibility WPEG */
	AltWPEG       = 5,  /* Second Planar WPEG */
	LookOutAWPEG  = 6,  /* LookOut WPEG */
	LookOutBWPEG  = 7,  /* LookOut WPEG */
} node_type;
174
175static inline int is_WPEG(struct rio_detail *rio){
176 return (rio->type == CompatWPEG || rio->type == AltWPEG ||
177 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
178}
179
180
/*
 * In clustered mode, the high nibble of APIC ID is a cluster number.
 * The low nibble is a 4-bit bitmap.
 */
#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

/* Summit runs the local APICs with cluster destination format: */
#define SUMMIT_APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
188
/* Default destination set for IRQs when no explicit affinity is given. */
static inline const cpumask_t *summit_target_cpus(void)
{
	/* CPU_MASK_ALL (0xff) has undefined behaviour with
	 * dest_LowestPrio mode logical clustered apic interrupt routing
	 * Just start on cpu 0.  IRQ balancing will spread load
	 */
	return &cpumask_of_cpu(0);
}
197
/* No APIC ID is considered "used" - IO-APIC IDs never conflict here. */
static inline unsigned long
summit_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
203
/* we don't use the phys_cpu_present_map to indicate apicid presence */
static inline unsigned long summit_check_apicid_present(int bit)
{
	/* Every APIC ID is reported present: */
	return 1;
}
209
/*
 * Program this CPU's logical APIC ID (LDR) for cluster mode: the high
 * nibble is the cluster (taken from the physical APIC ID), the low
 * nibble is a one-hot bit chosen by counting CPUs already assigned to
 * the same cluster.
 */
static inline void summit_init_apic_ldr(void)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_cluster = APIC_CLUSTER(my_id);
#ifdef CONFIG_SMP
	u8 lid;
	int i;

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
			++count;
	}
#endif
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);
	apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
236
/* Always report the boot APIC ID as registered - no extra bookkeeping. */
static inline int summit_apic_id_registered(void)
{
	return 1;
}
241
242static inline void summit_setup_apic_routing(void)
243{
244 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
245 nr_ioapics);
246}
247
/*
 * Map a logical APIC ID to its NUMA node.
 * NOTE(review): the lookup keys on the *current* CPU's physical APIC
 * ID, not on @logical_apicid - presumably callers only ever ask about
 * the running CPU; confirm against call sites.
 */
static inline int summit_apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
256
/* Mapping from cpu number to logical apicid */
static inline int summit_cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* Out-of-range CPU numbers map to BAD_APICID: */
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_2_logical_apicid[cpu];
#else
	/* UP: single CPU, read its logical ID straight from the APIC: */
	return logical_smp_processor_id();
#endif
}
268
269static inline int summit_cpu_present_to_apicid(int mps_cpu)
270{
271 if (mps_cpu < nr_cpu_ids)
272 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
273 else
274 return BAD_APICID;
275}
276
/* Build the mask of physical IDs an IO-APIC may use. */
static inline physid_mask_t
summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0x0F);
}
283
/* Physical-ID mask for a CPU: always claims ID 0 (clustered-mode hack). */
static inline physid_mask_t summit_apicid_to_cpu_present(int apicid)
{
	return physid_mask_of_physid(0);
}
288
/* Summit needs no port-I/O remapping - intentionally empty. */
static inline void summit_setup_portio_remap(void)
{
}
292
/* The boot CPU's physical APIC ID is always treated as present. */
static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
297
298static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
299{
300 int cpus_found = 0;
301 int num_bits_set;
302 int apicid;
303 int cpu;
304
305 num_bits_set = cpus_weight(*cpumask);
306 /* Return id to all */
307 if (num_bits_set >= nr_cpu_ids)
308 return 0xFF;
309 /*
310 * The cpus in the mask must all be on the apic cluster. If are not
311 * on the same apicid cluster return default value of target_cpus():
312 */
313 cpu = first_cpu(*cpumask);
314 apicid = summit_cpu_to_logical_apicid(cpu);
315
316 while (cpus_found < num_bits_set) {
317 if (cpu_isset(cpu, *cpumask)) {
318 int new_apicid = summit_cpu_to_logical_apicid(cpu);
319
320 if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
321 printk ("%s: Not a valid mask!\n", __func__);
322
323 return 0xFF;
324 }
325 apicid = apicid | new_apicid;
326 cpus_found++;
327 }
328 cpu++;
329 }
330 return apicid;
331}
332
333static inline unsigned int
334summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
335 const struct cpumask *andmask)
336{
337 int apicid = summit_cpu_to_logical_apicid(0);
338 cpumask_var_t cpumask;
339
340 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
341 return apicid;
342
343 cpumask_and(cpumask, inmask, andmask);
344 cpumask_and(cpumask, cpumask, cpu_online_mask);
345 apicid = summit_cpu_mask_to_apicid(cpumask);
346
347 free_cpumask_var(cpumask);
348
349 return apicid;
350}
351
/*
 * cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb)
{
	/* @cpuid_apic is deliberately ignored - see comment above. */
	return hard_smp_processor_id() >> index_msb;
}
363
/* Default-probe hook: Summit is never auto-selected here. */
static int probe_summit(void)
{
	/* probed later in mptable/ACPI hooks */
	return 0;
}
369
/* Vector-allocation domain: same vector on all CPUs (APIC_ALL_CPUS). */
static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
}
382
#ifdef CONFIG_X86_SUMMIT_NUMA
/* Parsed view of the BIOS "Rio Grande" table found in the EBDA: */
static struct rio_table_hdr *rio_table_hdr __initdata;
static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata;

#ifndef CONFIG_X86_NUMAQ
/* PCI bus number -> NUMA node map; NUMAQ provides its own copy. */
static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata;
#endif
391
/*
 * Assign the PCI buses hanging off Winnipeg @wpeg_num to the NUMA node
 * of the Twister that (transitively) owns it.  Bus numbering continues
 * from @last_bus; returns the first bus number after this Winnipeg's
 * range (or @last_bus unchanged on lookup failure).
 */
static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
{
	int twister = 0, node = 0;
	int i, bus, num_buses;

	/* Find the Cyclone that owns this Winnipeg; its owner is a Twister: */
	for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
		if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) {
			twister = rio_devs[i]->owner_id;
			break;
		}
	}
	if (i == rio_table_hdr->num_rio_dev) {
		printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
		return last_bus;
	}

	/* Resolve the Twister's scalability record to get the node: */
	for (i = 0; i < rio_table_hdr->num_scal_dev; i++) {
		if (scal_devs[i]->node_id == twister) {
			node = scal_devs[i]->node_id;
			break;
		}
	}
	if (i == rio_table_hdr->num_scal_dev) {
		printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
		return last_bus;
	}

	/* Each Winnipeg flavour fronts a fixed number of PCI buses: */
	switch (rio_devs[wpeg_num]->type) {
	case CompatWPEG:
		/*
		 * The Compatibility Winnipeg controls the 2 legacy buses,
		 * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
		 * a PCI-PCI bridge card is used in either slot: total 5 buses.
		 */
		num_buses = 5;
		break;
	case AltWPEG:
		/*
		 * The Alternate Winnipeg controls the 2 133MHz buses [1 slot
		 * each], their 2 "extra" buses, the 100MHz bus [2 slots] and
		 * the "extra" buses for each of those slots: total 7 buses.
		 */
		num_buses = 7;
		break;
	case LookOutAWPEG:
	case LookOutBWPEG:
		/*
		 * A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
		 * & the "extra" buses for each of those slots: total 9 buses.
		 */
		num_buses = 9;
		break;
	default:
		printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
		return last_bus;
	}

	for (bus = last_bus; bus < last_bus + num_buses; bus++)
		mp_bus_id_to_node[bus] = node;
	return bus;
}
453
454static int __init build_detail_arrays(void)
455{
456 unsigned long ptr;
457 int i, scal_detail_size, rio_detail_size;
458
459 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
460 printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
461 return 0;
462 }
463
464 switch (rio_table_hdr->version) {
465 default:
466 printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
467 return 0;
468 case 2:
469 scal_detail_size = 11;
470 rio_detail_size = 13;
471 break;
472 case 3:
473 scal_detail_size = 12;
474 rio_detail_size = 15;
475 break;
476 }
477
478 ptr = (unsigned long)rio_table_hdr + 3;
479 for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
480 scal_devs[i] = (struct scal_detail *)ptr;
481
482 for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
483 rio_devs[i] = (struct rio_detail *)ptr;
484
485 return 1;
486}
487
/*
 * Locate the Rio Grande table in the EBDA, parse it, and build the
 * PCI-bus -> NUMA-node map by walking the Winnipegs in WP_index order.
 */
void __init setup_summit(void)
{
	unsigned long		ptr;
	unsigned short		offset;
	int			i, next_wpeg, next_bus = 0;

	/* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
	ptr = get_bios_ebda();
	ptr = (unsigned long)phys_to_virt(ptr);

	/* Walk the EBDA block chain looking for the 'RG' (0x4752) block: */
	rio_table_hdr = NULL;
	offset = 0x180;
	while (offset) {
		/* The block id is stored in the 2nd word */
		if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) {
			/* set the pointer past the offset & block id */
			rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
			break;
		}
		/* The next offset is stored in the 1st word. 0 means no more */
		offset = *((unsigned short *)(ptr + offset));
	}
	if (!rio_table_hdr) {
		printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
		return;
	}

	if (!build_detail_arrays())
		return;

	/* The first Winnipeg we're looking for has an index of 0 */
	next_wpeg = 0;
	do {
		for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
			if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) {
				/* It's the Winnipeg we're looking for! */
				next_bus = setup_pci_node_map_for_wpeg(i, next_bus);
				next_wpeg++;
				break;
			}
		}
		/*
		 * If we go through all Rio devices and don't find one with
		 * the next index, it means we've found all the Winnipegs,
		 * and thus all the PCI buses.
		 */
		if (i == rio_table_hdr->num_rio_dev)
			next_wpeg = 0;
	} while (next_wpeg != 0);
}
538#endif
539
/*
 * The IBM Summit APIC driver's method table.  NULL entries mean the
 * generic code has nothing to call for that hook on this platform.
 */
struct apic apic_summit = {

	.name				= "summit",
	.probe				= probe_summit,
	.acpi_madt_oem_check		= summit_acpi_madt_oem_check,
	.apic_id_registered		= summit_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	/* logical delivery broadcast to all CPUs: */
	.irq_dest_mode			= 1,

	.target_cpus			= summit_target_cpus,
	.disable_esr			= 1,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= summit_check_apicid_used,
	.check_apicid_present		= summit_check_apicid_present,

	.vector_allocation_domain	= summit_vector_allocation_domain,
	.init_apic_ldr			= summit_init_apic_ldr,

	.ioapic_phys_id_map		= summit_ioapic_phys_id_map,
	.setup_apic_routing		= summit_setup_apic_routing,
	.multi_timer_check		= NULL,
	.apicid_to_node			= summit_apicid_to_node,
	.cpu_to_logical_apicid		= summit_cpu_to_logical_apicid,
	.cpu_present_to_apicid		= summit_cpu_present_to_apicid,
	.apicid_to_cpu_present		= summit_apicid_to_cpu_present,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= summit_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= summit_phys_pkg_id,
	.mps_oem_check			= summit_mps_oem_check,

	.get_apic_id			= summit_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0xFF << 24,

	.cpu_mask_to_apicid		= summit_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= summit_cpu_mask_to_apicid_and,

	.send_IPI_mask			= summit_send_IPI_mask,
	.send_IPI_mask_allbutself	= NULL,
	.send_IPI_allbutself		= summit_send_IPI_allbutself,
	.send_IPI_all			= summit_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.wakeup_cpu			= NULL,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert		= default_wait_for_init_deassert,

	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};