author    Jeff Garzik <jgarzik@pobox.com>  2005-09-14 08:19:08 -0400
committer Jeff Garzik <jgarzik@pobox.com>  2005-09-14 08:19:08 -0400
commit    905ec87e93bc9e01b15c60035cd6a50c636cbaef
tree      46fd7618d6511611ffc19eb0dd4d7bc6b90a41c2 /arch/ia64/kernel
parent    1d6ae775d7a948c9575658eb41184fd2e506c0df
parent    2f4ba45a75d6383b4a1201169a808ffea416ffa0

Merge /spare/repo/linux-2.6/
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi-ext.c       37
-rw-r--r--  arch/ia64/kernel/acpi.c          328
-rw-r--r--  arch/ia64/kernel/asm-offsets.c    40
-rw-r--r--  arch/ia64/kernel/entry.S          31
-rw-r--r--  arch/ia64/kernel/fsys.S            2
-rw-r--r--  arch/ia64/kernel/gate.S            2
-rw-r--r--  arch/ia64/kernel/head.S            2
-rw-r--r--  arch/ia64/kernel/iosapic.c        24
-rw-r--r--  arch/ia64/kernel/ivt.S             3
-rw-r--r--  arch/ia64/kernel/mca.c           830
-rw-r--r--  arch/ia64/kernel/mca_asm.S      1358
-rw-r--r--  arch/ia64/kernel/mca_drv.c        37
-rw-r--r--  arch/ia64/kernel/minstate.h       88
-rw-r--r--  arch/ia64/kernel/palinfo.c       115
-rw-r--r--  arch/ia64/kernel/perfmon.c         9
-rw-r--r--  arch/ia64/kernel/salinfo.c        62
-rw-r--r--  arch/ia64/kernel/setup.c           4
-rw-r--r--  arch/ia64/kernel/topology.c        2
-rw-r--r--  arch/ia64/kernel/unwind.c         22
19 files changed, 1629 insertions, 1367 deletions
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 2623df5e2633..13a5b3b49bf8 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -17,20 +17,20 @@
 #include <asm/acpi-ext.h>
 
 struct acpi_vendor_descriptor {
 	u8 guid_id;
 	efi_guid_t guid;
 };
 
 struct acpi_vendor_info {
 	struct acpi_vendor_descriptor *descriptor;
 	u8 *data;
 	u32 length;
 };
 
 acpi_status
 acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 {
-	struct acpi_vendor_info *info = (struct acpi_vendor_info *) context;
+	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
 	struct acpi_resource_vendor *vendor;
 	struct acpi_vendor_descriptor *descriptor;
 	u32 length;
@@ -38,8 +38,8 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	if (resource->id != ACPI_RSTYPE_VENDOR)
 		return AE_OK;
 
-	vendor = (struct acpi_resource_vendor *) &resource->data;
-	descriptor = (struct acpi_vendor_descriptor *) vendor->reserved;
+	vendor = (struct acpi_resource_vendor *)&resource->data;
+	descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
 	if (vendor->length <= sizeof(*info->descriptor) ||
 	    descriptor->guid_id != info->descriptor->guid_id ||
 	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
@@ -50,21 +50,24 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	if (!info->data)
 		return AE_NO_MEMORY;
 
-	memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length);
+	memcpy(info->data,
+	       vendor->reserved + sizeof(struct acpi_vendor_descriptor),
+	       length);
 	info->length = length;
 	return AE_CTRL_TERMINATE;
 }
 
 acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
-		u8 **data, u32 *length)
+acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
+			  u8 ** data, u32 * length)
 {
 	struct acpi_vendor_info info;
 
 	info.descriptor = id;
 	info.data = NULL;
 
-	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info);
+	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
+			    &info);
 	if (!info.data)
 		return AE_NOT_FOUND;
 
@@ -75,17 +78,19 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
 
 struct acpi_vendor_descriptor hp_ccsr_descriptor = {
 	.guid_id = 2,
-	.guid = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
+	.guid =
+	    EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
+		     0x37, 0x0e, 0xad)
 };
 
-acpi_status
-hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
 {
 	acpi_status status;
 	u8 *data;
 	u32 length;
 
-	status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+	status =
+	    acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
 
 	if (ACPI_FAILURE(status) || length != 16)
 		return AE_NOT_FOUND;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 9609f243e5d0..28a4529fdd60 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -74,12 +74,11 @@ unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
 #define MAX_SAPICS 256
-u16 ia64_acpiid_to_sapicid[MAX_SAPICS] =
-	{ [0 ... MAX_SAPICS - 1] = -1 };
+u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
+
 EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
 
-const char *
-acpi_get_sysname (void)
+const char *acpi_get_sysname(void)
 {
 #ifdef CONFIG_IA64_GENERIC
 	unsigned long rsdp_phys;
@@ -89,27 +88,29 @@ acpi_get_sysname (void)
 
 	rsdp_phys = acpi_find_rsdp();
 	if (!rsdp_phys) {
-		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 RSDP not found, default to \"dig\"\n");
 		return "dig";
 	}
 
-	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
+	rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
 	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
-		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
 		return "dig";
 	}
 
-	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
+	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
 	hdr = &xsdt->header;
 	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
-		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
 		return "dig";
 	}
 
 	if (!strcmp(hdr->oem_id, "HP")) {
 		return "hpzx1";
-	}
-	else if (!strcmp(hdr->oem_id, "SGI")) {
+	} else if (!strcmp(hdr->oem_id, "SGI")) {
 		return "sn2";
 	}
 
@@ -131,7 +132,7 @@ acpi_get_sysname (void)
 #endif
 }
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 
 #define ACPI_MAX_PLATFORM_INTERRUPTS	256
 
@@ -146,8 +147,7 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
  * Interrupt routing API for device drivers. Provides interrupt vector for
  * a generic platform event. Currently only CPEI is implemented.
  */
-int
-acpi_request_vector (u32 int_type)
+int acpi_request_vector(u32 int_type)
 {
 	int vector = -1;
 
@@ -155,12 +155,12 @@ acpi_request_vector (u32 int_type)
 		/* corrected platform error interrupt */
 		vector = platform_intr_list[int_type];
 	} else
-		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
+		printk(KERN_ERR
+		       "acpi_request_vector(): invalid interrupt type\n");
 	return vector;
 }
 
-char *
-__acpi_map_table (unsigned long phys_addr, unsigned long size)
+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
 {
 	return __va(phys_addr);
 }
@@ -169,19 +169,18 @@ __acpi_map_table (unsigned long phys_addr, unsigned long size)
 			    Boot-time Table Parsing
  -------------------------------------------------------------------------- */
 
 static int total_cpus __initdata;
 static int available_cpus __initdata;
-struct acpi_table_madt * acpi_madt __initdata;
+struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
 
-
 static int __init
-acpi_parse_lapic_addr_ovr (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
+			  const unsigned long end)
 {
 	struct acpi_table_lapic_addr_ovr *lapic;
 
-	lapic = (struct acpi_table_lapic_addr_ovr *) header;
+	lapic = (struct acpi_table_lapic_addr_ovr *)header;
 
 	if (BAD_MADT_ENTRY(lapic, end))
 		return -EINVAL;
@@ -193,22 +192,23 @@ acpi_parse_lapic_addr_ovr (
 	return 0;
 }
 
-
 static int __init
-acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_lsapic *lsapic;
 
-	lsapic = (struct acpi_table_lsapic *) header;
+	lsapic = (struct acpi_table_lsapic *)header;
 
 	if (BAD_MADT_ENTRY(lsapic, end))
 		return -EINVAL;
 
 	if (lsapic->flags.enabled) {
 #ifdef CONFIG_SMP
-		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    (lsapic->id << 8) | lsapic->eid;
 #endif
-		ia64_acpiid_to_sapicid[lsapic->acpi_id] = (lsapic->id << 8) | lsapic->eid;
+		ia64_acpiid_to_sapicid[lsapic->acpi_id] =
+		    (lsapic->id << 8) | lsapic->eid;
 		++available_cpus;
 	}
 
@@ -216,13 +216,12 @@ acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-
 static int __init
-acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_lapic_nmi *lacpi_nmi;
 
-	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
+	lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
 
 	if (BAD_MADT_ENTRY(lacpi_nmi, end))
 		return -EINVAL;
@@ -231,13 +230,12 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-
 static int __init
-acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_iosapic *iosapic;
 
-	iosapic = (struct acpi_table_iosapic *) header;
+	iosapic = (struct acpi_table_iosapic *)header;
 
 	if (BAD_MADT_ENTRY(iosapic, end))
 		return -EINVAL;
@@ -245,15 +243,14 @@ acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
 	return iosapic_init(iosapic->address, iosapic->global_irq_base);
 }
 
-
 static int __init
-acpi_parse_plat_int_src (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_plat_int_src(acpi_table_entry_header * header,
+			const unsigned long end)
 {
 	struct acpi_table_plat_int_src *plintsrc;
 	int vector;
 
-	plintsrc = (struct acpi_table_plat_int_src *) header;
+	plintsrc = (struct acpi_table_plat_int_src *)header;
 
 	if (BAD_MADT_ENTRY(plintsrc, end))
 		return -EINVAL;
@@ -267,8 +264,12 @@ acpi_parse_plat_int_src (
 					plintsrc->iosapic_vector,
 					plintsrc->eid,
 					plintsrc->id,
-					(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-					(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+					(plintsrc->flags.polarity ==
+					 1) ? IOSAPIC_POL_HIGH :
+					IOSAPIC_POL_LOW,
+					(plintsrc->flags.trigger ==
+					 1) ? IOSAPIC_EDGE :
+					IOSAPIC_LEVEL);
 
 	platform_intr_list[plintsrc->type] = vector;
 	if (acpi_madt_rev > 1) {
@@ -283,7 +284,6 @@ acpi_parse_plat_int_src (
 	return 0;
 }
 
-
 unsigned int can_cpei_retarget(void)
 {
 	extern int cpe_vector;
@@ -322,29 +322,30 @@ unsigned int get_cpei_target_cpu(void)
 }
 
 static int __init
-acpi_parse_int_src_ovr (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_int_src_ovr(acpi_table_entry_header * header,
+		       const unsigned long end)
 {
 	struct acpi_table_int_src_ovr *p;
 
-	p = (struct acpi_table_int_src_ovr *) header;
+	p = (struct acpi_table_int_src_ovr *)header;
 
 	if (BAD_MADT_ENTRY(p, end))
 		return -EINVAL;
 
 	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
-		(p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-		(p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+				 (p->flags.polarity ==
+				  1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+				 (p->flags.trigger ==
+				  1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 	return 0;
 }
 
-
 static int __init
-acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_nmi_src *nmi_src;
 
-	nmi_src = (struct acpi_table_nmi_src*) header;
+	nmi_src = (struct acpi_table_nmi_src *)header;
 
 	if (BAD_MADT_ENTRY(nmi_src, end))
 		return -EINVAL;
@@ -353,11 +354,9 @@ acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-static void __init
-acpi_madt_oem_check (char *oem_id, char *oem_table_id)
+static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
-	if (!strncmp(oem_id, "IBM", 3) &&
-	    (!strncmp(oem_table_id, "SERMOW", 6))) {
+	if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
 
 		/*
 		 * Unfortunately ITC_DRIFT is not yet part of the
@@ -370,19 +369,18 @@ acpi_madt_oem_check (char *oem_id, char *oem_table_id)
 	}
 }
 
-static int __init
-acpi_parse_madt (unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	if (!phys_addr || !size)
 		return -EINVAL;
 
-	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
+	acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
 
 	acpi_madt_rev = acpi_madt->header.revision;
 
 	/* remember the value for reference after free_initmem() */
 #ifdef CONFIG_ITANIUM
 	has_8259 = 1;	/* Firmware on old Itanium systems is broken */
 #else
 	has_8259 = acpi_madt->flags.pcat_compat;
 #endif
@@ -396,19 +394,18 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
 
 	acpi_madt_oem_check(acpi_madt->header.oem_id,
 			    acpi_madt->header.oem_table_id);
 
 	return 0;
 }
 
-
 #ifdef CONFIG_ACPI_NUMA
 
 #undef SLIT_DEBUG
 
 #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
 
 static int __initdata srat_num_cpus;	/* number of cpus */
 static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
@@ -421,15 +418,15 @@ static struct acpi_table_slit __initdata *slit_table;
  * ACPI 2.0 SLIT (System Locality Information Table)
  * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
  */
-void __init
-acpi_numa_slit_init (struct acpi_table_slit *slit)
+void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
 	u32 len;
 
 	len = sizeof(struct acpi_table_header) + 8
 	    + slit->localities * slit->localities;
 	if (slit->header.length != len) {
-		printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
+		printk(KERN_ERR
+		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 		       len, slit->header.length);
 		memset(numa_slit, 10, sizeof(numa_slit));
 		return;
@@ -438,19 +435,20 @@ acpi_numa_slit_init (struct acpi_table_slit *slit)
 }
 
 void __init
-acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 {
 	/* record this node in proximity bitmap */
 	pxm_bit_set(pa->proximity_domain);
 
-	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
+	node_cpuid[srat_num_cpus].phys_id =
+	    (pa->apic_id << 8) | (pa->lsapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
 	srat_num_cpus++;
 }
 
 void __init
-acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 {
 	unsigned long paddr, size;
 	u8 pxm;
@@ -487,8 +485,7 @@ acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
 	num_node_memblks++;
 }
 
-void __init
-acpi_numa_arch_fixup (void)
+void __init acpi_numa_arch_fixup(void)
 {
 	int i, j, node_from, node_to;
 
@@ -534,21 +531,24 @@ acpi_numa_arch_fixup (void)
 	for (i = 0; i < srat_num_cpus; i++)
 		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
 
-	printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes());
-	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);
+	printk(KERN_INFO "Number of logical nodes in system = %d\n",
+	       num_online_nodes());
+	printk(KERN_INFO "Number of memory chunks in system = %d\n",
+	       num_node_memblks);
 
-	if (!slit_table) return;
+	if (!slit_table)
+		return;
 	memset(numa_slit, -1, sizeof(numa_slit));
-	for (i=0; i<slit_table->localities; i++) {
+	for (i = 0; i < slit_table->localities; i++) {
 		if (!pxm_bit_test(i))
 			continue;
 		node_from = pxm_to_nid_map[i];
-		for (j=0; j<slit_table->localities; j++) {
+		for (j = 0; j < slit_table->localities; j++) {
 			if (!pxm_bit_test(j))
 				continue;
 			node_to = pxm_to_nid_map[j];
 			node_distance(node_from, node_to) =
-				slit_table->entry[i*slit_table->localities + j];
+			    slit_table->entry[i * slit_table->localities + j];
 		}
 	}
 
@@ -556,36 +556,41 @@ acpi_numa_arch_fixup (void)
 	printk("ACPI 2.0 SLIT locality table:\n");
 	for_each_online_node(i) {
 		for_each_online_node(j)
-			printk("%03d ", node_distance(i,j));
+		    printk("%03d ", node_distance(i, j));
 		printk("\n");
 	}
 #endif
 }
 #endif /* CONFIG_ACPI_NUMA */
 
-unsigned int
-acpi_register_gsi (u32 gsi, int edge_level, int active_high_low)
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
 {
 	if (has_8259 && gsi < 16)
 		return isa_irq_to_vector(gsi);
 
 	return iosapic_register_intr(gsi,
-			(active_high_low == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-			(edge_level == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+				     (active_high_low ==
+				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
+				     IOSAPIC_POL_LOW,
+				     (edge_level ==
+				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
+				     IOSAPIC_LEVEL);
 }
+
 EXPORT_SYMBOL(acpi_register_gsi);
 
-#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
-void
-acpi_unregister_gsi (u32 gsi)
+void acpi_unregister_gsi(u32 gsi)
 {
 	iosapic_unregister_intr(gsi);
 }
+
 EXPORT_SYMBOL(acpi_unregister_gsi);
-#endif /* CONFIG_ACPI_DEALLOCATE_IRQ */
 
-static int __init
-acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_header *fadt_header;
 	struct fadt_descriptor_rev2 *fadt;
@@ -593,11 +598,11 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 	if (!phys_addr || !size)
 		return -EINVAL;
 
-	fadt_header = (struct acpi_table_header *) __va(phys_addr);
+	fadt_header = (struct acpi_table_header *)__va(phys_addr);
 	if (fadt_header->revision != 3)
 		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
 
-	fadt = (struct fadt_descriptor_rev2 *) fadt_header;
+	fadt = (struct fadt_descriptor_rev2 *)fadt_header;
 
 	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
 		acpi_kbd_controller_present = 0;
@@ -609,22 +614,19 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 	return 0;
 }
 
-
-unsigned long __init
-acpi_find_rsdp (void)
+unsigned long __init acpi_find_rsdp(void)
 {
 	unsigned long rsdp_phys = 0;
 
 	if (efi.acpi20)
 		rsdp_phys = __pa(efi.acpi20);
 	else if (efi.acpi)
-		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
+		printk(KERN_WARNING PREFIX
+		       "v1.0/r0.71 tables no longer supported\n");
 	return rsdp_phys;
 }
 
-
-int __init
-acpi_boot_init (void)
+int __init acpi_boot_init(void)
 {
 
 	/*
@@ -642,31 +644,43 @@ acpi_boot_init (void)
 
 	/* Local APIC */
 
-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
-		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing LAPIC address override entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
-		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
+	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
+	    < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no LAPIC entries\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
+	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 
 	/* I/O APIC */
 
-	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
-		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no IOSAPIC entries\n");
 
 	/* System-Level Interrupt Routing */
 
-	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
-		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
+	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing platform interrupt source entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
-		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing interrupt source overrides entry\n");
 
 	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
   skip_madt:
 
 	/*
 	 * FADT says whether a legacy keyboard controller is present.
@@ -681,8 +695,9 @@ acpi_boot_init (void)
 	if (available_cpus == 0) {
 		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
 		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
-		available_cpus = 1; /* We've got at least one of these, no? */
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
 	}
 	smp_boot_data.cpu_count = available_cpus;
 
@@ -691,8 +706,10 @@ acpi_boot_init (void)
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
-			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
-				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
+			if (smp_boot_data.cpu_phys_id[cpu] !=
+			    hard_smp_processor_id())
+				node_cpuid[i++].phys_id =
+				    smp_boot_data.cpu_phys_id[cpu];
 	}
 # endif
 #endif
@@ -700,12 +717,12 @@ acpi_boot_init (void)
 	build_cpu_to_node_map();
 #endif
 	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
 	return 0;
 }
 
-int
-acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
 	int vector;
 
@@ -726,11 +743,10 @@ acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 static
-int
-acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
+int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int pxm_id;
 
 	pxm_id = acpi_get_pxm(handle);
 
@@ -738,31 +754,28 @@ acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 	 * Assuming that the container driver would have set the proximity
 	 * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
 	 */
-	node_cpuid[cpu].nid = (pxm_id < 0) ? 0:
-			pxm_to_nid_map[pxm_id];
+	node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
 
 	node_cpuid[cpu].phys_id = physid;
 #endif
-	return(0);
+	return (0);
 }
 
-
-int
-acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_table_lsapic *lsapic;
 	cpumask_t tmp_map;
 	long physid;
 	int cpu;
 
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
 
 	if (!buffer.length || !buffer.pointer)
 		return -EINVAL;
 
 	obj = buffer.pointer;
 	if (obj->type != ACPI_TYPE_BUFFER ||
 	    obj->buffer.length < sizeof(*lsapic)) {
@@ -778,7 +791,7 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu)
 		return -EINVAL;
 	}
 
-	physid = ((lsapic->id <<8) | (lsapic->eid));
+	physid = ((lsapic->id << 8) | (lsapic->eid));
 
 	acpi_os_free(buffer.pointer);
 	buffer.length = ACPI_ALLOCATE_BUFFER;
@@ -786,50 +799,49 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu)
 
 	cpus_complement(tmp_map, cpu_present_map);
 	cpu = first_cpu(tmp_map);
-	if(cpu >= NR_CPUS)
+	if (cpu >= NR_CPUS)
 		return -EINVAL;
 
 	acpi_map_cpu2node(handle, cpu, physid);
 
 	cpu_set(cpu, cpu_present_map);
 	ia64_cpu_to_sapicid[cpu] = physid;
 	ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
 
 	*pcpu = cpu;
-	return(0);
+	return (0);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
 
+EXPORT_SYMBOL(acpi_map_lsapic);
 
-int
-acpi_unmap_lsapic(int cpu)
+int acpi_unmap_lsapic(int cpu)
 {
 	int i;
 
-	for (i=0; i<MAX_SAPICS; i++) {
+	for (i = 0; i < MAX_SAPICS; i++) {
 		if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
 			ia64_acpiid_to_sapicid[i] = -1;
 			break;
 		}
 	}
 	ia64_cpu_to_sapicid[cpu] = -1;
-	cpu_clear(cpu,cpu_present_map);
+	cpu_clear(cpu, cpu_present_map);
 
 #ifdef CONFIG_ACPI_NUMA
 	/* NUMA specific cleanup's */
 #endif
 
-	return(0);
+	return (0);
 }
+
 EXPORT_SYMBOL(acpi_unmap_lsapic);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 
 #ifdef CONFIG_ACPI_NUMA
 acpi_status __devinit
-acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
+acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 {
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_table_iosapic *iosapic;
 	unsigned int gsi_base;
@@ -878,10 +890,9 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 	map_iosapic_to_node(gsi_base, node);
 	return AE_OK;
 }
 #endif /* CONFIG_NUMA */
 
-int
-acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base)
+int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
 	int err;
 
@@ -890,17 +901,18 @@ acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base)
 
 #if CONFIG_ACPI_NUMA
 	acpi_map_iosapic(handle, 0, NULL, NULL);
 #endif /* CONFIG_ACPI_NUMA */
 
 	return 0;
 }
+
 EXPORT_SYMBOL(acpi_register_ioapic);
 
-int
-acpi_unregister_ioapic (acpi_handle handle, u32 gsi_base)
+int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 {
 	return iosapic_remove(gsi_base);
 }
+
 EXPORT_SYMBOL(acpi_unregister_ioapic);
 
-#endif /* CONFIG_ACPI_BOOT */
+#endif /* CONFIG_ACPI */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 7d1ae2982c53..f6a234289341 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -211,17 +211,41 @@ void foo(void)
 #endif
 
 	BLANK();
-	DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
-	       offsetof (struct ia64_mca_cpu, proc_state_dump));
-	DEFINE(IA64_MCA_CPU_STACK_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stack));
-	DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stackframe));
-	DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
-	       offsetof (struct ia64_mca_cpu, rbstore));
+	DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
+	       offsetof (struct ia64_mca_cpu, mca_stack));
 	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
 	       offsetof (struct ia64_mca_cpu, init_stack));
 	BLANK();
+	DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
+	       offsetof (struct ia64_sal_os_state, sal_ra));
+	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
+	       offsetof (struct ia64_sal_os_state, os_gp));
+	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+	       offsetof (struct ia64_sal_os_state, pal_min_state));
+	DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
+	       offsetof (struct ia64_sal_os_state, proc_state_param));
+	DEFINE(IA64_SAL_OS_STATE_SIZE,
+	       sizeof (struct ia64_sal_os_state));
+	DEFINE(IA64_PMSA_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_gr));
+	DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
+	DEFINE(IA64_PMSA_PR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_pr));
+	DEFINE(IA64_PMSA_BR0_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_br0));
+	DEFINE(IA64_PMSA_RSC_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_rsc));
+	DEFINE(IA64_PMSA_IIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_iip));
+	DEFINE(IA64_PMSA_IPSR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ipsr));
+	DEFINE(IA64_PMSA_IFS_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ifs));
+	DEFINE(IA64_PMSA_XIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_xip));
+	BLANK();
+
 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
 	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
 	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 9be53e1ea404..ba0b6a1f429f 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -37,7 +37,7 @@
 #include <asm/cache.h>
 #include <asm/errno.h>
 #include <asm/kregs.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/pgtable.h>
 #include <asm/percpu.h>
 #include <asm/processor.h>
@@ -204,9 +204,6 @@ GLOBAL_ENTRY(ia64_switch_to)
 (p6)	br.cond.dpnt .map
 	;;
 .done:
-(p6)	ssm psr.ic			// if we had to map, reenable the psr.ic bit FIRST!!!
-	;;
-(p6)	srlz.d
 	ld8 sp=[r21]			// load kernel stack pointer of new task
 	mov IA64_KR(CURRENT)=in0	// update "current" application register
 	mov r8=r13			// return pointer to previously running task
@@ -234,6 +231,9 @@ GLOBAL_ENTRY(ia64_switch_to)
 	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
 	;;
 	itr.d dtr[r25]=r23		// wire in new mapping...
+	ssm psr.ic			// reenable the psr.ic bit
+	;;
+	srlz.d
 	br.cond.sptk .done
 END(ia64_switch_to)
 
@@ -470,6 +470,29 @@ ENTRY(load_switch_stack)
 	br.cond.sptk.many b7
 END(load_switch_stack)
 
+GLOBAL_ENTRY(prefetch_stack)
+	add r14 = -IA64_SWITCH_STACK_SIZE, sp
+	add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0
+	;;
+	ld8 r16 = [r15]			// load next's stack pointer
+	lfetch.fault.excl [r14], 128
+	;;
+	lfetch.fault.excl [r14], 128
+	lfetch.fault [r16], 128
+	;;
+	lfetch.fault.excl [r14], 128
+	lfetch.fault [r16], 128
+	;;
+	lfetch.fault.excl [r14], 128
+	lfetch.fault [r16], 128
+	;;
+	lfetch.fault.excl [r14], 128
+	lfetch.fault [r16], 128
+	;;
+	lfetch.fault [r16], 128
+	br.ret.sptk.many rp
+END(prefetch_switch_stack)
+
 GLOBAL_ENTRY(execve)
 	mov r15=__NR_execve			// put syscall number in place
 	break __BREAK_SYSCALL
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 7d7684a369d3..2ddbac6f4999 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -14,7 +14,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/errno.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/percpu.h>
 #include <asm/thread_info.h>
 #include <asm/sal.h>
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 86948ce63e43..86064ca98952 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -10,7 +10,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/errno.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/sigcontext.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 8d3a9291b47f..bfe65b2e8621 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -25,7 +25,7 @@
 #include <asm/fpu.h>
 #include <asm/kregs.h>
 #include <asm/mmu_context.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/pal.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7936b62f7a2e..574084f343fa 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -561,7 +561,7 @@ static inline int vector_is_shared (int vector)
 	return (iosapic_intr_info[vector].count > 1);
 }
 
-static void
+static int
 register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
 {
@@ -576,7 +576,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	index = find_iosapic(gsi);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi);
-		return;
+		return -ENODEV;
 	}
 
 	iosapic_address = iosapic_lists[index].addr;
@@ -587,7 +587,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		rte = iosapic_alloc_rte();
 		if (!rte) {
 			printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
-			return;
+			return -ENOMEM;
 		}
 
 		rte_index = gsi - gsi_base;
@@ -603,7 +603,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		struct iosapic_intr_info *info = &iosapic_intr_info[vector];
 		if (info->trigger != trigger || info->polarity != polarity) {
 			printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
-			return;
+			return -EINVAL;
 		}
 	}
 
@@ -623,6 +623,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 			       __FUNCTION__, vector, idesc->handler->typename, irq_type->typename);
 		idesc->handler = irq_type;
 	}
+	return 0;
 }
 
 static unsigned int
@@ -710,7 +711,7 @@ int
 iosapic_register_intr (unsigned int gsi,
 		       unsigned long polarity, unsigned long trigger)
 {
-	int vector, mask = 1;
+	int vector, mask = 1, err;
 	unsigned int dest;
 	unsigned long flags;
 	struct iosapic_rte_info *rte;
@@ -737,8 +738,8 @@ again:
 		vector = assign_irq_vector(AUTO_ASSIGN);
 		if (vector < 0) {
 			vector = iosapic_find_sharable_vector(trigger, polarity);
 			if (vector < 0)
-				panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+				return -ENOSPC;
 		}
 
 		spin_lock_irqsave(&irq_descp(vector)->lock, flags);
@@ -753,8 +754,13 @@ again:
 	}
 
 	dest = get_target_cpu(gsi, vector);
-	register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
-		      polarity, trigger);
+	err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
+			    polarity, trigger);
+	if (err < 0) {
+		spin_unlock(&iosapic_lock);
+		spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+		return err;
+	}
 
 	/*
 	 * If the vector is shared and already unmasked for
@@ -776,7 +782,6 @@ again:
 	return vector;
 }
 
-#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
 void
 iosapic_unregister_intr (unsigned int gsi)
 {
@@ -859,7 +864,6 @@ iosapic_unregister_intr (unsigned int gsi)
 	spin_unlock(&iosapic_lock);
 	spin_unlock_irqrestore(&idesc->lock, flags);
 }
-#endif /* CONFIG_ACPI_DEALLOCATE_IRQ */
 
 /*
  * ACPI calls this when it finds an entry for a platform interrupt.
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 3bb3a13c4047..c13ca0d49c4a 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -44,7 +44,7 @@
 #include <asm/break.h>
 #include <asm/ia32.h>
 #include <asm/kregs.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
@@ -69,7 +69,6 @@
 # define DBG_FAULT(i)
 #endif
 
-#define MINSTATE_VIRT	/* needed by minstate.h */
 #include "minstate.h"
 
 #define FAULT(n) \
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 4ebbf3974381..6dc726ad7137 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -48,6 +48,9 @@
48 * Delete dead variables and functions. 48 * Delete dead variables and functions.
49 * Reorder to remove the need for forward declarations and to consolidate 49 * Reorder to remove the need for forward declarations and to consolidate
50 * related code. 50 * related code.
51 *
52 * 2005-08-12 Keith Owens <kaos@sgi.com>
53 * Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
51 */ 54 */
52#include <linux/config.h> 55#include <linux/config.h>
53#include <linux/types.h> 56#include <linux/types.h>
@@ -77,6 +80,8 @@
77#include <asm/irq.h> 80#include <asm/irq.h>
78#include <asm/hw_irq.h> 81#include <asm/hw_irq.h>
79 82
83#include "entry.h"
84
80#if defined(IA64_MCA_DEBUG_INFO) 85#if defined(IA64_MCA_DEBUG_INFO)
81# define IA64_MCA_DEBUG(fmt...) printk(fmt) 86# define IA64_MCA_DEBUG(fmt...) printk(fmt)
82#else 87#else
@@ -84,9 +89,7 @@
84#endif 89#endif
85 90
86/* Used by mca_asm.S */ 91/* Used by mca_asm.S */
87ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state; 92u32 ia64_mca_serialize;
88ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state;
89u64 ia64_mca_serialize;
90DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ 93DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
91DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ 94DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
92DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ 95DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
@@ -95,8 +98,10 @@ DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
95unsigned long __per_cpu_mca[NR_CPUS]; 98unsigned long __per_cpu_mca[NR_CPUS];
96 99
97/* In mca_asm.S */ 100/* In mca_asm.S */
98extern void ia64_monarch_init_handler (void); 101extern void ia64_os_init_dispatch_monarch (void);
99extern void ia64_slave_init_handler (void); 102extern void ia64_os_init_dispatch_slave (void);
103
104static int monarch_cpu = -1;
100 105
101static ia64_mc_info_t ia64_mc_info; 106static ia64_mc_info_t ia64_mc_info;
102 107
@@ -234,7 +239,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
234 * This function retrieves a specified error record type from SAL 239 * This function retrieves a specified error record type from SAL
235 * and wakes up any processes waiting for error records. 240 * and wakes up any processes waiting for error records.
236 * 241 *
237 * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT) 242 * Inputs : sal_info_type (Type of error record MCA/CMC/CPE)
243 * FIXME: remove MCA and irq_safe.
238 */ 244 */
239static void 245static void
240ia64_mca_log_sal_error_record(int sal_info_type) 246ia64_mca_log_sal_error_record(int sal_info_type)
@@ -242,7 +248,7 @@ ia64_mca_log_sal_error_record(int sal_info_type)
242 u8 *buffer; 248 u8 *buffer;
243 sal_log_record_header_t *rh; 249 sal_log_record_header_t *rh;
244 u64 size; 250 u64 size;
245 int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT; 251 int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
246#ifdef IA64_MCA_DEBUG_INFO 252#ifdef IA64_MCA_DEBUG_INFO
247 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; 253 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
248#endif 254#endif
@@ -330,191 +336,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
330 336
331#endif /* CONFIG_ACPI */ 337#endif /* CONFIG_ACPI */
332 338
333static void
334show_min_state (pal_min_state_area_t *minstate)
335{
336 u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
337 u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
338
339 printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
340 printk("pr\t\t%016lx\n", minstate->pmsa_pr);
341 printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
342 printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
343 printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
344 printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
345 printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
346 printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
347 printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
348 printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
349 printk("b1\t\t%016lx ", minstate->pmsa_br1);
350 print_symbol("%s\n", minstate->pmsa_br1);
351
352 printk("\nstatic registers r0-r15:\n");
353 printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
354 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
355 printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
356 minstate->pmsa_gr[3], minstate->pmsa_gr[4],
357 minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
358 printk(" r8-11 %016lx %016lx %016lx %016lx\n",
359 minstate->pmsa_gr[7], minstate->pmsa_gr[8],
360 minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
361 printk("r12-15 %016lx %016lx %016lx %016lx\n",
362 minstate->pmsa_gr[11], minstate->pmsa_gr[12],
363 minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
364
365 printk("\nbank 0:\n");
366 printk("r16-19 %016lx %016lx %016lx %016lx\n",
367 minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
368 minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
369 printk("r20-23 %016lx %016lx %016lx %016lx\n",
370 minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
371 minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
372 printk("r24-27 %016lx %016lx %016lx %016lx\n",
373 minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
374 minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
375 printk("r28-31 %016lx %016lx %016lx %016lx\n",
376 minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
377 minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
378
379 printk("\nbank 1:\n");
380 printk("r16-19 %016lx %016lx %016lx %016lx\n",
381 minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
382 minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
383 printk("r20-23 %016lx %016lx %016lx %016lx\n",
384 minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
385 minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
386 printk("r24-27 %016lx %016lx %016lx %016lx\n",
387 minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
388 minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
389 printk("r28-31 %016lx %016lx %016lx %016lx\n",
390 minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
391 minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
392}
393
394static void
395fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
396{
397 u64 *dst_banked, *src_banked, bit, shift, nat_bits;
398 int i;
399
400 /*
401 * First, update the pt-regs and switch-stack structures with the contents stored
402 * in the min-state area:
403 */
404 if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
405 pt->cr_ipsr = ms->pmsa_xpsr;
406 pt->cr_iip = ms->pmsa_xip;
407 pt->cr_ifs = ms->pmsa_xfs;
408 } else {
409 pt->cr_ipsr = ms->pmsa_ipsr;
410 pt->cr_iip = ms->pmsa_iip;
411 pt->cr_ifs = ms->pmsa_ifs;
412 }
413 pt->ar_rsc = ms->pmsa_rsc;
414 pt->pr = ms->pmsa_pr;
415 pt->r1 = ms->pmsa_gr[0];
416 pt->r2 = ms->pmsa_gr[1];
417 pt->r3 = ms->pmsa_gr[2];
418 sw->r4 = ms->pmsa_gr[3];
419 sw->r5 = ms->pmsa_gr[4];
420 sw->r6 = ms->pmsa_gr[5];
421 sw->r7 = ms->pmsa_gr[6];
422 pt->r8 = ms->pmsa_gr[7];
423 pt->r9 = ms->pmsa_gr[8];
424 pt->r10 = ms->pmsa_gr[9];
425 pt->r11 = ms->pmsa_gr[10];
426 pt->r12 = ms->pmsa_gr[11];
427 pt->r13 = ms->pmsa_gr[12];
428 pt->r14 = ms->pmsa_gr[13];
429 pt->r15 = ms->pmsa_gr[14];
430 dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */
431 src_banked = ms->pmsa_bank1_gr;
432 for (i = 0; i < 16; ++i)
433 dst_banked[i] = src_banked[i];
434 pt->b0 = ms->pmsa_br0;
435 sw->b1 = ms->pmsa_br1;
436
437 /* construct the NaT bits for the pt-regs structure: */
438# define PUT_NAT_BIT(dst, addr) \
439 do { \
440 bit = nat_bits & 1; nat_bits >>= 1; \
441 shift = ((unsigned long) addr >> 3) & 0x3f; \
442 dst = ((dst) & ~(1UL << shift)) | (bit << shift); \
443 } while (0)
444
445 /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
446 shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
447 nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
448
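The expression above is a 64-bit rotate right by `shift`, spelled out as two shifts and an OR. A standalone sketch of the same operation (hypothetical helper, not part of this patch); the masked form also covers a count of 0, where the unmasked `x << 64` would be undefined in C:

	/* Rotate a 64-bit NaT collection right by n bits; masking keeps
	 * both shift amounts in 0..63 even when n == 0. */
	static unsigned long rotr64(unsigned long x, unsigned int n)
	{
		n &= 63;
		return (x >> n) | (x << ((64 - n) & 63));
	}
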
449 PUT_NAT_BIT(sw->caller_unat, &pt->r1);
450 PUT_NAT_BIT(sw->caller_unat, &pt->r2);
451 PUT_NAT_BIT(sw->caller_unat, &pt->r3);
452 PUT_NAT_BIT(sw->ar_unat, &sw->r4);
453 PUT_NAT_BIT(sw->ar_unat, &sw->r5);
454 PUT_NAT_BIT(sw->ar_unat, &sw->r6);
455 PUT_NAT_BIT(sw->ar_unat, &sw->r7);
456 PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9);
457 PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
458 PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
459 PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
460 nat_bits >>= 16; /* skip over bank0 NaT bits */
461 PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
462 PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
463 PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
464 PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
465 PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
466 PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
467 PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
468 PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
469}
470
471static void
472init_handler_platform (pal_min_state_area_t *ms,
473 struct pt_regs *pt, struct switch_stack *sw)
474{
475 struct unw_frame_info info;
476
477 /* if a kernel debugger is available call it here else just dump the registers */
478
479 /*
480 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be
481 * generated via the BMC's command-line interface, but since the console is on the
482 * same serial line, the user will need some time to switch out of the BMC before
483 * the dump begins.
484 */
485 printk("Delaying for 5 seconds...\n");
486 udelay(5*1000000);
487 show_min_state(ms);
488
489 printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
490 fetch_min_state(ms, pt, sw);
491 unw_init_from_interruption(&info, current, pt, sw);
492 ia64_do_show_stack(&info, NULL);
493
494#ifdef CONFIG_SMP
495 /* read_trylock() would be handy... */
496 if (!tasklist_lock.write_lock)
497 read_lock(&tasklist_lock);
498#endif
499 {
500 struct task_struct *g, *t;
501 do_each_thread (g, t) {
502 if (t == current)
503 continue;
504
505 printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
506 show_stack(t, NULL);
507 } while_each_thread (g, t);
508 }
509#ifdef CONFIG_SMP
510 if (!tasklist_lock.write_lock)
511 read_unlock(&tasklist_lock);
512#endif
513
514 printk("\nINIT dump complete. Please reboot now.\n");
515 while (1); /* hang city if no debugger */
516}
517
518#ifdef CONFIG_ACPI 339#ifdef CONFIG_ACPI
519/* 340/*
520 * ia64_mca_register_cpev 341 * ia64_mca_register_cpev
@@ -657,42 +478,6 @@ ia64_mca_cmc_vector_enable_keventd(void *unused)
657} 478}
658 479
659/* 480/*
660 * ia64_mca_wakeup_ipi_wait
661 *
662 * Wait for the inter-cpu interrupt to be sent by the
663 * monarch processor once it is done with handling the
664 * MCA.
665 *
666 * Inputs : None
667 * Outputs : None
668 */
669static void
670ia64_mca_wakeup_ipi_wait(void)
671{
672 int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
673 int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
674 u64 irr = 0;
675
676 do {
677 switch(irr_num) {
678 case 0:
679 irr = ia64_getreg(_IA64_REG_CR_IRR0);
680 break;
681 case 1:
682 irr = ia64_getreg(_IA64_REG_CR_IRR1);
683 break;
684 case 2:
685 irr = ia64_getreg(_IA64_REG_CR_IRR2);
686 break;
687 case 3:
688 irr = ia64_getreg(_IA64_REG_CR_IRR3);
689 break;
690 }
691 cpu_relax();
692 } while (!(irr & (1UL << irr_bit))) ;
693}
694
695/*
696 * ia64_mca_wakeup 481 * ia64_mca_wakeup
697 * 482 *
698 * Send an inter-cpu interrupt to wake-up a particular cpu 483 * Send an inter-cpu interrupt to wake-up a particular cpu
@@ -757,11 +542,9 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
757 */ 542 */
758 ia64_sal_mc_rendez(); 543 ia64_sal_mc_rendez();
759 544
760 /* Wait for the wakeup IPI from the monarch 545 /* Wait for the monarch cpu to exit. */
761 * This waiting is done by polling on the wakeup-interrupt 546 while (monarch_cpu != -1)
762 * vector bit in the processor's IRRs 547 cpu_relax(); /* spin until monarch leaves */
763 */
764 ia64_mca_wakeup_ipi_wait();
765 548
766 /* Enable all interrupts */ 549 /* Enable all interrupts */
767 local_irq_restore(flags); 550 local_irq_restore(flags);
@@ -789,53 +572,13 @@ ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
789 return IRQ_HANDLED; 572 return IRQ_HANDLED;
790} 573}
791 574
792/*
793 * ia64_return_to_sal_check
794 *
795 * This function is called before going back from the OS_MCA handler
796 * to the OS_MCA dispatch code which finally takes the control back
797 * to the SAL.
798 * The main purpose of this routine is to setup the OS_MCA to SAL
799 * return state which can be used by the OS_MCA dispatch code
800 * just before going back to SAL.
801 *
802 * Inputs : None
803 * Outputs : None
804 */
805
806static void
807ia64_return_to_sal_check(int recover)
808{
809
810 /* Copy over some relevant stuff from the sal_to_os_mca_handoff
811 * so that it can be used at the time of os_mca_to_sal_handoff
812 */
813 ia64_os_to_sal_handoff_state.imots_sal_gp =
814 ia64_sal_to_os_handoff_state.imsto_sal_gp;
815
816 ia64_os_to_sal_handoff_state.imots_sal_check_ra =
817 ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
818
819 if (recover)
820 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
821 else
822 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
823
824 /* Default = tell SAL to return to same context */
825 ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
826
827 ia64_os_to_sal_handoff_state.imots_new_min_state =
828 (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
829
830}
831
832/* Function pointer for extra MCA recovery */ 575/* Function pointer for extra MCA recovery */
833int (*ia64_mca_ucmc_extension) 576int (*ia64_mca_ucmc_extension)
834 (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*) 577 (void*,struct ia64_sal_os_state*)
835 = NULL; 578 = NULL;
836 579
837int 580int
838ia64_reg_MCA_extension(void *fn) 581ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
839{ 582{
840 if (ia64_mca_ucmc_extension) 583 if (ia64_mca_ucmc_extension)
841 return 1; 584 return 1;
@@ -854,8 +597,321 @@ ia64_unreg_MCA_extension(void)
854EXPORT_SYMBOL(ia64_reg_MCA_extension); 597EXPORT_SYMBOL(ia64_reg_MCA_extension);
855EXPORT_SYMBOL(ia64_unreg_MCA_extension); 598EXPORT_SYMBOL(ia64_unreg_MCA_extension);
856 599
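The signature change above replaces the old pair of handoff-state arguments with the consolidated struct ia64_sal_os_state, and ia64_reg_MCA_extension() now takes a typed function pointer. A hypothetical module-side sketch against the new interface (my_mca_recover and my_recovery_init are illustrative names only):

	/* Sketch: registering an MCA recovery extension. A non-zero
	 * return from the extension tells ia64_mca_handler() that the
	 * error was corrected. */
	static int my_mca_recover(void *record, struct ia64_sal_os_state *sos)
	{
		/* examine the SAL error record, attempt recovery ... */
		return 0;		/* 0 = not recovered */
	}

	static int __init my_recovery_init(void)
	{
		if (ia64_reg_MCA_extension(my_mca_recover))
			return -EBUSY;	/* an extension is already registered */
		return 0;
	}
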
600
601static inline void
602copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
603{
604 u64 fslot, tslot, nat;
605 *tr = *fr;
606 fslot = ((unsigned long)fr >> 3) & 63;
607 tslot = ((unsigned long)tr >> 3) & 63;
608 *tnat &= ~(1UL << tslot);
609 nat = (fnat >> fslot) & 1;
610 *tnat |= (nat << tslot);
611}
612
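copy_reg() depends on the UNAT addressing convention: the NaT bit for an 8-byte value spilled at address a lives at bit (a >> 3) & 63 of the matching NaT collection, so the source and destination slots fall out of the two addresses alone. A standalone illustration (hypothetical, for exposition):

	#include <stdio.h>

	/* Print the NaT slot selected by each spill address: bits 3..8
	 * of the address index one of the 64 bits in the collection. */
	int main(void)
	{
		unsigned long regs[4];
		int i;

		for (i = 0; i < 4; i++)
			printf("&regs[%d] -> NaT slot %lu\n", i,
			       ((unsigned long)&regs[i] >> 3) & 63);
		return 0;
	}
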
613/* On entry to this routine, we are running on the per cpu stack, see
614 * mca_asm.h. The original stack has not been touched by this event. Some of
615 * the original stack's registers will be in the RBS on this stack. This stack
616 * also contains a partial pt_regs and switch_stack; the rest of the data is in
617 * PAL minstate.
618 *
619 * The first thing to do is modify the original stack to look like a blocked
620 * task so we can run backtrace on the original task. Also mark the per cpu
621 * stack as current to ensure that we use the correct task state; it also means
622 * that we can do backtrace on the MCA/INIT handler code itself.
623 */
624
625static task_t *
626ia64_mca_modify_original_stack(struct pt_regs *regs,
627 const struct switch_stack *sw,
628 struct ia64_sal_os_state *sos,
629 const char *type)
630{
631 char *p, comm[sizeof(current->comm)];
632 ia64_va va;
633 extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */
634 const pal_min_state_area_t *ms = sos->pal_min_state;
635 task_t *previous_current;
636 struct pt_regs *old_regs;
637 struct switch_stack *old_sw;
638 unsigned size = sizeof(struct pt_regs) +
639 sizeof(struct switch_stack) + 16;
640 u64 *old_bspstore, *old_bsp;
641 u64 *new_bspstore, *new_bsp;
642 u64 old_unat, old_rnat, new_rnat, nat;
643 u64 slots, loadrs = regs->loadrs;
644 u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
645 u64 ar_bspstore = regs->ar_bspstore;
646 u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
647 const u64 *bank;
648 const char *msg;
649 int cpu = smp_processor_id();
650
651 previous_current = curr_task(cpu);
652 set_curr_task(cpu, current);
653 if ((p = strchr(current->comm, ' ')))
654 *p = '\0';
655
656 /* Best effort attempt to cope with MCA/INIT delivered while in
657 * physical mode.
658 */
659 regs->cr_ipsr = ms->pmsa_ipsr;
660 if (ia64_psr(regs)->dt == 0) {
661 va.l = r12;
662 if (va.f.reg == 0) {
663 va.f.reg = 7;
664 r12 = va.l;
665 }
666 va.l = r13;
667 if (va.f.reg == 0) {
668 va.f.reg = 7;
669 r13 = va.l;
670 }
671 }
672 if (ia64_psr(regs)->rt == 0) {
673 va.l = ar_bspstore;
674 if (va.f.reg == 0) {
675 va.f.reg = 7;
676 ar_bspstore = va.l;
677 }
678 va.l = ar_bsp;
679 if (va.f.reg == 0) {
680 va.f.reg = 7;
681 ar_bsp = va.l;
682 }
683 }
684
685 /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
686 * have been copied to the old stack, the old stack may fail the
687 * validation tests below. So ia64_old_stack() must restore the dirty
688 * registers from the new stack. The old and new bspstore probably
689 * have different alignments, so loadrs calculated on the old bsp
690 * cannot be used to restore from the new bsp. Calculate a suitable
691 * loadrs for the new stack and save it in the new pt_regs, where
692 * ia64_old_stack() can get it.
693 */
694 old_bspstore = (u64 *)ar_bspstore;
695 old_bsp = (u64 *)ar_bsp;
696 slots = ia64_rse_num_regs(old_bspstore, old_bsp);
697 new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
698 new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
699 regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
700
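The slot arithmetic above relies on the RSE backing-store layout: every 64th 8-byte slot holds an RNAT collection rather than a stacked register. The helpers used here come from include/asm-ia64/rse.h and reduce to roughly this sketch:

	/* Sketch of the RSE helpers assumed above. Slot 63 of each
	 * 512-byte backing-store group is an RNAT collection, so it is
	 * excluded when counting the registers between two addresses. */
	static unsigned long rse_slot_num(unsigned long *addr)
	{
		return ((unsigned long)addr >> 3) & 0x3f;
	}

	static unsigned long rse_num_regs(unsigned long *bspstore,
					  unsigned long *bsp)
	{
		unsigned long slots = bsp - bspstore;

		return slots - (rse_slot_num(bspstore) + slots) / 0x40;
	}
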
701 /* Verify the previous stack state before we change it */
702 if (user_mode(regs)) {
703 msg = "occurred in user space";
704 goto no_mod;
705 }
706 if (r13 != sos->prev_IA64_KR_CURRENT) {
707 msg = "inconsistent previous current and r13";
708 goto no_mod;
709 }
710 if ((r12 - r13) >= KERNEL_STACK_SIZE) {
711 msg = "inconsistent r12 and r13";
712 goto no_mod;
713 }
714 if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
715 msg = "inconsistent ar.bspstore and r13";
716 goto no_mod;
717 }
718 va.p = old_bspstore;
719 if (va.f.reg < 5) {
720 msg = "old_bspstore is in the wrong region";
721 goto no_mod;
722 }
723 if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
724 msg = "inconsistent ar.bsp and r13";
725 goto no_mod;
726 }
727 size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
728 if (ar_bspstore + size > r12) {
729 msg = "no room for blocked state";
730 goto no_mod;
731 }
732
733 /* Change the comm field on the MCA/INIT task to include the pid that
734 * was interrupted, it makes for easier debugging. If that pid was 0
735 * (swapper or nested MCA/INIT) then use the start of the previous comm
736 * field suffixed with its cpu.
737 */
738 if (previous_current->pid)
739 snprintf(comm, sizeof(comm), "%s %d",
740 current->comm, previous_current->pid);
741 else {
742 int l;
743 if ((p = strchr(previous_current->comm, ' ')))
744 l = p - previous_current->comm;
745 else
746 l = strlen(previous_current->comm);
747 snprintf(comm, sizeof(comm), "%s %*s %d",
748 current->comm, l, previous_current->comm,
749 previous_current->thread_info->cpu);
750 }
751 memcpy(current->comm, comm, sizeof(current->comm));
752
753 /* Make the original task look blocked. First stack a struct pt_regs,
754 * describing the state at the time of interrupt. mca_asm.S built a
755 * partial pt_regs, copy it and fill in the blanks using minstate.
756 */
757 p = (char *)r12 - sizeof(*regs);
758 old_regs = (struct pt_regs *)p;
759 memcpy(old_regs, regs, sizeof(*regs));
760 /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
761 * pmsa_{xip,xpsr,xfs}
762 */
763 if (ia64_psr(regs)->ic) {
764 old_regs->cr_iip = ms->pmsa_iip;
765 old_regs->cr_ipsr = ms->pmsa_ipsr;
766 old_regs->cr_ifs = ms->pmsa_ifs;
767 } else {
768 old_regs->cr_iip = ms->pmsa_xip;
769 old_regs->cr_ipsr = ms->pmsa_xpsr;
770 old_regs->cr_ifs = ms->pmsa_xfs;
771 }
772 old_regs->pr = ms->pmsa_pr;
773 old_regs->b0 = ms->pmsa_br0;
774 old_regs->loadrs = loadrs;
775 old_regs->ar_rsc = ms->pmsa_rsc;
776 old_unat = old_regs->ar_unat;
777 copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
778 copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
779 copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
780 copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
781 copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
782 copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
783 copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
784 copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
785 copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
786 copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
787 copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
788 if (ia64_psr(old_regs)->bn)
789 bank = ms->pmsa_bank1_gr;
790 else
791 bank = ms->pmsa_bank0_gr;
792 copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
793 copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
794 copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
795 copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
796 copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
797 copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
798 copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
799 copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
800 copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
801 copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
802 copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
803 copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
804 copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
805 copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
806 copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
807 copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
808
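Since r16-r31 are contiguous in struct pt_regs (the deleted fetch_min_state() relied on the same fact), the sixteen banked copies above could equally be written as a loop; the unrolled form just mirrors the minstate layout line by line. A sketch:

	/* Equivalent loop form of the sixteen banked copy_reg() calls
	 * (sketch); r16-r31 are contiguous in struct pt_regs. */
	u64 *dst = &old_regs->r16;
	int i;

	for (i = 0; i < 16; i++)
		copy_reg(&bank[i], ms->pmsa_nat_bits, &dst[i], &old_unat);
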
809 /* Next stack a struct switch_stack. mca_asm.S built a partial
810 * switch_stack, copy it and fill in the blanks using pt_regs and
811 * minstate.
812 *
813 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
814 * ar.pfs is set to 0.
815 *
816 * unwind.c::unw_unwind() does special processing for interrupt frames.
817 * It checks if the PRED_NON_SYSCALL predicate is set; if the predicate
818 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs. Not
819 * that this is documented, of course. Set PRED_NON_SYSCALL in the
820 * switch_stack on the original stack so it will unwind correctly when
821 * unwind.c reads pt_regs.
822 *
823 * thread.ksp is updated to point to the synthesized switch_stack.
824 */
825 p -= sizeof(struct switch_stack);
826 old_sw = (struct switch_stack *)p;
827 memcpy(old_sw, sw, sizeof(*sw));
828 old_sw->caller_unat = old_unat;
829 old_sw->ar_fpsr = old_regs->ar_fpsr;
830 copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
831 copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
832 copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
833 copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
834 old_sw->b0 = (u64)ia64_leave_kernel;
835 old_sw->b1 = ms->pmsa_br1;
836 old_sw->ar_pfs = 0;
837 old_sw->ar_unat = old_unat;
838 old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
839 previous_current->thread.ksp = (u64)p - 16;
840
841 /* Finally copy the original stack's registers back to its RBS.
842 * Registers from ar.bspstore through ar.bsp at the time of the event
843 * are in the current RBS, copy them back to the original stack. The
844 * copy must be done register by register because the original bspstore
845 * and the current one have different alignments, so the saved RNAT
846 * data occurs at different places.
847 *
848 * mca_asm does cover, so the old_bsp already includes all registers at
849 * the time of MCA/INIT. It also does flushrs, so all registers before
850 * this function have been written to backing store on the MCA/INIT
851 * stack.
852 */
853 new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
854 old_rnat = regs->ar_rnat;
855 while (slots--) {
856 if (ia64_rse_is_rnat_slot(new_bspstore)) {
857 new_rnat = ia64_get_rnat(new_bspstore++);
858 }
859 if (ia64_rse_is_rnat_slot(old_bspstore)) {
860 *old_bspstore++ = old_rnat;
861 old_rnat = 0;
862 }
863 nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
864 old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
865 old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
866 *old_bspstore++ = *new_bspstore++;
867 }
868 old_sw->ar_bspstore = (unsigned long)old_bspstore;
869 old_sw->ar_rnat = old_rnat;
870
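The copy must walk slot by slot because RNAT collections are position-dependent: an address is an RNAT slot exactly when its slot number is 63, so old and new bspstore values with different alignments interleave the collections at different points. The predicate used in the loop reduces to this sketch (mirroring include/asm-ia64/rse.h):

	/* Sketch of the RNAT-slot test used in the copy loop above. */
	static unsigned long rse_is_rnat_slot(unsigned long *addr)
	{
		return (((unsigned long)addr >> 3) & 0x3f) == 0x3f;
	}
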
871 sos->prev_task = previous_current;
872 return previous_current;
873
874no_mod:
875 printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
876 smp_processor_id(), type, msg);
877 return previous_current;
878}
879
880/* The monarch/slave interaction is based on monarch_cpu and requires that all
881 * slaves have entered rendezvous before the monarch leaves. If any cpu has
882 * not entered rendezvous yet then wait a bit. The assumption is that any
883 * slave that has not rendezvoused after a reasonable time is never going to do
884 * so. In this context, slave includes cpus that respond to the MCA rendezvous
885 * interrupt, as well as cpus that receive the INIT slave event.
886 */
887
888static void
889ia64_wait_for_slaves(int monarch)
890{
891 int c, wait = 0;
892 for_each_online_cpu(c) {
893 if (c == monarch)
894 continue;
895 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
896 udelay(1000); /* short wait first */
897 wait = 1;
898 break;
899 }
900 }
901 if (!wait)
902 return;
903 for_each_online_cpu(c) {
904 if (c == monarch)
905 continue;
906 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
907 udelay(5*1000000); /* wait 5 seconds for slaves (arbitrary) */
908 break;
909 }
910 }
911}
912
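Stripped of the MCA machinery, the monarch/slave interaction used here and in the handlers below is a two-phase handshake on the single shared variable monarch_cpu. A standalone sketch with hypothetical names (the kernel code spins with cpu_relax() in the same two places):

	static volatile int monarch_cpu = -1;

	static void monarch(int cpu)
	{
		monarch_cpu = cpu;	/* lets the slaves advance */
		/* wait for slaves, handle the event, backtrace ... */
		monarch_cpu = -1;	/* releases the slaves */
	}

	static void slave(void)
	{
		while (monarch_cpu == -1)
			;		/* spin until monarch enters */
		while (monarch_cpu != -1)
			;		/* spin until monarch leaves */
	}
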
857/* 913/*
858 * ia64_mca_ucmc_handler 914 * ia64_mca_handler
859 * 915 *
860 * This is the uncorrectable machine check handler called from OS_MCA 916 * This is the uncorrectable machine check handler called from OS_MCA
861 * dispatch code which is in turn called from SAL_CHECK(). 917 * dispatch code which is in turn called from SAL_CHECK().
@@ -866,16 +922,28 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension);
866 * further MCA logging is enabled by clearing logs. 922 * further MCA logging is enabled by clearing logs.
867 * Monarch also has the duty of sending wakeup-IPIs to pull the 923 * Monarch also has the duty of sending wakeup-IPIs to pull the
868 * slave processors out of rendezvous spinloop. 924 * slave processors out of rendezvous spinloop.
869 *
870 * Inputs : None
871 * Outputs : None
872 */ 925 */
873void 926void
874ia64_mca_ucmc_handler(void) 927ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
928 struct ia64_sal_os_state *sos)
875{ 929{
876 pal_processor_state_info_t *psp = (pal_processor_state_info_t *) 930 pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
877 &ia64_sal_to_os_handoff_state.proc_state_param; 931 &sos->proc_state_param;
878 int recover; 932 int recover, cpu = smp_processor_id();
933 task_t *previous_current;
934
935 oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
936 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
937 monarch_cpu = cpu;
938 ia64_wait_for_slaves(cpu);
939
940 /* Wakeup all the processors which are spinning in the rendezvous loop.
941 * They will leave SAL, then spin in the OS with interrupts disabled
942 * until this monarch cpu leaves the MCA handler. That gets control
943 * back to the OS so we can backtrace the other cpus; backtrace does not
944 * work while they are spinning in SAL.
945 */
946 ia64_mca_wakeup_all();
879 947
880 /* Get the MCA error record and log it */ 948 /* Get the MCA error record and log it */
881 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); 949 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -883,25 +951,20 @@ ia64_mca_ucmc_handler(void)
883 /* TLB error only exists in this SAL error record */ 951 /* TLB error only exists in this SAL error record */
884 recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) 952 recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
885 /* other error recovery */ 953 /* other error recovery */
886 || (ia64_mca_ucmc_extension 954 || (ia64_mca_ucmc_extension
887 && ia64_mca_ucmc_extension( 955 && ia64_mca_ucmc_extension(
888 IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), 956 IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
889 &ia64_sal_to_os_handoff_state, 957 sos));
890 &ia64_os_to_sal_handoff_state));
891 958
892 if (recover) { 959 if (recover) {
893 sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); 960 sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
894 rh->severity = sal_log_severity_corrected; 961 rh->severity = sal_log_severity_corrected;
895 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); 962 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
963 sos->os_status = IA64_MCA_CORRECTED;
896 } 964 }
897 /*
898 * Wakeup all the processors which are spinning in the rendezvous
899 * loop.
900 */
901 ia64_mca_wakeup_all();
902 965
903 /* Return to SAL */ 966 set_curr_task(cpu, previous_current);
904 ia64_return_to_sal_check(recover); 967 monarch_cpu = -1;
905} 968}
906 969
907static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); 970static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
@@ -1125,34 +1188,114 @@ ia64_mca_cpe_poll (unsigned long dummy)
1125/* 1188/*
1126 * C portion of the OS INIT handler 1189 * C portion of the OS INIT handler
1127 * 1190 *
1128 * Called from ia64_monarch_init_handler 1191 * Called from ia64_os_init_dispatch
1129 *
1130 * Inputs: pointer to pt_regs where processor info was saved.
1131 * 1192 *
1132 * Returns: 1193 * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for
1133 * 0 if SAL must warm boot the System 1194 * this event. This code is used for both monarch and slave INIT events, see
1134 * 1 if SAL must return to interrupted context using PAL_MC_RESUME 1195 * sos->monarch.
1135 * 1196 *
1197 * All INIT events switch to the INIT stack and change the previous process to
1198 * blocked status. If one of the INIT events is the monarch then we are
1199 * probably processing the nmi button/command. Use the monarch cpu to dump all
1200 * the processes. The slave INIT events all spin until the monarch cpu
1201 * returns. We can also get INIT slave events for MCA, in which case the MCA
1202 * process is the monarch.
1136 */ 1203 */
1204
1137void 1205void
1138ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw) 1206ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1207 struct ia64_sal_os_state *sos)
1139{ 1208{
1140 pal_min_state_area_t *ms; 1209 static atomic_t slaves;
1210 static atomic_t monarchs;
1211 task_t *previous_current;
1212 int cpu = smp_processor_id(), c;
1213 struct task_struct *g, *t;
1141 1214
1142 oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */ 1215 oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
1143 console_loglevel = 15; /* make sure printks make it to console */ 1216 console_loglevel = 15; /* make sure printks make it to console */
1144 1217
1145 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n", 1218 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
1146 ia64_sal_to_os_handoff_state.proc_state_param); 1219 sos->proc_state_param, cpu, sos->monarch);
1220 salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
1147 1221
1148 /* 1222 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
1149 * Address of minstate area provided by PAL is physical, 1223 sos->os_status = IA64_INIT_RESUME;
1150 * uncacheable (bit 63 set). Convert to Linux virtual 1224
1151 * address in region 6. 1225 /* FIXME: Workaround for broken proms that drive all INIT events as
1226 * slaves. The last slave that enters is promoted to be a monarch.
1227 * Remove this code in September 2006; that gives platforms a year to
1228 * fix their proms and get their customers updated.
1152 */ 1229 */
1153 ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61)); 1230 if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
1231 printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
1232 __FUNCTION__, cpu);
1233 atomic_dec(&slaves);
1234 sos->monarch = 1;
1235 }
1236
1237 /* FIXME: Workaround for broken proms that drive all INIT events as
1238 * monarchs. Second and subsequent monarchs are demoted to slaves.
1239 * Remove this code in September 2006; that gives platforms a year to
1240 * fix their proms and get their customers updated.
1241 */
1242 if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
1243 printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
1244 __FUNCTION__, cpu);
1245 atomic_dec(&monarchs);
1246 sos->monarch = 0;
1247 }
1154 1248
1155 init_handler_platform(ms, pt, sw); /* call platform specific routines */ 1249 if (!sos->monarch) {
1250 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
1251 while (monarch_cpu == -1)
1252 cpu_relax(); /* spin until monarch enters */
1253 while (monarch_cpu != -1)
1254 cpu_relax(); /* spin until monarch leaves */
1255 printk("Slave on cpu %d returning to normal service.\n", cpu);
1256 set_curr_task(cpu, previous_current);
1257 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1258 atomic_dec(&slaves);
1259 return;
1260 }
1261
1262 monarch_cpu = cpu;
1263
1264 /*
1265 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be
1266 * generated via the BMC's command-line interface, but since the console is on the
1267 * same serial line, the user will need some time to switch out of the BMC before
1268 * the dump begins.
1269 */
1270 printk("Delaying for 5 seconds...\n");
1271 udelay(5*1000000);
1272 ia64_wait_for_slaves(cpu);
1273 printk(KERN_ERR "Processes interrupted by INIT -");
1274 for_each_online_cpu(c) {
1275 struct ia64_sal_os_state *s;
1276 t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
1277 s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
1278 g = s->prev_task;
1279 if (g) {
1280 if (g->pid)
1281 printk(" %d", g->pid);
1282 else
1283 printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
1284 }
1285 }
1286 printk("\n\n");
1287 if (read_trylock(&tasklist_lock)) {
1288 do_each_thread (g, t) {
1289 printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
1290 show_stack(t, NULL);
1291 } while_each_thread (g, t);
1292 read_unlock(&tasklist_lock);
1293 }
1294 printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
1295 atomic_dec(&monarchs);
1296 set_curr_task(cpu, previous_current);
1297 monarch_cpu = -1;
1298 return;
1156} 1299}
1157 1300
1158static int __init 1301static int __init
@@ -1202,6 +1345,34 @@ static struct irqaction mca_cpep_irqaction = {
1202}; 1345};
1203#endif /* CONFIG_ACPI */ 1346#endif /* CONFIG_ACPI */
1204 1347
1348/* Minimal format of the MCA/INIT stacks. The pseudo processes that run on
1349 * these stacks can never sleep; they cannot return from the kernel to user
1350 * space and do not appear in a normal ps listing. So there is no need to
1351 * format most of the fields.
1352 */
1353
1354static void
1355format_mca_init_stack(void *mca_data, unsigned long offset,
1356 const char *type, int cpu)
1357{
1358 struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
1359 struct thread_info *ti;
1360 memset(p, 0, KERNEL_STACK_SIZE);
1361 ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
1362 ti->flags = _TIF_MCA_INIT;
1363 ti->preempt_count = 1;
1364 ti->task = p;
1365 ti->cpu = cpu;
1366 p->thread_info = ti;
1367 p->state = TASK_UNINTERRUPTIBLE;
1368 __set_bit(cpu, &p->cpus_allowed);
1369 INIT_LIST_HEAD(&p->tasks);
1370 p->parent = p->real_parent = p->group_leader = p;
1371 INIT_LIST_HEAD(&p->children);
1372 INIT_LIST_HEAD(&p->sibling);
1373 strncpy(p->comm, type, sizeof(p->comm)-1);
1374}
1375
1205/* Do per-CPU MCA-related initialization. */ 1376/* Do per-CPU MCA-related initialization. */
1206 1377
1207void __devinit 1378void __devinit
@@ -1214,19 +1385,28 @@ ia64_mca_cpu_init(void *cpu_data)
1214 int cpu; 1385 int cpu;
1215 1386
1216 mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) 1387 mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
1217 * NR_CPUS); 1388 * NR_CPUS + KERNEL_STACK_SIZE);
1389 mca_data = (void *)(((unsigned long)mca_data +
1390 KERNEL_STACK_SIZE - 1) &
1391 (-KERNEL_STACK_SIZE));
1218 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1392 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1393 format_mca_init_stack(mca_data,
1394 offsetof(struct ia64_mca_cpu, mca_stack),
1395 "MCA", cpu);
1396 format_mca_init_stack(mca_data,
1397 offsetof(struct ia64_mca_cpu, init_stack),
1398 "INIT", cpu);
1219 __per_cpu_mca[cpu] = __pa(mca_data); 1399 __per_cpu_mca[cpu] = __pa(mca_data);
1220 mca_data += sizeof(struct ia64_mca_cpu); 1400 mca_data += sizeof(struct ia64_mca_cpu);
1221 } 1401 }
1222 } 1402 }
1223 1403
1224 /* 1404 /*
1225 * The MCA info structure was allocated earlier and its 1405 * The MCA info structure was allocated earlier and its
1226 * physical address saved in __per_cpu_mca[cpu]. Copy that 1406 * physical address saved in __per_cpu_mca[cpu]. Copy that
1227 * address to ia64_mca_data so we can access it as a per-CPU 1407 * address to ia64_mca_data so we can access it as a per-CPU
1228 * variable. 1408 * variable.
1229 */ 1409 */
1230 __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; 1410 __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
1231 1411
1232 /* 1412 /*
@@ -1236,11 +1416,11 @@ ia64_mca_cpu_init(void *cpu_data)
1236 __get_cpu_var(ia64_mca_per_cpu_pte) = 1416 __get_cpu_var(ia64_mca_per_cpu_pte) =
1237 pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); 1417 pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
1238 1418
1239 /* 1419 /*
1240 * Also, stash away a copy of the PAL address and the PTE 1420 * Also, stash away a copy of the PAL address and the PTE
1241 * needed to map it. 1421 * needed to map it.
1242 */ 1422 */
1243 pal_vaddr = efi_get_pal_addr(); 1423 pal_vaddr = efi_get_pal_addr();
1244 if (!pal_vaddr) 1424 if (!pal_vaddr)
1245 return; 1425 return;
1246 __get_cpu_var(ia64_mca_pal_base) = 1426 __get_cpu_var(ia64_mca_pal_base) =
@@ -1272,8 +1452,8 @@ ia64_mca_cpu_init(void *cpu_data)
1272void __init 1452void __init
1273ia64_mca_init(void) 1453ia64_mca_init(void)
1274{ 1454{
1275 ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler; 1455 ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
1276 ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler; 1456 ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
1277 ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; 1457 ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
1278 int i; 1458 int i;
1279 s64 rc; 1459 s64 rc;
@@ -1351,9 +1531,9 @@ ia64_mca_init(void)
1351 * XXX - disable SAL checksum by setting size to 0, should be 1531 * XXX - disable SAL checksum by setting size to 0, should be
1352 * size of the actual init handler in mca_asm.S. 1532 * size of the actual init handler in mca_asm.S.
1353 */ 1533 */
1354 ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp); 1534 ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
1355 ia64_mc_info.imi_monarch_init_handler_size = 0; 1535 ia64_mc_info.imi_monarch_init_handler_size = 0;
1356 ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp); 1536 ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
1357 ia64_mc_info.imi_slave_init_handler_size = 0; 1537 ia64_mc_info.imi_slave_init_handler_size = 0;
1358 1538
1359 IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, 1539 IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index ef3fd7265b67..499a065f4e60 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -16,6 +16,9 @@
16// 04/11/12 Russ Anderson <rja@sgi.com> 16// 04/11/12 Russ Anderson <rja@sgi.com>
17// Added per cpu MCA/INIT stack save areas. 17// Added per cpu MCA/INIT stack save areas.
18// 18//
19// 12/08/05 Keith Owens <kaos@sgi.com>
20// Use per cpu MCA/INIT stacks for all data.
21//
19#include <linux/config.h> 22#include <linux/config.h>
20#include <linux/threads.h> 23#include <linux/threads.h>
21 24
@@ -25,96 +28,23 @@
25#include <asm/mca_asm.h> 28#include <asm/mca_asm.h>
26#include <asm/mca.h> 29#include <asm/mca.h>
27 30
28/* 31#include "entry.h"
29 * When we get a machine check, the kernel stack pointer is no longer
30 * valid, so we need to set a new stack pointer.
31 */
32#define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
33
34/*
35 * Needed for return context to SAL
36 */
37#define IA64_MCA_SAME_CONTEXT 0
38#define IA64_MCA_COLD_BOOT -2
39
40#include "minstate.h"
41
42/*
43 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
44 * 1. GR1 = OS GP
45 * 2. GR8 = PAL_PROC physical address
46 * 3. GR9 = SAL_PROC physical address
47 * 4. GR10 = SAL GP (physical)
48 * 5. GR11 = Rendez state
49 * 6. GR12 = Return address to location within SAL_CHECK
50 */
51#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
52 LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
53 st8 [_tmp]=r1,0x08;; \
54 st8 [_tmp]=r8,0x08;; \
55 st8 [_tmp]=r9,0x08;; \
56 st8 [_tmp]=r10,0x08;; \
57 st8 [_tmp]=r11,0x08;; \
58 st8 [_tmp]=r12,0x08;; \
59 st8 [_tmp]=r17,0x08;; \
60 st8 [_tmp]=r18,0x08
61
62/*
63 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
64 * (p6) is executed if we never entered virtual mode (TLB error)
65 * (p7) is executed if we entered virtual mode as expected (normal case)
66 * 1. GR8 = OS_MCA return status
67 * 2. GR9 = SAL GP (physical)
68 * 3. GR10 = 0/1 returning same/new context
69 * 4. GR22 = New min state save area pointer
70 * returns ptr to SAL rtn save loc in _tmp
71 */
72#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
73 movl _tmp=ia64_os_to_sal_handoff_state;; \
74 DATA_VA_TO_PA(_tmp);; \
75 ld8 r8=[_tmp],0x08;; \
76 ld8 r9=[_tmp],0x08;; \
77 ld8 r10=[_tmp],0x08;; \
78 ld8 r22=[_tmp],0x08;;
79 // now _tmp is pointing to SAL rtn save location
80
81/*
82 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
83 * imots_os_status=IA64_MCA_COLD_BOOT
84 * imots_sal_gp=SAL GP
85 * imots_context=IA64_MCA_SAME_CONTEXT
86 * imots_new_min_state=Min state save area pointer
87 * imots_sal_check_ra=Return address to location within SAL_CHECK
88 *
89 */
90#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
91 movl tmp=IA64_MCA_COLD_BOOT; \
92 movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
93 movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
94 st8 [os_to_sal_handoff]=tmp,8;; \
95 ld8 tmp=[sal_to_os_handoff],48;; \
96 st8 [os_to_sal_handoff]=tmp,8;; \
97 movl tmp=IA64_MCA_SAME_CONTEXT;; \
98 st8 [os_to_sal_handoff]=tmp,8;; \
99 ld8 tmp=[sal_to_os_handoff],-8;; \
100 st8 [os_to_sal_handoff]=tmp,8;; \
101 ld8 tmp=[sal_to_os_handoff];; \
102 st8 [os_to_sal_handoff]=tmp;;
103 32
104#define GET_IA64_MCA_DATA(reg) \ 33#define GET_IA64_MCA_DATA(reg) \
105 GET_THIS_PADDR(reg, ia64_mca_data) \ 34 GET_THIS_PADDR(reg, ia64_mca_data) \
106 ;; \ 35 ;; \
107 ld8 reg=[reg] 36 ld8 reg=[reg]
108 37
109 .global ia64_os_mca_dispatch
110 .global ia64_os_mca_dispatch_end
111 .global ia64_sal_to_os_handoff_state
112 .global ia64_os_to_sal_handoff_state
113 .global ia64_do_tlb_purge 38 .global ia64_do_tlb_purge
39 .global ia64_os_mca_dispatch
40 .global ia64_os_init_dispatch_monarch
41 .global ia64_os_init_dispatch_slave
114 42
115 .text 43 .text
116 .align 16 44 .align 16
117 45
46//StartMain////////////////////////////////////////////////////////////////////
47
118/* 48/*
119 * Just the TLB purge part is moved to a separate function 49 * Just the TLB purge part is moved to a separate function
120 * so we can re-use the code for cpu hotplug code as well 50 * so we can re-use the code for cpu hotplug code as well
@@ -207,34 +137,31 @@ ia64_do_tlb_purge:
207 br.sptk.many b1 137 br.sptk.many b1
208 ;; 138 ;;
209 139
210ia64_os_mca_dispatch: 140//EndMain//////////////////////////////////////////////////////////////////////
141
142//StartMain////////////////////////////////////////////////////////////////////
211 143
144ia64_os_mca_dispatch:
212 // Serialize all MCA processing 145 // Serialize all MCA processing
213 mov r3=1;; 146 mov r3=1;;
214 LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; 147 LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
215ia64_os_mca_spin: 148ia64_os_mca_spin:
216 xchg8 r4=[r2],r3;; 149 xchg4 r4=[r2],r3;;
217 cmp.ne p6,p0=r4,r0 150 cmp.ne p6,p0=r4,r0
218(p6) br ia64_os_mca_spin 151(p6) br ia64_os_mca_spin
219 152
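The xchg4/cmp.ne/branch triple is a test-and-set spinlock on ia64_mca_serialize, released near the end of the dispatcher with st4.rel. In C terms it is roughly this sketch (GCC atomic builtins, not the kernel's spinlock API):

	static unsigned int ia64_mca_serialize;

	static void mca_serialize_lock(void)
	{
		/* exchange acquires; spin while the old value was set */
		while (__atomic_exchange_n(&ia64_mca_serialize, 1,
					   __ATOMIC_ACQUIRE))
			;
	}

	static void mca_serialize_unlock(void)
	{
		/* matches the st4.rel release below */
		__atomic_store_n(&ia64_mca_serialize, 0, __ATOMIC_RELEASE);
	}
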
220 // Save the SAL to OS MCA handoff state as defined 153 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
221 // by SAL SPEC 3.0 154 LOAD_PHYSICAL(p0,r2,1f) // return address
222 // NOTE : The order in which the state gets saved 155 mov r19=1 // All MCA events are treated as monarch (for now)
223 // is dependent on the way the C-structure 156 br.sptk ia64_state_save // save the state that is not in minstate
224 // for ia64_mca_sal_to_os_state_t has been 1571:
225 // defined in include/asm/mca.h
226 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
227 ;;
228
229 // LOG PROCESSOR STATE INFO FROM HERE ON..
230begin_os_mca_dump:
231 br ia64_os_mca_proc_state_dump;;
232 158
233ia64_os_mca_done_dump: 159 GET_IA64_MCA_DATA(r2)
234 160 // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
235 LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) 161 ;;
162 add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2
236 ;; 163 ;;
237 ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. 164 ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK.
238 ;; 165 ;;
239 tbit.nz p6,p7=r18,60 166 tbit.nz p6,p7=r18,60
240(p7) br.spnt done_tlb_purge_and_reload 167(p7) br.spnt done_tlb_purge_and_reload
@@ -323,624 +250,775 @@ ia64_reload_tr:
323 itr.d dtr[r20]=r16 250 itr.d dtr[r20]=r16
324 ;; 251 ;;
325 srlz.d 252 srlz.d
326 ;;
327 br.sptk.many done_tlb_purge_and_reload
328err:
329 COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
330 br.sptk.many ia64_os_mca_done_restore
331 253
332done_tlb_purge_and_reload: 254done_tlb_purge_and_reload:
333 255
334 // Setup new stack frame for OS_MCA handling 256 // switch to per cpu MCA stack
335 GET_IA64_MCA_DATA(r2) 257 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
336 ;; 258 LOAD_PHYSICAL(p0,r2,1f) // return address
337 add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 259 br.sptk ia64_new_stack
338 add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2 2601:
339 ;; 261
340 rse_switch_context(r6,r3,r2);; // RSC management in this new context 262 // everything saved, now we can set the kernel registers
263 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
264 LOAD_PHYSICAL(p0,r2,1f) // return address
265 br.sptk ia64_set_kernel_registers
2661:
341 267
268 // This must be done in physical mode
342 GET_IA64_MCA_DATA(r2) 269 GET_IA64_MCA_DATA(r2)
343 ;; 270 ;;
344 add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2 271 mov r7=r2
345 ;;
346 mov r12=r2 // establish new stack-pointer
347 272
348 // Enter virtual mode from physical mode 273 // Enter virtual mode from physical mode
349 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) 274 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
350ia64_os_mca_virtual_begin: 275
276 // This code returns to SAL via SOS r2; in general SAL has no unwind
277 // data. To get a clean termination when backtracing the C MCA/INIT
278 // handler, set a dummy return address of 0 in this routine. That
279 // requires that ia64_os_mca_virtual_begin be a global function.
280ENTRY(ia64_os_mca_virtual_begin)
281 .prologue
282 .save rp,r0
283 .body
284
285 mov ar.rsc=3 // set eager mode for C handler
286 mov r2=r7 // see GET_IA64_MCA_DATA above
287 ;;
351 288
352 // Call virtual mode handler 289 // Call virtual mode handler
353 movl r2=ia64_mca_ucmc_handler;; 290 alloc r14=ar.pfs,0,0,3,0
354 mov b6=r2;; 291 ;;
355 br.call.sptk.many b0=b6;; 292 DATA_PA_TO_VA(r2,r7)
356.ret0: 293 ;;
294 add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
295 add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
296 add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
297 br.call.sptk.many b0=ia64_mca_handler
298
357 // Revert back to physical mode before going back to SAL 299 // Revert back to physical mode before going back to SAL
358 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) 300 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
359ia64_os_mca_virtual_end: 301ia64_os_mca_virtual_end:
360 302
361 // restore the original stack frame here 303END(ia64_os_mca_virtual_begin)
304
305 // switch back to previous stack
306 alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame
307 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
308 LOAD_PHYSICAL(p0,r2,1f) // return address
309 br.sptk ia64_old_stack
3101:
311
312 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
313 LOAD_PHYSICAL(p0,r2,1f) // return address
314 br.sptk ia64_state_restore // restore the SAL state
3151:
316
317 mov b0=r12 // SAL_CHECK return address
318
319 // release lock
320 LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
321 st4.rel [r3]=r0
322
323 br b0
324
325//EndMain//////////////////////////////////////////////////////////////////////
326
327//StartMain////////////////////////////////////////////////////////////////////
328
329//
330// SAL to OS entry point for INIT on all processors. This has been defined for
331// registration purposes with SAL as a part of ia64_mca_init. Monarch and
332// slave INIT have identical processing, except for the value of the
333// sos->monarch flag in r19.
334//
335
336ia64_os_init_dispatch_monarch:
337 mov r19=1 // Bow, bow, ye lower middle classes!
338 br.sptk ia64_os_init_dispatch
339
340ia64_os_init_dispatch_slave:
341 mov r19=0 // <igor>yeth, mathter</igor>
342
343ia64_os_init_dispatch:
344
345 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
346 LOAD_PHYSICAL(p0,r2,1f) // return address
347 br.sptk ia64_state_save // save the state that is not in minstate
3481:
349
350 // switch to per cpu INIT stack
351 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
352 LOAD_PHYSICAL(p0,r2,1f) // return address
353 br.sptk ia64_new_stack
3541:
355
356 // everything saved, now we can set the kernel registers
357 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
358 LOAD_PHYSICAL(p0,r2,1f) // return address
359 br.sptk ia64_set_kernel_registers
3601:
361
362 // This must be done in physical mode
362 GET_IA64_MCA_DATA(r2) 363 GET_IA64_MCA_DATA(r2)
363 ;; 364 ;;
364 add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 365 mov r7=r2
365 ;; 366
366 movl r4=IA64_PSR_MC 367 // Enter virtual mode from physical mode
368 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)
369
370 // This code returns to SAL via SOS r2; in general SAL has no unwind
371 // data. To get a clean termination when backtracing the C MCA/INIT
372 // handler, set a dummy return address of 0 in this routine. That
373 // requires that ia64_os_init_virtual_begin be a global function.
374ENTRY(ia64_os_init_virtual_begin)
375 .prologue
376 .save rp,r0
377 .body
378
379 mov ar.rsc=3 // set eager mode for C handler
380 mov r2=r7 // see GET_IA64_MCA_DATA above
367 ;; 381 ;;
368 rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
369 382
370 // let us restore all the registers from our PSI structure 383 // Call virtual mode handler
371 mov r8=gp 384 alloc r14=ar.pfs,0,0,3,0
385 ;;
386 DATA_PA_TO_VA(r2,r7)
372 ;; 387 ;;
373begin_os_mca_restore: 388 add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
374 br ia64_os_mca_proc_state_restore;; 389 add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
390 add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
391 br.call.sptk.many b0=ia64_init_handler
375 392
376ia64_os_mca_done_restore: 393 // Revert back to physical mode before going back to SAL
377 OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);; 394 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
378 // branch back to SALE_CHECK 395ia64_os_init_virtual_end:
379 ld8 r3=[r2];;
380 mov b0=r3;; // SAL_CHECK return address
381 396
382 // release lock 397END(ia64_os_init_virtual_begin)
383 movl r3=ia64_mca_serialize;; 398
384 DATA_VA_TO_PA(r3);; 399 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
385 st8.rel [r3]=r0 400 LOAD_PHYSICAL(p0,r2,1f) // return address
401 br.sptk ia64_state_restore // restore the SAL state
4021:
386 403
404 // switch back to previous stack
405 alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame
406 mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack
407 LOAD_PHYSICAL(p0,r2,1f) // return address
408 br.sptk ia64_old_stack
4091:
410
411 mov b0=r12 // SAL_CHECK return address
387 br b0 412 br b0
388 ;; 413
389ia64_os_mca_dispatch_end:
390//EndMain////////////////////////////////////////////////////////////////////// 414//EndMain//////////////////////////////////////////////////////////////////////
391 415
416// common defines for the stubs
417#define ms r4
418#define regs r5
419#define temp1 r2 /* careful, it overlaps with input registers */
420#define temp2 r3 /* careful, it overlaps with input registers */
421#define temp3 r7
422#define temp4 r14
423
392 424
393//++ 425//++
394// Name: 426// Name:
395// ia64_os_mca_proc_state_dump() 427// ia64_state_save()
396// 428//
397// Stub Description: 429// Stub Description:
398// 430//
399// This stub dumps the processor state during MCHK to a data area 431// Save the state that is not in minstate. This is sensitive to the layout of
432// struct ia64_sal_os_state in mca.h.
433//
434// r2 contains the return address, r3 contains either
435// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
436//
437// The OS to SAL section of struct ia64_sal_os_state is set to a default
438// value of cold boot (MCA) or warm boot (INIT) and return to the same
439// context. ia64_sal_os_state is also used to hold some registers that
440// need to be saved and restored across the stack switches.
441//
442// Most input registers to this stub come from PAL/SAL
443// r1 os gp, physical
444// r8 pal_proc entry point
445// r9 sal_proc entry point
446// r10 sal gp
447 // r11 MCA - rendezvous state, INIT - reason code
448// r12 sal return address
449// r17 pal min_state
450// r18 processor state parameter
451// r19 monarch flag, set by the caller of this routine
452//
453// In addition to the SAL to OS state, this routine saves all the
454// registers that appear in struct pt_regs and struct switch_stack,
455// excluding those that are already in the PAL minstate area. This
456// results in a partial pt_regs and switch_stack, the C code copies the
457// remaining registers from PAL minstate to pt_regs and switch_stack. The
458// resulting structures contain all the state of the original process when
459// MCA/INIT occurred.
400// 460//
401//-- 461//--
402 462
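The default OS-to-SAL section written by this stub amounts to "cold boot for MCA, warm boot for INIT, return to the same context"; the C handlers overwrite os_status when an event is corrected (IA64_MCA_CORRECTED) or resumable (IA64_INIT_RESUME). As a C-level sketch, using the field names from the sos stores below (mca_event is a hypothetical flag standing in for the p1/p2 predicate):

	/* Sketch of the defaults established by ia64_state_save. */
	sos->os_status = mca_event ? IA64_MCA_COLD_BOOT : IA64_INIT_WARM_BOOT;
	sos->context = IA64_MCA_SAME_CONTEXT;
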
403ia64_os_mca_proc_state_dump: 463ia64_state_save:
404// Save bank 1 GRs 16-31 which will be used by c-language code when we switch 464 add regs=MCA_SOS_OFFSET, r3
405// to virtual addressing mode. 465 add ms=MCA_SOS_OFFSET+8, r3
406 GET_IA64_MCA_DATA(r2) 466 mov b0=r2 // save return address
467 cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
468 ;;
469 GET_IA64_MCA_DATA(temp2)
470 ;;
471 add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack
472 add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack
473 ;;
474 mov regs=temp1 // save the start of sos
475 st8 [temp1]=r1,16 // os_gp
476 st8 [temp2]=r8,16 // pal_proc
477 ;;
478 st8 [temp1]=r9,16 // sal_proc
479 st8 [temp2]=r11,16 // rv_rc
480 mov r11=cr.iipa
481 ;;
482 st8 [temp1]=r18,16 // proc_state_param
483 st8 [temp2]=r19,16 // monarch
484 mov r6=IA64_KR(CURRENT)
485 ;;
486 st8 [temp1]=r12,16 // sal_ra
487 st8 [temp2]=r10,16 // sal_gp
488 mov r12=cr.isr
489 ;;
490 st8 [temp1]=r17,16 // pal_min_state
491 st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT
492 mov r6=cr.ifa
493 ;;
494 st8 [temp1]=r0,16 // prev_task, starts off as NULL
495 st8 [temp2]=r12,16 // cr.isr
496 mov r12=cr.itir
497 ;;
498 st8 [temp1]=r6,16 // cr.ifa
499 st8 [temp2]=r12,16 // cr.itir
500 mov r12=cr.iim
501 ;;
502 st8 [temp1]=r11,16 // cr.iipa
503 st8 [temp2]=r12,16 // cr.iim
504 mov r6=cr.iha
505(p1) mov r12=IA64_MCA_COLD_BOOT
506(p2) mov r12=IA64_INIT_WARM_BOOT
507 ;;
508 st8 [temp1]=r6,16 // cr.iha
509 st8 [temp2]=r12 // os_status, default is cold boot
510 mov r6=IA64_MCA_SAME_CONTEXT
511 ;;
512 st8 [temp1]=r6 // context, default is same context
513
514 // Save the pt_regs data that is not in minstate. The previous code
515 // left regs at sos.
516 add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
517 ;;
518 add temp1=PT(B6), regs
519 mov temp3=b6
520 mov temp4=b7
521 add temp2=PT(B7), regs
522 ;;
523 st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6
524 st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7
525 mov temp3=ar.csd
526 mov temp4=ar.ssd
527 cover // must be last in group
407 ;; 528 ;;
408 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 529 st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd
409 ;; 530 st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd
410// save ar.NaT 531 mov temp3=ar.unat
411 mov r5=ar.unat // ar.unat 532 mov temp4=ar.pfs
412 533 ;;
413// save banked GRs 16-31 along with NaT bits 534 st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat
414 bsw.1;; 535 st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs
415 st8.spill [r2]=r16,8;; 536 mov temp3=ar.rnat
416 st8.spill [r2]=r17,8;; 537 mov temp4=ar.bspstore
417 st8.spill [r2]=r18,8;; 538 ;;
418 st8.spill [r2]=r19,8;; 539 st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat
419 st8.spill [r2]=r20,8;; 540 st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore
420 st8.spill [r2]=r21,8;; 541 mov temp3=ar.bsp
421 st8.spill [r2]=r22,8;; 542 ;;
422 st8.spill [r2]=r23,8;; 543 sub temp3=temp3, temp4 // ar.bsp - ar.bspstore
423 st8.spill [r2]=r24,8;; 544 mov temp4=ar.fpsr
424 st8.spill [r2]=r25,8;; 545 ;;
425 st8.spill [r2]=r26,8;; 546 shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs"
426 st8.spill [r2]=r27,8;; 547 ;;
427 st8.spill [r2]=r28,8;; 548 st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs
428 st8.spill [r2]=r29,8;; 549 st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr
429 st8.spill [r2]=r30,8;; 550 mov temp3=ar.ccv
430 st8.spill [r2]=r31,8;; 551 ;;
431 552 st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv
432 mov r4=ar.unat;; 553 stf.spill [temp2]=f6,PT(F8)-PT(F6)
433 st8 [r2]=r4,8 // save User NaT bits for r16-r31 554 ;;
434 mov ar.unat=r5 // restore original unat 555 stf.spill [temp1]=f7,PT(F9)-PT(F7)
435 bsw.0;; 556 stf.spill [temp2]=f8,PT(F10)-PT(F8)
436 557 ;;
437//save BRs 558 stf.spill [temp1]=f9,PT(F11)-PT(F9)
438 add r4=8,r2 // duplicate r2 in r4 559 stf.spill [temp2]=f10
439 add r6=2*8,r2 // duplicate r2 in r4 560 ;;
440 561 stf.spill [temp1]=f11
441 mov r3=b0 562
442 mov r5=b1 563 // Save the switch_stack data that is not in minstate nor pt_regs. The
443 mov r7=b2;; 564 // previous code left regs at pt_regs.
444 st8 [r2]=r3,3*8 565 add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
445 st8 [r4]=r5,3*8 566 ;;
446 st8 [r6]=r7,3*8;; 567 add temp1=SW(F2), regs
447 568 add temp2=SW(F3), regs
448 mov r3=b3 569 ;;
449 mov r5=b4 570 stf.spill [temp1]=f2,32
450 mov r7=b5;; 571 stf.spill [temp2]=f3,32
451 st8 [r2]=r3,3*8 572 ;;
452 st8 [r4]=r5,3*8 573 stf.spill [temp1]=f4,32
453 st8 [r6]=r7,3*8;; 574 stf.spill [temp2]=f5,32
454 575 ;;
455 mov r3=b6 576 stf.spill [temp1]=f12,32
456 mov r5=b7;; 577 stf.spill [temp2]=f13,32
457 st8 [r2]=r3,2*8 578 ;;
458 st8 [r4]=r5,2*8;; 579 stf.spill [temp1]=f14,32
459 580 stf.spill [temp2]=f15,32
460cSaveCRs: 581 ;;
461// save CRs 582 stf.spill [temp1]=f16,32
462 add r4=8,r2 // duplicate r2 in r4 583 stf.spill [temp2]=f17,32
463 add r6=2*8,r2 // duplicate r2 in r4 584 ;;
464 585 stf.spill [temp1]=f18,32
465 mov r3=cr.dcr 586 stf.spill [temp2]=f19,32
466 mov r5=cr.itm 587 ;;
467 mov r7=cr.iva;; 588 stf.spill [temp1]=f20,32
468 589 stf.spill [temp2]=f21,32
469 st8 [r2]=r3,8*8 590 ;;
470 st8 [r4]=r5,3*8 591 stf.spill [temp1]=f22,32
471 st8 [r6]=r7,3*8;; // 48 byte increments 592 stf.spill [temp2]=f23,32
472 593 ;;
473 mov r3=cr.pta;; 594 stf.spill [temp1]=f24,32
474 st8 [r2]=r3,8*8;; // 64 byte increments 595 stf.spill [temp2]=f25,32
475 596 ;;
476// if PSR.ic=0, reading interruption registers causes an illegal operation fault 597 stf.spill [temp1]=f26,32
477 mov r3=psr;; 598 stf.spill [temp2]=f27,32
478 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test 599 ;;
479(p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes 600 stf.spill [temp1]=f28,32
480begin_skip_intr_regs: 601 stf.spill [temp2]=f29,32
481(p6) br SkipIntrRegs;; 602 ;;
482 603 stf.spill [temp1]=f30,SW(B2)-SW(F30)
483 add r4=8,r2 // duplicate r2 in r4 604 stf.spill [temp2]=f31,SW(B3)-SW(F31)
484 add r6=2*8,r2 // duplicate r2 in r6 605 mov temp3=b2
485 606 mov temp4=b3
486 mov r3=cr.ipsr 607 ;;
487 mov r5=cr.isr 608 st8 [temp1]=temp3,16 // save b2
488 mov r7=r0;; 609 st8 [temp2]=temp4,16 // save b3
489 st8 [r2]=r3,3*8 610 mov temp3=b4
490 st8 [r4]=r5,3*8 611 mov temp4=b5
491 st8 [r6]=r7,3*8;; 612 ;;
492 613 st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4
493 mov r3=cr.iip 614 st8 [temp2]=temp4 // save b5
494 mov r5=cr.ifa 615 mov temp3=ar.lc
495 mov r7=cr.itir;; 616 ;;
496 st8 [r2]=r3,3*8 617 st8 [temp1]=temp3 // save ar.lc
497 st8 [r4]=r5,3*8 618
498 st8 [r6]=r7,3*8;; 619 // FIXME: Some proms are incorrectly accessing the minstate area as
499 620 // cached data. The C code uses region 6, uncached virtual. Ensure
500 mov r3=cr.iipa 621 // that there is no cache data lying around for the first 1K of the
501 mov r5=cr.ifs 622 // minstate area.
502 mov r7=cr.iim;; 623 // Remove this code in September 2006, that gives platforms a year to
503 st8 [r2]=r3,3*8 624 // fix their proms and get their customers updated.
504 st8 [r4]=r5,3*8 625
505 st8 [r6]=r7,3*8;; 626 add r1=32*1,r17
506 627 add r2=32*2,r17
507 mov r3=cr25;; // cr.iha 628 add r3=32*3,r17
508 st8 [r2]=r3,160;; // 160 byte increment 629 add r4=32*4,r17
509 630 add r5=32*5,r17
510SkipIntrRegs: 631 add r6=32*6,r17
511 st8 [r2]=r0,152;; // another 152 byte increment. 632 add r7=32*7,r17
512 633 ;;
513 add r4=8,r2 // duplicate r2 in r4 634 fc r17
514 add r6=2*8,r2 // duplicate r2 in r6 635 fc r1
515 636 fc r2
516 mov r3=cr.lid 637 fc r3
517// mov r5=cr.ivr // cr.ivr, don't read it 638 fc r4
518 mov r7=cr.tpr;; 639 fc r5
519 st8 [r2]=r3,3*8 640 fc r6
520 st8 [r4]=r5,3*8 641 fc r7
521 st8 [r6]=r7,3*8;; 642 add r17=32*8,r17
522 643 add r1=32*8,r1
523 mov r3=r0 // cr.eoi => cr67 644 add r2=32*8,r2
524 mov r5=r0 // cr.irr0 => cr68 645 add r3=32*8,r3
525 mov r7=r0;; // cr.irr1 => cr69 646 add r4=32*8,r4
526 st8 [r2]=r3,3*8 647 add r5=32*8,r5
527 st8 [r4]=r5,3*8 648 add r6=32*8,r6
528 st8 [r6]=r7,3*8;; 649 add r7=32*8,r7
529 650 ;;
530 mov r3=r0 // cr.irr2 => cr70 651 fc r17
531 mov r5=r0 // cr.irr3 => cr71 652 fc r1
532 mov r7=cr.itv;; 653 fc r2
533 st8 [r2]=r3,3*8 654 fc r3
534 st8 [r4]=r5,3*8 655 fc r4
535 st8 [r6]=r7,3*8;; 656 fc r5
536 657 fc r6
537 mov r3=cr.pmv 658 fc r7
538 mov r5=cr.cmcv;; 659 add r17=32*8,r17
539 st8 [r2]=r3,7*8 660 add r1=32*8,r1
540 st8 [r4]=r5,7*8;; 661 add r2=32*8,r2
541 662 add r3=32*8,r3
542 mov r3=r0 // cr.lrr0 => cr80 663 add r4=32*8,r4
543 mov r5=r0;; // cr.lrr1 => cr81 664 add r5=32*8,r5
544 st8 [r2]=r3,23*8 665 add r6=32*8,r6
545 st8 [r4]=r5,23*8;; 666 add r7=32*8,r7
546 667 ;;
547 adds r2=25*8,r2;; 668 fc r17
548 669 fc r1
549cSaveARs: 670 fc r2
550// save ARs 671 fc r3
551 add r4=8,r2 // duplicate r2 in r4 672 fc r4
552 add r6=2*8,r2 // duplicate r2 in r6 673 fc r5
553 674 fc r6
554 mov r3=ar.k0 675 fc r7
555 mov r5=ar.k1 676 add r17=32*8,r17
556 mov r7=ar.k2;; 677 add r1=32*8,r1
557 st8 [r2]=r3,3*8 678 add r2=32*8,r2
558 st8 [r4]=r5,3*8 679 add r3=32*8,r3
559 st8 [r6]=r7,3*8;; 680 add r4=32*8,r4
560 681 add r5=32*8,r5
561 mov r3=ar.k3 682 add r6=32*8,r6
562 mov r5=ar.k4 683 add r7=32*8,r7
563 mov r7=ar.k5;; 684 ;;
564 st8 [r2]=r3,3*8 685 fc r17
565 st8 [r4]=r5,3*8 686 fc r1
566 st8 [r6]=r7,3*8;; 687 fc r2
567 688 fc r3
568 mov r3=ar.k6 689 fc r4
569 mov r5=ar.k7 690 fc r5
570 mov r7=r0;; // ar.kr8 691 fc r6
571 st8 [r2]=r3,10*8 692 fc r7
572 st8 [r4]=r5,10*8 693
573 st8 [r6]=r7,10*8;; // increment by 72 bytes 694 br.sptk b0
574
575 mov r3=ar.rsc
576 mov ar.rsc=r0 // put RSE in enforced lazy mode
577 mov r5=ar.bsp
578 ;;
579 mov r7=ar.bspstore;;
580 st8 [r2]=r3,3*8
581 st8 [r4]=r5,3*8
582 st8 [r6]=r7,3*8;;
583
584 mov r3=ar.rnat;;
585 st8 [r2]=r3,8*13 // increment by 13x8 bytes
586
587 mov r3=ar.ccv;;
588 st8 [r2]=r3,8*4
589
590 mov r3=ar.unat;;
591 st8 [r2]=r3,8*4
592
593 mov r3=ar.fpsr;;
594 st8 [r2]=r3,8*4
595
596 mov r3=ar.itc;;
597 st8 [r2]=r3,160 // 160 byte increment
598
599 mov r3=ar.pfs;;
600 st8 [r2]=r3,8
601
602 mov r3=ar.lc;;
603 st8 [r2]=r3,8
604
605 mov r3=ar.ec;;
606 st8 [r2]=r3
607 add r2=8*62,r2 //padding
608
609// save RRs
610 mov ar.lc=0x08-1
611 movl r4=0x00;;
612
613cStRR:
614 dep.z r5=r4,61,3;;
615 mov r3=rr[r5];;
616 st8 [r2]=r3,8
617 add r4=1,r4
618 br.cloop.sptk.few cStRR
619 ;;
620end_os_mca_dump:
621 br ia64_os_mca_done_dump;;
622 695
623//EndStub////////////////////////////////////////////////////////////////////// 696//EndStub//////////////////////////////////////////////////////////////////////
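The fc sweep at the end of ia64_state_save above implements the prom workaround described in the FIXME: 32 flushes at a 32-byte stride cover the first 1K of the minstate area. A minimal C sketch of the same sweep, assuming the kernel's ia64_fc() intrinsic (the fc instruction wrapper) and the 32-byte stride the assembly hard-codes:

	/* Sketch only: evict any stale cache lines over the first 1K of
	 * the PAL minstate area, mirroring the fc loop above. */
	static void flush_minstate_area(void *minstate)
	{
		char *p = minstate;
		int i;

		for (i = 0; i < 1024; i += 32)
			ia64_fc(p + i);	/* flush cache line holding p+i */
	}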
624 697
625 698
626//++ 699//++
627// Name: 700// Name:
628// ia64_os_mca_proc_state_restore() 701// ia64_state_restore()
629// 702//
630// Stub Description: 703// Stub Description:
631// 704//
632// This is a stub to restore the saved processor state during MCHK 705// Restore the SAL/OS state. This is sensitive to the layout of struct
706// ia64_sal_os_state in mca.h.
707//
708// r2 contains the return address, r3 contains either
709// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
710//
711// In addition to the SAL to OS state, this routine restores all the
712// registers that appear in struct pt_regs and struct switch_stack,
713// excluding those in the PAL minstate area.
633// 714//
634//-- 715//--
635 716
636ia64_os_mca_proc_state_restore: 717ia64_state_restore:
718 // Restore the switch_stack data that is not in minstate nor pt_regs.
719 add regs=MCA_SWITCH_STACK_OFFSET, r3
720 mov b0=r2 // save return address
721 ;;
722 GET_IA64_MCA_DATA(temp2)
723 ;;
724 add regs=temp2, regs
725 ;;
726 add temp1=SW(F2), regs
727 add temp2=SW(F3), regs
728 ;;
729 ldf.fill f2=[temp1],32
730 ldf.fill f3=[temp2],32
731 ;;
732 ldf.fill f4=[temp1],32
733 ldf.fill f5=[temp2],32
734 ;;
735 ldf.fill f12=[temp1],32
736 ldf.fill f13=[temp2],32
737 ;;
738 ldf.fill f14=[temp1],32
739 ldf.fill f15=[temp2],32
740 ;;
741 ldf.fill f16=[temp1],32
742 ldf.fill f17=[temp2],32
743 ;;
744 ldf.fill f18=[temp1],32
745 ldf.fill f19=[temp2],32
746 ;;
747 ldf.fill f20=[temp1],32
748 ldf.fill f21=[temp2],32
749 ;;
750 ldf.fill f22=[temp1],32
751 ldf.fill f23=[temp2],32
752 ;;
753 ldf.fill f24=[temp1],32
754 ldf.fill f25=[temp2],32
755 ;;
756 ldf.fill f26=[temp1],32
757 ldf.fill f27=[temp2],32
758 ;;
759 ldf.fill f28=[temp1],32
760 ldf.fill f29=[temp2],32
761 ;;
762 ldf.fill f30=[temp1],SW(B2)-SW(F30)
763 ldf.fill f31=[temp2],SW(B3)-SW(F31)
764 ;;
765 ld8 temp3=[temp1],16 // restore b2
766 ld8 temp4=[temp2],16 // restore b3
767 ;;
768 mov b2=temp3
769 mov b3=temp4
770 ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4
771 ld8 temp4=[temp2] // restore b5
772 ;;
773 mov b4=temp3
774 mov b5=temp4
775 ld8 temp3=[temp1] // restore ar.lc
776 ;;
777 mov ar.lc=temp3
637 778
638// Restore bank1 GR16-31 779 // Restore the pt_regs data that is not in minstate. The previous code
639 GET_IA64_MCA_DATA(r2) 780 // left regs at switch_stack.
781 add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
782 ;;
783 add temp1=PT(B6), regs
784 add temp2=PT(B7), regs
785 ;;
786 ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6
787 ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7
788 ;;
789 mov b6=temp3
790 mov b7=temp4
791 ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd
792 ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd
793 ;;
794 mov ar.csd=temp3
795 mov ar.ssd=temp4
796 ld8 temp3=[temp1] // restore ar.unat
797 add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
798 ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs
799 ;;
800 mov ar.unat=temp3
801 mov ar.pfs=temp4
802 // ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack.
803 ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv
804 ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr
805 ;;
806 mov ar.ccv=temp3
807 mov ar.fpsr=temp4
808 ldf.fill f6=[temp1],PT(F8)-PT(F6)
809 ldf.fill f7=[temp2],PT(F9)-PT(F7)
810 ;;
811 ldf.fill f8=[temp1],PT(F10)-PT(F8)
812 ldf.fill f9=[temp2],PT(F11)-PT(F9)
813 ;;
814 ldf.fill f10=[temp1]
815 ldf.fill f11=[temp2]
816
817 // Restore the SAL to OS state. The previous code left regs at pt_regs.
818 add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
640 ;; 819 ;;
641 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 820 add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs
642 821 add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs
643restore_GRs: // restore bank-1 GRs 16-31 822 ;;
644 bsw.1;; 823 ld8 r12=[temp1],16 // sal_ra
645 add r3=16*8,r2;; // to get to NaT of GR 16-31 824 ld8 r9=[temp2],16 // sal_gp
646 ld8 r3=[r3];; 825 ;;
647 mov ar.unat=r3;; // first restore NaT 826 ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task
648 827 ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT
649 ld8.fill r16=[r2],8;; 828 ;;
650 ld8.fill r17=[r2],8;; 829 ld8 temp3=[temp1],16 // cr.isr
651 ld8.fill r18=[r2],8;; 830 ld8 temp4=[temp2],16 // cr.ifa
652 ld8.fill r19=[r2],8;; 831 ;;
653 ld8.fill r20=[r2],8;; 832 mov cr.isr=temp3
654 ld8.fill r21=[r2],8;; 833 mov cr.ifa=temp4
655 ld8.fill r22=[r2],8;; 834 ld8 temp3=[temp1],16 // cr.itir
656 ld8.fill r23=[r2],8;; 835 ld8 temp4=[temp2],16 // cr.iipa
657 ld8.fill r24=[r2],8;; 836 ;;
658 ld8.fill r25=[r2],8;; 837 mov cr.itir=temp3
659 ld8.fill r26=[r2],8;; 838 mov cr.iipa=temp4
660 ld8.fill r27=[r2],8;; 839 ld8 temp3=[temp1],16 // cr.iim
661 ld8.fill r28=[r2],8;; 840 ld8 temp4=[temp2],16 // cr.iha
662 ld8.fill r29=[r2],8;; 841 ;;
663 ld8.fill r30=[r2],8;; 842 mov cr.iim=temp3
664 ld8.fill r31=[r2],8;; 843 mov cr.iha=temp4
665 844 dep r22=0,r22,62,2 // pal_min_state, physical, uncached
666 ld8 r3=[r2],8;; // increment to skip NaT 845 mov IA64_KR(CURRENT)=r21
667 bsw.0;; 846 ld8 r8=[temp1] // os_status
668 847 ld8 r10=[temp2] // context
669restore_BRs: 848
670 add r4=8,r2 // duplicate r2 in r4 849 br.sptk b0
671 add r6=2*8,r2;; // duplicate r2 in r6
672
673 ld8 r3=[r2],3*8
674 ld8 r5=[r4],3*8
675 ld8 r7=[r6],3*8;;
676 mov b0=r3
677 mov b1=r5
678 mov b2=r7;;
679
680 ld8 r3=[r2],3*8
681 ld8 r5=[r4],3*8
682 ld8 r7=[r6],3*8;;
683 mov b3=r3
684 mov b4=r5
685 mov b5=r7;;
686
687 ld8 r3=[r2],2*8
688 ld8 r5=[r4],2*8;;
689 mov b6=r3
690 mov b7=r5;;
691
692restore_CRs:
693 add r4=8,r2 // duplicate r2 in r4
694 add r6=2*8,r2;; // duplicate r2 in r6
695
696 ld8 r3=[r2],8*8
697 ld8 r5=[r4],3*8
698 ld8 r7=[r6],3*8;; // 48 byte increments
699 mov cr.dcr=r3
700 mov cr.itm=r5
701 mov cr.iva=r7;;
702
703 ld8 r3=[r2],8*8;; // 64 byte increments
704// mov cr.pta=r3
705
706
707// if PSR.ic=1, reading interruption registers causes an illegal operation fault
708 mov r3=psr;;
709 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
710(p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes
711
712begin_rskip_intr_regs:
713(p6) br rSkipIntrRegs;;
714
715 add r4=8,r2 // duplicate r2 in r4
716 add r6=2*8,r2;; // duplicate r2 in r6
717
718 ld8 r3=[r2],3*8
719 ld8 r5=[r4],3*8
720 ld8 r7=[r6],3*8;;
721 mov cr.ipsr=r3
722// mov cr.isr=r5 // cr.isr is read only
723
724 ld8 r3=[r2],3*8
725 ld8 r5=[r4],3*8
726 ld8 r7=[r6],3*8;;
727 mov cr.iip=r3
728 mov cr.ifa=r5
729 mov cr.itir=r7;;
730
731 ld8 r3=[r2],3*8
732 ld8 r5=[r4],3*8
733 ld8 r7=[r6],3*8;;
734 mov cr.iipa=r3
735 mov cr.ifs=r5
736 mov cr.iim=r7
737
738 ld8 r3=[r2],160;; // 160 byte increment
739 mov cr.iha=r3
740
741rSkipIntrRegs:
742 ld8 r3=[r2],152;; // another 152 byte inc.
743
744 add r4=8,r2 // duplicate r2 in r4
745 add r6=2*8,r2;; // duplicate r2 in r6
746
747 ld8 r3=[r2],8*3
748 ld8 r5=[r4],8*3
749 ld8 r7=[r6],8*3;;
750 mov cr.lid=r3
751// mov cr.ivr=r5 // cr.ivr is read only
752 mov cr.tpr=r7;;
753
754 ld8 r3=[r2],8*3
755 ld8 r5=[r4],8*3
756 ld8 r7=[r6],8*3;;
757// mov cr.eoi=r3
758// mov cr.irr0=r5 // cr.irr0 is read only
759// mov cr.irr1=r7;; // cr.irr1 is read only
760
761 ld8 r3=[r2],8*3
762 ld8 r5=[r4],8*3
763 ld8 r7=[r6],8*3;;
764// mov cr.irr2=r3 // cr.irr2 is read only
765// mov cr.irr3=r5 // cr.irr3 is read only
766 mov cr.itv=r7;;
767
768 ld8 r3=[r2],8*7
769 ld8 r5=[r4],8*7;;
770 mov cr.pmv=r3
771 mov cr.cmcv=r5;;
772
773 ld8 r3=[r2],8*23
774 ld8 r5=[r4],8*23;;
775 adds r2=8*23,r2
776 adds r4=8*23,r4;;
777// mov cr.lrr0=r3
778// mov cr.lrr1=r5
779
780 adds r2=8*2,r2;;
781
782restore_ARs:
783 add r4=8,r2 // duplicate r2 in r4
784 add r6=2*8,r2;; // duplicate r2 in r6
785
786 ld8 r3=[r2],3*8
787 ld8 r5=[r4],3*8
788 ld8 r7=[r6],3*8;;
789 mov ar.k0=r3
790 mov ar.k1=r5
791 mov ar.k2=r7;;
792
793 ld8 r3=[r2],3*8
794 ld8 r5=[r4],3*8
795 ld8 r7=[r6],3*8;;
796 mov ar.k3=r3
797 mov ar.k4=r5
798 mov ar.k5=r7;;
799
800 ld8 r3=[r2],10*8
801 ld8 r5=[r4],10*8
802 ld8 r7=[r6],10*8;;
803 mov ar.k6=r3
804 mov ar.k7=r5
805 ;;
806
807 ld8 r3=[r2],3*8
808 ld8 r5=[r4],3*8
809 ld8 r7=[r6],3*8;;
810// mov ar.rsc=r3
811// mov ar.bsp=r5 // ar.bsp is read only
812 mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
813 ;;
814 mov ar.bspstore=r7;;
815
816 ld8 r9=[r2],8*13;;
817 mov ar.rnat=r9
818
819 mov ar.rsc=r3
820 ld8 r3=[r2],8*4;;
821 mov ar.ccv=r3
822
823 ld8 r3=[r2],8*4;;
824 mov ar.unat=r3
825
826 ld8 r3=[r2],8*4;;
827 mov ar.fpsr=r3
828
829 ld8 r3=[r2],160;; // 160 byte increment
830// mov ar.itc=r3
831
832 ld8 r3=[r2],8;;
833 mov ar.pfs=r3
834
835 ld8 r3=[r2],8;;
836 mov ar.lc=r3
837
838 ld8 r3=[r2];;
839 mov ar.ec=r3
840 add r2=8*62,r2;; // padding
841
842restore_RRs:
843 mov r5=ar.lc
844 mov ar.lc=0x08-1
845 movl r4=0x00;;
846cStRRr:
847 dep.z r7=r4,61,3
848 ld8 r3=[r2],8;;
849 mov rr[r7]=r3 // what are its access privileges?
850 add r4=1,r4
851 br.cloop.sptk.few cStRRr
852 ;;
853 mov ar.lc=r5
854 ;;
855end_os_mca_restore:
856 br ia64_os_mca_done_restore;;
857 850
858//EndStub////////////////////////////////////////////////////////////////////// 851//EndStub//////////////////////////////////////////////////////////////////////
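The PT() and SW() offsets that ia64_state_restore walks are not hand-maintained numbers; they are generated from the C structures so the stubs cannot drift out of sync with struct pt_regs and struct switch_stack. A hedged sketch of how such constants are produced, in the style of asm-offsets.c (also touched by this patch); the two field names are illustrative:

	/* asm-offsets.c pattern: emit offsetof() values the assembler can
	 * consume; PT(B6) and SW(B4) expand to constants like these. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " : : "i" (val))

	void foo(void)
	{
		DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof(struct pt_regs, b6));
		DEFINE(IA64_SWITCH_STACK_B4_OFFSET,
		       offsetof(struct switch_stack, b4));
	}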
859 852
860 853
861// ok, the issue here is that we need to save state information so 854//++
862// it can be usable by the kernel debugger and show regs routines. 855// Name:
863// In order to do this, our best bet is to save the current state (plus 856// ia64_new_stack()
864// the state information obtained from the MIN_STATE_AREA) into a pt_regs
865// format. This way we can pass it on in a usable format. 857//
866// 857//
867 858// Stub Description:
868// 859//
869// SAL to OS entry point for INIT on the monarch processor 860// Switch to the MCA/INIT stack.
870// This has been defined for registration purposes with SAL
871// as a part of ia64_mca_init.
872// 861//
873// When we get here, the following registers have been 862// r2 contains the return address, r3 contains either
874// set by the SAL for our use 863// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
875// 864//
876// 1. GR1 = OS INIT GP 865// On entry RBS is still on the original stack, this routine switches RBS
877// 2. GR8 = PAL_PROC physical address 866// to use the MCA/INIT stack.
878// 3. GR9 = SAL_PROC physical address
879// 4. GR10 = SAL GP (physical)
880// 5. GR11 = Init Reason
881// 0 = Received INIT for event other than crash dump switch
882// 1 = Received wakeup at the end of an OS_MCA corrected machine check
883 2 = Received INIT due to CrashDump switch assertion
884// 867//
885// 6. GR12 = Return address to location within SAL_INIT procedure 868// On entry, sos->pal_min_state is physical, on exit it is virtual.
886 869//
870//--
887 871
888GLOBAL_ENTRY(ia64_monarch_init_handler) 872ia64_new_stack:
889 .prologue 873 add regs=MCA_PT_REGS_OFFSET, r3
890 // stash the information the SAL passed to os 874 add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
891 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) 875 mov b0=r2 // save return address
876 GET_IA64_MCA_DATA(temp1)
877 invala
892 ;; 878 ;;
893 SAVE_MIN_WITH_COVER 879 add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
880 add regs=regs, temp1 // struct pt_regs on MCA or INIT stack
894 ;; 881 ;;
895 mov r8=cr.ifa 882 // Address of minstate area provided by PAL is physical, uncacheable.
896 mov r9=cr.isr 883 // Convert to Linux virtual address in region 6 for C code.
897 adds r3=8,r2 // set up second base pointer 884 ld8 ms=[temp2] // pal_min_state, physical
898 ;; 885 ;;
899 SAVE_REST 886 dep temp1=-1,ms,62,2 // set region 6
900 887 mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
901// ok, enough should be saved at this point to be dangerous, and supply 888 ;;
902// information for a dump 889 st8 [temp2]=temp1 // pal_min_state, virtual
903// We need to switch to Virtual mode before hitting the C functions.
904 890
905 movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN 891 add temp4=temp3, regs // start of bspstore on new stack
906 mov r3=psr // get the current psr, minimum enabled at this point
907 ;; 892 ;;
908 or r2=r2,r3 893 mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack
909 ;; 894 ;;
910 movl r3=IVirtual_Switch 895 flushrs // must be first in group
896 br.sptk b0
897
898//EndStub//////////////////////////////////////////////////////////////////////
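ia64_new_stack converts the PAL-supplied minstate pointer from physical to the region-6 uncached virtual mapping by setting address bits 62-63 (dep temp1=-1,ms,62,2); ia64_state_restore clears the same bits to hand a physical pointer back (dep r22=0,r22,62,2). A hedged C sketch of the two conversions; the helper names are illustrative and assume a physical address below 2^61:

	/* Region bits sit at the top of the address: 0x6 selects the
	 * kernel's uncached identity mapping. */
	static inline unsigned long pa_to_uncached_va(unsigned long pa)
	{
		return pa | (0x3UL << 62);	/* dep temp1=-1,ms,62,2 */
	}

	static inline unsigned long uncached_va_to_pa(unsigned long va)
	{
		return va & ~(0x3UL << 62);	/* dep r22=0,r22,62,2 */
	}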
899
900
901//++
902// Name:
903// ia64_old_stack()
904//
905// Stub Description:
906//
907// Switch to the old stack.
908//
909// r2 contains the return address, r3 contains either
910// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
911//
912// On entry, pal_min_state is virtual, on exit it is physical.
913//
914// On entry RBS is on the MCA/INIT stack, this routine switches RBS
915// back to the previous stack.
916//
917// The psr is set to all zeroes. SAL return requires either all zeroes or
918// just psr.mc set. Leaving psr.mc off allows INIT to be issued if this
919// code does not perform correctly.
920//
921// The dirty registers at the time of the event were flushed to the
922// MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers
923// before reverting to the previous bspstore.
924//--
925
926ia64_old_stack:
927 add regs=MCA_PT_REGS_OFFSET, r3
928 mov b0=r2 // save return address
929 GET_IA64_MCA_DATA(temp2)
930 LOAD_PHYSICAL(p0,temp1,1f)
911 ;; 931 ;;
912 mov cr.iip=r3 // short return to set the appropriate bits 932 mov cr.ipsr=r0
913 mov cr.ipsr=r2 // need to do an rfi to set appropriate bits 933 mov cr.ifs=r0
934 mov cr.iip=temp1
914 ;; 935 ;;
936 invala
915 rfi 937 rfi
9381:
939
940 add regs=regs, temp2 // struct pt_regs on MCA or INIT stack
916 ;; 941 ;;
917IVirtual_Switch: 942 add temp1=PT(LOADRS), regs
918 //
919 // We should now be running virtual
920 //
921 // Let's call the C handler to get the rest of the state info
922 //
923 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
924 ;; 943 ;;
925 adds out0=16,sp // out0 = pointer to pt_regs 944 ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs
926 ;; 945 ;;
927 DO_SAVE_SWITCH_STACK 946 ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore
928 .body 947 mov ar.rsc=temp2
929 adds out1=16,sp // out0 = pointer to switch_stack 948 ;;
949 loadrs
950 ld8 temp4=[temp1] // restore ar.rnat
951 ;;
952 mov ar.bspstore=temp3 // back to old stack
953 ;;
954 mov ar.rnat=temp4
955 ;;
956
957 br.sptk b0
930 958
931 br.call.sptk.many rp=ia64_init_handler 959//EndStub//////////////////////////////////////////////////////////////////////
932.ret1:
933 960
934return_from_init:
935 br.sptk return_from_init
936END(ia64_monarch_init_handler)
937 961
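ia64_old_stack above consumes the loadrs value prepared in ia64_state_save: the size of the dirty RSE partition (ar.bsp - ar.bspstore at event time) shifted into the loadrs field of ar.rsc, which starts at bit 16. Writing that value to ar.rsc and issuing loadrs reloads the dirty registers from the MCA/INIT stack before ar.bspstore is switched back. A small sketch of the computation, under those assumptions:

	/* The "shl temp3=temp3,16" from ia64_state_save in C form: the
	 * dirty byte count lands in ar.rsc.loadrs, every other rsc field
	 * stays zero (enforced lazy mode, pl 0, little-endian). */
	static unsigned long loadrs_value(unsigned long bsp,
					  unsigned long bspstore)
	{
		return (bsp - bspstore) << 16;
	}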
962//++
963// Name:
964// ia64_set_kernel_registers()
938// 965//
939// SAL to OS entry point for INIT on the slave processor 966// Stub Description:
940// This has been defined for registration purposes with SAL 967//
941// as a part of ia64_mca_init. 968// Set the registers that are required by the C code in order to run on an
969// MCA/INIT stack.
970//
971// r2 contains the return address, r3 contains either
972// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
942// 973//
974//--
975
976ia64_set_kernel_registers:
977 add temp3=MCA_SP_OFFSET, r3
978 add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3
979 mov b0=r2 // save return address
980 GET_IA64_MCA_DATA(temp1)
981 ;;
982 add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp
983 add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
984 add r13=temp1, r3 // set current to start of MCA/INIT stack
985 ;;
986 ld8 r1=[temp4] // OS GP from SAL OS state
987 ;;
988 DATA_PA_TO_VA(r1,temp1)
989 DATA_PA_TO_VA(r12,temp2)
990 DATA_PA_TO_VA(r13,temp3)
991 ;;
992 mov IA64_KR(CURRENT)=r13
993
994 // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK?
995
996 br.sptk b0
997
998//EndStub//////////////////////////////////////////////////////////////////////
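ia64_set_kernel_registers does the cached counterpart of the region-6 trick: DATA_PA_TO_VA() rewrites the physical gp, stack pointer and current pointers into region-7 kernel virtual addresses before C code runs on the MCA/INIT stack. A hedged one-liner of that conversion, assuming DATA_PA_TO_VA() targets region 7 (the cached identity mapping); the helper name is illustrative:

	/* Set the three region bits to 7; valid for physical addresses
	 * below 2^61, which is what the stub assumes. */
	static inline unsigned long pa_to_cached_va(unsigned long pa)
	{
		return pa | (0x7UL << 61);
	}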
999
1000#undef ms
1001#undef regs
1002#undef temp1
1003#undef temp2
1004#undef temp3
1005#undef temp4
1006
943 1007
944GLOBAL_ENTRY(ia64_slave_init_handler) 1008// Support function for mca.c, it is here to avoid using inline asm. Given the
9451: br.sptk 1b 1009// address of an rnat slot, if that address is below the current ar.bspstore
946END(ia64_slave_init_handler) 1010// then return the contents of that slot, otherwise return the contents of
1011// ar.rnat.
1012GLOBAL_ENTRY(ia64_get_rnat)
1013 alloc r14=ar.pfs,1,0,0,0
1014 mov ar.rsc=0
1015 ;;
1016 mov r14=ar.bspstore
1017 ;;
1018 cmp.lt p6,p7=in0,r14
1019 ;;
1020(p6) ld8 r8=[in0]
1021(p7) mov r8=ar.rnat
1022 mov ar.rsc=3
1023 br.ret.sptk.many rp
1024END(ia64_get_rnat)
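A hedged sketch of how a caller in mca.c might use ia64_get_rnat() while copying a register backing store: the NaT collection for a slot lives either in memory (if that RNaT slot has already been written back) or still in ar.rnat, and the stub picks the right source. ia64_rse_rnat_addr() is the existing helper from asm/rse.h; the wrapper name is illustrative:

	#include <asm/rse.h>	/* ia64_rse_rnat_addr() */

	extern unsigned long ia64_get_rnat(unsigned long *rnat_slot);

	/* Return the NaT collection covering the given RBS slot. */
	static unsigned long rbs_nat_collection(unsigned long *slot)
	{
		return ia64_get_rnat(ia64_rse_rnat_addr(slot));
	}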
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index abc0113a821d..6e683745af49 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -4,6 +4,8 @@
4 * 4 *
5 * Copyright (C) 2004 FUJITSU LIMITED 5 * Copyright (C) 2004 FUJITSU LIMITED
6 * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) 6 * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
7 * Copyright (C) 2005 Silicon Graphics, Inc
8 * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
7 */ 9 */
8#include <linux/config.h> 10#include <linux/config.h>
9#include <linux/types.h> 11#include <linux/types.h>
@@ -38,10 +40,6 @@
38/* max size of SAL error record (default) */ 40/* max size of SAL error record (default) */
39static int sal_rec_max = 10000; 41static int sal_rec_max = 10000;
40 42
41/* from mca.c */
42static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state;
43static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state;
44
45/* from mca_drv_asm.S */ 43/* from mca_drv_asm.S */
46extern void *mca_handler_bhhook(void); 44extern void *mca_handler_bhhook(void);
47 45
@@ -316,7 +314,8 @@ init_record_index_pools(void)
316 */ 314 */
317 315
318static mca_type_t 316static mca_type_t
319is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) 317is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
318 struct ia64_sal_os_state *sos)
320{ 319{
321 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); 320 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
322 321
@@ -327,7 +326,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
327 * Therefore it is local MCA when rendezvous has not been requested. 326 * Therefore it is local MCA when rendezvous has not been requested.
328 * If rendezvous failed, the system must be down. 327 * If rendezvous failed, the system must be down.
329 */ 328 */
330 switch (sal_to_os_handoff_state->imsto_rendez_state) { 329 switch (sos->rv_rc) {
331 case -1: /* SAL rendezvous unsuccessful */ 330 case -1: /* SAL rendezvous unsuccessful */
332 return MCA_IS_GLOBAL; 331 return MCA_IS_GLOBAL;
333 case 0: /* SAL rendezvous not required */ 332 case 0: /* SAL rendezvous not required */
@@ -388,7 +387,8 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
388 */ 387 */
389 388
390static int 389static int
391recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) 390recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
391 struct ia64_sal_os_state *sos)
392{ 392{
393 sal_log_mod_error_info_t *smei; 393 sal_log_mod_error_info_t *smei;
394 pal_min_state_area_t *pmsa; 394 pal_min_state_area_t *pmsa;
@@ -426,7 +426,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
426 * setup for resume to bottom half of MCA, 426 * setup for resume to bottom half of MCA,
427 * "mca_handler_bhhook" 427 * "mca_handler_bhhook"
428 */ 428 */
429 pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61)); 429 pmsa = sos->pal_min_state;
430 /* pass to bhhook as 1st argument (gr8) */ 430 /* pass to bhhook as 1st argument (gr8) */
431 pmsa->pmsa_gr[8-1] = smei->target_identifier; 431 pmsa->pmsa_gr[8-1] = smei->target_identifier;
432 /* set interrupted return address (but no use) */ 432 /* set interrupted return address (but no use) */
@@ -459,7 +459,8 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
459 */ 459 */
460 460
461static int 461static int
462recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) 462recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
463 struct ia64_sal_os_state *sos)
463{ 464{
464 int status = 0; 465 int status = 0;
465 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); 466 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -469,7 +470,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
469 case 1: /* partial read */ 470 case 1: /* partial read */
470 case 3: /* full line(cpu) read */ 471 case 3: /* full line(cpu) read */
471 case 9: /* I/O space read */ 472 case 9: /* I/O space read */
472 status = recover_from_read_error(slidx, peidx, pbci); 473 status = recover_from_read_error(slidx, peidx, pbci, sos);
473 break; 474 break;
474 case 0: /* unknown */ 475 case 0: /* unknown */
475 case 2: /* partial write */ 476 case 2: /* partial write */
@@ -508,7 +509,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
508 */ 509 */
509 510
510static int 511static int
511recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) 512recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
513 struct ia64_sal_os_state *sos)
512{ 514{
513 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); 515 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
514 516
@@ -545,7 +547,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
545 * This means "there are some platform errors". 547 * This means "there are some platform errors".
546 */ 548 */
547 if (platform) 549 if (platform)
548 return recover_from_platform_error(slidx, peidx, pbci); 550 return recover_from_platform_error(slidx, peidx, pbci, sos);
549 /* 551 /*
550 * On account of strange SAL error record, we cannot recover. 552 * On account of strange SAL error record, we cannot recover.
551 */ 553 */
@@ -562,8 +564,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
562 564
563static int 565static int
564mca_try_to_recover(void *rec, 566mca_try_to_recover(void *rec,
565 ia64_mca_sal_to_os_state_t *sal_to_os_state, 567 struct ia64_sal_os_state *sos)
566 ia64_mca_os_to_sal_state_t *os_to_sal_state)
567{ 568{
568 int platform_err; 569 int platform_err;
569 int n_proc_err; 570 int n_proc_err;
@@ -571,10 +572,6 @@ mca_try_to_recover(void *rec,
571 peidx_table_t peidx; 572 peidx_table_t peidx;
572 pal_bus_check_info_t pbci; 573 pal_bus_check_info_t pbci;
573 574
574 /* handoff state from/to mca.c */
575 sal_to_os_handoff_state = sal_to_os_state;
576 os_to_sal_handoff_state = os_to_sal_state;
577
578 /* Make index of SAL error record */ 575 /* Make index of SAL error record */
579 platform_err = mca_make_slidx(rec, &slidx); 576 platform_err = mca_make_slidx(rec, &slidx);
580 577
@@ -597,11 +594,11 @@ mca_try_to_recover(void *rec,
597 *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); 594 *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
598 595
599 /* Check whether MCA is global or not */ 596 /* Check whether MCA is global or not */
600 if (is_mca_global(&peidx, &pbci)) 597 if (is_mca_global(&peidx, &pbci, sos))
601 return 0; 598 return 0;
602 599
603 /* Try to recover a processor error */ 600 /* Try to recover a processor error */
604 return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci); 601 return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
605} 602}
606 603
607/* 604/*
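The mca_drv.c changes above all serve one conversion: the SAL-to-OS handoff state is now threaded through the recovery chain as an explicit struct ia64_sal_os_state *sos argument instead of file-static pointers, and pal_min_state arrives as a ready-to-use virtual pointer rather than a physical value that each user ORs into region 6. A condensed sketch of the new shape, using only names visible in the diff; the function name is illustrative:

	/* Condensed from is_mca_global() above: the rendezvous result is
	 * read straight from the handoff record. */
	static mca_type_t classify_mca(struct ia64_sal_os_state *sos)
	{
		switch (sos->rv_rc) {
		case -1:	/* SAL rendezvous unsuccessful */
			return MCA_IS_GLOBAL;
		case 0:		/* SAL rendezvous not required */
			return MCA_IS_LOCAL;
		default:	/* rendezvous taken; real code digs further */
			return MCA_IS_GLOBAL;
		}
	}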
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index f6d8a010d99b..85ed54179afa 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -5,73 +5,6 @@
5#include "entry.h" 5#include "entry.h"
6 6
7/* 7/*
8 * For ivt.s we want to access the stack virtually so we don't have to disable translation
9 * on interrupts.
10 *
11 * On entry:
12 * r1: pointer to current task (ar.k6)
13 */
14#define MINSTATE_START_SAVE_MIN_VIRT \
15(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
16 ;; \
17(pUStk) mov.m r24=ar.rnat; \
18(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
19(pKStk) mov r1=sp; /* get sp */ \
20 ;; \
21(pUStk) lfetch.fault.excl.nt1 [r22]; \
22(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
23(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
24 ;; \
25(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
26(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
27 ;; \
28(pUStk) mov r18=ar.bsp; \
29(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
30
31#define MINSTATE_END_SAVE_MIN_VIRT \
32 bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
33 ;;
34
35/*
36 * For mca_asm.S we want to access the stack physically since the state is saved before we
37 * go virtual and don't want to destroy the iip or ipsr.
38 */
39#define MINSTATE_START_SAVE_MIN_PHYS \
40(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
41(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
42(pKStk) ld8 r3 = [r3];; \
43(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
44(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
45(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
46(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
47 ;; \
48(pUStk) mov r24=ar.rnat; \
49(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
50(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
51(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
52 ;; \
53(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
54 ;; \
55(pUStk) mov r18=ar.bsp; \
56(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
57
58#define MINSTATE_END_SAVE_MIN_PHYS \
59 dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \
60 ;;
61
62#ifdef MINSTATE_VIRT
63# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT)
64# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT
65# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT
66#endif
67
68#ifdef MINSTATE_PHYS
69# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg
70# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS
71# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS
72#endif
73
74/*
75 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves 8 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
76 * the minimum state necessary that allows us to turn psr.ic back 9 * the minimum state necessary that allows us to turn psr.ic back
77 * on. 10 * on.
@@ -97,7 +30,7 @@
97 * we can pass interruption state as arguments to a handler. 30 * we can pass interruption state as arguments to a handler.
98 */ 31 */
99#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ 32#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
100 MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ 33 mov r16=IA64_KR(CURRENT); /* M */ \
101 mov r27=ar.rsc; /* M */ \ 34 mov r27=ar.rsc; /* M */ \
102 mov r20=r1; /* A */ \ 35 mov r20=r1; /* A */ \
103 mov r25=ar.unat; /* M */ \ 36 mov r25=ar.unat; /* M */ \
@@ -118,7 +51,21 @@
118 SAVE_IFS; \ 51 SAVE_IFS; \
119 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ 52 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
120 ;; \ 53 ;; \
121 MINSTATE_START_SAVE_MIN \ 54(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
55 ;; \
56(pUStk) mov.m r24=ar.rnat; \
57(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
58(pKStk) mov r1=sp; /* get sp */ \
59 ;; \
60(pUStk) lfetch.fault.excl.nt1 [r22]; \
61(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
62(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
63 ;; \
64(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
65(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
66 ;; \
67(pUStk) mov r18=ar.bsp; \
68(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
122 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ 69 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
123 adds r16=PT(CR_IPSR),r1; \ 70 adds r16=PT(CR_IPSR),r1; \
124 ;; \ 71 ;; \
@@ -181,7 +128,8 @@
181 EXTRA; \ 128 EXTRA; \
182 movl r1=__gp; /* establish kernel global pointer */ \ 129 movl r1=__gp; /* establish kernel global pointer */ \
183 ;; \ 130 ;; \
184 MINSTATE_END_SAVE_MIN 131 bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
132 ;;
185 133
186/* 134/*
187 * SAVE_REST saves the remainder of pt_regs (with psr.ic on). 135 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
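With the MINSTATE_PHYS variants gone, DO_SAVE_MIN always uses the virtual-mode sequence above; the pKStk/pUStk predicates pick between staying on the current kernel stack and switching from the user register backing store. A hedged C model of that decision; every name is an illustrative stand-in for the predicated assembly:

	/* Rough model of the DO_SAVE_MIN stack switch; not kernel API. */
	struct min_save {
		unsigned long r1, sp, task_base;
		unsigned long ar_rsc, ar_rnat, ar_bspstore;
		unsigned long saved_rnat, saved_bspstore;
	};

	static void stack_switch(struct min_save *s, int on_kernel_stack,
				 unsigned long rbs_off, unsigned long stk_off,
				 unsigned long pt_regs_size)
	{
		if (on_kernel_stack) {		/* pKStk path */
			s->r1 = s->sp - pt_regs_size;
		} else {			/* pUStk path */
			s->ar_rsc = 0;		/* enforced lazy mode */
			s->saved_rnat = s->ar_rnat;
			s->saved_bspstore = s->ar_bspstore;
			s->ar_bspstore = s->task_base + rbs_off;
			s->r1 = s->task_base + stk_off - pt_regs_size;
			s->ar_rsc = 3;		/* back to eager mode */
		}
	}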
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 25e7c8344564..89faa603c6be 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -307,11 +307,9 @@ vm_info(char *page)
307 307
308 if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { 308 if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
309 printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); 309 printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
310 return 0; 310 } else {
311 }
312 311
313 312 p += sprintf(p,
314 p += sprintf(p,
315 "Physical Address Space : %d bits\n" 313 "Physical Address Space : %d bits\n"
316 "Virtual Address Space : %d bits\n" 314 "Virtual Address Space : %d bits\n"
317 "Protection Key Registers(PKR) : %d\n" 315 "Protection Key Registers(PKR) : %d\n"
@@ -319,92 +317,99 @@ vm_info(char *page)
319 "Hash Tag ID : 0x%x\n" 317 "Hash Tag ID : 0x%x\n"
320 "Size of RR.rid : %d\n", 318 "Size of RR.rid : %d\n",
321 vm_info_1.pal_vm_info_1_s.phys_add_size, 319 vm_info_1.pal_vm_info_1_s.phys_add_size,
322 vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1, 320 vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
323 vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id, 321 vm_info_1.pal_vm_info_1_s.max_pkr+1,
322 vm_info_1.pal_vm_info_1_s.key_size,
323 vm_info_1.pal_vm_info_1_s.hash_tag_id,
324 vm_info_2.pal_vm_info_2_s.rid_size); 324 vm_info_2.pal_vm_info_2_s.rid_size);
325 }
325 326
326 if (ia64_pal_mem_attrib(&attrib) != 0) 327 if (ia64_pal_mem_attrib(&attrib) == 0) {
327 return 0; 328 p += sprintf(p, "Supported memory attributes : ");
328 329 sep = "";
329 p += sprintf(p, "Supported memory attributes : "); 330 for (i = 0; i < 8; i++) {
330 sep = ""; 331 if (attrib & (1 << i)) {
331 for (i = 0; i < 8; i++) { 332 p += sprintf(p, "%s%s", sep, mem_attrib[i]);
332 if (attrib & (1 << i)) { 333 sep = ", ";
333 p += sprintf(p, "%s%s", sep, mem_attrib[i]); 334 }
334 sep = ", ";
335 } 335 }
336 p += sprintf(p, "\n");
336 } 337 }
337 p += sprintf(p, "\n");
338 338
339 if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { 339 if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
340 printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); 340 printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
341 return 0; 341 } else {
342 }
343
344 p += sprintf(p,
345 "\nTLB walker : %simplemented\n"
346 "Number of DTR : %d\n"
347 "Number of ITR : %d\n"
348 "TLB insertable page sizes : ",
349 vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
350 vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
351 vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
352 342
343 p += sprintf(p,
344 "\nTLB walker : %simplemented\n"
345 "Number of DTR : %d\n"
346 "Number of ITR : %d\n"
347 "TLB insertable page sizes : ",
348 vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
349 vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
350 vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
353 351
354 p = bitvector_process(p, tr_pages);
355 352
356 p += sprintf(p, "\nTLB purgeable page sizes : "); 353 p = bitvector_process(p, tr_pages);
357 354
358 p = bitvector_process(p, vw_pages); 355 p += sprintf(p, "\nTLB purgeable page sizes : ");
359 356
357 p = bitvector_process(p, vw_pages);
358 }
360 if ((status=ia64_get_ptce(&ptce)) != 0) { 359 if ((status=ia64_get_ptce(&ptce)) != 0) {
361 printk(KERN_ERR "ia64_get_ptce=%ld\n", status); 360 printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
362 return 0; 361 } else {
363 } 362 p += sprintf(p,
364
365 p += sprintf(p,
366 "\nPurge base address : 0x%016lx\n" 363 "\nPurge base address : 0x%016lx\n"
367 "Purge outer loop count : %d\n" 364 "Purge outer loop count : %d\n"
368 "Purge inner loop count : %d\n" 365 "Purge inner loop count : %d\n"
369 "Purge outer loop stride : %d\n" 366 "Purge outer loop stride : %d\n"
370 "Purge inner loop stride : %d\n", 367 "Purge inner loop stride : %d\n",
371 ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]); 368 ptce.base, ptce.count[0], ptce.count[1],
369 ptce.stride[0], ptce.stride[1]);
372 370
373 p += sprintf(p, 371 p += sprintf(p,
374 "TC Levels : %d\n" 372 "TC Levels : %d\n"
375 "Unique TC(s) : %d\n", 373 "Unique TC(s) : %d\n",
376 vm_info_1.pal_vm_info_1_s.num_tc_levels, 374 vm_info_1.pal_vm_info_1_s.num_tc_levels,
377 vm_info_1.pal_vm_info_1_s.max_unique_tcs); 375 vm_info_1.pal_vm_info_1_s.max_unique_tcs);
378 376
379 for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { 377 for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
380 for (j=2; j>0 ; j--) { 378 for (j=2; j>0 ; j--) {
381 tc_pages = 0; /* just in case */ 379 tc_pages = 0; /* just in case */
382 380
383 381
384 /* even without unification, some levels may not be present */ 382 /* even without unification, some levels may not be present */
385 if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { 383 if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
386 continue; 384 continue;
387 } 385 }
388 386
389 p += sprintf(p, 387 p += sprintf(p,
390 "\n%s Translation Cache Level %d:\n" 388 "\n%s Translation Cache Level %d:\n"
391 "\tHash sets : %d\n" 389 "\tHash sets : %d\n"
392 "\tAssociativity : %d\n" 390 "\tAssociativity : %d\n"
393 "\tNumber of entries : %d\n" 391 "\tNumber of entries : %d\n"
394 "\tFlags : ", 392 "\tFlags : ",
395 cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets, 393 cache_types[j+tc_info.tc_unified], i+1,
396 tc_info.tc_associativity, tc_info.tc_num_entries); 394 tc_info.tc_num_sets,
395 tc_info.tc_associativity,
396 tc_info.tc_num_entries);
397 397
398 if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized "); 398 if (tc_info.tc_pf)
399 if (tc_info.tc_unified) p += sprintf(p, "Unified "); 399 p += sprintf(p, "PreferredPageSizeOptimized ");
400 if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction"); 400 if (tc_info.tc_unified)
401 p += sprintf(p, "Unified ");
402 if (tc_info.tc_reduce_tr)
403 p += sprintf(p, "TCReduction");
401 404
402 p += sprintf(p, "\n\tSupported page sizes: "); 405 p += sprintf(p, "\n\tSupported page sizes: ");
403 406
404 p = bitvector_process(p, tc_pages); 407 p = bitvector_process(p, tc_pages);
405 408
406 /* when unified data (j=2) is enough */ 409 /* when unified data (j=2) is enough */
407 if (tc_info.tc_unified) break; 410 if (tc_info.tc_unified)
411 break;
412 }
408 } 413 }
409 } 414 }
410 p += sprintf(p, "\n"); 415 p += sprintf(p, "\n");
@@ -440,14 +445,14 @@ register_info(char *page)
440 p += sprintf(p, "\n"); 445 p += sprintf(p, "\n");
441 } 446 }
442 447
443 if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0; 448 if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {
444 449
445 p += sprintf(p, 450 p += sprintf(p,
446 "RSE stacked physical registers : %ld\n" 451 "RSE stacked physical registers : %ld\n"
447 "RSE load/store hints : %ld (%s)\n", 452 "RSE load/store hints : %ld (%s)\n",
448 phys_stacked, hints.ph_data, 453 phys_stacked, hints.ph_data,
449 hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); 454 hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
450 455 }
451 if (ia64_pal_debug_info(&iregs, &dregs)) 456 if (ia64_pal_debug_info(&iregs, &dregs))
452 return 0; 457 return 0;
453 458
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f1201ac8a116..1650353e3f77 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -38,6 +38,7 @@
38#include <linux/pagemap.h> 38#include <linux/pagemap.h>
39#include <linux/mount.h> 39#include <linux/mount.h>
40#include <linux/bitops.h> 40#include <linux/bitops.h>
41#include <linux/rcupdate.h>
41 42
42#include <asm/errno.h> 43#include <asm/errno.h>
43#include <asm/intrinsics.h> 44#include <asm/intrinsics.h>
@@ -496,7 +497,7 @@ typedef struct {
496static pfm_stats_t pfm_stats[NR_CPUS]; 497static pfm_stats_t pfm_stats[NR_CPUS];
497static pfm_session_t pfm_sessions; /* global sessions information */ 498static pfm_session_t pfm_sessions; /* global sessions information */
498 499
499static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED; 500static DEFINE_SPINLOCK(pfm_alt_install_check);
500static pfm_intr_handler_desc_t *pfm_alt_intr_handler; 501static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
501 502
502static struct proc_dir_entry *perfmon_dir; 503static struct proc_dir_entry *perfmon_dir;
@@ -2217,15 +2218,17 @@ static void
2217pfm_free_fd(int fd, struct file *file) 2218pfm_free_fd(int fd, struct file *file)
2218{ 2219{
2219 struct files_struct *files = current->files; 2220 struct files_struct *files = current->files;
2221 struct fdtable *fdt = files_fdtable(files);
2220 2222
2221 /* 2223 /*
2222 * there is no fd_uninstall(), so we do it here 2224 * there is no fd_uninstall(), so we do it here
2223 */ 2225 */
2224 spin_lock(&files->file_lock); 2226 spin_lock(&files->file_lock);
2225 files->fd[fd] = NULL; 2227 rcu_assign_pointer(fdt->fd[fd], NULL);
2226 spin_unlock(&files->file_lock); 2228 spin_unlock(&files->file_lock);
2227 2229
2228 if (file) put_filp(file); 2230 if (file)
2231 put_filp(file);
2229 put_unused_fd(fd); 2232 put_unused_fd(fd);
2230} 2233}
2231 2234
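The pfm_free_fd() change above pairs the locked NULL store with the lockless readers introduced by the 2.6 RCU fdtable work: rcu_assign_pointer() publishes the store with the memory barrier those readers rely on. A hedged sketch of the reader side, assuming the files_fdtable()/rcu_dereference() API of that era; the function name is illustrative:

	/* Illustrative lockless lookup paired with the rcu_assign_pointer()
	 * store in pfm_free_fd(); real callers also take a reference on
	 * the file before using it. */
	static struct file *peek_fd(struct files_struct *files, int fd)
	{
		struct file *file;

		rcu_read_lock();
		file = rcu_dereference(files_fdtable(files)->fd[fd]);
		rcu_read_unlock();
		return file;
	}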
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 6f0cc7a6634e..ca68e6e44a72 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -22,6 +22,11 @@
22 * 22 *
23 * Dec 5 2004 kaos@sgi.com 23 * Dec 5 2004 kaos@sgi.com
24 * Standardize which records are cleared automatically. 24 * Standardize which records are cleared automatically.
25 *
26 * Aug 18 2005 kaos@sgi.com
27 * mca.c may not pass a buffer; a NULL buffer just indicates that a new
28 * record is available in SAL.
29 * Replace some NR_CPUS uses with online-CPU checks, for CPU hotplug.
25 */ 30 */
26 31
27#include <linux/types.h> 32#include <linux/types.h>
@@ -193,7 +198,7 @@ shift1_data_saved (struct salinfo_data *data, int shift)
193 * The buffer passed from mca.c points to the output from ia64_log_get. This is 198 * The buffer passed from mca.c points to the output from ia64_log_get. This is
194 * a persistent buffer but its contents can change between the interrupt and 199 * a persistent buffer but its contents can change between the interrupt and
195 * when user space processes the record. Save the record id to identify 200 * when user space processes the record. Save the record id to identify
196 * changes. 201 * changes. If the buffer is NULL then just update the bitmap.
197 */ 202 */
198void 203void
199salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) 204salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
@@ -206,27 +211,29 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
206 211
207 BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); 212 BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
208 213
209 if (irqsafe) 214 if (buffer) {
210 spin_lock_irqsave(&data_saved_lock, flags); 215 if (irqsafe)
211 for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { 216 spin_lock_irqsave(&data_saved_lock, flags);
212 if (!data_saved->buffer) 217 for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
213 break; 218 if (!data_saved->buffer)
214 } 219 break;
215 if (i == saved_size) { 220 }
216 if (!data->saved_num) { 221 if (i == saved_size) {
217 shift1_data_saved(data, 0); 222 if (!data->saved_num) {
218 data_saved = data->data_saved + saved_size - 1; 223 shift1_data_saved(data, 0);
219 } else 224 data_saved = data->data_saved + saved_size - 1;
220 data_saved = NULL; 225 } else
221 } 226 data_saved = NULL;
222 if (data_saved) { 227 }
223 data_saved->cpu = smp_processor_id(); 228 if (data_saved) {
224 data_saved->id = ((sal_log_record_header_t *)buffer)->id; 229 data_saved->cpu = smp_processor_id();
225 data_saved->size = size; 230 data_saved->id = ((sal_log_record_header_t *)buffer)->id;
226 data_saved->buffer = buffer; 231 data_saved->size = size;
232 data_saved->buffer = buffer;
233 }
234 if (irqsafe)
235 spin_unlock_irqrestore(&data_saved_lock, flags);
227 } 236 }
228 if (irqsafe)
229 spin_unlock_irqrestore(&data_saved_lock, flags);
230 237
231 if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) { 238 if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) {
232 if (irqsafe) 239 if (irqsafe)
@@ -244,7 +251,7 @@ salinfo_timeout_check(struct salinfo_data *data)
244 int i; 251 int i;
245 if (!data->open) 252 if (!data->open)
246 return; 253 return;
247 for (i = 0; i < NR_CPUS; ++i) { 254 for_each_online_cpu(i) {
248 if (test_bit(i, &data->cpu_event)) { 255 if (test_bit(i, &data->cpu_event)) {
249 /* double up() is not a problem, user space will see no 256 /* double up() is not a problem, user space will see no
250 * records for the additional "events". 257 * records for the additional "events".
@@ -291,7 +298,7 @@ retry:
291 298
292 n = data->cpu_check; 299 n = data->cpu_check;
293 for (i = 0; i < NR_CPUS; i++) { 300 for (i = 0; i < NR_CPUS; i++) {
294 if (test_bit(n, &data->cpu_event)) { 301 if (test_bit(n, &data->cpu_event) && cpu_online(n)) {
295 cpu = n; 302 cpu = n;
296 break; 303 break;
297 } 304 }
@@ -585,11 +592,10 @@ salinfo_init(void)
585 592
586 /* we missed any events before now */ 593 /* we missed any events before now */
587 online = 0; 594 online = 0;
588 for (j = 0; j < NR_CPUS; j++) 595 for_each_online_cpu(j) {
589 if (cpu_online(j)) { 596 set_bit(j, &data->cpu_event);
590 set_bit(j, &data->cpu_event); 597 ++online;
591 ++online; 598 }
592 }
593 sema_init(&data->sem, online); 599 sema_init(&data->sem, online);
594 600
595 *sdir++ = dir; 601 *sdir++ = dir;
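Per the changelog note above, mca.c may now call salinfo_log_wakeup() with a NULL buffer purely to signal that a record is waiting in SAL: the data_saved bookkeeping is skipped and only the per-cpu event bit is set. A minimal sketch of such a call, assuming the SAL_INFO_TYPE_MCA record type from asm/sal.h:

	extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size,
				       int irqsafe);

	static void note_pending_record(void)
	{
		/* NULL buffer: just flag that a new record is available */
		salinfo_log_wakeup(SAL_INFO_TYPE_MCA, NULL, 0, 0);
	}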
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 84f89da7c640..1f5c26dbe705 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -384,7 +384,7 @@ setup_arch (char **cmdline_p)
384 if (early_console_setup(*cmdline_p) == 0) 384 if (early_console_setup(*cmdline_p) == 0)
385 mark_bsp_online(); 385 mark_bsp_online();
386 386
387#ifdef CONFIG_ACPI_BOOT 387#ifdef CONFIG_ACPI
388 /* Initialize the ACPI boot-time table parser */ 388 /* Initialize the ACPI boot-time table parser */
389 acpi_table_init(); 389 acpi_table_init();
390# ifdef CONFIG_ACPI_NUMA 390# ifdef CONFIG_ACPI_NUMA
@@ -420,7 +420,7 @@ setup_arch (char **cmdline_p)
420 420
421 cpu_init(); /* initialize the bootstrap CPU */ 421 cpu_init(); /* initialize the bootstrap CPU */
422 422
423#ifdef CONFIG_ACPI_BOOT 423#ifdef CONFIG_ACPI
424 acpi_boot_init(); 424 acpi_boot_init();
425#endif 425#endif
426 426
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 92ff46ad21e2..706b7734e191 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,7 +36,7 @@ int arch_register_cpu(int num)
36 parent = &sysfs_nodes[cpu_to_node(num)]; 36 parent = &sysfs_nodes[cpu_to_node(num)];
37#endif /* CONFIG_NUMA */ 37#endif /* CONFIG_NUMA */
38 38
39#ifdef CONFIG_ACPI_BOOT 39#ifdef CONFIG_ACPI
40 /* 40 /*
41 * If CPEI cannot be re-targeted, and this is 41 * If CPEI cannot be re-targeted, and this is
42 * the CPEI target, then don't create the control file 42 * the CPEI target, then don't create the control file
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 3288be47bc75..93d5a3b41f69 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -2020,28 +2020,6 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2020} 2020}
2021 2021
2022void 2022void
2023unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
2024 struct pt_regs *pt, struct switch_stack *sw)
2025{
2026 unsigned long sof;
2027
2028 init_frame_info(info, t, sw, pt->r12);
2029 info->cfm_loc = &pt->cr_ifs;
2030 info->unat_loc = &pt->ar_unat;
2031 info->pfs_loc = &pt->ar_pfs;
2032 sof = *info->cfm_loc & 0x7f;
2033 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
2034 info->ip = pt->cr_iip + ia64_psr(pt)->ri;
2035 info->pt = (unsigned long) pt;
2036 UNW_DPRINT(3, "unwind.%s:\n"
2037 " bsp 0x%lx\n"
2038 " sof 0x%lx\n"
2039 " ip 0x%lx\n",
2040 __FUNCTION__, info->bsp, sof, info->ip);
2041 find_save_locs(info);
2042}
2043
2044void
2045unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) 2023unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2046{ 2024{
2047 unsigned long sol; 2025 unsigned long sol;