Diffstat (limited to 'arch/ia64/kernel')
28 files changed, 1945 insertions, 1917 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b242594be55b..307514f7a282 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
16 | obj-$(CONFIG_IA64_PALINFO) += palinfo.o | 16 | obj-$(CONFIG_IA64_PALINFO) += palinfo.o |
17 | obj-$(CONFIG_IOSAPIC) += iosapic.o | 17 | obj-$(CONFIG_IOSAPIC) += iosapic.o |
18 | obj-$(CONFIG_MODULES) += module.o | 18 | obj-$(CONFIG_MODULES) += module.o |
19 | obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o | 19 | obj-$(CONFIG_SMP) += smp.o smpboot.o |
20 | obj-$(CONFIG_NUMA) += numa.o | 20 | obj-$(CONFIG_NUMA) += numa.o |
21 | obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o | 21 | obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o |
22 | obj-$(CONFIG_IA64_CYCLONE) += cyclone.o | 22 | obj-$(CONFIG_IA64_CYCLONE) += cyclone.o |
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 2623df5e2633..13a5b3b49bf8 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -17,20 +17,20 @@
17 | #include <asm/acpi-ext.h> | 17 | #include <asm/acpi-ext.h> |
18 | 18 | ||
19 | struct acpi_vendor_descriptor { | 19 | struct acpi_vendor_descriptor { |
20 | u8 guid_id; | 20 | u8 guid_id; |
21 | efi_guid_t guid; | 21 | efi_guid_t guid; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | struct acpi_vendor_info { | 24 | struct acpi_vendor_info { |
25 | struct acpi_vendor_descriptor *descriptor; | 25 | struct acpi_vendor_descriptor *descriptor; |
26 | u8 *data; | 26 | u8 *data; |
27 | u32 length; | 27 | u32 length; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | acpi_status | 30 | acpi_status |
31 | acpi_vendor_resource_match(struct acpi_resource *resource, void *context) | 31 | acpi_vendor_resource_match(struct acpi_resource *resource, void *context) |
32 | { | 32 | { |
33 | struct acpi_vendor_info *info = (struct acpi_vendor_info *) context; | 33 | struct acpi_vendor_info *info = (struct acpi_vendor_info *)context; |
34 | struct acpi_resource_vendor *vendor; | 34 | struct acpi_resource_vendor *vendor; |
35 | struct acpi_vendor_descriptor *descriptor; | 35 | struct acpi_vendor_descriptor *descriptor; |
36 | u32 length; | 36 | u32 length; |
@@ -38,8 +38,8 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
38 | if (resource->id != ACPI_RSTYPE_VENDOR) | 38 | if (resource->id != ACPI_RSTYPE_VENDOR) |
39 | return AE_OK; | 39 | return AE_OK; |
40 | 40 | ||
41 | vendor = (struct acpi_resource_vendor *) &resource->data; | 41 | vendor = (struct acpi_resource_vendor *)&resource->data; |
42 | descriptor = (struct acpi_vendor_descriptor *) vendor->reserved; | 42 | descriptor = (struct acpi_vendor_descriptor *)vendor->reserved; |
43 | if (vendor->length <= sizeof(*info->descriptor) || | 43 | if (vendor->length <= sizeof(*info->descriptor) || |
44 | descriptor->guid_id != info->descriptor->guid_id || | 44 | descriptor->guid_id != info->descriptor->guid_id || |
45 | efi_guidcmp(descriptor->guid, info->descriptor->guid)) | 45 | efi_guidcmp(descriptor->guid, info->descriptor->guid)) |
@@ -50,21 +50,24 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
50 | if (!info->data) | 50 | if (!info->data) |
51 | return AE_NO_MEMORY; | 51 | return AE_NO_MEMORY; |
52 | 52 | ||
53 | memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length); | 53 | memcpy(info->data, |
54 | vendor->reserved + sizeof(struct acpi_vendor_descriptor), | ||
55 | length); | ||
54 | info->length = length; | 56 | info->length = length; |
55 | return AE_CTRL_TERMINATE; | 57 | return AE_CTRL_TERMINATE; |
56 | } | 58 | } |
57 | 59 | ||
58 | acpi_status | 60 | acpi_status |
59 | acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id, | 61 | acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id, |
60 | u8 **data, u32 *length) | 62 | u8 ** data, u32 * length) |
61 | { | 63 | { |
62 | struct acpi_vendor_info info; | 64 | struct acpi_vendor_info info; |
63 | 65 | ||
64 | info.descriptor = id; | 66 | info.descriptor = id; |
65 | info.data = NULL; | 67 | info.data = NULL; |
66 | 68 | ||
67 | acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info); | 69 | acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, |
70 | &info); | ||
68 | if (!info.data) | 71 | if (!info.data) |
69 | return AE_NOT_FOUND; | 72 | return AE_NOT_FOUND; |
70 | 73 | ||
@@ -75,17 +78,19 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
75 | 78 | ||
76 | struct acpi_vendor_descriptor hp_ccsr_descriptor = { | 79 | struct acpi_vendor_descriptor hp_ccsr_descriptor = { |
77 | .guid_id = 2, | 80 | .guid_id = 2, |
78 | .guid = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad) | 81 | .guid = |
82 | EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, | ||
83 | 0x37, 0x0e, 0xad) | ||
79 | }; | 84 | }; |
80 | 85 | ||
81 | acpi_status | 86 | acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length) |
82 | hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length) | ||
83 | { | 87 | { |
84 | acpi_status status; | 88 | acpi_status status; |
85 | u8 *data; | 89 | u8 *data; |
86 | u32 length; | 90 | u32 length; |
87 | 91 | ||
88 | status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length); | 92 | status = |
93 | acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length); | ||
89 | 94 | ||
90 | if (ACPI_FAILURE(status) || length != 16) | 95 | if (ACPI_FAILURE(status) || length != 16) |
91 | return AE_NOT_FOUND; | 96 | return AE_NOT_FOUND; |
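
For context on the hp_acpi_csr_space() interface touched above, here is a minimal caller sketch. It is not part of this patch: the probe function name and the -ENODEV mapping are invented for illustration; only the hp_acpi_csr_space() signature and the <asm/acpi-ext.h> header come from the file being changed.

#include <linux/errno.h>
#include <asm/acpi-ext.h>

/* Hypothetical caller: locate the HP CCSR window of a device. */
static int example_probe_csr(acpi_handle handle)
{
	u64 csr_base, csr_length;
	acpi_status status;

	status = hp_acpi_csr_space(handle, &csr_base, &csr_length);
	if (ACPI_FAILURE(status))
		return -ENODEV;	/* no CCSR vendor resource for this device */

	/* csr_base/csr_length now describe the device's CSR MMIO window. */
	return 0;
}
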
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 9609f243e5d0..9ad94ddf6687 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -74,12 +74,11 @@ unsigned int acpi_cpei_override;
74 | unsigned int acpi_cpei_phys_cpuid; | 74 | unsigned int acpi_cpei_phys_cpuid; |
75 | 75 | ||
76 | #define MAX_SAPICS 256 | 76 | #define MAX_SAPICS 256 |
77 | u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = | 77 | u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 }; |
78 | { [0 ... MAX_SAPICS - 1] = -1 }; | 78 | |
79 | EXPORT_SYMBOL(ia64_acpiid_to_sapicid); | 79 | EXPORT_SYMBOL(ia64_acpiid_to_sapicid); |
80 | 80 | ||
81 | const char * | 81 | const char *acpi_get_sysname(void) |
82 | acpi_get_sysname (void) | ||
83 | { | 82 | { |
84 | #ifdef CONFIG_IA64_GENERIC | 83 | #ifdef CONFIG_IA64_GENERIC |
85 | unsigned long rsdp_phys; | 84 | unsigned long rsdp_phys; |
@@ -89,27 +88,29 @@ acpi_get_sysname (void)
89 | 88 | ||
90 | rsdp_phys = acpi_find_rsdp(); | 89 | rsdp_phys = acpi_find_rsdp(); |
91 | if (!rsdp_phys) { | 90 | if (!rsdp_phys) { |
92 | printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n"); | 91 | printk(KERN_ERR |
92 | "ACPI 2.0 RSDP not found, default to \"dig\"\n"); | ||
93 | return "dig"; | 93 | return "dig"; |
94 | } | 94 | } |
95 | 95 | ||
96 | rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys); | 96 | rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys); |
97 | if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) { | 97 | if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) { |
98 | printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); | 98 | printk(KERN_ERR |
99 | "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); | ||
99 | return "dig"; | 100 | return "dig"; |
100 | } | 101 | } |
101 | 102 | ||
102 | xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address); | 103 | xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address); |
103 | hdr = &xsdt->header; | 104 | hdr = &xsdt->header; |
104 | if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) { | 105 | if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) { |
105 | printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); | 106 | printk(KERN_ERR |
107 | "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); | ||
106 | return "dig"; | 108 | return "dig"; |
107 | } | 109 | } |
108 | 110 | ||
109 | if (!strcmp(hdr->oem_id, "HP")) { | 111 | if (!strcmp(hdr->oem_id, "HP")) { |
110 | return "hpzx1"; | 112 | return "hpzx1"; |
111 | } | 113 | } else if (!strcmp(hdr->oem_id, "SGI")) { |
112 | else if (!strcmp(hdr->oem_id, "SGI")) { | ||
113 | return "sn2"; | 114 | return "sn2"; |
114 | } | 115 | } |
115 | 116 | ||
@@ -131,7 +132,7 @@ acpi_get_sysname (void)
131 | #endif | 132 | #endif |
132 | } | 133 | } |
133 | 134 | ||
134 | #ifdef CONFIG_ACPI_BOOT | 135 | #ifdef CONFIG_ACPI |
135 | 136 | ||
136 | #define ACPI_MAX_PLATFORM_INTERRUPTS 256 | 137 | #define ACPI_MAX_PLATFORM_INTERRUPTS 256 |
137 | 138 | ||
@@ -146,8 +147,7 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
146 | * Interrupt routing API for device drivers. Provides interrupt vector for | 147 | * Interrupt routing API for device drivers. Provides interrupt vector for |
147 | * a generic platform event. Currently only CPEI is implemented. | 148 | * a generic platform event. Currently only CPEI is implemented. |
148 | */ | 149 | */ |
149 | int | 150 | int acpi_request_vector(u32 int_type) |
150 | acpi_request_vector (u32 int_type) | ||
151 | { | 151 | { |
152 | int vector = -1; | 152 | int vector = -1; |
153 | 153 | ||
@@ -155,12 +155,12 @@ acpi_request_vector (u32 int_type)
155 | /* corrected platform error interrupt */ | 155 | /* corrected platform error interrupt */ |
156 | vector = platform_intr_list[int_type]; | 156 | vector = platform_intr_list[int_type]; |
157 | } else | 157 | } else |
158 | printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n"); | 158 | printk(KERN_ERR |
159 | "acpi_request_vector(): invalid interrupt type\n"); | ||
159 | return vector; | 160 | return vector; |
160 | } | 161 | } |
161 | 162 | ||
162 | char * | 163 | char *__acpi_map_table(unsigned long phys_addr, unsigned long size) |
163 | __acpi_map_table (unsigned long phys_addr, unsigned long size) | ||
164 | { | 164 | { |
165 | return __va(phys_addr); | 165 | return __va(phys_addr); |
166 | } | 166 | } |
@@ -169,19 +169,18 @@ __acpi_map_table (unsigned long phys_addr, unsigned long size)
169 | Boot-time Table Parsing | 169 | Boot-time Table Parsing |
170 | -------------------------------------------------------------------------- */ | 170 | -------------------------------------------------------------------------- */ |
171 | 171 | ||
172 | static int total_cpus __initdata; | 172 | static int total_cpus __initdata; |
173 | static int available_cpus __initdata; | 173 | static int available_cpus __initdata; |
174 | struct acpi_table_madt * acpi_madt __initdata; | 174 | struct acpi_table_madt *acpi_madt __initdata; |
175 | static u8 has_8259; | 175 | static u8 has_8259; |
176 | |||
177 | 176 | ||
178 | static int __init | 177 | static int __init |
179 | acpi_parse_lapic_addr_ovr ( | 178 | acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header, |
180 | acpi_table_entry_header *header, const unsigned long end) | 179 | const unsigned long end) |
181 | { | 180 | { |
182 | struct acpi_table_lapic_addr_ovr *lapic; | 181 | struct acpi_table_lapic_addr_ovr *lapic; |
183 | 182 | ||
184 | lapic = (struct acpi_table_lapic_addr_ovr *) header; | 183 | lapic = (struct acpi_table_lapic_addr_ovr *)header; |
185 | 184 | ||
186 | if (BAD_MADT_ENTRY(lapic, end)) | 185 | if (BAD_MADT_ENTRY(lapic, end)) |
187 | return -EINVAL; | 186 | return -EINVAL; |
@@ -193,22 +192,23 @@ acpi_parse_lapic_addr_ovr (
193 | return 0; | 192 | return 0; |
194 | } | 193 | } |
195 | 194 | ||
196 | |||
197 | static int __init | 195 | static int __init |
198 | acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end) | 196 | acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end) |
199 | { | 197 | { |
200 | struct acpi_table_lsapic *lsapic; | 198 | struct acpi_table_lsapic *lsapic; |
201 | 199 | ||
202 | lsapic = (struct acpi_table_lsapic *) header; | 200 | lsapic = (struct acpi_table_lsapic *)header; |
203 | 201 | ||
204 | if (BAD_MADT_ENTRY(lsapic, end)) | 202 | if (BAD_MADT_ENTRY(lsapic, end)) |
205 | return -EINVAL; | 203 | return -EINVAL; |
206 | 204 | ||
207 | if (lsapic->flags.enabled) { | 205 | if (lsapic->flags.enabled) { |
208 | #ifdef CONFIG_SMP | 206 | #ifdef CONFIG_SMP |
209 | smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid; | 207 | smp_boot_data.cpu_phys_id[available_cpus] = |
208 | (lsapic->id << 8) | lsapic->eid; | ||
210 | #endif | 209 | #endif |
211 | ia64_acpiid_to_sapicid[lsapic->acpi_id] = (lsapic->id << 8) | lsapic->eid; | 210 | ia64_acpiid_to_sapicid[lsapic->acpi_id] = |
211 | (lsapic->id << 8) | lsapic->eid; | ||
212 | ++available_cpus; | 212 | ++available_cpus; |
213 | } | 213 | } |
214 | 214 | ||
@@ -216,13 +216,12 @@ acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
216 | return 0; | 216 | return 0; |
217 | } | 217 | } |
218 | 218 | ||
219 | |||
220 | static int __init | 219 | static int __init |
221 | acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end) | 220 | acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end) |
222 | { | 221 | { |
223 | struct acpi_table_lapic_nmi *lacpi_nmi; | 222 | struct acpi_table_lapic_nmi *lacpi_nmi; |
224 | 223 | ||
225 | lacpi_nmi = (struct acpi_table_lapic_nmi*) header; | 224 | lacpi_nmi = (struct acpi_table_lapic_nmi *)header; |
226 | 225 | ||
227 | if (BAD_MADT_ENTRY(lacpi_nmi, end)) | 226 | if (BAD_MADT_ENTRY(lacpi_nmi, end)) |
228 | return -EINVAL; | 227 | return -EINVAL; |
@@ -231,13 +230,12 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
231 | return 0; | 230 | return 0; |
232 | } | 231 | } |
233 | 232 | ||
234 | |||
235 | static int __init | 233 | static int __init |
236 | acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end) | 234 | acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end) |
237 | { | 235 | { |
238 | struct acpi_table_iosapic *iosapic; | 236 | struct acpi_table_iosapic *iosapic; |
239 | 237 | ||
240 | iosapic = (struct acpi_table_iosapic *) header; | 238 | iosapic = (struct acpi_table_iosapic *)header; |
241 | 239 | ||
242 | if (BAD_MADT_ENTRY(iosapic, end)) | 240 | if (BAD_MADT_ENTRY(iosapic, end)) |
243 | return -EINVAL; | 241 | return -EINVAL; |
@@ -245,15 +243,14 @@ acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
245 | return iosapic_init(iosapic->address, iosapic->global_irq_base); | 243 | return iosapic_init(iosapic->address, iosapic->global_irq_base); |
246 | } | 244 | } |
247 | 245 | ||
248 | |||
249 | static int __init | 246 | static int __init |
250 | acpi_parse_plat_int_src ( | 247 | acpi_parse_plat_int_src(acpi_table_entry_header * header, |
251 | acpi_table_entry_header *header, const unsigned long end) | 248 | const unsigned long end) |
252 | { | 249 | { |
253 | struct acpi_table_plat_int_src *plintsrc; | 250 | struct acpi_table_plat_int_src *plintsrc; |
254 | int vector; | 251 | int vector; |
255 | 252 | ||
256 | plintsrc = (struct acpi_table_plat_int_src *) header; | 253 | plintsrc = (struct acpi_table_plat_int_src *)header; |
257 | 254 | ||
258 | if (BAD_MADT_ENTRY(plintsrc, end)) | 255 | if (BAD_MADT_ENTRY(plintsrc, end)) |
259 | return -EINVAL; | 256 | return -EINVAL; |
@@ -267,8 +264,12 @@ acpi_parse_plat_int_src (
267 | plintsrc->iosapic_vector, | 264 | plintsrc->iosapic_vector, |
268 | plintsrc->eid, | 265 | plintsrc->eid, |
269 | plintsrc->id, | 266 | plintsrc->id, |
270 | (plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, | 267 | (plintsrc->flags.polarity == |
271 | (plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); | 268 | 1) ? IOSAPIC_POL_HIGH : |
269 | IOSAPIC_POL_LOW, | ||
270 | (plintsrc->flags.trigger == | ||
271 | 1) ? IOSAPIC_EDGE : | ||
272 | IOSAPIC_LEVEL); | ||
272 | 273 | ||
273 | platform_intr_list[plintsrc->type] = vector; | 274 | platform_intr_list[plintsrc->type] = vector; |
274 | if (acpi_madt_rev > 1) { | 275 | if (acpi_madt_rev > 1) { |
@@ -283,7 +284,6 @@ acpi_parse_plat_int_src (
283 | return 0; | 284 | return 0; |
284 | } | 285 | } |
285 | 286 | ||
286 | |||
287 | unsigned int can_cpei_retarget(void) | 287 | unsigned int can_cpei_retarget(void) |
288 | { | 288 | { |
289 | extern int cpe_vector; | 289 | extern int cpe_vector; |
@@ -322,29 +322,30 @@ unsigned int get_cpei_target_cpu(void)
322 | } | 322 | } |
323 | 323 | ||
324 | static int __init | 324 | static int __init |
325 | acpi_parse_int_src_ovr ( | 325 | acpi_parse_int_src_ovr(acpi_table_entry_header * header, |
326 | acpi_table_entry_header *header, const unsigned long end) | 326 | const unsigned long end) |
327 | { | 327 | { |
328 | struct acpi_table_int_src_ovr *p; | 328 | struct acpi_table_int_src_ovr *p; |
329 | 329 | ||
330 | p = (struct acpi_table_int_src_ovr *) header; | 330 | p = (struct acpi_table_int_src_ovr *)header; |
331 | 331 | ||
332 | if (BAD_MADT_ENTRY(p, end)) | 332 | if (BAD_MADT_ENTRY(p, end)) |
333 | return -EINVAL; | 333 | return -EINVAL; |
334 | 334 | ||
335 | iosapic_override_isa_irq(p->bus_irq, p->global_irq, | 335 | iosapic_override_isa_irq(p->bus_irq, p->global_irq, |
336 | (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, | 336 | (p->flags.polarity == |
337 | (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); | 337 | 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, |
338 | (p->flags.trigger == | ||
339 | 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); | ||
338 | return 0; | 340 | return 0; |
339 | } | 341 | } |
340 | 342 | ||
341 | |||
342 | static int __init | 343 | static int __init |
343 | acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end) | 344 | acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end) |
344 | { | 345 | { |
345 | struct acpi_table_nmi_src *nmi_src; | 346 | struct acpi_table_nmi_src *nmi_src; |
346 | 347 | ||
347 | nmi_src = (struct acpi_table_nmi_src*) header; | 348 | nmi_src = (struct acpi_table_nmi_src *)header; |
348 | 349 | ||
349 | if (BAD_MADT_ENTRY(nmi_src, end)) | 350 | if (BAD_MADT_ENTRY(nmi_src, end)) |
350 | return -EINVAL; | 351 | return -EINVAL; |
@@ -353,11 +354,9 @@ acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
353 | return 0; | 354 | return 0; |
354 | } | 355 | } |
355 | 356 | ||
356 | static void __init | 357 | static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
357 | acpi_madt_oem_check (char *oem_id, char *oem_table_id) | ||
358 | { | 358 | { |
359 | if (!strncmp(oem_id, "IBM", 3) && | 359 | if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) { |
360 | (!strncmp(oem_table_id, "SERMOW", 6))) { | ||
361 | 360 | ||
362 | /* | 361 | /* |
363 | * Unfortunately ITC_DRIFT is not yet part of the | 362 | * Unfortunately ITC_DRIFT is not yet part of the |
@@ -370,19 +369,18 @@ acpi_madt_oem_check (char *oem_id, char *oem_table_id)
370 | } | 369 | } |
371 | } | 370 | } |
372 | 371 | ||
373 | static int __init | 372 | static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size) |
374 | acpi_parse_madt (unsigned long phys_addr, unsigned long size) | ||
375 | { | 373 | { |
376 | if (!phys_addr || !size) | 374 | if (!phys_addr || !size) |
377 | return -EINVAL; | 375 | return -EINVAL; |
378 | 376 | ||
379 | acpi_madt = (struct acpi_table_madt *) __va(phys_addr); | 377 | acpi_madt = (struct acpi_table_madt *)__va(phys_addr); |
380 | 378 | ||
381 | acpi_madt_rev = acpi_madt->header.revision; | 379 | acpi_madt_rev = acpi_madt->header.revision; |
382 | 380 | ||
383 | /* remember the value for reference after free_initmem() */ | 381 | /* remember the value for reference after free_initmem() */ |
384 | #ifdef CONFIG_ITANIUM | 382 | #ifdef CONFIG_ITANIUM |
385 | has_8259 = 1; /* Firmware on old Itanium systems is broken */ | 383 | has_8259 = 1; /* Firmware on old Itanium systems is broken */ |
386 | #else | 384 | #else |
387 | has_8259 = acpi_madt->flags.pcat_compat; | 385 | has_8259 = acpi_madt->flags.pcat_compat; |
388 | #endif | 386 | #endif |
@@ -396,19 +394,18 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
396 | printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); | 394 | printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); |
397 | 395 | ||
398 | acpi_madt_oem_check(acpi_madt->header.oem_id, | 396 | acpi_madt_oem_check(acpi_madt->header.oem_id, |
399 | acpi_madt->header.oem_table_id); | 397 | acpi_madt->header.oem_table_id); |
400 | 398 | ||
401 | return 0; | 399 | return 0; |
402 | } | 400 | } |
403 | 401 | ||
404 | |||
405 | #ifdef CONFIG_ACPI_NUMA | 402 | #ifdef CONFIG_ACPI_NUMA |
406 | 403 | ||
407 | #undef SLIT_DEBUG | 404 | #undef SLIT_DEBUG |
408 | 405 | ||
409 | #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32) | 406 | #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32) |
410 | 407 | ||
411 | static int __initdata srat_num_cpus; /* number of cpus */ | 408 | static int __initdata srat_num_cpus; /* number of cpus */ |
412 | static u32 __devinitdata pxm_flag[PXM_FLAG_LEN]; | 409 | static u32 __devinitdata pxm_flag[PXM_FLAG_LEN]; |
413 | #define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag)) | 410 | #define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag)) |
414 | #define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) | 411 | #define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) |
@@ -421,15 +418,15 @@ static struct acpi_table_slit __initdata *slit_table;
421 | * ACPI 2.0 SLIT (System Locality Information Table) | 418 | * ACPI 2.0 SLIT (System Locality Information Table) |
422 | * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf | 419 | * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf |
423 | */ | 420 | */ |
424 | void __init | 421 | void __init acpi_numa_slit_init(struct acpi_table_slit *slit) |
425 | acpi_numa_slit_init (struct acpi_table_slit *slit) | ||
426 | { | 422 | { |
427 | u32 len; | 423 | u32 len; |
428 | 424 | ||
429 | len = sizeof(struct acpi_table_header) + 8 | 425 | len = sizeof(struct acpi_table_header) + 8 |
430 | + slit->localities * slit->localities; | 426 | + slit->localities * slit->localities; |
431 | if (slit->header.length != len) { | 427 | if (slit->header.length != len) { |
432 | printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", | 428 | printk(KERN_ERR |
429 | "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", | ||
433 | len, slit->header.length); | 430 | len, slit->header.length); |
434 | memset(numa_slit, 10, sizeof(numa_slit)); | 431 | memset(numa_slit, 10, sizeof(numa_slit)); |
435 | return; | 432 | return; |
@@ -438,19 +435,20 @@ acpi_numa_slit_init (struct acpi_table_slit *slit)
438 | } | 435 | } |
439 | 436 | ||
440 | void __init | 437 | void __init |
441 | acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa) | 438 | acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) |
442 | { | 439 | { |
443 | /* record this node in proximity bitmap */ | 440 | /* record this node in proximity bitmap */ |
444 | pxm_bit_set(pa->proximity_domain); | 441 | pxm_bit_set(pa->proximity_domain); |
445 | 442 | ||
446 | node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid); | 443 | node_cpuid[srat_num_cpus].phys_id = |
444 | (pa->apic_id << 8) | (pa->lsapic_eid); | ||
447 | /* nid should be overridden as logical node id later */ | 445 | /* nid should be overridden as logical node id later */ |
448 | node_cpuid[srat_num_cpus].nid = pa->proximity_domain; | 446 | node_cpuid[srat_num_cpus].nid = pa->proximity_domain; |
449 | srat_num_cpus++; | 447 | srat_num_cpus++; |
450 | } | 448 | } |
451 | 449 | ||
452 | void __init | 450 | void __init |
453 | acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma) | 451 | acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) |
454 | { | 452 | { |
455 | unsigned long paddr, size; | 453 | unsigned long paddr, size; |
456 | u8 pxm; | 454 | u8 pxm; |
@@ -487,8 +485,7 @@ acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
487 | num_node_memblks++; | 485 | num_node_memblks++; |
488 | } | 486 | } |
489 | 487 | ||
490 | void __init | 488 | void __init acpi_numa_arch_fixup(void) |
491 | acpi_numa_arch_fixup (void) | ||
492 | { | 489 | { |
493 | int i, j, node_from, node_to; | 490 | int i, j, node_from, node_to; |
494 | 491 | ||
@@ -534,21 +531,24 @@ acpi_numa_arch_fixup (void)
534 | for (i = 0; i < srat_num_cpus; i++) | 531 | for (i = 0; i < srat_num_cpus; i++) |
535 | node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid]; | 532 | node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid]; |
536 | 533 | ||
537 | printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes()); | 534 | printk(KERN_INFO "Number of logical nodes in system = %d\n", |
538 | printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks); | 535 | num_online_nodes()); |
536 | printk(KERN_INFO "Number of memory chunks in system = %d\n", | ||
537 | num_node_memblks); | ||
539 | 538 | ||
540 | if (!slit_table) return; | 539 | if (!slit_table) |
540 | return; | ||
541 | memset(numa_slit, -1, sizeof(numa_slit)); | 541 | memset(numa_slit, -1, sizeof(numa_slit)); |
542 | for (i=0; i<slit_table->localities; i++) { | 542 | for (i = 0; i < slit_table->localities; i++) { |
543 | if (!pxm_bit_test(i)) | 543 | if (!pxm_bit_test(i)) |
544 | continue; | 544 | continue; |
545 | node_from = pxm_to_nid_map[i]; | 545 | node_from = pxm_to_nid_map[i]; |
546 | for (j=0; j<slit_table->localities; j++) { | 546 | for (j = 0; j < slit_table->localities; j++) { |
547 | if (!pxm_bit_test(j)) | 547 | if (!pxm_bit_test(j)) |
548 | continue; | 548 | continue; |
549 | node_to = pxm_to_nid_map[j]; | 549 | node_to = pxm_to_nid_map[j]; |
550 | node_distance(node_from, node_to) = | 550 | node_distance(node_from, node_to) = |
551 | slit_table->entry[i*slit_table->localities + j]; | 551 | slit_table->entry[i * slit_table->localities + j]; |
552 | } | 552 | } |
553 | } | 553 | } |
554 | 554 | ||
@@ -556,36 +556,41 @@ acpi_numa_arch_fixup (void)
556 | printk("ACPI 2.0 SLIT locality table:\n"); | 556 | printk("ACPI 2.0 SLIT locality table:\n"); |
557 | for_each_online_node(i) { | 557 | for_each_online_node(i) { |
558 | for_each_online_node(j) | 558 | for_each_online_node(j) |
559 | printk("%03d ", node_distance(i,j)); | 559 | printk("%03d ", node_distance(i, j)); |
560 | printk("\n"); | 560 | printk("\n"); |
561 | } | 561 | } |
562 | #endif | 562 | #endif |
563 | } | 563 | } |
564 | #endif /* CONFIG_ACPI_NUMA */ | 564 | #endif /* CONFIG_ACPI_NUMA */ |
565 | 565 | ||
566 | unsigned int | 566 | /* |
567 | acpi_register_gsi (u32 gsi, int edge_level, int active_high_low) | 567 | * success: return IRQ number (>=0) |
568 | * failure: return < 0 | ||
569 | */ | ||
570 | int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low) | ||
568 | { | 571 | { |
569 | if (has_8259 && gsi < 16) | 572 | if (has_8259 && gsi < 16) |
570 | return isa_irq_to_vector(gsi); | 573 | return isa_irq_to_vector(gsi); |
571 | 574 | ||
572 | return iosapic_register_intr(gsi, | 575 | return iosapic_register_intr(gsi, |
573 | (active_high_low == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, | 576 | (active_high_low == |
574 | (edge_level == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); | 577 | ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : |
578 | IOSAPIC_POL_LOW, | ||
579 | (edge_level == | ||
580 | ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : | ||
581 | IOSAPIC_LEVEL); | ||
575 | } | 582 | } |
583 | |||
576 | EXPORT_SYMBOL(acpi_register_gsi); | 584 | EXPORT_SYMBOL(acpi_register_gsi); |
577 | 585 | ||
578 | #ifdef CONFIG_ACPI_DEALLOCATE_IRQ | 586 | void acpi_unregister_gsi(u32 gsi) |
579 | void | ||
580 | acpi_unregister_gsi (u32 gsi) | ||
581 | { | 587 | { |
582 | iosapic_unregister_intr(gsi); | 588 | iosapic_unregister_intr(gsi); |
583 | } | 589 | } |
590 | |||
584 | EXPORT_SYMBOL(acpi_unregister_gsi); | 591 | EXPORT_SYMBOL(acpi_unregister_gsi); |
585 | #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */ | ||
586 | 592 | ||
587 | static int __init | 593 | static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size) |
588 | acpi_parse_fadt (unsigned long phys_addr, unsigned long size) | ||
589 | { | 594 | { |
590 | struct acpi_table_header *fadt_header; | 595 | struct acpi_table_header *fadt_header; |
591 | struct fadt_descriptor_rev2 *fadt; | 596 | struct fadt_descriptor_rev2 *fadt; |
@@ -593,11 +598,11 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
593 | if (!phys_addr || !size) | 598 | if (!phys_addr || !size) |
594 | return -EINVAL; | 599 | return -EINVAL; |
595 | 600 | ||
596 | fadt_header = (struct acpi_table_header *) __va(phys_addr); | 601 | fadt_header = (struct acpi_table_header *)__va(phys_addr); |
597 | if (fadt_header->revision != 3) | 602 | if (fadt_header->revision != 3) |
598 | return -ENODEV; /* Only deal with ACPI 2.0 FADT */ | 603 | return -ENODEV; /* Only deal with ACPI 2.0 FADT */ |
599 | 604 | ||
600 | fadt = (struct fadt_descriptor_rev2 *) fadt_header; | 605 | fadt = (struct fadt_descriptor_rev2 *)fadt_header; |
601 | 606 | ||
602 | if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER)) | 607 | if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER)) |
603 | acpi_kbd_controller_present = 0; | 608 | acpi_kbd_controller_present = 0; |
@@ -609,22 +614,19 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
609 | return 0; | 614 | return 0; |
610 | } | 615 | } |
611 | 616 | ||
612 | 617 | unsigned long __init acpi_find_rsdp(void) | |
613 | unsigned long __init | ||
614 | acpi_find_rsdp (void) | ||
615 | { | 618 | { |
616 | unsigned long rsdp_phys = 0; | 619 | unsigned long rsdp_phys = 0; |
617 | 620 | ||
618 | if (efi.acpi20) | 621 | if (efi.acpi20) |
619 | rsdp_phys = __pa(efi.acpi20); | 622 | rsdp_phys = __pa(efi.acpi20); |
620 | else if (efi.acpi) | 623 | else if (efi.acpi) |
621 | printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n"); | 624 | printk(KERN_WARNING PREFIX |
625 | "v1.0/r0.71 tables no longer supported\n"); | ||
622 | return rsdp_phys; | 626 | return rsdp_phys; |
623 | } | 627 | } |
624 | 628 | ||
625 | 629 | int __init acpi_boot_init(void) | |
626 | int __init | ||
627 | acpi_boot_init (void) | ||
628 | { | 630 | { |
629 | 631 | ||
630 | /* | 632 | /* |
@@ -642,31 +644,43 @@ acpi_boot_init (void)
642 | 644 | ||
643 | /* Local APIC */ | 645 | /* Local APIC */ |
644 | 646 | ||
645 | if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0) | 647 | if (acpi_table_parse_madt |
646 | printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); | 648 | (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0) |
649 | printk(KERN_ERR PREFIX | ||
650 | "Error parsing LAPIC address override entry\n"); | ||
647 | 651 | ||
648 | if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1) | 652 | if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) |
649 | printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n"); | 653 | < 1) |
654 | printk(KERN_ERR PREFIX | ||
655 | "Error parsing MADT - no LAPIC entries\n"); | ||
650 | 656 | ||
651 | if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0) | 657 | if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) |
658 | < 0) | ||
652 | printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); | 659 | printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); |
653 | 660 | ||
654 | /* I/O APIC */ | 661 | /* I/O APIC */ |
655 | 662 | ||
656 | if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) | 663 | if (acpi_table_parse_madt |
657 | printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n"); | 664 | (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) |
665 | printk(KERN_ERR PREFIX | ||
666 | "Error parsing MADT - no IOSAPIC entries\n"); | ||
658 | 667 | ||
659 | /* System-Level Interrupt Routing */ | 668 | /* System-Level Interrupt Routing */ |
660 | 669 | ||
661 | if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0) | 670 | if (acpi_table_parse_madt |
662 | printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n"); | 671 | (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, |
672 | ACPI_MAX_PLATFORM_INTERRUPTS) < 0) | ||
673 | printk(KERN_ERR PREFIX | ||
674 | "Error parsing platform interrupt source entry\n"); | ||
663 | 675 | ||
664 | if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0) | 676 | if (acpi_table_parse_madt |
665 | printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); | 677 | (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0) |
678 | printk(KERN_ERR PREFIX | ||
679 | "Error parsing interrupt source overrides entry\n"); | ||
666 | 680 | ||
667 | if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0) | 681 | if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0) |
668 | printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); | 682 | printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); |
669 | skip_madt: | 683 | skip_madt: |
670 | 684 | ||
671 | /* | 685 | /* |
672 | * FADT says whether a legacy keyboard controller is present. | 686 | * FADT says whether a legacy keyboard controller is present. |
@@ -681,8 +695,9 @@ acpi_boot_init (void)
681 | if (available_cpus == 0) { | 695 | if (available_cpus == 0) { |
682 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); | 696 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); |
683 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); | 697 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); |
684 | smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id(); | 698 | smp_boot_data.cpu_phys_id[available_cpus] = |
685 | available_cpus = 1; /* We've got at least one of these, no? */ | 699 | hard_smp_processor_id(); |
700 | available_cpus = 1; /* We've got at least one of these, no? */ | ||
686 | } | 701 | } |
687 | smp_boot_data.cpu_count = available_cpus; | 702 | smp_boot_data.cpu_count = available_cpus; |
688 | 703 | ||
@@ -691,8 +706,10 @@ acpi_boot_init (void)
691 | if (srat_num_cpus == 0) { | 706 | if (srat_num_cpus == 0) { |
692 | int cpu, i = 1; | 707 | int cpu, i = 1; |
693 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) | 708 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) |
694 | if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id()) | 709 | if (smp_boot_data.cpu_phys_id[cpu] != |
695 | node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu]; | 710 | hard_smp_processor_id()) |
711 | node_cpuid[i++].phys_id = | ||
712 | smp_boot_data.cpu_phys_id[cpu]; | ||
696 | } | 713 | } |
697 | # endif | 714 | # endif |
698 | #endif | 715 | #endif |
@@ -700,12 +717,12 @@ acpi_boot_init (void)
700 | build_cpu_to_node_map(); | 717 | build_cpu_to_node_map(); |
701 | #endif | 718 | #endif |
702 | /* Make boot-up look pretty */ | 719 | /* Make boot-up look pretty */ |
703 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus); | 720 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, |
721 | total_cpus); | ||
704 | return 0; | 722 | return 0; |
705 | } | 723 | } |
706 | 724 | ||
707 | int | 725 | int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) |
708 | acpi_gsi_to_irq (u32 gsi, unsigned int *irq) | ||
709 | { | 726 | { |
710 | int vector; | 727 | int vector; |
711 | 728 | ||
@@ -726,11 +743,10 @@ acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
726 | */ | 743 | */ |
727 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 744 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
728 | static | 745 | static |
729 | int | 746 | int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid) |
730 | acpi_map_cpu2node(acpi_handle handle, int cpu, long physid) | ||
731 | { | 747 | { |
732 | #ifdef CONFIG_ACPI_NUMA | 748 | #ifdef CONFIG_ACPI_NUMA |
733 | int pxm_id; | 749 | int pxm_id; |
734 | 750 | ||
735 | pxm_id = acpi_get_pxm(handle); | 751 | pxm_id = acpi_get_pxm(handle); |
736 | 752 | ||
@@ -738,31 +754,28 @@ acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
738 | * Assuming that the container driver would have set the proximity | 754 | * Assuming that the container driver would have set the proximity |
739 | * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag | 755 | * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag |
740 | */ | 756 | */ |
741 | node_cpuid[cpu].nid = (pxm_id < 0) ? 0: | 757 | node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id]; |
742 | pxm_to_nid_map[pxm_id]; | ||
743 | 758 | ||
744 | node_cpuid[cpu].phys_id = physid; | 759 | node_cpuid[cpu].phys_id = physid; |
745 | #endif | 760 | #endif |
746 | return(0); | 761 | return (0); |
747 | } | 762 | } |
748 | 763 | ||
749 | 764 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) | |
750 | int | ||
751 | acpi_map_lsapic(acpi_handle handle, int *pcpu) | ||
752 | { | 765 | { |
753 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 766 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
754 | union acpi_object *obj; | 767 | union acpi_object *obj; |
755 | struct acpi_table_lsapic *lsapic; | 768 | struct acpi_table_lsapic *lsapic; |
756 | cpumask_t tmp_map; | 769 | cpumask_t tmp_map; |
757 | long physid; | 770 | long physid; |
758 | int cpu; | 771 | int cpu; |
759 | 772 | ||
760 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) | 773 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) |
761 | return -EINVAL; | 774 | return -EINVAL; |
762 | 775 | ||
763 | if (!buffer.length || !buffer.pointer) | 776 | if (!buffer.length || !buffer.pointer) |
764 | return -EINVAL; | 777 | return -EINVAL; |
765 | 778 | ||
766 | obj = buffer.pointer; | 779 | obj = buffer.pointer; |
767 | if (obj->type != ACPI_TYPE_BUFFER || | 780 | if (obj->type != ACPI_TYPE_BUFFER || |
768 | obj->buffer.length < sizeof(*lsapic)) { | 781 | obj->buffer.length < sizeof(*lsapic)) { |
@@ -778,7 +791,7 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu)
778 | return -EINVAL; | 791 | return -EINVAL; |
779 | } | 792 | } |
780 | 793 | ||
781 | physid = ((lsapic->id <<8) | (lsapic->eid)); | 794 | physid = ((lsapic->id << 8) | (lsapic->eid)); |
782 | 795 | ||
783 | acpi_os_free(buffer.pointer); | 796 | acpi_os_free(buffer.pointer); |
784 | buffer.length = ACPI_ALLOCATE_BUFFER; | 797 | buffer.length = ACPI_ALLOCATE_BUFFER; |
@@ -786,50 +799,49 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu)
786 | 799 | ||
787 | cpus_complement(tmp_map, cpu_present_map); | 800 | cpus_complement(tmp_map, cpu_present_map); |
788 | cpu = first_cpu(tmp_map); | 801 | cpu = first_cpu(tmp_map); |
789 | if(cpu >= NR_CPUS) | 802 | if (cpu >= NR_CPUS) |
790 | return -EINVAL; | 803 | return -EINVAL; |
791 | 804 | ||
792 | acpi_map_cpu2node(handle, cpu, physid); | 805 | acpi_map_cpu2node(handle, cpu, physid); |
793 | 806 | ||
794 | cpu_set(cpu, cpu_present_map); | 807 | cpu_set(cpu, cpu_present_map); |
795 | ia64_cpu_to_sapicid[cpu] = physid; | 808 | ia64_cpu_to_sapicid[cpu] = physid; |
796 | ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu]; | 809 | ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu]; |
797 | 810 | ||
798 | *pcpu = cpu; | 811 | *pcpu = cpu; |
799 | return(0); | 812 | return (0); |
800 | } | 813 | } |
801 | EXPORT_SYMBOL(acpi_map_lsapic); | ||
802 | 814 | ||
815 | EXPORT_SYMBOL(acpi_map_lsapic); | ||
803 | 816 | ||
804 | int | 817 | int acpi_unmap_lsapic(int cpu) |
805 | acpi_unmap_lsapic(int cpu) | ||
806 | { | 818 | { |
807 | int i; | 819 | int i; |
808 | 820 | ||
809 | for (i=0; i<MAX_SAPICS; i++) { | 821 | for (i = 0; i < MAX_SAPICS; i++) { |
810 | if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) { | 822 | if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) { |
811 | ia64_acpiid_to_sapicid[i] = -1; | 823 | ia64_acpiid_to_sapicid[i] = -1; |
812 | break; | 824 | break; |
813 | } | 825 | } |
814 | } | 826 | } |
815 | ia64_cpu_to_sapicid[cpu] = -1; | 827 | ia64_cpu_to_sapicid[cpu] = -1; |
816 | cpu_clear(cpu,cpu_present_map); | 828 | cpu_clear(cpu, cpu_present_map); |
817 | 829 | ||
818 | #ifdef CONFIG_ACPI_NUMA | 830 | #ifdef CONFIG_ACPI_NUMA |
819 | /* NUMA specific cleanup's */ | 831 | /* NUMA specific cleanup's */ |
820 | #endif | 832 | #endif |
821 | 833 | ||
822 | return(0); | 834 | return (0); |
823 | } | 835 | } |
836 | |||
824 | EXPORT_SYMBOL(acpi_unmap_lsapic); | 837 | EXPORT_SYMBOL(acpi_unmap_lsapic); |
825 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 838 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
826 | |||
827 | 839 | ||
828 | #ifdef CONFIG_ACPI_NUMA | 840 | #ifdef CONFIG_ACPI_NUMA |
829 | acpi_status __devinit | 841 | static acpi_status __devinit |
830 | acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) | 842 | acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret) |
831 | { | 843 | { |
832 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 844 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
833 | union acpi_object *obj; | 845 | union acpi_object *obj; |
834 | struct acpi_table_iosapic *iosapic; | 846 | struct acpi_table_iosapic *iosapic; |
835 | unsigned int gsi_base; | 847 | unsigned int gsi_base; |
@@ -878,29 +890,38 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
878 | map_iosapic_to_node(gsi_base, node); | 890 | map_iosapic_to_node(gsi_base, node); |
879 | return AE_OK; | 891 | return AE_OK; |
880 | } | 892 | } |
881 | #endif /* CONFIG_NUMA */ | ||
882 | 893 | ||
883 | int | 894 | static int __init |
884 | acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base) | 895 | acpi_map_iosapics (void) |
896 | { | ||
897 | acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL); | ||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | fs_initcall(acpi_map_iosapics); | ||
902 | #endif /* CONFIG_ACPI_NUMA */ | ||
903 | |||
904 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) | ||
885 | { | 905 | { |
886 | int err; | 906 | int err; |
887 | 907 | ||
888 | if ((err = iosapic_init(phys_addr, gsi_base))) | 908 | if ((err = iosapic_init(phys_addr, gsi_base))) |
889 | return err; | 909 | return err; |
890 | 910 | ||
891 | #if CONFIG_ACPI_NUMA | 911 | #ifdef CONFIG_ACPI_NUMA |
892 | acpi_map_iosapic(handle, 0, NULL, NULL); | 912 | acpi_map_iosapic(handle, 0, NULL, NULL); |
893 | #endif /* CONFIG_ACPI_NUMA */ | 913 | #endif /* CONFIG_ACPI_NUMA */ |
894 | 914 | ||
895 | return 0; | 915 | return 0; |
896 | } | 916 | } |
917 | |||
897 | EXPORT_SYMBOL(acpi_register_ioapic); | 918 | EXPORT_SYMBOL(acpi_register_ioapic); |
898 | 919 | ||
899 | int | 920 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) |
900 | acpi_unregister_ioapic (acpi_handle handle, u32 gsi_base) | ||
901 | { | 921 | { |
902 | return iosapic_remove(gsi_base); | 922 | return iosapic_remove(gsi_base); |
903 | } | 923 | } |
924 | |||
904 | EXPORT_SYMBOL(acpi_unregister_ioapic); | 925 | EXPORT_SYMBOL(acpi_unregister_ioapic); |
905 | 926 | ||
906 | #endif /* CONFIG_ACPI_BOOT */ | 927 | #endif /* CONFIG_ACPI */ |
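
One behavioural change in the acpi.c hunks above is worth calling out: acpi_register_gsi() now returns a signed int (the IRQ number on success, a negative value on failure) instead of an unsigned int, and acpi_unregister_gsi() is no longer guarded by CONFIG_ACPI_DEALLOCATE_IRQ. A short sketch of how a caller would use the new convention follows; the wrapper function is hypothetical and not taken from the patch, and the header placement is assumed.

#include <linux/acpi.h>

/* Hypothetical helper: map a GSI and report failure as a negative value. */
static int example_map_gsi(u32 gsi)
{
	int irq;

	irq = acpi_register_gsi(gsi, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
	if (irq < 0)
		return irq;	/* registration failed; propagate the error */

	return irq;	/* valid IRQ/vector number */
}
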
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 7d1ae2982c53..77225659e968 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
4 | * to extract and format the required data. | 4 | * to extract and format the required data. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #define ASM_OFFSETS_C 1 | ||
7 | #include <linux/config.h> | 8 | #include <linux/config.h> |
8 | 9 | ||
9 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
@@ -211,17 +212,41 @@ void foo(void)
211 | #endif | 212 | #endif |
212 | 213 | ||
213 | BLANK(); | 214 | BLANK(); |
214 | DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, | 215 | DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET, |
215 | offsetof (struct ia64_mca_cpu, proc_state_dump)); | 216 | offsetof (struct ia64_mca_cpu, mca_stack)); |
216 | DEFINE(IA64_MCA_CPU_STACK_OFFSET, | ||
217 | offsetof (struct ia64_mca_cpu, stack)); | ||
218 | DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET, | ||
219 | offsetof (struct ia64_mca_cpu, stackframe)); | ||
220 | DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET, | ||
221 | offsetof (struct ia64_mca_cpu, rbstore)); | ||
222 | DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, | 217 | DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, |
223 | offsetof (struct ia64_mca_cpu, init_stack)); | 218 | offsetof (struct ia64_mca_cpu, init_stack)); |
224 | BLANK(); | 219 | BLANK(); |
220 | DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET, | ||
221 | offsetof (struct ia64_sal_os_state, sal_ra)); | ||
222 | DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET, | ||
223 | offsetof (struct ia64_sal_os_state, os_gp)); | ||
224 | DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, | ||
225 | offsetof (struct ia64_sal_os_state, pal_min_state)); | ||
226 | DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, | ||
227 | offsetof (struct ia64_sal_os_state, proc_state_param)); | ||
228 | DEFINE(IA64_SAL_OS_STATE_SIZE, | ||
229 | sizeof (struct ia64_sal_os_state)); | ||
230 | DEFINE(IA64_PMSA_GR_OFFSET, | ||
231 | offsetof (struct pal_min_state_area_s, pmsa_gr)); | ||
232 | DEFINE(IA64_PMSA_BANK1_GR_OFFSET, | ||
233 | offsetof (struct pal_min_state_area_s, pmsa_bank1_gr)); | ||
234 | DEFINE(IA64_PMSA_PR_OFFSET, | ||
235 | offsetof (struct pal_min_state_area_s, pmsa_pr)); | ||
236 | DEFINE(IA64_PMSA_BR0_OFFSET, | ||
237 | offsetof (struct pal_min_state_area_s, pmsa_br0)); | ||
238 | DEFINE(IA64_PMSA_RSC_OFFSET, | ||
239 | offsetof (struct pal_min_state_area_s, pmsa_rsc)); | ||
240 | DEFINE(IA64_PMSA_IIP_OFFSET, | ||
241 | offsetof (struct pal_min_state_area_s, pmsa_iip)); | ||
242 | DEFINE(IA64_PMSA_IPSR_OFFSET, | ||
243 | offsetof (struct pal_min_state_area_s, pmsa_ipsr)); | ||
244 | DEFINE(IA64_PMSA_IFS_OFFSET, | ||
245 | offsetof (struct pal_min_state_area_s, pmsa_ifs)); | ||
246 | DEFINE(IA64_PMSA_XIP_OFFSET, | ||
247 | offsetof (struct pal_min_state_area_s, pmsa_xip)); | ||
248 | BLANK(); | ||
249 | |||
225 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ | 250 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ |
226 | DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); | 251 | DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); |
227 | DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); | 252 | DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); |
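
The asm-offsets.c hunk above drops the old proc_state_dump/stack/stackframe/rbstore offsets and adds constants for struct ia64_mca_cpu's mca_stack, struct ia64_sal_os_state, and the PAL min-state area, so the MCA/INIT assembly can address those fields symbolically. As a rough sketch of the asm-offsets mechanism in general, the DEFINE() macro body below is the conventional kernel form and is assumed here rather than quoted from this file; the <asm/mca.h> location of struct ia64_sal_os_state is likewise assumed.

#include <linux/stddef.h>	/* offsetof() */
#include <asm/mca.h>		/* struct ia64_sal_os_state (assumed location) */

/* Conventional form: emit a "->NAME value" marker into the generated
 * assembly, which the build post-processes into "#define NAME value"
 * in asm-offsets.h for use by .S files. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void foo(void)
{
	/* Mirrors one of the entries added by this patch. */
	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
	       offsetof(struct ia64_sal_os_state, os_gp));
}

Assembly code can then refer to the field through the named constant, e.g. "add r3=IA64_SAL_OS_STATE_OS_GP_OFFSET,r2", instead of hard-coding the structure layout.
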
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
deleted file mode 100644
index bbb8efe126b7..000000000000
--- a/arch/ia64/kernel/domain.c
+++ /dev/null
@@ -1,396 +0,0 @@
1 | /* | ||
2 | * arch/ia64/kernel/domain.c | ||
3 | * Architecture specific sched-domains builder. | ||
4 | * | ||
5 | * Copyright (C) 2004 Jesse Barnes | ||
6 | * Copyright (C) 2004 Silicon Graphics, Inc. | ||
7 | */ | ||
8 | |||
9 | #include <linux/sched.h> | ||
10 | #include <linux/percpu.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/topology.h> | ||
15 | #include <linux/nodemask.h> | ||
16 | |||
17 | #define SD_NODES_PER_DOMAIN 16 | ||
18 | |||
19 | #ifdef CONFIG_NUMA | ||
20 | /** | ||
21 | * find_next_best_node - find the next node to include in a sched_domain | ||
22 | * @node: node whose sched_domain we're building | ||
23 | * @used_nodes: nodes already in the sched_domain | ||
24 | * | ||
25 | * Find the next node to include in a given scheduling domain. Simply | ||
26 | * finds the closest node not already in the @used_nodes map. | ||
27 | * | ||
28 | * Should use nodemask_t. | ||
29 | */ | ||
30 | static int find_next_best_node(int node, unsigned long *used_nodes) | ||
31 | { | ||
32 | int i, n, val, min_val, best_node = 0; | ||
33 | |||
34 | min_val = INT_MAX; | ||
35 | |||
36 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
37 | /* Start at @node */ | ||
38 | n = (node + i) % MAX_NUMNODES; | ||
39 | |||
40 | if (!nr_cpus_node(n)) | ||
41 | continue; | ||
42 | |||
43 | /* Skip already used nodes */ | ||
44 | if (test_bit(n, used_nodes)) | ||
45 | continue; | ||
46 | |||
47 | /* Simple min distance search */ | ||
48 | val = node_distance(node, n); | ||
49 | |||
50 | if (val < min_val) { | ||
51 | min_val = val; | ||
52 | best_node = n; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | set_bit(best_node, used_nodes); | ||
57 | return best_node; | ||
58 | } | ||
59 | |||
60 | /** | ||
61 | * sched_domain_node_span - get a cpumask for a node's sched_domain | ||
62 | * @node: node whose cpumask we're constructing | ||
63 | * @size: number of nodes to include in this span | ||
64 | * | ||
65 | * Given a node, construct a good cpumask for its sched_domain to span. It | ||
66 | * should be one that prevents unnecessary balancing, but also spreads tasks | ||
67 | * out optimally. | ||
68 | */ | ||
69 | static cpumask_t sched_domain_node_span(int node) | ||
70 | { | ||
71 | int i; | ||
72 | cpumask_t span, nodemask; | ||
73 | DECLARE_BITMAP(used_nodes, MAX_NUMNODES); | ||
74 | |||
75 | cpus_clear(span); | ||
76 | bitmap_zero(used_nodes, MAX_NUMNODES); | ||
77 | |||
78 | nodemask = node_to_cpumask(node); | ||
79 | cpus_or(span, span, nodemask); | ||
80 | set_bit(node, used_nodes); | ||
81 | |||
82 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | ||
83 | int next_node = find_next_best_node(node, used_nodes); | ||
84 | nodemask = node_to_cpumask(next_node); | ||
85 | cpus_or(span, span, nodemask); | ||
86 | } | ||
87 | |||
88 | return span; | ||
89 | } | ||
90 | #endif | ||
91 | |||
92 | /* | ||
93 | * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we | ||
94 | * can switch it on easily if needed. | ||
95 | */ | ||
96 | #ifdef CONFIG_SCHED_SMT | ||
97 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | ||
98 | static struct sched_group sched_group_cpus[NR_CPUS]; | ||
99 | static int cpu_to_cpu_group(int cpu) | ||
100 | { | ||
101 | return cpu; | ||
102 | } | ||
103 | #endif | ||
104 | |||
105 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | ||
106 | static struct sched_group sched_group_phys[NR_CPUS]; | ||
107 | static int cpu_to_phys_group(int cpu) | ||
108 | { | ||
109 | #ifdef CONFIG_SCHED_SMT | ||
110 | return first_cpu(cpu_sibling_map[cpu]); | ||
111 | #else | ||
112 | return cpu; | ||
113 | #endif | ||
114 | } | ||
115 | |||
116 | #ifdef CONFIG_NUMA | ||
117 | /* | ||
118 | * The init_sched_build_groups can't handle what we want to do with node | ||
119 | * groups, so roll our own. Now each node has its own list of groups which | ||
120 | * gets dynamically allocated. | ||
121 | */ | ||
122 | static DEFINE_PER_CPU(struct sched_domain, node_domains); | ||
123 | static struct sched_group *sched_group_nodes[MAX_NUMNODES]; | ||
124 | |||
125 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | ||
126 | static struct sched_group sched_group_allnodes[MAX_NUMNODES]; | ||
127 | |||
128 | static int cpu_to_allnodes_group(int cpu) | ||
129 | { | ||
130 | return cpu_to_node(cpu); | ||
131 | } | ||
132 | #endif | ||
133 | |||
134 | /* | ||
135 | * Build sched domains for a given set of cpus and attach the sched domains | ||
136 | * to the individual cpus | ||
137 | */ | ||
138 | void build_sched_domains(const cpumask_t *cpu_map) | ||
139 | { | ||
140 | int i; | ||
141 | |||
142 | /* | ||
143 | * Set up domains for cpus specified by the cpu_map. | ||
144 | */ | ||
145 | for_each_cpu_mask(i, *cpu_map) { | ||
146 | int group; | ||
147 | struct sched_domain *sd = NULL, *p; | ||
148 | cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); | ||
149 | |||
150 | cpus_and(nodemask, nodemask, *cpu_map); | ||
151 | |||
152 | #ifdef CONFIG_NUMA | ||
153 | if (num_online_cpus() | ||
154 | > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { | ||
155 | sd = &per_cpu(allnodes_domains, i); | ||
156 | *sd = SD_ALLNODES_INIT; | ||
157 | sd->span = *cpu_map; | ||
158 | group = cpu_to_allnodes_group(i); | ||
159 | sd->groups = &sched_group_allnodes[group]; | ||
160 | p = sd; | ||
161 | } else | ||
162 | p = NULL; | ||
163 | |||
164 | sd = &per_cpu(node_domains, i); | ||
165 | *sd = SD_NODE_INIT; | ||
166 | sd->span = sched_domain_node_span(cpu_to_node(i)); | ||
167 | sd->parent = p; | ||
168 | cpus_and(sd->span, sd->span, *cpu_map); | ||
169 | #endif | ||
170 | |||
171 | p = sd; | ||
172 | sd = &per_cpu(phys_domains, i); | ||
173 | group = cpu_to_phys_group(i); | ||
174 | *sd = SD_CPU_INIT; | ||
175 | sd->span = nodemask; | ||
176 | sd->parent = p; | ||
177 | sd->groups = &sched_group_phys[group]; | ||
178 | |||
179 | #ifdef CONFIG_SCHED_SMT | ||
180 | p = sd; | ||
181 | sd = &per_cpu(cpu_domains, i); | ||
182 | group = cpu_to_cpu_group(i); | ||
183 | *sd = SD_SIBLING_INIT; | ||
184 | sd->span = cpu_sibling_map[i]; | ||
185 | cpus_and(sd->span, sd->span, *cpu_map); | ||
186 | sd->parent = p; | ||
187 | sd->groups = &sched_group_cpus[group]; | ||
188 | #endif | ||
189 | } | ||
190 | |||
191 | #ifdef CONFIG_SCHED_SMT | ||
192 | /* Set up CPU (sibling) groups */ | ||
193 | for_each_cpu_mask(i, *cpu_map) { | ||
194 | cpumask_t this_sibling_map = cpu_sibling_map[i]; | ||
195 | cpus_and(this_sibling_map, this_sibling_map, *cpu_map); | ||
196 | if (i != first_cpu(this_sibling_map)) | ||
197 | continue; | ||
198 | |||
199 | init_sched_build_groups(sched_group_cpus, this_sibling_map, | ||
200 | &cpu_to_cpu_group); | ||
201 | } | ||
202 | #endif | ||
203 | |||
204 | /* Set up physical groups */ | ||
205 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
206 | cpumask_t nodemask = node_to_cpumask(i); | ||
207 | |||
208 | cpus_and(nodemask, nodemask, *cpu_map); | ||
209 | if (cpus_empty(nodemask)) | ||
210 | continue; | ||
211 | |||
212 | init_sched_build_groups(sched_group_phys, nodemask, | ||
213 | &cpu_to_phys_group); | ||
214 | } | ||
215 | |||
216 | #ifdef CONFIG_NUMA | ||
217 | init_sched_build_groups(sched_group_allnodes, *cpu_map, | ||
218 | &cpu_to_allnodes_group); | ||
219 | |||
220 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
221 | /* Set up node groups */ | ||
222 | struct sched_group *sg, *prev; | ||
223 | cpumask_t nodemask = node_to_cpumask(i); | ||
224 | cpumask_t domainspan; | ||
225 | cpumask_t covered = CPU_MASK_NONE; | ||
226 | int j; | ||
227 | |||
228 | cpus_and(nodemask, nodemask, *cpu_map); | ||
229 | if (cpus_empty(nodemask)) | ||
230 | continue; | ||
231 | |||
232 | domainspan = sched_domain_node_span(i); | ||
233 | cpus_and(domainspan, domainspan, *cpu_map); | ||
234 | |||
235 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | ||
236 | sched_group_nodes[i] = sg; | ||
237 | for_each_cpu_mask(j, nodemask) { | ||
238 | struct sched_domain *sd; | ||
239 | sd = &per_cpu(node_domains, j); | ||
240 | sd->groups = sg; | ||
241 | if (sd->groups == NULL) { | ||
242 | /* Turn off balancing if we have no groups */ | ||
243 | sd->flags = 0; | ||
244 | } | ||
245 | } | ||
246 | if (!sg) { | ||
247 | printk(KERN_WARNING | ||
248 | "Can not alloc domain group for node %d\n", i); | ||
249 | continue; | ||
250 | } | ||
251 | sg->cpu_power = 0; | ||
252 | sg->cpumask = nodemask; | ||
253 | cpus_or(covered, covered, nodemask); | ||
254 | prev = sg; | ||
255 | |||
256 | for (j = 0; j < MAX_NUMNODES; j++) { | ||
257 | cpumask_t tmp, notcovered; | ||
258 | int n = (i + j) % MAX_NUMNODES; | ||
259 | |||
260 | cpus_complement(notcovered, covered); | ||
261 | cpus_and(tmp, notcovered, *cpu_map); | ||
262 | cpus_and(tmp, tmp, domainspan); | ||
263 | if (cpus_empty(tmp)) | ||
264 | break; | ||
265 | |||
266 | nodemask = node_to_cpumask(n); | ||
267 | cpus_and(tmp, tmp, nodemask); | ||
268 | if (cpus_empty(tmp)) | ||
269 | continue; | ||
270 | |||
271 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | ||
272 | if (!sg) { | ||
273 | printk(KERN_WARNING | ||
274 | "Can not alloc domain group for node %d\n", j); | ||
275 | break; | ||
276 | } | ||
277 | sg->cpu_power = 0; | ||
278 | sg->cpumask = tmp; | ||
279 | cpus_or(covered, covered, tmp); | ||
280 | prev->next = sg; | ||
281 | prev = sg; | ||
282 | } | ||
283 | prev->next = sched_group_nodes[i]; | ||
284 | } | ||
285 | #endif | ||
286 | |||
287 | /* Calculate CPU power for physical packages and nodes */ | ||
288 | for_each_cpu_mask(i, *cpu_map) { | ||
289 | int power; | ||
290 | struct sched_domain *sd; | ||
291 | #ifdef CONFIG_SCHED_SMT | ||
292 | sd = &per_cpu(cpu_domains, i); | ||
293 | power = SCHED_LOAD_SCALE; | ||
294 | sd->groups->cpu_power = power; | ||
295 | #endif | ||
296 | |||
297 | sd = &per_cpu(phys_domains, i); | ||
298 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | ||
299 | (cpus_weight(sd->groups->cpumask)-1) / 10; | ||
300 | sd->groups->cpu_power = power; | ||
301 | |||
302 | #ifdef CONFIG_NUMA | ||
303 | sd = &per_cpu(allnodes_domains, i); | ||
304 | if (sd->groups) { | ||
305 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | ||
306 | (cpus_weight(sd->groups->cpumask)-1) / 10; | ||
307 | sd->groups->cpu_power = power; | ||
308 | } | ||
309 | #endif | ||
310 | } | ||
311 | |||
312 | #ifdef CONFIG_NUMA | ||
313 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
314 | struct sched_group *sg = sched_group_nodes[i]; | ||
315 | int j; | ||
316 | |||
317 | if (sg == NULL) | ||
318 | continue; | ||
319 | next_sg: | ||
320 | for_each_cpu_mask(j, sg->cpumask) { | ||
321 | struct sched_domain *sd; | ||
322 | int power; | ||
323 | |||
324 | sd = &per_cpu(phys_domains, j); | ||
325 | if (j != first_cpu(sd->groups->cpumask)) { | ||
326 | /* | ||
327 | * Only add "power" once for each | ||
328 | * physical package. | ||
329 | */ | ||
330 | continue; | ||
331 | } | ||
332 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | ||
333 | (cpus_weight(sd->groups->cpumask)-1) / 10; | ||
334 | |||
335 | sg->cpu_power += power; | ||
336 | } | ||
337 | sg = sg->next; | ||
338 | if (sg != sched_group_nodes[i]) | ||
339 | goto next_sg; | ||
340 | } | ||
341 | #endif | ||
342 | |||
343 | /* Attach the domains */ | ||
344 | for_each_cpu_mask(i, *cpu_map) { | ||
345 | struct sched_domain *sd; | ||
346 | #ifdef CONFIG_SCHED_SMT | ||
347 | sd = &per_cpu(cpu_domains, i); | ||
348 | #else | ||
349 | sd = &per_cpu(phys_domains, i); | ||
350 | #endif | ||
351 | cpu_attach_domain(sd, i); | ||
352 | } | ||
353 | } | ||
354 | /* | ||
355 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | ||
356 | */ | ||
357 | void arch_init_sched_domains(const cpumask_t *cpu_map) | ||
358 | { | ||
359 | cpumask_t cpu_default_map; | ||
360 | |||
361 | /* | ||
362 | * Setup mask for cpus without special case scheduling requirements. | ||
363 | * For now this just excludes isolated cpus, but could be used to | ||
364 | * exclude other special cases in the future. | ||
365 | */ | ||
366 | cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); | ||
367 | |||
368 | build_sched_domains(&cpu_default_map); | ||
369 | } | ||
370 | |||
371 | void arch_destroy_sched_domains(const cpumask_t *cpu_map) | ||
372 | { | ||
373 | #ifdef CONFIG_NUMA | ||
374 | int i; | ||
375 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
376 | cpumask_t nodemask = node_to_cpumask(i); | ||
377 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | ||
378 | |||
379 | cpus_and(nodemask, nodemask, *cpu_map); | ||
380 | if (cpus_empty(nodemask)) | ||
381 | continue; | ||
382 | |||
383 | if (sg == NULL) | ||
384 | continue; | ||
385 | sg = sg->next; | ||
386 | next_sg: | ||
387 | oldsg = sg; | ||
388 | sg = sg->next; | ||
389 | kfree(oldsg); | ||
390 | if (oldsg != sched_group_nodes[i]) | ||
391 | goto next_sg; | ||
392 | sched_group_nodes[i] = NULL; | ||
393 | } | ||
394 | #endif | ||
395 | } | ||
396 | |||
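Editorial note: the node-group code above links each node's sched_group structures into a circular singly-linked list (prev->next = sched_group_nodes[i]), and arch_destroy_sched_domains() walks that ring to free it, deliberately freeing the list head last. The following stand-alone C sketch shows only that build/teardown pattern; "struct group" and the helper names are invented stand-ins, not kernel code.

	#include <stdio.h>
	#include <stdlib.h>

	struct group {                     /* stand-in for struct sched_group */
		int id;
		struct group *next;
	};

	/* Build a circular list of n groups and return its head. */
	static struct group *build_ring(int n)
	{
		struct group *head = NULL, *prev = NULL;
		for (int i = 0; i < n; i++) {
			struct group *g = malloc(sizeof(*g));
			if (!g)
				break;
			g->id = i;
			if (!head)
				head = g;
			else
				prev->next = g;
			prev = g;
		}
		if (prev)
			prev->next = head;      /* close the ring */
		return head;
	}

	/* Free the ring: start at the second element so the head, which
	 * terminates the walk, is freed last (same order as the patch). */
	static void free_ring(struct group *head)
	{
		struct group *sg, *old;
		if (!head)
			return;
		sg = head->next;
		do {
			old = sg;
			sg = sg->next;
			free(old);
		} while (old != head);
	}

	int main(void)
	{
		struct group *head = build_ring(4);
		if (head) {
			struct group *g = head;
			do {
				printf("group %d\n", g->id);
				g = g->next;
			} while (g != head);
			free_ring(head);
		}
		return 0;
	}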
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 9be53e1ea404..0741b066b98f 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <asm/cache.h> | 37 | #include <asm/cache.h> |
38 | #include <asm/errno.h> | 38 | #include <asm/errno.h> |
39 | #include <asm/kregs.h> | 39 | #include <asm/kregs.h> |
40 | #include <asm/offsets.h> | 40 | #include <asm/asm-offsets.h> |
41 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
42 | #include <asm/percpu.h> | 42 | #include <asm/percpu.h> |
43 | #include <asm/processor.h> | 43 | #include <asm/processor.h> |
@@ -204,9 +204,6 @@ GLOBAL_ENTRY(ia64_switch_to) | |||
204 | (p6) br.cond.dpnt .map | 204 | (p6) br.cond.dpnt .map |
205 | ;; | 205 | ;; |
206 | .done: | 206 | .done: |
207 | (p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!! | ||
208 | ;; | ||
209 | (p6) srlz.d | ||
210 | ld8 sp=[r21] // load kernel stack pointer of new task | 207 | ld8 sp=[r21] // load kernel stack pointer of new task |
211 | mov IA64_KR(CURRENT)=in0 // update "current" application register | 208 | mov IA64_KR(CURRENT)=in0 // update "current" application register |
212 | mov r8=r13 // return pointer to previously running task | 209 | mov r8=r13 // return pointer to previously running task |
@@ -234,6 +231,9 @@ GLOBAL_ENTRY(ia64_switch_to) | |||
234 | mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped... | 231 | mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped... |
235 | ;; | 232 | ;; |
236 | itr.d dtr[r25]=r23 // wire in new mapping... | 233 | itr.d dtr[r25]=r23 // wire in new mapping... |
234 | ssm psr.ic // reenable the psr.ic bit | ||
235 | ;; | ||
236 | srlz.d | ||
237 | br.cond.sptk .done | 237 | br.cond.sptk .done |
238 | END(ia64_switch_to) | 238 | END(ia64_switch_to) |
239 | 239 | ||
@@ -470,6 +470,29 @@ ENTRY(load_switch_stack) | |||
470 | br.cond.sptk.many b7 | 470 | br.cond.sptk.many b7 |
471 | END(load_switch_stack) | 471 | END(load_switch_stack) |
472 | 472 | ||
473 | GLOBAL_ENTRY(prefetch_stack) | ||
474 | add r14 = -IA64_SWITCH_STACK_SIZE, sp | ||
475 | add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0 | ||
476 | ;; | ||
477 | ld8 r16 = [r15] // load next's stack pointer | ||
478 | lfetch.fault.excl [r14], 128 | ||
479 | ;; | ||
480 | lfetch.fault.excl [r14], 128 | ||
481 | lfetch.fault [r16], 128 | ||
482 | ;; | ||
483 | lfetch.fault.excl [r14], 128 | ||
484 | lfetch.fault [r16], 128 | ||
485 | ;; | ||
486 | lfetch.fault.excl [r14], 128 | ||
487 | lfetch.fault [r16], 128 | ||
488 | ;; | ||
489 | lfetch.fault.excl [r14], 128 | ||
490 | lfetch.fault [r16], 128 | ||
491 | ;; | ||
492 | lfetch.fault [r16], 128 | ||
493 | br.ret.sptk.many rp | ||
494 | END(prefetch_stack) | ||
495 | |||
473 | GLOBAL_ENTRY(execve) | 496 | GLOBAL_ENTRY(execve) |
474 | mov r15=__NR_execve // put syscall number in place | 497 | mov r15=__NR_execve // put syscall number in place |
475 | break __BREAK_SYSCALL | 498 | break __BREAK_SYSCALL |
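Editorial note: the new prefetch_stack() entry point above issues lfetch hints for the outgoing switch_stack save area (exclusive, since it will be written) and for the incoming task's kernel stack, one 128-byte line at a time, before the context switch proper. A rough user-space analogue of that pattern, written with GCC's __builtin_prefetch and an assumed 128-byte stride, is sketched below; it only illustrates the idea and is not the kernel interface.

	#include <stddef.h>

	#define LINE 128  /* assumed cache-line stride, matching the lfetch step */

	/* Hint the CPU to pull 'save' (soon written) and 'next_sp' (soon read)
	 * into cache, a few lines each, ahead of an expensive switch. */
	static inline void prefetch_stacks(void *save, const void *next_sp)
	{
		char *w = save;
		const char *r = next_sp;

		for (int i = 0; i < 5; i++)
			__builtin_prefetch(w + i * LINE, 1, 3);  /* write intent */
		for (int i = 0; i < 5; i++)
			__builtin_prefetch(r + i * LINE, 0, 3);  /* read only */
	}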
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 7d7684a369d3..2ddbac6f4999 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/asmmacro.h> | 15 | #include <asm/asmmacro.h> |
16 | #include <asm/errno.h> | 16 | #include <asm/errno.h> |
17 | #include <asm/offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | #include <asm/percpu.h> | 18 | #include <asm/percpu.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | #include <asm/sal.h> | 20 | #include <asm/sal.h> |
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 86948ce63e43..86064ca98952 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #include <asm/asmmacro.h> | 11 | #include <asm/asmmacro.h> |
12 | #include <asm/errno.h> | 12 | #include <asm/errno.h> |
13 | #include <asm/offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/sigcontext.h> | 14 | #include <asm/sigcontext.h> |
15 | #include <asm/system.h> | 15 | #include <asm/system.h> |
16 | #include <asm/unistd.h> | 16 | #include <asm/unistd.h> |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 8d3a9291b47f..bfe65b2e8621 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/fpu.h> | 25 | #include <asm/fpu.h> |
26 | #include <asm/kregs.h> | 26 | #include <asm/kregs.h> |
27 | #include <asm/mmu_context.h> | 27 | #include <asm/mmu_context.h> |
28 | #include <asm/offsets.h> | 28 | #include <asm/asm-offsets.h> |
29 | #include <asm/pal.h> | 29 | #include <asm/pal.h> |
30 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 7936b62f7a2e..574084f343fa 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -561,7 +561,7 @@ static inline int vector_is_shared (int vector) | |||
561 | return (iosapic_intr_info[vector].count > 1); | 561 | return (iosapic_intr_info[vector].count > 1); |
562 | } | 562 | } |
563 | 563 | ||
564 | static void | 564 | static int |
565 | register_intr (unsigned int gsi, int vector, unsigned char delivery, | 565 | register_intr (unsigned int gsi, int vector, unsigned char delivery, |
566 | unsigned long polarity, unsigned long trigger) | 566 | unsigned long polarity, unsigned long trigger) |
567 | { | 567 | { |
@@ -576,7 +576,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
576 | index = find_iosapic(gsi); | 576 | index = find_iosapic(gsi); |
577 | if (index < 0) { | 577 | if (index < 0) { |
578 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi); | 578 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi); |
579 | return; | 579 | return -ENODEV; |
580 | } | 580 | } |
581 | 581 | ||
582 | iosapic_address = iosapic_lists[index].addr; | 582 | iosapic_address = iosapic_lists[index].addr; |
@@ -587,7 +587,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
587 | rte = iosapic_alloc_rte(); | 587 | rte = iosapic_alloc_rte(); |
588 | if (!rte) { | 588 | if (!rte) { |
589 | printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__); | 589 | printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__); |
590 | return; | 590 | return -ENOMEM; |
591 | } | 591 | } |
592 | 592 | ||
593 | rte_index = gsi - gsi_base; | 593 | rte_index = gsi - gsi_base; |
@@ -603,7 +603,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
603 | struct iosapic_intr_info *info = &iosapic_intr_info[vector]; | 603 | struct iosapic_intr_info *info = &iosapic_intr_info[vector]; |
604 | if (info->trigger != trigger || info->polarity != polarity) { | 604 | if (info->trigger != trigger || info->polarity != polarity) { |
605 | printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__); | 605 | printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__); |
606 | return; | 606 | return -EINVAL; |
607 | } | 607 | } |
608 | } | 608 | } |
609 | 609 | ||
@@ -623,6 +623,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
623 | __FUNCTION__, vector, idesc->handler->typename, irq_type->typename); | 623 | __FUNCTION__, vector, idesc->handler->typename, irq_type->typename); |
624 | idesc->handler = irq_type; | 624 | idesc->handler = irq_type; |
625 | } | 625 | } |
626 | return 0; | ||
626 | } | 627 | } |
627 | 628 | ||
628 | static unsigned int | 629 | static unsigned int |
@@ -710,7 +711,7 @@ int | |||
710 | iosapic_register_intr (unsigned int gsi, | 711 | iosapic_register_intr (unsigned int gsi, |
711 | unsigned long polarity, unsigned long trigger) | 712 | unsigned long polarity, unsigned long trigger) |
712 | { | 713 | { |
713 | int vector, mask = 1; | 714 | int vector, mask = 1, err; |
714 | unsigned int dest; | 715 | unsigned int dest; |
715 | unsigned long flags; | 716 | unsigned long flags; |
716 | struct iosapic_rte_info *rte; | 717 | struct iosapic_rte_info *rte; |
@@ -737,8 +738,8 @@ again: | |||
737 | vector = assign_irq_vector(AUTO_ASSIGN); | 738 | vector = assign_irq_vector(AUTO_ASSIGN); |
738 | if (vector < 0) { | 739 | if (vector < 0) { |
739 | vector = iosapic_find_sharable_vector(trigger, polarity); | 740 | vector = iosapic_find_sharable_vector(trigger, polarity); |
740 | if (vector < 0) | 741 | if (vector < 0) |
741 | panic("%s: out of interrupt vectors!\n", __FUNCTION__); | 742 | return -ENOSPC; |
742 | } | 743 | } |
743 | 744 | ||
744 | spin_lock_irqsave(&irq_descp(vector)->lock, flags); | 745 | spin_lock_irqsave(&irq_descp(vector)->lock, flags); |
@@ -753,8 +754,13 @@ again: | |||
753 | } | 754 | } |
754 | 755 | ||
755 | dest = get_target_cpu(gsi, vector); | 756 | dest = get_target_cpu(gsi, vector); |
756 | register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, | 757 | err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, |
757 | polarity, trigger); | 758 | polarity, trigger); |
759 | if (err < 0) { | ||
760 | spin_unlock(&iosapic_lock); | ||
761 | spin_unlock_irqrestore(&irq_descp(vector)->lock, flags); | ||
762 | return err; | ||
763 | } | ||
758 | 764 | ||
759 | /* | 765 | /* |
760 | * If the vector is shared and already unmasked for | 766 | * If the vector is shared and already unmasked for |
@@ -776,7 +782,6 @@ again: | |||
776 | return vector; | 782 | return vector; |
777 | } | 783 | } |
778 | 784 | ||
779 | #ifdef CONFIG_ACPI_DEALLOCATE_IRQ | ||
780 | void | 785 | void |
781 | iosapic_unregister_intr (unsigned int gsi) | 786 | iosapic_unregister_intr (unsigned int gsi) |
782 | { | 787 | { |
@@ -859,7 +864,6 @@ iosapic_unregister_intr (unsigned int gsi) | |||
859 | spin_unlock(&iosapic_lock); | 864 | spin_unlock(&iosapic_lock); |
860 | spin_unlock_irqrestore(&idesc->lock, flags); | 865 | spin_unlock_irqrestore(&idesc->lock, flags); |
861 | } | 866 | } |
862 | #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */ | ||
863 | 867 | ||
864 | /* | 868 | /* |
865 | * ACPI calls this when it finds an entry for a platform interrupt. | 869 | * ACPI calls this when it finds an entry for a platform interrupt. |
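Editorial note: register_intr() now reports failure with a negative errno instead of returning void, and iosapic_register_intr() no longer panics when it runs out of vectors; it drops its locks and passes the error up. The stand-alone sketch below shows only that error-propagation convention; all names in it are invented.

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Hypothetical low-level setup step: 0 on success, -errno on failure. */
	static int program_route(int gsi, int vector)
	{
		if (gsi < 0)
			return -ENODEV;   /* no controller for this line */
		if (vector < 0)
			return -EINVAL;   /* caller passed a bad vector */
		return 0;
	}

	/* Caller takes the lock; on error it releases the lock and
	 * propagates the code instead of panicking. */
	static int register_line(int gsi, int vector)
	{
		int err;

		pthread_mutex_lock(&table_lock);
		err = program_route(gsi, vector);
		if (err < 0) {
			pthread_mutex_unlock(&table_lock);
			return err;
		}
		/* ... commit the new routing entry here ... */
		pthread_mutex_unlock(&table_lock);
		return vector;
	}

	int main(void)
	{
		printf("register_line -> %d\n", register_line(-1, 10));
		printf("register_line -> %d\n", register_line(4, 10));
		return 0;
	}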
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 28f2aadc38d0..205d98028261 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -91,23 +91,8 @@ skip: | |||
91 | } | 91 | } |
92 | 92 | ||
93 | #ifdef CONFIG_SMP | 93 | #ifdef CONFIG_SMP |
94 | /* | ||
95 | * This is updated when the user sets irq affinity via /proc | ||
96 | */ | ||
97 | static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS]; | ||
98 | static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)]; | ||
99 | |||
100 | static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; | 94 | static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; |
101 | 95 | ||
102 | /* | ||
103 | * Arch specific routine for deferred write to iosapic rte to reprogram | ||
104 | * intr destination. | ||
105 | */ | ||
106 | void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val) | ||
107 | { | ||
108 | pending_irq_cpumask[irq] = mask_val; | ||
109 | } | ||
110 | |||
111 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | 96 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) |
112 | { | 97 | { |
113 | cpumask_t mask = CPU_MASK_NONE; | 98 | cpumask_t mask = CPU_MASK_NONE; |
@@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | |||
116 | 101 | ||
117 | if (irq < NR_IRQS) { | 102 | if (irq < NR_IRQS) { |
118 | irq_affinity[irq] = mask; | 103 | irq_affinity[irq] = mask; |
104 | set_irq_info(irq, mask); | ||
119 | irq_redir[irq] = (char) (redir & 0xff); | 105 | irq_redir[irq] = (char) (redir & 0xff); |
120 | } | 106 | } |
121 | } | 107 | } |
122 | |||
123 | |||
124 | void move_irq(int irq) | ||
125 | { | ||
126 | /* note - we hold desc->lock */ | ||
127 | cpumask_t tmp; | ||
128 | irq_desc_t *desc = irq_descp(irq); | ||
129 | int redir = test_bit(irq, pending_irq_redir); | ||
130 | |||
131 | if (unlikely(!desc->handler->set_affinity)) | ||
132 | return; | ||
133 | |||
134 | if (!cpus_empty(pending_irq_cpumask[irq])) { | ||
135 | cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map); | ||
136 | if (unlikely(!cpus_empty(tmp))) { | ||
137 | desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0), | ||
138 | pending_irq_cpumask[irq]); | ||
139 | } | ||
140 | cpus_clear(pending_irq_cpumask[irq]); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | |||
145 | #endif /* CONFIG_SMP */ | 108 | #endif /* CONFIG_SMP */ |
146 | 109 | ||
147 | #ifdef CONFIG_HOTPLUG_CPU | 110 | #ifdef CONFIG_HOTPLUG_CPU |
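Editorial note: the deleted proc_set_irq_affinity()/move_irq() pair implemented a deferred-affinity scheme: the mask written via /proc was parked in pending_irq_cpumask[] and only programmed into the hardware later, under the descriptor lock. With this patch that bookkeeping moves to the generic IRQ layer (set_irq_info()). A toy sketch of the deferred pattern that was removed, with invented names and a tiny fixed table, follows.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t cpumask_t;        /* toy stand-in for the kernel cpumask */

	static cpumask_t pending_mask[16];
	static bool pending_set[16];

	/* Called from the /proc write path: just record the request. */
	static void set_affinity_deferred(int irq, cpumask_t mask)
	{
		pending_mask[irq] = mask;
		pending_set[irq] = true;
	}

	/* Called later at a safe point (e.g. on the next interrupt) while
	 * the descriptor lock is held: apply and clear the request. */
	static void apply_pending_affinity(int irq,
					   void (*program_hw)(int, cpumask_t))
	{
		if (!pending_set[irq])
			return;
		program_hw(irq, pending_mask[irq]);
		pending_set[irq] = false;
	}

	static void program_hw_stub(int irq, cpumask_t mask)
	{
		printf("irq %d -> mask %#llx\n", irq, (unsigned long long)mask);
	}

	int main(void)
	{
		set_affinity_deferred(3, 0x5);                  /* user request */
		apply_pending_affinity(3, program_hw_stub);     /* safe point */
		return 0;
	}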
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 3bb3a13c4047..c13ca0d49c4a 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <asm/break.h> | 44 | #include <asm/break.h> |
45 | #include <asm/ia32.h> | 45 | #include <asm/ia32.h> |
46 | #include <asm/kregs.h> | 46 | #include <asm/kregs.h> |
47 | #include <asm/offsets.h> | 47 | #include <asm/asm-offsets.h> |
48 | #include <asm/pgtable.h> | 48 | #include <asm/pgtable.h> |
49 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
50 | #include <asm/ptrace.h> | 50 | #include <asm/ptrace.h> |
@@ -69,7 +69,6 @@ | |||
69 | # define DBG_FAULT(i) | 69 | # define DBG_FAULT(i) |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | #define MINSTATE_VIRT /* needed by minstate.h */ | ||
73 | #include "minstate.h" | 72 | #include "minstate.h" |
74 | 73 | ||
75 | #define FAULT(n) \ | 74 | #define FAULT(n) \ |
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S index b7fa3ccd2b0f..2323377e3695 100644 --- a/arch/ia64/kernel/jprobes.S +++ b/arch/ia64/kernel/jprobes.S | |||
@@ -49,6 +49,7 @@ | |||
49 | /* | 49 | /* |
50 | * void jprobe_break(void) | 50 | * void jprobe_break(void) |
51 | */ | 51 | */ |
52 | .section .kprobes.text, "ax" | ||
52 | ENTRY(jprobe_break) | 53 | ENTRY(jprobe_break) |
53 | break.m 0x80300 | 54 | break.m 0x80300 |
54 | END(jprobe_break) | 55 | END(jprobe_break) |
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 884f5cd27d8a..471086b808a4 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c | |||
@@ -87,12 +87,25 @@ static enum instruction_type bundle_encoding[32][3] = { | |||
87 | * is IP relative instruction and update the kprobe | 87 | * is IP relative instruction and update the kprobe |
88 | * inst flag accordingly | 88 | * inst flag accordingly |
89 | */ | 89 | */ |
90 | static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode, | 90 | static void __kprobes update_kprobe_inst_flag(uint template, uint slot, |
91 | unsigned long kprobe_inst, struct kprobe *p) | 91 | uint major_opcode, |
92 | unsigned long kprobe_inst, | ||
93 | struct kprobe *p) | ||
92 | { | 94 | { |
93 | p->ainsn.inst_flag = 0; | 95 | p->ainsn.inst_flag = 0; |
94 | p->ainsn.target_br_reg = 0; | 96 | p->ainsn.target_br_reg = 0; |
95 | 97 | ||
98 | /* Check for Break instruction | ||
99 | * Bits 37:40 Major opcode to be zero | ||
100 | * Bits 27:32 X6 to be zero | ||
101 | * Bits 32:35 X3 to be zero | ||
102 | */ | ||
103 | if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) { | ||
104 | /* is a break instruction */ | ||
105 | p->ainsn.inst_flag |= INST_FLAG_BREAK_INST; | ||
106 | return; | ||
107 | } | ||
108 | |||
96 | if (bundle_encoding[template][slot] == B) { | 109 | if (bundle_encoding[template][slot] == B) { |
97 | switch (major_opcode) { | 110 | switch (major_opcode) { |
98 | case INDIRECT_CALL_OPCODE: | 111 | case INDIRECT_CALL_OPCODE: |
@@ -126,8 +139,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode | |||
126 | * Returns 0 if supported | 139 | * Returns 0 if supported |
127 | * Returns -EINVAL if unsupported | 140 | * Returns -EINVAL if unsupported |
128 | */ | 141 | */ |
129 | static int unsupported_inst(uint template, uint slot, uint major_opcode, | 142 | static int __kprobes unsupported_inst(uint template, uint slot, |
130 | unsigned long kprobe_inst, struct kprobe *p) | 143 | uint major_opcode, |
144 | unsigned long kprobe_inst, | ||
145 | struct kprobe *p) | ||
131 | { | 146 | { |
132 | unsigned long addr = (unsigned long)p->addr; | 147 | unsigned long addr = (unsigned long)p->addr; |
133 | 148 | ||
@@ -168,8 +183,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode, | |||
168 | * on which we are inserting kprobe is cmp instruction | 183 | * on which we are inserting kprobe is cmp instruction |
169 | * with ctype as unc. | 184 | * with ctype as unc. |
170 | */ | 185 | */ |
171 | static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode, | 186 | static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot, |
172 | unsigned long kprobe_inst) | 187 | uint major_opcode, |
188 | unsigned long kprobe_inst) | ||
173 | { | 189 | { |
174 | cmp_inst_t cmp_inst; | 190 | cmp_inst_t cmp_inst; |
175 | uint ctype_unc = 0; | 191 | uint ctype_unc = 0; |
@@ -201,8 +217,10 @@ out: | |||
201 | * In this function we override the bundle with | 217 | * In this function we override the bundle with |
202 | * the break instruction at the given slot. | 218 | * the break instruction at the given slot. |
203 | */ | 219 | */ |
204 | static void prepare_break_inst(uint template, uint slot, uint major_opcode, | 220 | static void __kprobes prepare_break_inst(uint template, uint slot, |
205 | unsigned long kprobe_inst, struct kprobe *p) | 221 | uint major_opcode, |
222 | unsigned long kprobe_inst, | ||
223 | struct kprobe *p) | ||
206 | { | 224 | { |
207 | unsigned long break_inst = BREAK_INST; | 225 | unsigned long break_inst = BREAK_INST; |
208 | bundle_t *bundle = &p->ainsn.insn.bundle; | 226 | bundle_t *bundle = &p->ainsn.insn.bundle; |
@@ -271,7 +289,8 @@ static inline int in_ivt_functions(unsigned long addr) | |||
271 | && addr < (unsigned long)__end_ivt_text); | 289 | && addr < (unsigned long)__end_ivt_text); |
272 | } | 290 | } |
273 | 291 | ||
274 | static int valid_kprobe_addr(int template, int slot, unsigned long addr) | 292 | static int __kprobes valid_kprobe_addr(int template, int slot, |
293 | unsigned long addr) | ||
275 | { | 294 | { |
276 | if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { | 295 | if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { |
277 | printk(KERN_WARNING "Attempting to insert unaligned kprobe " | 296 | printk(KERN_WARNING "Attempting to insert unaligned kprobe " |
@@ -323,7 +342,7 @@ static void kretprobe_trampoline(void) | |||
323 | * - cleanup by marking the instance as unused | 342 | * - cleanup by marking the instance as unused |
324 | * - long jump back to the original return address | 343 | * - long jump back to the original return address |
325 | */ | 344 | */ |
326 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | 345 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
327 | { | 346 | { |
328 | struct kretprobe_instance *ri = NULL; | 347 | struct kretprobe_instance *ri = NULL; |
329 | struct hlist_head *head; | 348 | struct hlist_head *head; |
@@ -381,7 +400,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
381 | return 1; | 400 | return 1; |
382 | } | 401 | } |
383 | 402 | ||
384 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | 403 | void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, |
404 | struct pt_regs *regs) | ||
385 | { | 405 | { |
386 | struct kretprobe_instance *ri; | 406 | struct kretprobe_instance *ri; |
387 | 407 | ||
@@ -399,7 +419,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | |||
399 | } | 419 | } |
400 | } | 420 | } |
401 | 421 | ||
402 | int arch_prepare_kprobe(struct kprobe *p) | 422 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
403 | { | 423 | { |
404 | unsigned long addr = (unsigned long) p->addr; | 424 | unsigned long addr = (unsigned long) p->addr; |
405 | unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); | 425 | unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); |
@@ -430,7 +450,7 @@ int arch_prepare_kprobe(struct kprobe *p) | |||
430 | return 0; | 450 | return 0; |
431 | } | 451 | } |
432 | 452 | ||
433 | void arch_arm_kprobe(struct kprobe *p) | 453 | void __kprobes arch_arm_kprobe(struct kprobe *p) |
434 | { | 454 | { |
435 | unsigned long addr = (unsigned long)p->addr; | 455 | unsigned long addr = (unsigned long)p->addr; |
436 | unsigned long arm_addr = addr & ~0xFULL; | 456 | unsigned long arm_addr = addr & ~0xFULL; |
@@ -439,7 +459,7 @@ void arch_arm_kprobe(struct kprobe *p) | |||
439 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); | 459 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); |
440 | } | 460 | } |
441 | 461 | ||
442 | void arch_disarm_kprobe(struct kprobe *p) | 462 | void __kprobes arch_disarm_kprobe(struct kprobe *p) |
443 | { | 463 | { |
444 | unsigned long addr = (unsigned long)p->addr; | 464 | unsigned long addr = (unsigned long)p->addr; |
445 | unsigned long arm_addr = addr & ~0xFULL; | 465 | unsigned long arm_addr = addr & ~0xFULL; |
@@ -449,7 +469,7 @@ void arch_disarm_kprobe(struct kprobe *p) | |||
449 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); | 469 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); |
450 | } | 470 | } |
451 | 471 | ||
452 | void arch_remove_kprobe(struct kprobe *p) | 472 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
453 | { | 473 | { |
454 | } | 474 | } |
455 | 475 | ||
@@ -461,7 +481,7 @@ void arch_remove_kprobe(struct kprobe *p) | |||
461 | * to original stack address, handle the case where we need to fixup the | 481 | * to original stack address, handle the case where we need to fixup the |
462 | * relative IP address and/or fixup branch register. | 482 | * relative IP address and/or fixup branch register. |
463 | */ | 483 | */ |
464 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) | 484 | static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) |
465 | { | 485 | { |
466 | unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL; | 486 | unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL; |
467 | unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; | 487 | unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; |
@@ -528,13 +548,16 @@ turn_ss_off: | |||
528 | ia64_psr(regs)->ss = 0; | 548 | ia64_psr(regs)->ss = 0; |
529 | } | 549 | } |
530 | 550 | ||
531 | static void prepare_ss(struct kprobe *p, struct pt_regs *regs) | 551 | static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) |
532 | { | 552 | { |
533 | unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; | 553 | unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; |
534 | unsigned long slot = (unsigned long)p->addr & 0xf; | 554 | unsigned long slot = (unsigned long)p->addr & 0xf; |
535 | 555 | ||
536 | /* Update instruction pointer (IIP) and slot number (IPSR.ri) */ | 556 | /* single step inline if break instruction */ |
537 | regs->cr_iip = bundle_addr & ~0xFULL; | 557 | if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST) |
558 | regs->cr_iip = (unsigned long)p->addr & ~0xFULL; | ||
559 | else | ||
560 | regs->cr_iip = bundle_addr & ~0xFULL; | ||
538 | 561 | ||
539 | if (slot > 2) | 562 | if (slot > 2) |
540 | slot = 0; | 563 | slot = 0; |
@@ -545,7 +568,39 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs) | |||
545 | ia64_psr(regs)->ss = 1; | 568 | ia64_psr(regs)->ss = 1; |
546 | } | 569 | } |
547 | 570 | ||
548 | static int pre_kprobes_handler(struct die_args *args) | 571 | static int __kprobes is_ia64_break_inst(struct pt_regs *regs) |
572 | { | ||
573 | unsigned int slot = ia64_psr(regs)->ri; | ||
574 | unsigned int template, major_opcode; | ||
575 | unsigned long kprobe_inst; | ||
576 | unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; | ||
577 | bundle_t bundle; | ||
578 | |||
579 | memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); | ||
580 | template = bundle.quad0.template; | ||
581 | |||
582 | /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ | ||
583 | if (slot == 1 && bundle_encoding[template][1] == L) | ||
584 | slot++; | ||
585 | |||
586 | /* Get Kprobe probe instruction at given slot*/ | ||
587 | get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode); | ||
588 | |||
589 | /* For break instruction, | ||
590 | * Bits 37:40 Major opcode to be zero | ||
591 | * Bits 27:32 X6 to be zero | ||
592 | * Bits 32:35 X3 to be zero | ||
593 | */ | ||
594 | if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) { | ||
595 | /* Not a break instruction */ | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | /* Is a break instruction */ | ||
600 | return 1; | ||
601 | } | ||
602 | |||
603 | static int __kprobes pre_kprobes_handler(struct die_args *args) | ||
549 | { | 604 | { |
550 | struct kprobe *p; | 605 | struct kprobe *p; |
551 | int ret = 0; | 606 | int ret = 0; |
@@ -558,7 +613,9 @@ static int pre_kprobes_handler(struct die_args *args) | |||
558 | if (kprobe_running()) { | 613 | if (kprobe_running()) { |
559 | p = get_kprobe(addr); | 614 | p = get_kprobe(addr); |
560 | if (p) { | 615 | if (p) { |
561 | if (kprobe_status == KPROBE_HIT_SS) { | 616 | if ( (kprobe_status == KPROBE_HIT_SS) && |
617 | (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { | ||
618 | ia64_psr(regs)->ss = 0; | ||
562 | unlock_kprobes(); | 619 | unlock_kprobes(); |
563 | goto no_kprobe; | 620 | goto no_kprobe; |
564 | } | 621 | } |
@@ -592,6 +649,19 @@ static int pre_kprobes_handler(struct die_args *args) | |||
592 | p = get_kprobe(addr); | 649 | p = get_kprobe(addr); |
593 | if (!p) { | 650 | if (!p) { |
594 | unlock_kprobes(); | 651 | unlock_kprobes(); |
652 | if (!is_ia64_break_inst(regs)) { | ||
653 | /* | ||
654 | * The breakpoint instruction was removed right | ||
655 | * after we hit it. Another cpu has removed | ||
656 | * either a probepoint or a debugger breakpoint | ||
657 | * at this address. In either case, no further | ||
658 | * handling of this interrupt is appropriate. | ||
659 | */ | ||
660 | ret = 1; | ||
661 | |||
662 | } | ||
663 | |||
664 | /* Not one of our break, let kernel handle it */ | ||
595 | goto no_kprobe; | 665 | goto no_kprobe; |
596 | } | 666 | } |
597 | 667 | ||
@@ -616,7 +686,7 @@ no_kprobe: | |||
616 | return ret; | 686 | return ret; |
617 | } | 687 | } |
618 | 688 | ||
619 | static int post_kprobes_handler(struct pt_regs *regs) | 689 | static int __kprobes post_kprobes_handler(struct pt_regs *regs) |
620 | { | 690 | { |
621 | if (!kprobe_running()) | 691 | if (!kprobe_running()) |
622 | return 0; | 692 | return 0; |
@@ -641,7 +711,7 @@ out: | |||
641 | return 1; | 711 | return 1; |
642 | } | 712 | } |
643 | 713 | ||
644 | static int kprobes_fault_handler(struct pt_regs *regs, int trapnr) | 714 | static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) |
645 | { | 715 | { |
646 | if (!kprobe_running()) | 716 | if (!kprobe_running()) |
647 | return 0; | 717 | return 0; |
@@ -659,8 +729,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr) | |||
659 | return 0; | 729 | return 0; |
660 | } | 730 | } |
661 | 731 | ||
662 | int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, | 732 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, |
663 | void *data) | 733 | unsigned long val, void *data) |
664 | { | 734 | { |
665 | struct die_args *args = (struct die_args *)data; | 735 | struct die_args *args = (struct die_args *)data; |
666 | switch(val) { | 736 | switch(val) { |
@@ -681,7 +751,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, | |||
681 | return NOTIFY_DONE; | 751 | return NOTIFY_DONE; |
682 | } | 752 | } |
683 | 753 | ||
684 | int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | 754 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
685 | { | 755 | { |
686 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 756 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
687 | unsigned long addr = ((struct fnptr *)(jp->entry))->ip; | 757 | unsigned long addr = ((struct fnptr *)(jp->entry))->ip; |
@@ -703,7 +773,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
703 | return 1; | 773 | return 1; |
704 | } | 774 | } |
705 | 775 | ||
706 | int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | 776 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
707 | { | 777 | { |
708 | *regs = jprobe_saved_regs; | 778 | *regs = jprobe_saved_regs; |
709 | return 1; | 779 | return 1; |
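Editorial note: both update_kprobe_inst_flag() and the new is_ia64_break_inst() decide that a slot holds a break instruction by requiring the major opcode (bits 37-40) and the x6/x3 fields (bits 27-35) of the 41-bit slot to be zero, which is what the (kprobe_inst >> 27) & 0x1FF test encodes. The stand-alone check below mirrors only that bit test; the sample values are illustrative, not real encodings taken from the patch.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* 'inst' is assumed to be the already-extracted 41-bit slot. */
	static bool is_break_slot(uint64_t inst)
	{
		uint64_t major_opcode = (inst >> 37) & 0xF;

		if (major_opcode)
			return false;              /* bits 37-40 must be zero */
		if ((inst >> 27) & 0x1FF)
			return false;              /* bits 27-35 must be zero */
		return true;
	}

	int main(void)
	{
		/* Only low (immediate) bits set -> treated as a break. */
		printf("%d\n", is_break_slot(0x80300));
		/* Non-zero major opcode -> not a break. */
		printf("%d\n", is_break_slot(1ULL << 37));
		return 0;
	}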
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 4ebbf3974381..d0a5106fba24 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -48,6 +48,9 @@ | |||
48 | * Delete dead variables and functions. | 48 | * Delete dead variables and functions. |
49 | * Reorder to remove the need for forward declarations and to consolidate | 49 | * Reorder to remove the need for forward declarations and to consolidate |
50 | * related code. | 50 | * related code. |
51 | * | ||
52 | * 2005-08-12 Keith Owens <kaos@sgi.com> | ||
53 | * Convert MCA/INIT handlers to use per event stacks and SAL/OS state. | ||
51 | */ | 54 | */ |
52 | #include <linux/config.h> | 55 | #include <linux/config.h> |
53 | #include <linux/types.h> | 56 | #include <linux/types.h> |
@@ -77,6 +80,8 @@ | |||
77 | #include <asm/irq.h> | 80 | #include <asm/irq.h> |
78 | #include <asm/hw_irq.h> | 81 | #include <asm/hw_irq.h> |
79 | 82 | ||
83 | #include "entry.h" | ||
84 | |||
80 | #if defined(IA64_MCA_DEBUG_INFO) | 85 | #if defined(IA64_MCA_DEBUG_INFO) |
81 | # define IA64_MCA_DEBUG(fmt...) printk(fmt) | 86 | # define IA64_MCA_DEBUG(fmt...) printk(fmt) |
82 | #else | 87 | #else |
@@ -84,9 +89,7 @@ | |||
84 | #endif | 89 | #endif |
85 | 90 | ||
86 | /* Used by mca_asm.S */ | 91 | /* Used by mca_asm.S */ |
87 | ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state; | 92 | u32 ia64_mca_serialize; |
88 | ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state; | ||
89 | u64 ia64_mca_serialize; | ||
90 | DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ | 93 | DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ |
91 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ | 94 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ |
92 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ | 95 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ |
@@ -95,8 +98,10 @@ DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ | |||
95 | unsigned long __per_cpu_mca[NR_CPUS]; | 98 | unsigned long __per_cpu_mca[NR_CPUS]; |
96 | 99 | ||
97 | /* In mca_asm.S */ | 100 | /* In mca_asm.S */ |
98 | extern void ia64_monarch_init_handler (void); | 101 | extern void ia64_os_init_dispatch_monarch (void); |
99 | extern void ia64_slave_init_handler (void); | 102 | extern void ia64_os_init_dispatch_slave (void); |
103 | |||
104 | static int monarch_cpu = -1; | ||
100 | 105 | ||
101 | static ia64_mc_info_t ia64_mc_info; | 106 | static ia64_mc_info_t ia64_mc_info; |
102 | 107 | ||
@@ -234,7 +239,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) | |||
234 | * This function retrieves a specified error record type from SAL | 239 | * This function retrieves a specified error record type from SAL |
235 | * and wakes up any processes waiting for error records. | 240 | * and wakes up any processes waiting for error records. |
236 | * | 241 | * |
237 | * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT) | 242 | * Inputs : sal_info_type (Type of error record MCA/CMC/CPE) |
243 | * FIXME: remove MCA and irq_safe. | ||
238 | */ | 244 | */ |
239 | static void | 245 | static void |
240 | ia64_mca_log_sal_error_record(int sal_info_type) | 246 | ia64_mca_log_sal_error_record(int sal_info_type) |
@@ -242,7 +248,7 @@ ia64_mca_log_sal_error_record(int sal_info_type) | |||
242 | u8 *buffer; | 248 | u8 *buffer; |
243 | sal_log_record_header_t *rh; | 249 | sal_log_record_header_t *rh; |
244 | u64 size; | 250 | u64 size; |
245 | int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT; | 251 | int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA; |
246 | #ifdef IA64_MCA_DEBUG_INFO | 252 | #ifdef IA64_MCA_DEBUG_INFO |
247 | static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; | 253 | static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; |
248 | #endif | 254 | #endif |
@@ -330,191 +336,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) | |||
330 | 336 | ||
331 | #endif /* CONFIG_ACPI */ | 337 | #endif /* CONFIG_ACPI */ |
332 | 338 | ||
333 | static void | ||
334 | show_min_state (pal_min_state_area_t *minstate) | ||
335 | { | ||
336 | u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri; | ||
337 | u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri; | ||
338 | |||
339 | printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits); | ||
340 | printk("pr\t\t%016lx\n", minstate->pmsa_pr); | ||
341 | printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0); | ||
342 | printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc); | ||
343 | printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip); | ||
344 | printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr); | ||
345 | printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs); | ||
346 | printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip); | ||
347 | printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr); | ||
348 | printk("xfs\t\t%016lx\n", minstate->pmsa_xfs); | ||
349 | printk("b1\t\t%016lx ", minstate->pmsa_br1); | ||
350 | print_symbol("%s\n", minstate->pmsa_br1); | ||
351 | |||
352 | printk("\nstatic registers r0-r15:\n"); | ||
353 | printk(" r0- 3 %016lx %016lx %016lx %016lx\n", | ||
354 | 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]); | ||
355 | printk(" r4- 7 %016lx %016lx %016lx %016lx\n", | ||
356 | minstate->pmsa_gr[3], minstate->pmsa_gr[4], | ||
357 | minstate->pmsa_gr[5], minstate->pmsa_gr[6]); | ||
358 | printk(" r8-11 %016lx %016lx %016lx %016lx\n", | ||
359 | minstate->pmsa_gr[7], minstate->pmsa_gr[8], | ||
360 | minstate->pmsa_gr[9], minstate->pmsa_gr[10]); | ||
361 | printk("r12-15 %016lx %016lx %016lx %016lx\n", | ||
362 | minstate->pmsa_gr[11], minstate->pmsa_gr[12], | ||
363 | minstate->pmsa_gr[13], minstate->pmsa_gr[14]); | ||
364 | |||
365 | printk("\nbank 0:\n"); | ||
366 | printk("r16-19 %016lx %016lx %016lx %016lx\n", | ||
367 | minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1], | ||
368 | minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]); | ||
369 | printk("r20-23 %016lx %016lx %016lx %016lx\n", | ||
370 | minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5], | ||
371 | minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]); | ||
372 | printk("r24-27 %016lx %016lx %016lx %016lx\n", | ||
373 | minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9], | ||
374 | minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]); | ||
375 | printk("r28-31 %016lx %016lx %016lx %016lx\n", | ||
376 | minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13], | ||
377 | minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]); | ||
378 | |||
379 | printk("\nbank 1:\n"); | ||
380 | printk("r16-19 %016lx %016lx %016lx %016lx\n", | ||
381 | minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1], | ||
382 | minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]); | ||
383 | printk("r20-23 %016lx %016lx %016lx %016lx\n", | ||
384 | minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5], | ||
385 | minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]); | ||
386 | printk("r24-27 %016lx %016lx %016lx %016lx\n", | ||
387 | minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9], | ||
388 | minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]); | ||
389 | printk("r28-31 %016lx %016lx %016lx %016lx\n", | ||
390 | minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13], | ||
391 | minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]); | ||
392 | } | ||
393 | |||
394 | static void | ||
395 | fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw) | ||
396 | { | ||
397 | u64 *dst_banked, *src_banked, bit, shift, nat_bits; | ||
398 | int i; | ||
399 | |||
400 | /* | ||
401 | * First, update the pt-regs and switch-stack structures with the contents stored | ||
402 | * in the min-state area: | ||
403 | */ | ||
404 | if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) { | ||
405 | pt->cr_ipsr = ms->pmsa_xpsr; | ||
406 | pt->cr_iip = ms->pmsa_xip; | ||
407 | pt->cr_ifs = ms->pmsa_xfs; | ||
408 | } else { | ||
409 | pt->cr_ipsr = ms->pmsa_ipsr; | ||
410 | pt->cr_iip = ms->pmsa_iip; | ||
411 | pt->cr_ifs = ms->pmsa_ifs; | ||
412 | } | ||
413 | pt->ar_rsc = ms->pmsa_rsc; | ||
414 | pt->pr = ms->pmsa_pr; | ||
415 | pt->r1 = ms->pmsa_gr[0]; | ||
416 | pt->r2 = ms->pmsa_gr[1]; | ||
417 | pt->r3 = ms->pmsa_gr[2]; | ||
418 | sw->r4 = ms->pmsa_gr[3]; | ||
419 | sw->r5 = ms->pmsa_gr[4]; | ||
420 | sw->r6 = ms->pmsa_gr[5]; | ||
421 | sw->r7 = ms->pmsa_gr[6]; | ||
422 | pt->r8 = ms->pmsa_gr[7]; | ||
423 | pt->r9 = ms->pmsa_gr[8]; | ||
424 | pt->r10 = ms->pmsa_gr[9]; | ||
425 | pt->r11 = ms->pmsa_gr[10]; | ||
426 | pt->r12 = ms->pmsa_gr[11]; | ||
427 | pt->r13 = ms->pmsa_gr[12]; | ||
428 | pt->r14 = ms->pmsa_gr[13]; | ||
429 | pt->r15 = ms->pmsa_gr[14]; | ||
430 | dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */ | ||
431 | src_banked = ms->pmsa_bank1_gr; | ||
432 | for (i = 0; i < 16; ++i) | ||
433 | dst_banked[i] = src_banked[i]; | ||
434 | pt->b0 = ms->pmsa_br0; | ||
435 | sw->b1 = ms->pmsa_br1; | ||
436 | |||
437 | /* construct the NaT bits for the pt-regs structure: */ | ||
438 | # define PUT_NAT_BIT(dst, addr) \ | ||
439 | do { \ | ||
440 | bit = nat_bits & 1; nat_bits >>= 1; \ | ||
441 | shift = ((unsigned long) addr >> 3) & 0x3f; \ | ||
442 | dst = ((dst) & ~(1UL << shift)) | (bit << shift); \ | ||
443 | } while (0) | ||
444 | |||
445 | /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */ | ||
446 | shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f; | ||
447 | nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift)); | ||
448 | |||
449 | PUT_NAT_BIT(sw->caller_unat, &pt->r1); | ||
450 | PUT_NAT_BIT(sw->caller_unat, &pt->r2); | ||
451 | PUT_NAT_BIT(sw->caller_unat, &pt->r3); | ||
452 | PUT_NAT_BIT(sw->ar_unat, &sw->r4); | ||
453 | PUT_NAT_BIT(sw->ar_unat, &sw->r5); | ||
454 | PUT_NAT_BIT(sw->ar_unat, &sw->r6); | ||
455 | PUT_NAT_BIT(sw->ar_unat, &sw->r7); | ||
456 | PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9); | ||
457 | PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11); | ||
458 | PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13); | ||
459 | PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15); | ||
460 | nat_bits >>= 16; /* skip over bank0 NaT bits */ | ||
461 | PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17); | ||
462 | PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19); | ||
463 | PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21); | ||
464 | PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23); | ||
465 | PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25); | ||
466 | PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27); | ||
467 | PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29); | ||
468 | PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31); | ||
469 | } | ||
470 | |||
471 | static void | ||
472 | init_handler_platform (pal_min_state_area_t *ms, | ||
473 | struct pt_regs *pt, struct switch_stack *sw) | ||
474 | { | ||
475 | struct unw_frame_info info; | ||
476 | |||
477 | /* if a kernel debugger is available call it here else just dump the registers */ | ||
478 | |||
479 | /* | ||
480 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be | ||
481 | * generated via the BMC's command-line interface, but since the console is on the | ||
482 | * same serial line, the user will need some time to switch out of the BMC before | ||
483 | * the dump begins. | ||
484 | */ | ||
485 | printk("Delaying for 5 seconds...\n"); | ||
486 | udelay(5*1000000); | ||
487 | show_min_state(ms); | ||
488 | |||
489 | printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm); | ||
490 | fetch_min_state(ms, pt, sw); | ||
491 | unw_init_from_interruption(&info, current, pt, sw); | ||
492 | ia64_do_show_stack(&info, NULL); | ||
493 | |||
494 | #ifdef CONFIG_SMP | ||
495 | /* read_trylock() would be handy... */ | ||
496 | if (!tasklist_lock.write_lock) | ||
497 | read_lock(&tasklist_lock); | ||
498 | #endif | ||
499 | { | ||
500 | struct task_struct *g, *t; | ||
501 | do_each_thread (g, t) { | ||
502 | if (t == current) | ||
503 | continue; | ||
504 | |||
505 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
506 | show_stack(t, NULL); | ||
507 | } while_each_thread (g, t); | ||
508 | } | ||
509 | #ifdef CONFIG_SMP | ||
510 | if (!tasklist_lock.write_lock) | ||
511 | read_unlock(&tasklist_lock); | ||
512 | #endif | ||
513 | |||
514 | printk("\nINIT dump complete. Please reboot now.\n"); | ||
515 | while (1); /* hang city if no debugger */ | ||
516 | } | ||
517 | |||
518 | #ifdef CONFIG_ACPI | 339 | #ifdef CONFIG_ACPI |
519 | /* | 340 | /* |
520 | * ia64_mca_register_cpev | 341 | * ia64_mca_register_cpev |
@@ -657,42 +478,6 @@ ia64_mca_cmc_vector_enable_keventd(void *unused) | |||
657 | } | 478 | } |
658 | 479 | ||
659 | /* | 480 | /* |
660 | * ia64_mca_wakeup_ipi_wait | ||
661 | * | ||
662 | * Wait for the inter-cpu interrupt to be sent by the | ||
663 | * monarch processor once it is done with handling the | ||
664 | * MCA. | ||
665 | * | ||
666 | * Inputs : None | ||
667 | * Outputs : None | ||
668 | */ | ||
669 | static void | ||
670 | ia64_mca_wakeup_ipi_wait(void) | ||
671 | { | ||
672 | int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6); | ||
673 | int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f); | ||
674 | u64 irr = 0; | ||
675 | |||
676 | do { | ||
677 | switch(irr_num) { | ||
678 | case 0: | ||
679 | irr = ia64_getreg(_IA64_REG_CR_IRR0); | ||
680 | break; | ||
681 | case 1: | ||
682 | irr = ia64_getreg(_IA64_REG_CR_IRR1); | ||
683 | break; | ||
684 | case 2: | ||
685 | irr = ia64_getreg(_IA64_REG_CR_IRR2); | ||
686 | break; | ||
687 | case 3: | ||
688 | irr = ia64_getreg(_IA64_REG_CR_IRR3); | ||
689 | break; | ||
690 | } | ||
691 | cpu_relax(); | ||
692 | } while (!(irr & (1UL << irr_bit))) ; | ||
693 | } | ||
694 | |||
695 | /* | ||
696 | * ia64_mca_wakeup | 481 | * ia64_mca_wakeup |
697 | * | 482 | * |
698 | * Send an inter-cpu interrupt to wake-up a particular cpu | 483 | * Send an inter-cpu interrupt to wake-up a particular cpu |
@@ -757,11 +542,9 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) | |||
757 | */ | 542 | */ |
758 | ia64_sal_mc_rendez(); | 543 | ia64_sal_mc_rendez(); |
759 | 544 | ||
760 | /* Wait for the wakeup IPI from the monarch | 545 | /* Wait for the monarch cpu to exit. */ |
761 | * This waiting is done by polling on the wakeup-interrupt | 546 | while (monarch_cpu != -1) |
762 | * vector bit in the processor's IRRs | 547 | cpu_relax(); /* spin until monarch leaves */ |
763 | */ | ||
764 | ia64_mca_wakeup_ipi_wait(); | ||
765 | 548 | ||
766 | /* Enable all interrupts */ | 549 | /* Enable all interrupts */ |
767 | local_irq_restore(flags); | 550 | local_irq_restore(flags); |
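Editorial note: instead of polling the IRR bits for a wakeup IPI, a rendezvoused slave now simply spins until the monarch clears monarch_cpu back to -1. The stand-alone sketch below models that handshake with C11 atomics and threads; the names and the sleep are invented scaffolding, not the kernel mechanism itself.

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_int monarch_cpu = -1;   /* -1: no event in progress */

	/* Slave side: wait until the monarch finishes the event. */
	static void slave_rendezvous(void)
	{
		while (atomic_load(&monarch_cpu) != -1)
			;   /* cpu_relax()-style busy wait */
	}

	/* Monarch side: claim the event, handle it, then release the slaves. */
	static void *monarch(void *arg)
	{
		atomic_store(&monarch_cpu, 0);   /* pretend we are cpu 0 */
		usleep(1000);                    /* stand-in for MCA processing */
		atomic_store(&monarch_cpu, -1);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		atomic_store(&monarch_cpu, 0);   /* event already claimed */
		pthread_create(&t, NULL, monarch, NULL);
		slave_rendezvous();
		pthread_join(t, NULL);
		puts("slaves released");
		return 0;
	}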
@@ -789,53 +572,13 @@ ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs) | |||
789 | return IRQ_HANDLED; | 572 | return IRQ_HANDLED; |
790 | } | 573 | } |
791 | 574 | ||
792 | /* | ||
793 | * ia64_return_to_sal_check | ||
794 | * | ||
795 | * This is function called before going back from the OS_MCA handler | ||
796 | * to the OS_MCA dispatch code which finally takes the control back | ||
797 | * to the SAL. | ||
798 | * The main purpose of this routine is to setup the OS_MCA to SAL | ||
799 | * return state which can be used by the OS_MCA dispatch code | ||
800 | * just before going back to SAL. | ||
801 | * | ||
802 | * Inputs : None | ||
803 | * Outputs : None | ||
804 | */ | ||
805 | |||
806 | static void | ||
807 | ia64_return_to_sal_check(int recover) | ||
808 | { | ||
809 | |||
810 | /* Copy over some relevant stuff from the sal_to_os_mca_handoff | ||
811 | * so that it can be used at the time of os_mca_to_sal_handoff | ||
812 | */ | ||
813 | ia64_os_to_sal_handoff_state.imots_sal_gp = | ||
814 | ia64_sal_to_os_handoff_state.imsto_sal_gp; | ||
815 | |||
816 | ia64_os_to_sal_handoff_state.imots_sal_check_ra = | ||
817 | ia64_sal_to_os_handoff_state.imsto_sal_check_ra; | ||
818 | |||
819 | if (recover) | ||
820 | ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED; | ||
821 | else | ||
822 | ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT; | ||
823 | |||
824 | /* Default = tell SAL to return to same context */ | ||
825 | ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT; | ||
826 | |||
827 | ia64_os_to_sal_handoff_state.imots_new_min_state = | ||
828 | (u64 *)ia64_sal_to_os_handoff_state.pal_min_state; | ||
829 | |||
830 | } | ||
831 | |||
832 | /* Function pointer for extra MCA recovery */ | 575 | /* Function pointer for extra MCA recovery */ |
833 | int (*ia64_mca_ucmc_extension) | 576 | int (*ia64_mca_ucmc_extension) |
834 | (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*) | 577 | (void*,struct ia64_sal_os_state*) |
835 | = NULL; | 578 | = NULL; |
836 | 579 | ||
837 | int | 580 | int |
838 | ia64_reg_MCA_extension(void *fn) | 581 | ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)) |
839 | { | 582 | { |
840 | if (ia64_mca_ucmc_extension) | 583 | if (ia64_mca_ucmc_extension) |
841 | return 1; | 584 | return 1; |
@@ -854,8 +597,321 @@ ia64_unreg_MCA_extension(void) | |||
854 | EXPORT_SYMBOL(ia64_reg_MCA_extension); | 597 | EXPORT_SYMBOL(ia64_reg_MCA_extension); |
855 | EXPORT_SYMBOL(ia64_unreg_MCA_extension); | 598 | EXPORT_SYMBOL(ia64_unreg_MCA_extension); |
856 | 599 | ||
600 | |||
601 | static inline void | ||
602 | copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat) | ||
603 | { | ||
604 | u64 fslot, tslot, nat; | ||
605 | *tr = *fr; | ||
606 | fslot = ((unsigned long)fr >> 3) & 63; | ||
607 | tslot = ((unsigned long)tr >> 3) & 63; | ||
608 | *tnat &= ~(1UL << tslot); | ||
609 | nat = (fnat >> fslot) & 1; | ||
610 | *tnat |= (nat << tslot); | ||
611 | } | ||
612 | |||
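Editorial note: copy_reg() above moves one 64-bit register value together with its NaT bit; the NaT bit index is the doubleword slot of the register's address, ((addr >> 3) & 63), in both the source and destination NaT collections. The self-contained demonstration below reproduces that bit bookkeeping with made-up register arrays.

	#include <stdint.h>
	#include <stdio.h>

	/* Copy *fr to *tr and carry the matching NaT bit from fnat into *tnat,
	 * using the same slot calculation as the copy_reg() helper above. */
	static void copy_reg_demo(const uint64_t *fr, uint64_t fnat,
				  uint64_t *tr, uint64_t *tnat)
	{
		uint64_t fslot = ((uintptr_t)fr >> 3) & 63;
		uint64_t tslot = ((uintptr_t)tr >> 3) & 63;
		uint64_t nat = (fnat >> fslot) & 1;

		*tr = *fr;
		*tnat &= ~(1ULL << tslot);
		*tnat |= nat << tslot;
	}

	int main(void)
	{
		uint64_t src[4] = { 1, 2, 3, 4 }, dst[4] = { 0 };
		uint64_t src_nat = 0, dst_nat = 0;

		/* Pretend src[2] holds a NaT. */
		src_nat |= 1ULL << ((((uintptr_t)&src[2]) >> 3) & 63);

		for (int i = 0; i < 4; i++)
			copy_reg_demo(&src[i], src_nat, &dst[i], &dst_nat);

		printf("dst_nat = %#llx\n", (unsigned long long)dst_nat);
		return 0;
	}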
613 | /* On entry to this routine, we are running on the per cpu stack, see | ||
614 | * mca_asm.h. The original stack has not been touched by this event. Some of | ||
615 | * the original stack's registers will be in the RBS on this stack. This stack | ||
616 | * also contains a partial pt_regs and switch_stack, the rest of the data is in | ||
617 | * PAL minstate. | ||
618 | * | ||
619 | * The first thing to do is modify the original stack to look like a blocked | ||
620 | * task so we can run backtrace on the original task. Also mark the per cpu | ||
621 | * stack as current to ensure that we use the correct task state, it also means | ||
622 | * that we can do backtrace on the MCA/INIT handler code itself. | ||
623 | */ | ||
624 | |||
625 | static task_t * | ||
626 | ia64_mca_modify_original_stack(struct pt_regs *regs, | ||
627 | const struct switch_stack *sw, | ||
628 | struct ia64_sal_os_state *sos, | ||
629 | const char *type) | ||
630 | { | ||
631 | char *p, comm[sizeof(current->comm)]; | ||
632 | ia64_va va; | ||
633 | extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ | ||
634 | const pal_min_state_area_t *ms = sos->pal_min_state; | ||
635 | task_t *previous_current; | ||
636 | struct pt_regs *old_regs; | ||
637 | struct switch_stack *old_sw; | ||
638 | unsigned size = sizeof(struct pt_regs) + | ||
639 | sizeof(struct switch_stack) + 16; | ||
640 | u64 *old_bspstore, *old_bsp; | ||
641 | u64 *new_bspstore, *new_bsp; | ||
642 | u64 old_unat, old_rnat, new_rnat, nat; | ||
643 | u64 slots, loadrs = regs->loadrs; | ||
644 | u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; | ||
645 | u64 ar_bspstore = regs->ar_bspstore; | ||
646 | u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); | ||
647 | const u64 *bank; | ||
648 | const char *msg; | ||
649 | int cpu = smp_processor_id(); | ||
650 | |||
651 | previous_current = curr_task(cpu); | ||
652 | set_curr_task(cpu, current); | ||
653 | if ((p = strchr(current->comm, ' '))) | ||
654 | *p = '\0'; | ||
655 | |||
656 | /* Best effort attempt to cope with MCA/INIT delivered while in | ||
657 | * physical mode. | ||
658 | */ | ||
659 | regs->cr_ipsr = ms->pmsa_ipsr; | ||
660 | if (ia64_psr(regs)->dt == 0) { | ||
661 | va.l = r12; | ||
662 | if (va.f.reg == 0) { | ||
663 | va.f.reg = 7; | ||
664 | r12 = va.l; | ||
665 | } | ||
666 | va.l = r13; | ||
667 | if (va.f.reg == 0) { | ||
668 | va.f.reg = 7; | ||
669 | r13 = va.l; | ||
670 | } | ||
671 | } | ||
672 | if (ia64_psr(regs)->rt == 0) { | ||
673 | va.l = ar_bspstore; | ||
674 | if (va.f.reg == 0) { | ||
675 | va.f.reg = 7; | ||
676 | ar_bspstore = va.l; | ||
677 | } | ||
678 | va.l = ar_bsp; | ||
679 | if (va.f.reg == 0) { | ||
680 | va.f.reg = 7; | ||
681 | ar_bsp = va.l; | ||
682 | } | ||
683 | } | ||
684 | |||
685 | /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers | ||
686 | * have been copied to the old stack, the old stack may fail the | ||
687 | * validation tests below. So ia64_old_stack() must restore the dirty | ||
688 | * registers from the new stack. The old and new bspstore probably | ||
689 | * have different alignments, so loadrs calculated on the old bsp | ||
690 | * cannot be used to restore from the new bsp. Calculate a suitable | ||
691 | * loadrs for the new stack and save it in the new pt_regs, where | ||
692 | * ia64_old_stack() can get it. | ||
693 | */ | ||
694 | old_bspstore = (u64 *)ar_bspstore; | ||
695 | old_bsp = (u64 *)ar_bsp; | ||
696 | slots = ia64_rse_num_regs(old_bspstore, old_bsp); | ||
697 | new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET); | ||
698 | new_bsp = ia64_rse_skip_regs(new_bspstore, slots); | ||
699 | regs->loadrs = (new_bsp - new_bspstore) * 8 << 16; | ||
700 | |||
701 | /* Verify the previous stack state before we change it */ | ||
702 | if (user_mode(regs)) { | ||
703 | msg = "occurred in user space"; | ||
704 | goto no_mod; | ||
705 | } | ||
706 | if (r13 != sos->prev_IA64_KR_CURRENT) { | ||
707 | msg = "inconsistent previous current and r13"; | ||
708 | goto no_mod; | ||
709 | } | ||
710 | if ((r12 - r13) >= KERNEL_STACK_SIZE) { | ||
711 | msg = "inconsistent r12 and r13"; | ||
712 | goto no_mod; | ||
713 | } | ||
714 | if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { | ||
715 | msg = "inconsistent ar.bspstore and r13"; | ||
716 | goto no_mod; | ||
717 | } | ||
718 | va.p = old_bspstore; | ||
719 | if (va.f.reg < 5) { | ||
720 | msg = "old_bspstore is in the wrong region"; | ||
721 | goto no_mod; | ||
722 | } | ||
723 | if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { | ||
724 | msg = "inconsistent ar.bsp and r13"; | ||
725 | goto no_mod; | ||
726 | } | ||
727 | size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; | ||
728 | if (ar_bspstore + size > r12) { | ||
729 | msg = "no room for blocked state"; | ||
730 | goto no_mod; | ||
731 | } | ||
732 | |||
733 | /* Change the comm field on the MCA/INIT task to include the pid that | ||
734 | * was interrupted, it makes for easier debugging. If that pid was 0 | ||
735 | * (swapper or nested MCA/INIT) then use the start of the previous comm | ||
736 | * field suffixed with its cpu. | ||
737 | */ | ||
738 | if (previous_current->pid) | ||
739 | snprintf(comm, sizeof(comm), "%s %d", | ||
740 | current->comm, previous_current->pid); | ||
741 | else { | ||
742 | int l; | ||
743 | if ((p = strchr(previous_current->comm, ' '))) | ||
744 | l = p - previous_current->comm; | ||
745 | else | ||
746 | l = strlen(previous_current->comm); | ||
747 | snprintf(comm, sizeof(comm), "%s %*s %d", | ||
748 | current->comm, l, previous_current->comm, | ||
749 | previous_current->thread_info->cpu); | ||
750 | } | ||
751 | memcpy(current->comm, comm, sizeof(current->comm)); | ||
752 | |||
753 | /* Make the original task look blocked. First stack a struct pt_regs, | ||
754 | * describing the state at the time of interrupt. mca_asm.S built a | ||
755 | * partial pt_regs, copy it and fill in the blanks using minstate. | ||
756 | */ | ||
757 | p = (char *)r12 - sizeof(*regs); | ||
758 | old_regs = (struct pt_regs *)p; | ||
759 | memcpy(old_regs, regs, sizeof(*regs)); | ||
760 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | ||
761 | * pmsa_{xip,xpsr,xfs} | ||
762 | */ | ||
763 | if (ia64_psr(regs)->ic) { | ||
764 | old_regs->cr_iip = ms->pmsa_iip; | ||
765 | old_regs->cr_ipsr = ms->pmsa_ipsr; | ||
766 | old_regs->cr_ifs = ms->pmsa_ifs; | ||
767 | } else { | ||
768 | old_regs->cr_iip = ms->pmsa_xip; | ||
769 | old_regs->cr_ipsr = ms->pmsa_xpsr; | ||
770 | old_regs->cr_ifs = ms->pmsa_xfs; | ||
771 | } | ||
772 | old_regs->pr = ms->pmsa_pr; | ||
773 | old_regs->b0 = ms->pmsa_br0; | ||
774 | old_regs->loadrs = loadrs; | ||
775 | old_regs->ar_rsc = ms->pmsa_rsc; | ||
776 | old_unat = old_regs->ar_unat; | ||
777 | copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat); | ||
778 | copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat); | ||
779 | copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat); | ||
780 | copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat); | ||
781 | copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat); | ||
782 | copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat); | ||
783 | copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat); | ||
784 | copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat); | ||
785 | copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat); | ||
786 | copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat); | ||
787 | copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat); | ||
788 | if (ia64_psr(old_regs)->bn) | ||
789 | bank = ms->pmsa_bank1_gr; | ||
790 | else | ||
791 | bank = ms->pmsa_bank0_gr; | ||
792 | copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat); | ||
793 | copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat); | ||
794 | copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat); | ||
795 | copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat); | ||
796 | copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat); | ||
797 | copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat); | ||
798 | copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat); | ||
799 | copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat); | ||
800 | copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat); | ||
801 | copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat); | ||
802 | copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat); | ||
803 | copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat); | ||
804 | copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat); | ||
805 | copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat); | ||
806 | copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat); | ||
807 | copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat); | ||
808 | |||
809 | /* Next stack a struct switch_stack. mca_asm.S built a partial | ||
810 | * switch_stack, copy it and fill in the blanks using pt_regs and | ||
811 | * minstate. | ||
812 | * | ||
813 | * In the synthesized switch_stack, b0 points to ia64_leave_kernel, | ||
814 | * ar.pfs is set to 0. | ||
815 | * | ||
816 | * unwind.c::unw_unwind() does special processing for interrupt frames. | ||
817 | * It checks if the PRED_NON_SYSCALL predicate is set; if the predicate | ||
818 | * is clear then unw_unwind() does _not_ adjust bsp over pt_regs. Not | ||
819 | * that this is documented, of course. Set PRED_NON_SYSCALL in the | ||
820 | * switch_stack on the original stack so it will unwind correctly when | ||
821 | * unwind.c reads pt_regs. | ||
822 | * | ||
823 | * thread.ksp is updated to point to the synthesized switch_stack. | ||
824 | */ | ||
825 | p -= sizeof(struct switch_stack); | ||
826 | old_sw = (struct switch_stack *)p; | ||
827 | memcpy(old_sw, sw, sizeof(*sw)); | ||
828 | old_sw->caller_unat = old_unat; | ||
829 | old_sw->ar_fpsr = old_regs->ar_fpsr; | ||
830 | copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat); | ||
831 | copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat); | ||
832 | copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat); | ||
833 | copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat); | ||
834 | old_sw->b0 = (u64)ia64_leave_kernel; | ||
835 | old_sw->b1 = ms->pmsa_br1; | ||
836 | old_sw->ar_pfs = 0; | ||
837 | old_sw->ar_unat = old_unat; | ||
838 | old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL); | ||
839 | previous_current->thread.ksp = (u64)p - 16; | ||
840 | |||
841 | /* Finally copy the original stack's registers back to its RBS. | ||
842 | * Registers from ar.bspstore through ar.bsp at the time of the event | ||
843 | * are in the current RBS; copy them back to the original stack. The | ||
844 | * copy must be done register by register because the original bspstore | ||
845 | * and the current one have different alignments, so the saved RNAT | ||
846 | * data occurs at different places. | ||
847 | * | ||
848 | * mca_asm does cover, so the old_bsp already includes all registers at | ||
849 | * the time of MCA/INIT. It also does flushrs, so all registers before | ||
850 | * this function have been written to backing store on the MCA/INIT | ||
851 | * stack. | ||
852 | */ | ||
853 | new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore)); | ||
854 | old_rnat = regs->ar_rnat; | ||
855 | while (slots--) { | ||
856 | if (ia64_rse_is_rnat_slot(new_bspstore)) { | ||
857 | new_rnat = ia64_get_rnat(new_bspstore++); | ||
858 | } | ||
859 | if (ia64_rse_is_rnat_slot(old_bspstore)) { | ||
860 | *old_bspstore++ = old_rnat; | ||
861 | old_rnat = 0; | ||
862 | } | ||
863 | nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL; | ||
864 | old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore)); | ||
865 | old_rnat |= (nat << ia64_rse_slot_num(old_bspstore)); | ||
866 | *old_bspstore++ = *new_bspstore++; | ||
867 | } | ||
868 | old_sw->ar_bspstore = (unsigned long)old_bspstore; | ||
869 | old_sw->ar_rnat = old_rnat; | ||
870 | |||
871 | sos->prev_task = previous_current; | ||
872 | return previous_current; | ||
873 | |||
874 | no_mod: | ||
875 | printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", | ||
876 | smp_processor_id(), type, msg); | ||
877 | return previous_current; | ||
878 | } | ||
879 | |||
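The loadrs calculation and the register-by-register RBS copy in ia64_mca_modify_original_stack() above both lean on the RSE slot arithmetic from asm/rse.h. The following standalone sketch (an illustration, not part of this patch; helper names are simplified) restates what those helpers compute and how regs->loadrs is encoded:

/* Illustration only: RSE backing store arithmetic, simplified from
 * asm/rse.h.  Every 64th slot (slot number 0x3f) holds an RNAT
 * collection instead of a stacked register, which is why the old and
 * new backing stores must be copied slot by slot when their bspstore
 * alignments differ.
 */
static inline unsigned long rse_slot_num(unsigned long *addr)
{
	return ((unsigned long)addr >> 3) & 0x3f;
}

static inline int rse_is_rnat_slot(unsigned long *addr)
{
	return rse_slot_num(addr) == 0x3f;
}

/* stacked registers between bspstore and bsp, RNAT slots excluded */
static inline unsigned long rse_num_regs(unsigned long *bspstore, unsigned long *bsp)
{
	unsigned long slots = bsp - bspstore;

	return slots - (rse_slot_num(bspstore) + slots) / 0x40;
}

/* slot address that lies 'num' registers past addr, stepping over RNAT slots */
static inline unsigned long *rse_skip_regs(unsigned long *addr, long num)
{
	long delta = rse_slot_num(addr) + num;

	if (num < 0)
		delta -= 0x3e;
	return addr + num + delta / 0x3f;
}

/* pt_regs->loadrs encoding used above: the size in bytes of the dirty
 * partition on the new backing store, shifted into the ar.rsc.loadrs
 * field, which starts at bit 16.
 */
static inline unsigned long loadrs_for(unsigned long *new_bspstore, unsigned long slots)
{
	unsigned long *new_bsp = rse_skip_regs(new_bspstore, slots);

	return (unsigned long)(new_bsp - new_bspstore) * 8 << 16;
}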
880 | /* The monarch/slave interaction is based on monarch_cpu and requires that all | ||
881 | * slaves have entered rendezvous before the monarch leaves. If any cpu has | ||
882 | * not entered rendezvous yet then wait a bit. The assumption is that any | ||
883 | * slave that has not rendezvoused after a reasonable time is never going to do | ||
884 | * so. In this context, slave includes cpus that respond to the MCA rendezvous | ||
885 | * interrupt, as well as cpus that receive the INIT slave event. | ||
886 | */ | ||
887 | |||
888 | static void | ||
889 | ia64_wait_for_slaves(int monarch) | ||
890 | { | ||
891 | int c, wait = 0; | ||
892 | for_each_online_cpu(c) { | ||
893 | if (c == monarch) | ||
894 | continue; | ||
895 | if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { | ||
896 | udelay(1000); /* short wait first */ | ||
897 | wait = 1; | ||
898 | break; | ||
899 | } | ||
900 | } | ||
901 | if (!wait) | ||
902 | return; | ||
903 | for_each_online_cpu(c) { | ||
904 | if (c == monarch) | ||
905 | continue; | ||
906 | if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { | ||
907 | udelay(5*1000000); /* wait 5 seconds for slaves (arbitrary) */ | ||
908 | break; | ||
909 | } | ||
910 | } | ||
911 | } | ||
912 | |||
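For orientation, the rendezvous that ia64_wait_for_slaves() above takes part in can be summarised as follows; this is only a sketch of the flow implemented for INIT slaves in ia64_init_handler() and for the monarch in ia64_mca_handler()/ia64_init_handler() below, with monarch_cpu assumed to be -1 while no monarch is active:

/* Sketch of the handshake (illustration only):
 *
 *	slave cpu					monarch cpu
 *	---------					-----------
 *	imi_rendez_checkin[cpu] = CHECKIN_INIT;		monarch_cpu = cpu;
 *	while (monarch_cpu == -1)			ia64_wait_for_slaves(cpu);
 *		cpu_relax();				... log, backtrace, recover ...
 *	while (monarch_cpu != -1)			monarch_cpu = -1;
 *		cpu_relax();
 *	imi_rendez_checkin[cpu] = CHECKIN_NOTDONE;
 */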
857 | /* | 913 | /* |
858 | * ia64_mca_ucmc_handler | 914 | * ia64_mca_handler |
859 | * | 915 | * |
860 | * This is the uncorrectable machine check handler called from OS_MCA | 916 | * This is the uncorrectable machine check handler called from OS_MCA
861 | * dispatch code which is in turn called from SAL_CHECK(). | 917 | * dispatch code which is in turn called from SAL_CHECK(). |
@@ -866,16 +922,28 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension); | |||
866 | * further MCA logging is enabled by clearing logs. | 922 | * further MCA logging is enabled by clearing logs. |
867 | * Monarch also has the duty of sending wakeup-IPIs to pull the | 923 | * Monarch also has the duty of sending wakeup-IPIs to pull the |
868 | * slave processors out of rendezvous spinloop. | 924 | * slave processors out of rendezvous spinloop. |
869 | * | ||
870 | * Inputs : None | ||
871 | * Outputs : None | ||
872 | */ | 925 | */ |
873 | void | 926 | void |
874 | ia64_mca_ucmc_handler(void) | 927 | ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, |
928 | struct ia64_sal_os_state *sos) | ||
875 | { | 929 | { |
876 | pal_processor_state_info_t *psp = (pal_processor_state_info_t *) | 930 | pal_processor_state_info_t *psp = (pal_processor_state_info_t *) |
877 | &ia64_sal_to_os_handoff_state.proc_state_param; | 931 | &sos->proc_state_param; |
878 | int recover; | 932 | int recover, cpu = smp_processor_id(); |
933 | task_t *previous_current; | ||
934 | |||
935 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ | ||
936 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); | ||
937 | monarch_cpu = cpu; | ||
938 | ia64_wait_for_slaves(cpu); | ||
939 | |||
940 | /* Wakeup all the processors which are spinning in the rendezvous loop. | ||
941 | * They will leave SAL, then spin in the OS with interrupts disabled | ||
942 | * until this monarch cpu leaves the MCA handler. That gets control | ||
943 | * back to the OS so we can backtrace the other cpus; backtracing while | ||
944 | * they spin in SAL does not work. | ||
945 | */ | ||
946 | ia64_mca_wakeup_all(); | ||
879 | 947 | ||
880 | /* Get the MCA error record and log it */ | 948 | /* Get the MCA error record and log it */ |
881 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); | 949 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); |
@@ -883,25 +951,20 @@ ia64_mca_ucmc_handler(void) | |||
883 | /* TLB error only exists in this SAL error record */ | 951 | /* TLB error only exists in this SAL error record */
884 | recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) | 952 | recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) |
885 | /* other error recovery */ | 953 | /* other error recovery */ |
886 | || (ia64_mca_ucmc_extension | 954 | || (ia64_mca_ucmc_extension |
887 | && ia64_mca_ucmc_extension( | 955 | && ia64_mca_ucmc_extension( |
888 | IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), | 956 | IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), |
889 | &ia64_sal_to_os_handoff_state, | 957 | sos)); |
890 | &ia64_os_to_sal_handoff_state)); | ||
891 | 958 | ||
892 | if (recover) { | 959 | if (recover) { |
893 | sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); | 960 | sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); |
894 | rh->severity = sal_log_severity_corrected; | 961 | rh->severity = sal_log_severity_corrected; |
895 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); | 962 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); |
963 | sos->os_status = IA64_MCA_CORRECTED; | ||
896 | } | 964 | } |
897 | /* | ||
898 | * Wakeup all the processors which are spinning in the rendezvous | ||
899 | * loop. | ||
900 | */ | ||
901 | ia64_mca_wakeup_all(); | ||
902 | 965 | ||
903 | /* Return to SAL */ | 966 | set_curr_task(cpu, previous_current); |
904 | ia64_return_to_sal_check(recover); | 967 | monarch_cpu = -1; |
905 | } | 968 | } |
906 | 969 | ||
907 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); | 970 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); |
@@ -953,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs) | |||
953 | 1016 | ||
954 | cmc_polling_enabled = 1; | 1017 | cmc_polling_enabled = 1; |
955 | spin_unlock(&cmc_history_lock); | 1018 | spin_unlock(&cmc_history_lock); |
1019 | /* If we're being hit with CMC interrupts, we won't | ||
1020 | * ever execute the schedule_work() below. Need to | ||
1021 | * disable CMC interrupts on this processor now. | ||
1022 | */ | ||
1023 | ia64_mca_cmc_vector_disable(NULL); | ||
956 | schedule_work(&cmc_disable_work); | 1024 | schedule_work(&cmc_disable_work); |
957 | 1025 | ||
958 | /* | 1026 | /* |
@@ -1125,34 +1193,114 @@ ia64_mca_cpe_poll (unsigned long dummy) | |||
1125 | /* | 1193 | /* |
1126 | * C portion of the OS INIT handler | 1194 | * C portion of the OS INIT handler |
1127 | * | 1195 | * |
1128 | * Called from ia64_monarch_init_handler | 1196 | * Called from ia64_os_init_dispatch |
1129 | * | ||
1130 | * Inputs: pointer to pt_regs where processor info was saved. | ||
1131 | * | 1197 | * |
1132 | * Returns: | 1198 | * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for |
1133 | * 0 if SAL must warm boot the System | 1199 | * this event. This code is used for both monarch and slave INIT events, see |
1134 | * 1 if SAL must return to interrupted context using PAL_MC_RESUME | 1200 | * sos->monarch. |
1135 | * | 1201 | * |
1202 | * All INIT events switch to the INIT stack and change the previous process to | ||
1203 | * blocked status. If one of the INIT events is the monarch then we are | ||
1204 | * probably processing the nmi button/command. Use the monarch cpu to dump all | ||
1205 | * the processes. The slave INIT events all spin until the monarch cpu | ||
1206 | * returns. We can also get INIT slave events for MCA, in which case the MCA | ||
1207 | * process is the monarch. | ||
1136 | */ | 1208 | */ |
1209 | |||
1137 | void | 1210 | void |
1138 | ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw) | 1211 | ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, |
1212 | struct ia64_sal_os_state *sos) | ||
1139 | { | 1213 | { |
1140 | pal_min_state_area_t *ms; | 1214 | static atomic_t slaves; |
1215 | static atomic_t monarchs; | ||
1216 | task_t *previous_current; | ||
1217 | int cpu = smp_processor_id(), c; | ||
1218 | struct task_struct *g, *t; | ||
1141 | 1219 | ||
1142 | oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */ | 1220 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ |
1143 | console_loglevel = 15; /* make sure printks make it to console */ | 1221 | console_loglevel = 15; /* make sure printks make it to console */ |
1144 | 1222 | ||
1145 | printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n", | 1223 | printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n", |
1146 | ia64_sal_to_os_handoff_state.proc_state_param); | 1224 | sos->proc_state_param, cpu, sos->monarch); |
1225 | salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0); | ||
1147 | 1226 | ||
1148 | /* | 1227 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT"); |
1149 | * Address of minstate area provided by PAL is physical, | 1228 | sos->os_status = IA64_INIT_RESUME; |
1150 | * uncacheable (bit 63 set). Convert to Linux virtual | 1229 | |
1151 | * address in region 6. | 1230 | /* FIXME: Workaround for broken proms that drive all INIT events as |
1231 | * slaves. The last slave that enters is promoted to be a monarch. | ||
1232 | * Remove this code in September 2006, that gives platforms a year to | ||
1233 | * fix their proms and get their customers updated. | ||
1152 | */ | 1234 | */ |
1153 | ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61)); | 1235 | if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { |
1236 | printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", | ||
1237 | __FUNCTION__, cpu); | ||
1238 | atomic_dec(&slaves); | ||
1239 | sos->monarch = 1; | ||
1240 | } | ||
1241 | |||
1242 | /* FIXME: Workaround for broken proms that drive all INIT events as | ||
1243 | * monarchs. Second and subsequent monarchs are demoted to slaves. | ||
1244 | * Remove this code in September 2006; that gives platforms a year to | ||
1245 | * fix their proms and get their customers updated. | ||
1246 | */ | ||
1247 | if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { | ||
1248 | printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", | ||
1249 | __FUNCTION__, cpu); | ||
1250 | atomic_dec(&monarchs); | ||
1251 | sos->monarch = 0; | ||
1252 | } | ||
1154 | 1253 | ||
1155 | init_handler_platform(ms, pt, sw); /* call platform specific routines */ | 1254 | if (!sos->monarch) { |
1255 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; | ||
1256 | while (monarch_cpu == -1) | ||
1257 | cpu_relax(); /* spin until monarch enters */ | ||
1258 | while (monarch_cpu != -1) | ||
1259 | cpu_relax(); /* spin until monarch leaves */ | ||
1260 | printk("Slave on cpu %d returning to normal service.\n", cpu); | ||
1261 | set_curr_task(cpu, previous_current); | ||
1262 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; | ||
1263 | atomic_dec(&slaves); | ||
1264 | return; | ||
1265 | } | ||
1266 | |||
1267 | monarch_cpu = cpu; | ||
1268 | |||
1269 | /* | ||
1270 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be | ||
1271 | * generated via the BMC's command-line interface, but since the console is on the | ||
1272 | * same serial line, the user will need some time to switch out of the BMC before | ||
1273 | * the dump begins. | ||
1274 | */ | ||
1275 | printk("Delaying for 5 seconds...\n"); | ||
1276 | udelay(5*1000000); | ||
1277 | ia64_wait_for_slaves(cpu); | ||
1278 | printk(KERN_ERR "Processes interrupted by INIT -"); | ||
1279 | for_each_online_cpu(c) { | ||
1280 | struct ia64_sal_os_state *s; | ||
1281 | t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); | ||
1282 | s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); | ||
1283 | g = s->prev_task; | ||
1284 | if (g) { | ||
1285 | if (g->pid) | ||
1286 | printk(" %d", g->pid); | ||
1287 | else | ||
1288 | printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); | ||
1289 | } | ||
1290 | } | ||
1291 | printk("\n\n"); | ||
1292 | if (read_trylock(&tasklist_lock)) { | ||
1293 | do_each_thread (g, t) { | ||
1294 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
1295 | show_stack(t, NULL); | ||
1296 | } while_each_thread (g, t); | ||
1297 | read_unlock(&tasklist_lock); | ||
1298 | } | ||
1299 | printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); | ||
1300 | atomic_dec(&monarchs); | ||
1301 | set_curr_task(cpu, previous_current); | ||
1302 | monarch_cpu = -1; | ||
1303 | return; | ||
1156 | } | 1304 | } |
1157 | 1305 | ||
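A small sketch (illustration only, mirroring the per-cpu loop in the monarch path above) of how a given cpu's ia64_sal_os_state is located inside its per-cpu INIT stack area:

/* Illustration only: find cpu c's SAL/OS state on its INIT stack.
 * __per_cpu_mca[] holds the physical base of that cpu's ia64_mca_cpu
 * area; the sos block sits at MCA_SOS_OFFSET within the INIT stack.
 */
static struct ia64_sal_os_state *cpu_init_sos(int c)
{
	char *stack = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);

	return (struct ia64_sal_os_state *)(stack + MCA_SOS_OFFSET);
}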
1158 | static int __init | 1306 | static int __init |
@@ -1202,6 +1350,34 @@ static struct irqaction mca_cpep_irqaction = { | |||
1202 | }; | 1350 | }; |
1203 | #endif /* CONFIG_ACPI */ | 1351 | #endif /* CONFIG_ACPI */ |
1204 | 1352 | ||
1353 | /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on | ||
1354 | * these stacks can never sleep; they cannot return from the kernel to user | ||
1355 | * space and they do not appear in a normal ps listing. So there is no need to | ||
1356 | * format most of the fields. | ||
1357 | */ | ||
1358 | |||
1359 | static void | ||
1360 | format_mca_init_stack(void *mca_data, unsigned long offset, | ||
1361 | const char *type, int cpu) | ||
1362 | { | ||
1363 | struct task_struct *p = (struct task_struct *)((char *)mca_data + offset); | ||
1364 | struct thread_info *ti; | ||
1365 | memset(p, 0, KERNEL_STACK_SIZE); | ||
1366 | ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE); | ||
1367 | ti->flags = _TIF_MCA_INIT; | ||
1368 | ti->preempt_count = 1; | ||
1369 | ti->task = p; | ||
1370 | ti->cpu = cpu; | ||
1371 | p->thread_info = ti; | ||
1372 | p->state = TASK_UNINTERRUPTIBLE; | ||
1373 | __set_bit(cpu, &p->cpus_allowed); | ||
1374 | INIT_LIST_HEAD(&p->tasks); | ||
1375 | p->parent = p->real_parent = p->group_leader = p; | ||
1376 | INIT_LIST_HEAD(&p->children); | ||
1377 | INIT_LIST_HEAD(&p->sibling); | ||
1378 | strncpy(p->comm, type, sizeof(p->comm)-1); | ||
1379 | } | ||
1380 | |||
1205 | /* Do per-CPU MCA-related initialization. */ | 1381 | /* Do per-CPU MCA-related initialization. */ |
1206 | 1382 | ||
1207 | void __devinit | 1383 | void __devinit |
@@ -1214,19 +1390,28 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1214 | int cpu; | 1390 | int cpu; |
1215 | 1391 | ||
1216 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) | 1392 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) |
1217 | * NR_CPUS); | 1393 | * NR_CPUS + KERNEL_STACK_SIZE); |
1394 | mca_data = (void *)(((unsigned long)mca_data + | ||
1395 | KERNEL_STACK_SIZE - 1) & | ||
1396 | (-KERNEL_STACK_SIZE)); | ||
1218 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 1397 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
1398 | format_mca_init_stack(mca_data, | ||
1399 | offsetof(struct ia64_mca_cpu, mca_stack), | ||
1400 | "MCA", cpu); | ||
1401 | format_mca_init_stack(mca_data, | ||
1402 | offsetof(struct ia64_mca_cpu, init_stack), | ||
1403 | "INIT", cpu); | ||
1219 | __per_cpu_mca[cpu] = __pa(mca_data); | 1404 | __per_cpu_mca[cpu] = __pa(mca_data); |
1220 | mca_data += sizeof(struct ia64_mca_cpu); | 1405 | mca_data += sizeof(struct ia64_mca_cpu); |
1221 | } | 1406 | } |
1222 | } | 1407 | } |
1223 | 1408 | ||
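The pointer arithmetic above over-allocates by one KERNEL_STACK_SIZE and then rounds the bootmem pointer up so that every per-cpu MCA/INIT stack area begins on a KERNEL_STACK_SIZE boundary, like an ordinary kernel stack allocation. A minimal sketch of that round-up, assuming the size is a power of two:

/* Illustration only: round addr up to the next multiple of size, where
 * size is a power of two.  (addr + size - 1) & -size is the same as
 * (addr + size - 1) & ~(size - 1).
 */
static unsigned long align_up(unsigned long addr, unsigned long size)
{
	return (addr + size - 1) & ~(size - 1);
}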
1224 | /* | 1409 | /* |
1225 | * The MCA info structure was allocated earlier and its | 1410 | * The MCA info structure was allocated earlier and its |
1226 | * physical address saved in __per_cpu_mca[cpu]. Copy that | 1411 | * physical address saved in __per_cpu_mca[cpu]. Copy that |
1227 | * address * to ia64_mca_data so we can access it as a per-CPU | 1412 | * address * to ia64_mca_data so we can access it as a per-CPU |
1228 | * variable. | 1413 | * variable. |
1229 | */ | 1414 | */ |
1230 | __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; | 1415 | __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; |
1231 | 1416 | ||
1232 | /* | 1417 | /* |
@@ -1236,11 +1421,11 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1236 | __get_cpu_var(ia64_mca_per_cpu_pte) = | 1421 | __get_cpu_var(ia64_mca_per_cpu_pte) = |
1237 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); | 1422 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); |
1238 | 1423 | ||
1239 | /* | 1424 | /* |
1240 | * Also, stash away a copy of the PAL address and the PTE | 1425 | * Also, stash away a copy of the PAL address and the PTE |
1241 | * needed to map it. | 1426 | * needed to map it. |
1242 | */ | 1427 | */ |
1243 | pal_vaddr = efi_get_pal_addr(); | 1428 | pal_vaddr = efi_get_pal_addr(); |
1244 | if (!pal_vaddr) | 1429 | if (!pal_vaddr) |
1245 | return; | 1430 | return; |
1246 | __get_cpu_var(ia64_mca_pal_base) = | 1431 | __get_cpu_var(ia64_mca_pal_base) = |
@@ -1272,8 +1457,8 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1272 | void __init | 1457 | void __init |
1273 | ia64_mca_init(void) | 1458 | ia64_mca_init(void) |
1274 | { | 1459 | { |
1275 | ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler; | 1460 | ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch; |
1276 | ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler; | 1461 | ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave; |
1277 | ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; | 1462 | ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; |
1278 | int i; | 1463 | int i; |
1279 | s64 rc; | 1464 | s64 rc; |
@@ -1351,9 +1536,9 @@ ia64_mca_init(void) | |||
1351 | * XXX - disable SAL checksum by setting size to 0, should be | 1536 | * XXX - disable SAL checksum by setting size to 0, should be |
1352 | * size of the actual init handler in mca_asm.S. | 1537 | * size of the actual init handler in mca_asm.S. |
1353 | */ | 1538 | */ |
1354 | ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp); | 1539 | ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp); |
1355 | ia64_mc_info.imi_monarch_init_handler_size = 0; | 1540 | ia64_mc_info.imi_monarch_init_handler_size = 0; |
1356 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp); | 1541 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); |
1357 | ia64_mc_info.imi_slave_init_handler_size = 0; | 1542 | ia64_mc_info.imi_slave_init_handler_size = 0; |
1358 | 1543 | ||
1359 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, | 1544 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, |
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index ef3fd7265b67..db32fc1d3935 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S | |||
@@ -16,6 +16,9 @@ | |||
16 | // 04/11/12 Russ Anderson <rja@sgi.com> | 16 | // 04/11/12 Russ Anderson <rja@sgi.com> |
17 | // Added per cpu MCA/INIT stack save areas. | 17 | // Added per cpu MCA/INIT stack save areas. |
18 | // | 18 | // |
19 | // 12/08/05 Keith Owens <kaos@sgi.com> | ||
20 | // Use per cpu MCA/INIT stacks for all data. | ||
21 | // | ||
19 | #include <linux/config.h> | 22 | #include <linux/config.h> |
20 | #include <linux/threads.h> | 23 | #include <linux/threads.h> |
21 | 24 | ||
@@ -25,96 +28,23 @@ | |||
25 | #include <asm/mca_asm.h> | 28 | #include <asm/mca_asm.h> |
26 | #include <asm/mca.h> | 29 | #include <asm/mca.h> |
27 | 30 | ||
28 | /* | 31 | #include "entry.h" |
29 | * When we get a machine check, the kernel stack pointer is no longer | ||
30 | * valid, so we need to set a new stack pointer. | ||
31 | */ | ||
32 | #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */ | ||
33 | |||
34 | /* | ||
35 | * Needed for return context to SAL | ||
36 | */ | ||
37 | #define IA64_MCA_SAME_CONTEXT 0 | ||
38 | #define IA64_MCA_COLD_BOOT -2 | ||
39 | |||
40 | #include "minstate.h" | ||
41 | |||
42 | /* | ||
43 | * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec) | ||
44 | * 1. GR1 = OS GP | ||
45 | * 2. GR8 = PAL_PROC physical address | ||
46 | * 3. GR9 = SAL_PROC physical address | ||
47 | * 4. GR10 = SAL GP (physical) | ||
48 | * 5. GR11 = Rendez state | ||
49 | * 6. GR12 = Return address to location within SAL_CHECK | ||
50 | */ | ||
51 | #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \ | ||
52 | LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \ | ||
53 | st8 [_tmp]=r1,0x08;; \ | ||
54 | st8 [_tmp]=r8,0x08;; \ | ||
55 | st8 [_tmp]=r9,0x08;; \ | ||
56 | st8 [_tmp]=r10,0x08;; \ | ||
57 | st8 [_tmp]=r11,0x08;; \ | ||
58 | st8 [_tmp]=r12,0x08;; \ | ||
59 | st8 [_tmp]=r17,0x08;; \ | ||
60 | st8 [_tmp]=r18,0x08 | ||
61 | |||
62 | /* | ||
63 | * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec) | ||
64 | * (p6) is executed if we never entered virtual mode (TLB error) | ||
65 | * (p7) is executed if we entered virtual mode as expected (normal case) | ||
66 | * 1. GR8 = OS_MCA return status | ||
67 | * 2. GR9 = SAL GP (physical) | ||
68 | * 3. GR10 = 0/1 returning same/new context | ||
69 | * 4. GR22 = New min state save area pointer | ||
70 | * returns ptr to SAL rtn save loc in _tmp | ||
71 | */ | ||
72 | #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \ | ||
73 | movl _tmp=ia64_os_to_sal_handoff_state;; \ | ||
74 | DATA_VA_TO_PA(_tmp);; \ | ||
75 | ld8 r8=[_tmp],0x08;; \ | ||
76 | ld8 r9=[_tmp],0x08;; \ | ||
77 | ld8 r10=[_tmp],0x08;; \ | ||
78 | ld8 r22=[_tmp],0x08;; | ||
79 | // now _tmp is pointing to SAL rtn save location | ||
80 | |||
81 | /* | ||
82 | * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state | ||
83 | * imots_os_status=IA64_MCA_COLD_BOOT | ||
84 | * imots_sal_gp=SAL GP | ||
85 | * imots_context=IA64_MCA_SAME_CONTEXT | ||
86 | * imots_new_min_state=Min state save area pointer | ||
87 | * imots_sal_check_ra=Return address to location within SAL_CHECK | ||
88 | * | ||
89 | */ | ||
90 | #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\ | ||
91 | movl tmp=IA64_MCA_COLD_BOOT; \ | ||
92 | movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \ | ||
93 | movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \ | ||
94 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
95 | ld8 tmp=[sal_to_os_handoff],48;; \ | ||
96 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
97 | movl tmp=IA64_MCA_SAME_CONTEXT;; \ | ||
98 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
99 | ld8 tmp=[sal_to_os_handoff],-8;; \ | ||
100 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
101 | ld8 tmp=[sal_to_os_handoff];; \ | ||
102 | st8 [os_to_sal_handoff]=tmp;; | ||
103 | 32 | ||
104 | #define GET_IA64_MCA_DATA(reg) \ | 33 | #define GET_IA64_MCA_DATA(reg) \ |
105 | GET_THIS_PADDR(reg, ia64_mca_data) \ | 34 | GET_THIS_PADDR(reg, ia64_mca_data) \ |
106 | ;; \ | 35 | ;; \ |
107 | ld8 reg=[reg] | 36 | ld8 reg=[reg] |
108 | 37 | ||
109 | .global ia64_os_mca_dispatch | ||
110 | .global ia64_os_mca_dispatch_end | ||
111 | .global ia64_sal_to_os_handoff_state | ||
112 | .global ia64_os_to_sal_handoff_state | ||
113 | .global ia64_do_tlb_purge | 38 | .global ia64_do_tlb_purge |
39 | .global ia64_os_mca_dispatch | ||
40 | .global ia64_os_init_dispatch_monarch | ||
41 | .global ia64_os_init_dispatch_slave | ||
114 | 42 | ||
115 | .text | 43 | .text |
116 | .align 16 | 44 | .align 16 |
117 | 45 | ||
46 | //StartMain//////////////////////////////////////////////////////////////////// | ||
47 | |||
118 | /* | 48 | /* |
119 | * Just the TLB purge part is moved to a separate function | 49 | * Just the TLB purge part is moved to a separate function |
120 | * so we can re-use the code for cpu hotplug code as well | 50 | * so we can re-use the code for cpu hotplug code as well |
@@ -207,34 +137,31 @@ ia64_do_tlb_purge: | |||
207 | br.sptk.many b1 | 137 | br.sptk.many b1 |
208 | ;; | 138 | ;; |
209 | 139 | ||
210 | ia64_os_mca_dispatch: | 140 | //EndMain////////////////////////////////////////////////////////////////////// |
141 | |||
142 | //StartMain//////////////////////////////////////////////////////////////////// | ||
211 | 143 | ||
144 | ia64_os_mca_dispatch: | ||
212 | // Serialize all MCA processing | 145 | // Serialize all MCA processing |
213 | mov r3=1;; | 146 | mov r3=1;; |
214 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; | 147 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; |
215 | ia64_os_mca_spin: | 148 | ia64_os_mca_spin: |
216 | xchg8 r4=[r2],r3;; | 149 | xchg4 r4=[r2],r3;; |
217 | cmp.ne p6,p0=r4,r0 | 150 | cmp.ne p6,p0=r4,r0 |
218 | (p6) br ia64_os_mca_spin | 151 | (p6) br ia64_os_mca_spin |
219 | 152 | ||
220 | // Save the SAL to OS MCA handoff state as defined | 153 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
221 | // by SAL SPEC 3.0 | 154 | LOAD_PHYSICAL(p0,r2,1f) // return address |
222 | // NOTE : The order in which the state gets saved | 155 | mov r19=1 // All MCA events are treated as monarch (for now) |
223 | // is dependent on the way the C-structure | 156 | br.sptk ia64_state_save // save the state that is not in minstate |
224 | // for ia64_mca_sal_to_os_state_t has been | 157 | 1: |
225 | // defined in include/asm/mca.h | ||
226 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | ||
227 | ;; | ||
228 | |||
229 | // LOG PROCESSOR STATE INFO FROM HERE ON.. | ||
230 | begin_os_mca_dump: | ||
231 | br ia64_os_mca_proc_state_dump;; | ||
232 | |||
233 | ia64_os_mca_done_dump: | ||
234 | 158 | ||
235 | LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) | 159 | GET_IA64_MCA_DATA(r2) |
160 | // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param | ||
161 | ;; | ||
162 | add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2 | ||
236 | ;; | 163 | ;; |
237 | ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. | 164 | ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK. |
238 | ;; | 165 | ;; |
239 | tbit.nz p6,p7=r18,60 | 166 | tbit.nz p6,p7=r18,60 |
240 | (p7) br.spnt done_tlb_purge_and_reload | 167 | (p7) br.spnt done_tlb_purge_and_reload |
@@ -323,624 +250,849 @@ ia64_reload_tr: | |||
323 | itr.d dtr[r20]=r16 | 250 | itr.d dtr[r20]=r16 |
324 | ;; | 251 | ;; |
325 | srlz.d | 252 | srlz.d |
326 | ;; | ||
327 | br.sptk.many done_tlb_purge_and_reload | ||
328 | err: | ||
329 | COLD_BOOT_HANDOFF_STATE(r20,r21,r22) | ||
330 | br.sptk.many ia64_os_mca_done_restore | ||
331 | 253 | ||
332 | done_tlb_purge_and_reload: | 254 | done_tlb_purge_and_reload: |
333 | 255 | ||
334 | // Setup new stack frame for OS_MCA handling | 256 | // switch to per cpu MCA stack |
335 | GET_IA64_MCA_DATA(r2) | 257 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
336 | ;; | 258 | LOAD_PHYSICAL(p0,r2,1f) // return address |
337 | add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 259 | br.sptk ia64_new_stack |
338 | add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2 | 260 | 1: |
339 | ;; | 261 | |
340 | rse_switch_context(r6,r3,r2);; // RSC management in this new context | 262 | // everything saved, now we can set the kernel registers |
263 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
264 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
265 | br.sptk ia64_set_kernel_registers | ||
266 | 1: | ||
341 | 267 | ||
268 | // This must be done in physical mode | ||
342 | GET_IA64_MCA_DATA(r2) | 269 | GET_IA64_MCA_DATA(r2) |
343 | ;; | 270 | ;; |
344 | add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2 | 271 | mov r7=r2 |
345 | ;; | ||
346 | mov r12=r2 // establish new stack-pointer | ||
347 | 272 | ||
348 | // Enter virtual mode from physical mode | 273 | // Enter virtual mode from physical mode |
349 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) | 274 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) |
350 | ia64_os_mca_virtual_begin: | 275 | |
276 | // This code returns to SAL via SOS r2; in general SAL has no unwind | ||
277 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
278 | // handler, set a dummy return address of 0 in this routine. That | ||
279 | // requires that ia64_os_mca_virtual_begin be a global function. | ||
280 | ENTRY(ia64_os_mca_virtual_begin) | ||
281 | .prologue | ||
282 | .save rp,r0 | ||
283 | .body | ||
284 | |||
285 | mov ar.rsc=3 // set eager mode for C handler | ||
286 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
287 | ;; | ||
351 | 288 | ||
352 | // Call virtual mode handler | 289 | // Call virtual mode handler |
353 | movl r2=ia64_mca_ucmc_handler;; | 290 | alloc r14=ar.pfs,0,0,3,0 |
354 | mov b6=r2;; | 291 | ;; |
355 | br.call.sptk.many b0=b6;; | 292 | DATA_PA_TO_VA(r2,r7) |
356 | .ret0: | 293 | ;; |
294 | add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 | ||
295 | add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 | ||
296 | add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
297 | br.call.sptk.many b0=ia64_mca_handler | ||
298 | |||
357 | // Revert back to physical mode before going back to SAL | 299 | // Revert back to physical mode before going back to SAL |
358 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) | 300 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) |
359 | ia64_os_mca_virtual_end: | 301 | ia64_os_mca_virtual_end: |
360 | 302 | ||
361 | // restore the original stack frame here | 303 | END(ia64_os_mca_virtual_begin) |
304 | |||
305 | // switch back to previous stack | ||
306 | alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame | ||
307 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
308 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
309 | br.sptk ia64_old_stack | ||
310 | 1: | ||
311 | |||
312 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
313 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
314 | br.sptk ia64_state_restore // restore the SAL state | ||
315 | 1: | ||
316 | |||
317 | mov b0=r12 // SAL_CHECK return address | ||
318 | |||
319 | // release lock | ||
320 | LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);; | ||
321 | st4.rel [r3]=r0 | ||
322 | |||
323 | br b0 | ||
324 | |||
325 | //EndMain////////////////////////////////////////////////////////////////////// | ||
326 | |||
327 | //StartMain//////////////////////////////////////////////////////////////////// | ||
328 | |||
329 | // | ||
330 | // SAL to OS entry point for INIT on all processors. This has been defined for | ||
331 | // registration purposes with SAL as a part of ia64_mca_init. Monarch and | ||
332 | // slave INIT have identical processing, except for the value of the | ||
333 | // sos->monarch flag in r19. | ||
334 | // | ||
335 | |||
336 | ia64_os_init_dispatch_monarch: | ||
337 | mov r19=1 // Bow, bow, ye lower middle classes! | ||
338 | br.sptk ia64_os_init_dispatch | ||
339 | |||
340 | ia64_os_init_dispatch_slave: | ||
341 | mov r19=0 // <igor>yeth, mathter</igor> | ||
342 | |||
343 | ia64_os_init_dispatch: | ||
344 | |||
345 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
346 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
347 | br.sptk ia64_state_save // save the state that is not in minstate | ||
348 | 1: | ||
349 | |||
350 | // switch to per cpu INIT stack | ||
351 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
352 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
353 | br.sptk ia64_new_stack | ||
354 | 1: | ||
355 | |||
356 | // everything saved, now we can set the kernel registers | ||
357 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
358 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
359 | br.sptk ia64_set_kernel_registers | ||
360 | 1: | ||
361 | |||
362 | // This must be done in physical mode | ||
362 | GET_IA64_MCA_DATA(r2) | 363 | GET_IA64_MCA_DATA(r2) |
363 | ;; | 364 | ;; |
364 | add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 365 | mov r7=r2 |
365 | ;; | 366 | |
366 | movl r4=IA64_PSR_MC | 367 | // Enter virtual mode from physical mode |
368 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4) | ||
369 | |||
370 | // This code returns to SAL via SOS r2; in general SAL has no unwind | ||
371 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
372 | // handler, set a dummy return address of 0 in this routine. That | ||
373 | // requires that ia64_os_init_virtual_begin be a global function. | ||
374 | ENTRY(ia64_os_init_virtual_begin) | ||
375 | .prologue | ||
376 | .save rp,r0 | ||
377 | .body | ||
378 | |||
379 | mov ar.rsc=3 // set eager mode for C handler | ||
380 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
367 | ;; | 381 | ;; |
368 | rse_return_context(r4,r3,r2) // switch from interrupt context for RSE | ||
369 | 382 | ||
370 | // let us restore all the registers from our PSI structure | 383 | // Call virtual mode handler |
371 | mov r8=gp | 384 | alloc r14=ar.pfs,0,0,3,0 |
372 | ;; | 385 | ;; |
373 | begin_os_mca_restore: | 386 | DATA_PA_TO_VA(r2,r7) |
374 | br ia64_os_mca_proc_state_restore;; | 387 | ;; |
388 | add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 | ||
389 | add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 | ||
390 | add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
391 | br.call.sptk.many b0=ia64_init_handler | ||
375 | 392 | ||
376 | ia64_os_mca_done_restore: | 393 | // Revert back to physical mode before going back to SAL |
377 | OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);; | 394 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4) |
378 | // branch back to SALE_CHECK | 395 | ia64_os_init_virtual_end: |
379 | ld8 r3=[r2];; | ||
380 | mov b0=r3;; // SAL_CHECK return address | ||
381 | 396 | ||
382 | // release lock | 397 | END(ia64_os_init_virtual_begin) |
383 | movl r3=ia64_mca_serialize;; | ||
384 | DATA_VA_TO_PA(r3);; | ||
385 | st8.rel [r3]=r0 | ||
386 | 398 | ||
399 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
400 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
401 | br.sptk ia64_state_restore // restore the SAL state | ||
402 | 1: | ||
403 | |||
404 | // switch back to previous stack | ||
405 | alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame | ||
406 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
407 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
408 | br.sptk ia64_old_stack | ||
409 | 1: | ||
410 | |||
411 | mov b0=r12 // SAL_CHECK return address | ||
387 | br b0 | 412 | br b0 |
388 | ;; | 413 | |
389 | ia64_os_mca_dispatch_end: | ||
390 | //EndMain////////////////////////////////////////////////////////////////////// | 414 | //EndMain////////////////////////////////////////////////////////////////////// |
391 | 415 | ||
416 | // common defines for the stubs | ||
417 | #define ms r4 | ||
418 | #define regs r5 | ||
419 | #define temp1 r2 /* careful, it overlaps with input registers */ | ||
420 | #define temp2 r3 /* careful, it overlaps with input registers */ | ||
421 | #define temp3 r7 | ||
422 | #define temp4 r14 | ||
423 | |||
392 | 424 | ||
393 | //++ | 425 | //++ |
394 | // Name: | 426 | // Name: |
395 | // ia64_os_mca_proc_state_dump() | 427 | // ia64_state_save() |
396 | // | 428 | // |
397 | // Stub Description: | 429 | // Stub Description: |
398 | // | 430 | // |
399 | // This stub dumps the processor state during MCHK to a data area | 431 | // Save the state that is not in minstate. This is sensitive to the layout of |
432 | // struct ia64_sal_os_state in mca.h. | ||
433 | // | ||
434 | // r2 contains the return address, r3 contains either | ||
435 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
436 | // | ||
437 | // The OS to SAL section of struct ia64_sal_os_state is set to a default | ||
438 | // value of cold boot (MCA) or warm boot (INIT) and return to the same | ||
439 | // context. ia64_sal_os_state is also used to hold some registers that | ||
440 | // need to be saved and restored across the stack switches. | ||
441 | // | ||
442 | // Most input registers to this stub come from PAL/SAL | ||
443 | // r1 os gp, physical | ||
444 | // r8 pal_proc entry point | ||
445 | // r9 sal_proc entry point | ||
446 | // r10 sal gp | ||
447 | // r11 MCA - rendezvous state, INIT - reason code | ||
448 | // r12 sal return address | ||
449 | // r17 pal min_state | ||
450 | // r18 processor state parameter | ||
451 | // r19 monarch flag, set by the caller of this routine | ||
452 | // | ||
453 | // In addition to the SAL to OS state, this routine saves all the | ||
454 | // registers that appear in struct pt_regs and struct switch_stack, | ||
455 | // excluding those that are already in the PAL minstate area. This | ||
456 | // results in a partial pt_regs and switch_stack; the C code copies the | ||
457 | // remaining registers from PAL minstate to pt_regs and switch_stack. The | ||
458 | // resulting structures contain all the state of the original process when | ||
459 | // MCA/INIT occurred. | ||
400 | // | 460 | // |
401 | //-- | 461 | //-- |
402 | 462 | ||
403 | ia64_os_mca_proc_state_dump: | 463 | ia64_state_save: |
404 | // Save bank 1 GRs 16-31 which will be used by c-language code when we switch | 464 | add regs=MCA_SOS_OFFSET, r3 |
405 | // to virtual addressing mode. | 465 | add ms=MCA_SOS_OFFSET+8, r3 |
406 | GET_IA64_MCA_DATA(r2) | 466 | mov b0=r2 // save return address |
467 | cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3 | ||
468 | ;; | ||
469 | GET_IA64_MCA_DATA(temp2) | ||
470 | ;; | ||
471 | add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack | ||
472 | add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack | ||
473 | ;; | ||
474 | mov regs=temp1 // save the start of sos | ||
475 | st8 [temp1]=r1,16 // os_gp | ||
476 | st8 [temp2]=r8,16 // pal_proc | ||
477 | ;; | ||
478 | st8 [temp1]=r9,16 // sal_proc | ||
479 | st8 [temp2]=r11,16 // rv_rc | ||
480 | mov r11=cr.iipa | ||
407 | ;; | 481 | ;; |
408 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 482 | st8 [temp1]=r18,16 // proc_state_param |
409 | ;; | 483 | st8 [temp2]=r19,16 // monarch |
410 | // save ar.NaT | 484 | mov r6=IA64_KR(CURRENT) |
411 | mov r5=ar.unat // ar.unat | 485 | ;; |
412 | 486 | st8 [temp1]=r12,16 // sal_ra | |
413 | // save banked GRs 16-31 along with NaT bits | 487 | st8 [temp2]=r10,16 // sal_gp |
414 | bsw.1;; | 488 | mov r12=cr.isr |
415 | st8.spill [r2]=r16,8;; | 489 | ;; |
416 | st8.spill [r2]=r17,8;; | 490 | st8 [temp1]=r17,16 // pal_min_state |
417 | st8.spill [r2]=r18,8;; | 491 | st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT |
418 | st8.spill [r2]=r19,8;; | 492 | mov r6=IA64_KR(CURRENT_STACK) |
419 | st8.spill [r2]=r20,8;; | 493 | ;; |
420 | st8.spill [r2]=r21,8;; | 494 | st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK |
421 | st8.spill [r2]=r22,8;; | 495 | st8 [temp2]=r0,16 // prev_task, starts off as NULL |
422 | st8.spill [r2]=r23,8;; | 496 | mov r6=cr.ifa |
423 | st8.spill [r2]=r24,8;; | 497 | ;; |
424 | st8.spill [r2]=r25,8;; | 498 | st8 [temp1]=r12,16 // cr.isr |
425 | st8.spill [r2]=r26,8;; | 499 | st8 [temp2]=r6,16 // cr.ifa |
426 | st8.spill [r2]=r27,8;; | 500 | mov r12=cr.itir |
427 | st8.spill [r2]=r28,8;; | 501 | ;; |
428 | st8.spill [r2]=r29,8;; | 502 | st8 [temp1]=r12,16 // cr.itir |
429 | st8.spill [r2]=r30,8;; | 503 | st8 [temp2]=r11,16 // cr.iipa |
430 | st8.spill [r2]=r31,8;; | 504 | mov r12=cr.iim |
431 | 505 | ;; | |
432 | mov r4=ar.unat;; | 506 | st8 [temp1]=r12,16 // cr.iim |
433 | st8 [r2]=r4,8 // save User NaT bits for r16-r31 | 507 | (p1) mov r12=IA64_MCA_COLD_BOOT |
434 | mov ar.unat=r5 // restore original unat | 508 | (p2) mov r12=IA64_INIT_WARM_BOOT |
435 | bsw.0;; | 509 | mov r6=cr.iha |
436 | 510 | ;; | |
437 | //save BRs | 511 | st8 [temp2]=r6,16 // cr.iha |
438 | add r4=8,r2 // duplicate r2 in r4 | 512 | st8 [temp1]=r12 // os_status, default is cold boot |
439 | add r6=2*8,r2 // duplicate r2 in r4 | 513 | mov r6=IA64_MCA_SAME_CONTEXT |
440 | 514 | ;; | |
441 | mov r3=b0 | 515 | st8 [temp1]=r6 // context, default is same context |
442 | mov r5=b1 | 516 | |
443 | mov r7=b2;; | 517 | // Save the pt_regs data that is not in minstate. The previous code |
444 | st8 [r2]=r3,3*8 | 518 | // left regs at sos. |
445 | st8 [r4]=r5,3*8 | 519 | add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs |
446 | st8 [r6]=r7,3*8;; | 520 | ;; |
447 | 521 | add temp1=PT(B6), regs | |
448 | mov r3=b3 | 522 | mov temp3=b6 |
449 | mov r5=b4 | 523 | mov temp4=b7 |
450 | mov r7=b5;; | 524 | add temp2=PT(B7), regs |
451 | st8 [r2]=r3,3*8 | 525 | ;; |
452 | st8 [r4]=r5,3*8 | 526 | st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6 |
453 | st8 [r6]=r7,3*8;; | 527 | st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7 |
454 | 528 | mov temp3=ar.csd | |
455 | mov r3=b6 | 529 | mov temp4=ar.ssd |
456 | mov r5=b7;; | 530 | cover // must be last in group |
457 | st8 [r2]=r3,2*8 | 531 | ;; |
458 | st8 [r4]=r5,2*8;; | 532 | st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd |
459 | 533 | st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd | |
460 | cSaveCRs: | 534 | mov temp3=ar.unat |
461 | // save CRs | 535 | mov temp4=ar.pfs |
462 | add r4=8,r2 // duplicate r2 in r4 | 536 | ;; |
463 | add r6=2*8,r2 // duplicate r2 in r4 | 537 | st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat |
464 | 538 | st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs | |
465 | mov r3=cr.dcr | 539 | mov temp3=ar.rnat |
466 | mov r5=cr.itm | 540 | mov temp4=ar.bspstore |
467 | mov r7=cr.iva;; | 541 | ;; |
468 | 542 | st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat | |
469 | st8 [r2]=r3,8*8 | 543 | st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore |
470 | st8 [r4]=r5,3*8 | 544 | mov temp3=ar.bsp |
471 | st8 [r6]=r7,3*8;; // 48 byte rements | 545 | ;; |
472 | 546 | sub temp3=temp3, temp4 // ar.bsp - ar.bspstore | |
473 | mov r3=cr.pta;; | 547 | mov temp4=ar.fpsr |
474 | st8 [r2]=r3,8*8;; // 64 byte rements | 548 | ;; |
475 | 549 | shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs" | |
476 | // if PSR.ic=0, reading interruption registers causes an illegal operation fault | 550 | ;; |
477 | mov r3=psr;; | 551 | st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs |
478 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | 552 | st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr |
479 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | 553 | mov temp3=ar.ccv |
480 | begin_skip_intr_regs: | 554 | ;; |
481 | (p6) br SkipIntrRegs;; | 555 | st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv |
482 | 556 | stf.spill [temp2]=f6,PT(F8)-PT(F6) | |
483 | add r4=8,r2 // duplicate r2 in r4 | 557 | ;; |
484 | add r6=2*8,r2 // duplicate r2 in r6 | 558 | stf.spill [temp1]=f7,PT(F9)-PT(F7) |
485 | 559 | stf.spill [temp2]=f8,PT(F10)-PT(F8) | |
486 | mov r3=cr.ipsr | 560 | ;; |
487 | mov r5=cr.isr | 561 | stf.spill [temp1]=f9,PT(F11)-PT(F9) |
488 | mov r7=r0;; | 562 | stf.spill [temp2]=f10 |
489 | st8 [r2]=r3,3*8 | 563 | ;; |
490 | st8 [r4]=r5,3*8 | 564 | stf.spill [temp1]=f11 |
491 | st8 [r6]=r7,3*8;; | 565 | |
492 | 566 | // Save the switch_stack data that is not in minstate nor pt_regs. The | |
493 | mov r3=cr.iip | 567 | // previous code left regs at pt_regs. |
494 | mov r5=cr.ifa | 568 | add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs |
495 | mov r7=cr.itir;; | 569 | ;; |
496 | st8 [r2]=r3,3*8 | 570 | add temp1=SW(F2), regs |
497 | st8 [r4]=r5,3*8 | 571 | add temp2=SW(F3), regs |
498 | st8 [r6]=r7,3*8;; | 572 | ;; |
499 | 573 | stf.spill [temp1]=f2,32 | |
500 | mov r3=cr.iipa | 574 | stf.spill [temp2]=f3,32 |
501 | mov r5=cr.ifs | 575 | ;; |
502 | mov r7=cr.iim;; | 576 | stf.spill [temp1]=f4,32 |
503 | st8 [r2]=r3,3*8 | 577 | stf.spill [temp2]=f5,32 |
504 | st8 [r4]=r5,3*8 | 578 | ;; |
505 | st8 [r6]=r7,3*8;; | 579 | stf.spill [temp1]=f12,32 |
506 | 580 | stf.spill [temp2]=f13,32 | |
507 | mov r3=cr25;; // cr.iha | 581 | ;; |
508 | st8 [r2]=r3,160;; // 160 byte rement | 582 | stf.spill [temp1]=f14,32 |
509 | 583 | stf.spill [temp2]=f15,32 | |
510 | SkipIntrRegs: | 584 | ;; |
511 | st8 [r2]=r0,152;; // another 152 byte . | 585 | stf.spill [temp1]=f16,32 |
512 | 586 | stf.spill [temp2]=f17,32 | |
513 | add r4=8,r2 // duplicate r2 in r4 | 587 | ;; |
514 | add r6=2*8,r2 // duplicate r2 in r6 | 588 | stf.spill [temp1]=f18,32 |
515 | 589 | stf.spill [temp2]=f19,32 | |
516 | mov r3=cr.lid | 590 | ;; |
517 | // mov r5=cr.ivr // cr.ivr, don't read it | 591 | stf.spill [temp1]=f20,32 |
518 | mov r7=cr.tpr;; | 592 | stf.spill [temp2]=f21,32 |
519 | st8 [r2]=r3,3*8 | 593 | ;; |
520 | st8 [r4]=r5,3*8 | 594 | stf.spill [temp1]=f22,32 |
521 | st8 [r6]=r7,3*8;; | 595 | stf.spill [temp2]=f23,32 |
522 | 596 | ;; | |
523 | mov r3=r0 // cr.eoi => cr67 | 597 | stf.spill [temp1]=f24,32 |
524 | mov r5=r0 // cr.irr0 => cr68 | 598 | stf.spill [temp2]=f25,32 |
525 | mov r7=r0;; // cr.irr1 => cr69 | 599 | ;; |
526 | st8 [r2]=r3,3*8 | 600 | stf.spill [temp1]=f26,32 |
527 | st8 [r4]=r5,3*8 | 601 | stf.spill [temp2]=f27,32 |
528 | st8 [r6]=r7,3*8;; | 602 | ;; |
529 | 603 | stf.spill [temp1]=f28,32 | |
530 | mov r3=r0 // cr.irr2 => cr70 | 604 | stf.spill [temp2]=f29,32 |
531 | mov r5=r0 // cr.irr3 => cr71 | 605 | ;; |
532 | mov r7=cr.itv;; | 606 | stf.spill [temp1]=f30,SW(B2)-SW(F30) |
533 | st8 [r2]=r3,3*8 | 607 | stf.spill [temp2]=f31,SW(B3)-SW(F31) |
534 | st8 [r4]=r5,3*8 | 608 | mov temp3=b2 |
535 | st8 [r6]=r7,3*8;; | 609 | mov temp4=b3 |
536 | 610 | ;; | |
537 | mov r3=cr.pmv | 611 | st8 [temp1]=temp3,16 // save b2 |
538 | mov r5=cr.cmcv;; | 612 | st8 [temp2]=temp4,16 // save b3 |
539 | st8 [r2]=r3,7*8 | 613 | mov temp3=b4 |
540 | st8 [r4]=r5,7*8;; | 614 | mov temp4=b5 |
541 | 615 | ;; | |
542 | mov r3=r0 // cr.lrr0 => cr80 | 616 | st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4 |
543 | mov r5=r0;; // cr.lrr1 => cr81 | 617 | st8 [temp2]=temp4 // save b5 |
544 | st8 [r2]=r3,23*8 | 618 | mov temp3=ar.lc |
545 | st8 [r4]=r5,23*8;; | 619 | ;; |
546 | 620 | st8 [temp1]=temp3 // save ar.lc | |
547 | adds r2=25*8,r2;; | 621 | |
548 | 622 | // FIXME: Some proms are incorrectly accessing the minstate area as | |
549 | cSaveARs: | 623 | // cached data. The C code uses region 6, uncached virtual. Ensure |
550 | // save ARs | 624 | // that there is no cache data lying around for the first 1K of the |
551 | add r4=8,r2 // duplicate r2 in r4 | 625 | // minstate area. |
552 | add r6=2*8,r2 // duplicate r2 in r6 | 626 | // Remove this code in September 2006, that gives platforms a year to |
553 | 627 | // fix their proms and get their customers updated. | |
554 | mov r3=ar.k0 | 628 | |
555 | mov r5=ar.k1 | 629 | add r1=32*1,r17 |
556 | mov r7=ar.k2;; | 630 | add r2=32*2,r17 |
557 | st8 [r2]=r3,3*8 | 631 | add r3=32*3,r17 |
558 | st8 [r4]=r5,3*8 | 632 | add r4=32*4,r17 |
559 | st8 [r6]=r7,3*8;; | 633 | add r5=32*5,r17 |
560 | 634 | add r6=32*6,r17 | |
561 | mov r3=ar.k3 | 635 | add r7=32*7,r17 |
562 | mov r5=ar.k4 | 636 | ;; |
563 | mov r7=ar.k5;; | 637 | fc r17 |
564 | st8 [r2]=r3,3*8 | 638 | fc r1 |
565 | st8 [r4]=r5,3*8 | 639 | fc r2 |
566 | st8 [r6]=r7,3*8;; | 640 | fc r3 |
567 | 641 | fc r4 | |
568 | mov r3=ar.k6 | 642 | fc r5 |
569 | mov r5=ar.k7 | 643 | fc r6 |
570 | mov r7=r0;; // ar.kr8 | 644 | fc r7 |
571 | st8 [r2]=r3,10*8 | 645 | add r17=32*8,r17 |
572 | st8 [r4]=r5,10*8 | 646 | add r1=32*8,r1 |
573 | st8 [r6]=r7,10*8;; // rement by 72 bytes | 647 | add r2=32*8,r2 |
574 | 648 | add r3=32*8,r3 | |
575 | mov r3=ar.rsc | 649 | add r4=32*8,r4 |
576 | mov ar.rsc=r0 // put RSE in enforced lazy mode | 650 | add r5=32*8,r5 |
577 | mov r5=ar.bsp | 651 | add r6=32*8,r6 |
578 | ;; | 652 | add r7=32*8,r7 |
579 | mov r7=ar.bspstore;; | 653 | ;; |
580 | st8 [r2]=r3,3*8 | 654 | fc r17 |
581 | st8 [r4]=r5,3*8 | 655 | fc r1 |
582 | st8 [r6]=r7,3*8;; | 656 | fc r2 |
583 | 657 | fc r3 | |
584 | mov r3=ar.rnat;; | 658 | fc r4 |
585 | st8 [r2]=r3,8*13 // increment by 13x8 bytes | 659 | fc r5 |
586 | 660 | fc r6 | |
587 | mov r3=ar.ccv;; | 661 | fc r7 |
588 | st8 [r2]=r3,8*4 | 662 | add r17=32*8,r17 |
589 | 663 | add r1=32*8,r1 | |
590 | mov r3=ar.unat;; | 664 | add r2=32*8,r2 |
591 | st8 [r2]=r3,8*4 | 665 | add r3=32*8,r3 |
592 | 666 | add r4=32*8,r4 | |
593 | mov r3=ar.fpsr;; | 667 | add r5=32*8,r5 |
594 | st8 [r2]=r3,8*4 | 668 | add r6=32*8,r6 |
595 | 669 | add r7=32*8,r7 | |
596 | mov r3=ar.itc;; | 670 | ;; |
597 | st8 [r2]=r3,160 // 160 | 671 | fc r17 |
598 | 672 | fc r1 | |
599 | mov r3=ar.pfs;; | 673 | fc r2 |
600 | st8 [r2]=r3,8 | 674 | fc r3 |
601 | 675 | fc r4 | |
602 | mov r3=ar.lc;; | 676 | fc r5 |
603 | st8 [r2]=r3,8 | 677 | fc r6 |
604 | 678 | fc r7 | |
605 | mov r3=ar.ec;; | 679 | add r17=32*8,r17 |
606 | st8 [r2]=r3 | 680 | add r1=32*8,r1 |
607 | add r2=8*62,r2 //padding | 681 | add r2=32*8,r2 |
608 | 682 | add r3=32*8,r3 | |
609 | // save RRs | 683 | add r4=32*8,r4 |
610 | mov ar.lc=0x08-1 | 684 | add r5=32*8,r5 |
611 | movl r4=0x00;; | 685 | add r6=32*8,r6 |
612 | 686 | add r7=32*8,r7 | |
613 | cStRR: | 687 | ;; |
614 | dep.z r5=r4,61,3;; | 688 | fc r17 |
615 | mov r3=rr[r5];; | 689 | fc r1 |
616 | st8 [r2]=r3,8 | 690 | fc r2 |
617 | add r4=1,r4 | 691 | fc r3 |
618 | br.cloop.sptk.few cStRR | 692 | fc r4 |
619 | ;; | 693 | fc r5 |
620 | end_os_mca_dump: | 694 | fc r6 |
621 | br ia64_os_mca_done_dump;; | 695 | fc r7 |
696 | |||
697 | br.sptk b0 | ||
622 | 698 | ||
623 | //EndStub////////////////////////////////////////////////////////////////////// | 699 | //EndStub////////////////////////////////////////////////////////////////////// |
624 | 700 | ||
625 | 701 | ||
626 | //++ | 702 | //++ |
627 | // Name: | 703 | // Name: |
628 | // ia64_os_mca_proc_state_restore() | 704 | // ia64_state_restore() |
629 | // | 705 | // |
630 | // Stub Description: | 706 | // Stub Description: |
631 | // | 707 | // |
632 | // This is a stub to restore the saved processor state during MCHK | 708 | // Restore the SAL/OS state. This is sensitive to the layout of struct |
709 | // ia64_sal_os_state in mca.h. | ||
710 | // | ||
711 | // r2 contains the return address, r3 contains either | ||
712 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
713 | // | ||
714 | // In addition to the SAL to OS state, this routine restores all the | ||
715 | // registers that appear in struct pt_regs and struct switch_stack, | ||
716 | // excluding those in the PAL minstate area. | ||
633 | // | 717 | // |
634 | //-- | 718 | //-- |
635 | 719 | ||
636 | ia64_os_mca_proc_state_restore: | 720 | ia64_state_restore: |
721 | // Restore the switch_stack data that is not in minstate nor pt_regs. | ||
722 | add regs=MCA_SWITCH_STACK_OFFSET, r3 | ||
723 | mov b0=r2 // save return address | ||
724 | ;; | ||
725 | GET_IA64_MCA_DATA(temp2) | ||
726 | ;; | ||
727 | add regs=temp2, regs | ||
728 | ;; | ||
729 | add temp1=SW(F2), regs | ||
730 | add temp2=SW(F3), regs | ||
731 | ;; | ||
732 | ldf.fill f2=[temp1],32 | ||
733 | ldf.fill f3=[temp2],32 | ||
734 | ;; | ||
735 | ldf.fill f4=[temp1],32 | ||
736 | ldf.fill f5=[temp2],32 | ||
737 | ;; | ||
738 | ldf.fill f12=[temp1],32 | ||
739 | ldf.fill f13=[temp2],32 | ||
740 | ;; | ||
741 | ldf.fill f14=[temp1],32 | ||
742 | ldf.fill f15=[temp2],32 | ||
743 | ;; | ||
744 | ldf.fill f16=[temp1],32 | ||
745 | ldf.fill f17=[temp2],32 | ||
746 | ;; | ||
747 | ldf.fill f18=[temp1],32 | ||
748 | ldf.fill f19=[temp2],32 | ||
749 | ;; | ||
750 | ldf.fill f20=[temp1],32 | ||
751 | ldf.fill f21=[temp2],32 | ||
752 | ;; | ||
753 | ldf.fill f22=[temp1],32 | ||
754 | ldf.fill f23=[temp2],32 | ||
755 | ;; | ||
756 | ldf.fill f24=[temp1],32 | ||
757 | ldf.fill f25=[temp2],32 | ||
758 | ;; | ||
759 | ldf.fill f26=[temp1],32 | ||
760 | ldf.fill f27=[temp2],32 | ||
761 | ;; | ||
762 | ldf.fill f28=[temp1],32 | ||
763 | ldf.fill f29=[temp2],32 | ||
764 | ;; | ||
765 | ldf.fill f30=[temp1],SW(B2)-SW(F30) | ||
766 | ldf.fill f31=[temp2],SW(B3)-SW(F31) | ||
767 | ;; | ||
768 | ld8 temp3=[temp1],16 // restore b2 | ||
769 | ld8 temp4=[temp2],16 // restore b3 | ||
770 | ;; | ||
771 | mov b2=temp3 | ||
772 | mov b3=temp4 | ||
773 | ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4 | ||
774 | ld8 temp4=[temp2] // restore b5 | ||
775 | ;; | ||
776 | mov b4=temp3 | ||
777 | mov b5=temp4 | ||
778 | ld8 temp3=[temp1] // restore ar.lc | ||
779 | ;; | ||
780 | mov ar.lc=temp3 | ||
637 | 781 | ||
638 | // Restore bank1 GR16-31 | 782 | // Restore the pt_regs data that is not in minstate. The previous code |
639 | GET_IA64_MCA_DATA(r2) | 783 | // left regs at switch_stack. |
784 | add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs | ||
785 | ;; | ||
786 | add temp1=PT(B6), regs | ||
787 | add temp2=PT(B7), regs | ||
788 | ;; | ||
789 | ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6 | ||
790 | ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7 | ||
791 | ;; | ||
792 | mov b6=temp3 | ||
793 | mov b7=temp4 | ||
794 | ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd | ||
795 | ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd | ||
640 | ;; | 796 | ;; |
641 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 797 | mov ar.csd=temp3 |
642 | 798 | mov ar.ssd=temp4 | |
643 | restore_GRs: // restore bank-1 GRs 16-31 | 799 | ld8 temp3=[temp1] // restore ar.unat |
644 | bsw.1;; | 800 | add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1 |
645 | add r3=16*8,r2;; // to get to NaT of GR 16-31 | 801 | ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs |
646 | ld8 r3=[r3];; | 802 | ;; |
647 | mov ar.unat=r3;; // first restore NaT | 803 | mov ar.unat=temp3 |
648 | 804 | mov ar.pfs=temp4 | |
649 | ld8.fill r16=[r2],8;; | 805 | // ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack. |
650 | ld8.fill r17=[r2],8;; | 806 | ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv |
651 | ld8.fill r18=[r2],8;; | 807 | ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr |
652 | ld8.fill r19=[r2],8;; | 808 | ;; |
653 | ld8.fill r20=[r2],8;; | 809 | mov ar.ccv=temp3 |
654 | ld8.fill r21=[r2],8;; | 810 | mov ar.fpsr=temp4 |
655 | ld8.fill r22=[r2],8;; | 811 | ldf.fill f6=[temp1],PT(F8)-PT(F6) |
656 | ld8.fill r23=[r2],8;; | 812 | ldf.fill f7=[temp2],PT(F9)-PT(F7) |
657 | ld8.fill r24=[r2],8;; | 813 | ;; |
658 | ld8.fill r25=[r2],8;; | 814 | ldf.fill f8=[temp1],PT(F10)-PT(F8) |
659 | ld8.fill r26=[r2],8;; | 815 | ldf.fill f9=[temp2],PT(F11)-PT(F9) |
660 | ld8.fill r27=[r2],8;; | 816 | ;; |
661 | ld8.fill r28=[r2],8;; | 817 | ldf.fill f10=[temp1] |
662 | ld8.fill r29=[r2],8;; | 818 | ldf.fill f11=[temp2] |
663 | ld8.fill r30=[r2],8;; | 819 | |
664 | ld8.fill r31=[r2],8;; | 820 | // Restore the SAL to OS state. The previous code left regs at pt_regs. |
665 | 821 | add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs | |
666 | ld8 r3=[r2],8;; // increment to skip NaT | 822 | ;; |
667 | bsw.0;; | 823 | add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs |
668 | 824 | add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs | |
669 | restore_BRs: | 825 | ;; |
670 | add r4=8,r2 // duplicate r2 in r4 | 826 | ld8 r12=[temp1],16 // sal_ra |
671 | add r6=2*8,r2;; // duplicate r2 in r6 | 827 | ld8 r9=[temp2],16 // sal_gp |
672 | 828 | ;; | |
673 | ld8 r3=[r2],3*8 | 829 | ld8 r22=[temp1],16 // pal_min_state, virtual |
674 | ld8 r5=[r4],3*8 | 830 | ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT |
675 | ld8 r7=[r6],3*8;; | 831 | ;; |
676 | mov b0=r3 | 832 | ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK |
677 | mov b1=r5 | 833 | ld8 r20=[temp2],16 // prev_task |
678 | mov b2=r7;; | 834 | ;; |
679 | 835 | ld8 temp3=[temp1],16 // cr.isr | |
680 | ld8 r3=[r2],3*8 | 836 | ld8 temp4=[temp2],16 // cr.ifa |
681 | ld8 r5=[r4],3*8 | 837 | ;; |
682 | ld8 r7=[r6],3*8;; | 838 | mov cr.isr=temp3 |
683 | mov b3=r3 | 839 | mov cr.ifa=temp4 |
684 | mov b4=r5 | 840 | ld8 temp3=[temp1],16 // cr.itir |
685 | mov b5=r7;; | 841 | ld8 temp4=[temp2],16 // cr.iipa |
686 | 842 | ;; | |
687 | ld8 r3=[r2],2*8 | 843 | mov cr.itir=temp3 |
688 | ld8 r5=[r4],2*8;; | 844 | mov cr.iipa=temp4 |
689 | mov b6=r3 | 845 | ld8 temp3=[temp1],16 // cr.iim |
690 | mov b7=r5;; | 846 | ld8 temp4=[temp2],16 // cr.iha |
691 | 847 | ;; | |
692 | restore_CRs: | 848 | mov cr.iim=temp3 |
693 | add r4=8,r2 // duplicate r2 in r4 | 849 | mov cr.iha=temp4 |
694 | add r6=2*8,r2;; // duplicate r2 in r6 | 850 | dep r22=0,r22,62,2 // pal_min_state, physical, uncached |
695 | 851 | mov IA64_KR(CURRENT)=r21 | |
696 | ld8 r3=[r2],8*8 | 852 | ld8 r8=[temp1] // os_status |
697 | ld8 r5=[r4],3*8 | 853 | ld8 r10=[temp2] // context |
698 | ld8 r7=[r6],3*8;; // 48 byte increments | 854 | |
699 | mov cr.dcr=r3 | 855 | /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To |
700 | mov cr.itm=r5 | 856 | * avoid any dependencies on the algorithm in ia64_switch_to(), just |
701 | mov cr.iva=r7;; | 857 | * purge any existing CURRENT_STACK mapping and insert the new one. |
702 | 858 | * | |
703 | ld8 r3=[r2],8*8;; // 64 byte increments | 859 | * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains |
704 | // mov cr.pta=r3 | 860 | * prev_IA64_KR_CURRENT, these values may have been changed by the C |
705 | 861 | * code. Do not use r8, r9, r10, r22, they contain values ready for | |
706 | 862 | * the return to SAL. | |
707 | // if PSR.ic=1, reading interruption registers causes an illegal operation fault | 863 | */ |
708 | mov r3=psr;; | 864 | |
709 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | 865 | mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK |
710 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | 866 | ;; |
711 | 867 | shl r15=r15,IA64_GRANULE_SHIFT | |
712 | begin_rskip_intr_regs: | 868 | ;; |
713 | (p6) br rSkipIntrRegs;; | 869 | dep r15=-1,r15,61,3 // virtual granule |
714 | 870 | mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps | |
715 | add r4=8,r2 // duplicate r2 in r4 | 871 | ;; |
716 | add r6=2*8,r2;; // duplicate r2 in r6 | 872 | ptr.d r15,r18 |
717 | 873 | ;; | |
718 | ld8 r3=[r2],3*8 | 874 | srlz.d |
719 | ld8 r5=[r4],3*8 | 875 | |
720 | ld8 r7=[r6],3*8;; | 876 | extr.u r19=r21,61,3 // r21 = prev_IA64_KR_CURRENT |
721 | mov cr.ipsr=r3 | 877 | shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK |
722 | // mov cr.isr=r5 // cr.isr is read only | 878 | movl r21=PAGE_KERNEL // page properties |
723 | 879 | ;; | |
724 | ld8 r3=[r2],3*8 | 880 | mov IA64_KR(CURRENT_STACK)=r16 |
725 | ld8 r5=[r4],3*8 | 881 | cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region? |
726 | ld8 r7=[r6],3*8;; | 882 | or r21=r20,r21 // construct PA | page properties |
727 | mov cr.iip=r3 | 883 | (p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:( |
728 | mov cr.ifa=r5 | 884 | ;; |
729 | mov cr.itir=r7;; | 885 | mov cr.itir=r18 |
730 | 886 | mov cr.ifa=r21 | |
731 | ld8 r3=[r2],3*8 | 887 | mov r20=IA64_TR_CURRENT_STACK |
732 | ld8 r5=[r4],3*8 | 888 | ;; |
733 | ld8 r7=[r6],3*8;; | 889 | itr.d dtr[r20]=r21 |
734 | mov cr.iipa=r3 | 890 | ;; |
735 | mov cr.ifs=r5 | 891 | srlz.d |
736 | mov cr.iim=r7 | 892 | 1: |
737 | 893 | ||
738 | ld8 r3=[r2],160;; // 160 byte increment | 894 | br.sptk b0 |
739 | mov cr.iha=r3 | ||
740 | |||
741 | rSkipIntrRegs: | ||
742 | ld8 r3=[r2],152;; // another 152 byte inc. | ||
743 | |||
744 | add r4=8,r2 // duplicate r2 in r4 | ||
745 | add r6=2*8,r2;; // duplicate r2 in r6 | ||
746 | |||
747 | ld8 r3=[r2],8*3 | ||
748 | ld8 r5=[r4],8*3 | ||
749 | ld8 r7=[r6],8*3;; | ||
750 | mov cr.lid=r3 | ||
751 | // mov cr.ivr=r5 // cr.ivr is read only | ||
752 | mov cr.tpr=r7;; | ||
753 | |||
754 | ld8 r3=[r2],8*3 | ||
755 | ld8 r5=[r4],8*3 | ||
756 | ld8 r7=[r6],8*3;; | ||
757 | // mov cr.eoi=r3 | ||
758 | // mov cr.irr0=r5 // cr.irr0 is read only | ||
759 | // mov cr.irr1=r7;; // cr.irr1 is read only | ||
760 | |||
761 | ld8 r3=[r2],8*3 | ||
762 | ld8 r5=[r4],8*3 | ||
763 | ld8 r7=[r6],8*3;; | ||
764 | // mov cr.irr2=r3 // cr.irr2 is read only | ||
765 | // mov cr.irr3=r5 // cr.irr3 is read only | ||
766 | mov cr.itv=r7;; | ||
767 | |||
768 | ld8 r3=[r2],8*7 | ||
769 | ld8 r5=[r4],8*7;; | ||
770 | mov cr.pmv=r3 | ||
771 | mov cr.cmcv=r5;; | ||
772 | |||
773 | ld8 r3=[r2],8*23 | ||
774 | ld8 r5=[r4],8*23;; | ||
775 | adds r2=8*23,r2 | ||
776 | adds r4=8*23,r4;; | ||
777 | // mov cr.lrr0=r3 | ||
778 | // mov cr.lrr1=r5 | ||
779 | |||
780 | adds r2=8*2,r2;; | ||
781 | |||
782 | restore_ARs: | ||
783 | add r4=8,r2 // duplicate r2 in r4 | ||
784 | add r6=2*8,r2;; // duplicate r2 in r6 | ||
785 | |||
786 | ld8 r3=[r2],3*8 | ||
787 | ld8 r5=[r4],3*8 | ||
788 | ld8 r7=[r6],3*8;; | ||
789 | mov ar.k0=r3 | ||
790 | mov ar.k1=r5 | ||
791 | mov ar.k2=r7;; | ||
792 | |||
793 | ld8 r3=[r2],3*8 | ||
794 | ld8 r5=[r4],3*8 | ||
795 | ld8 r7=[r6],3*8;; | ||
796 | mov ar.k3=r3 | ||
797 | mov ar.k4=r5 | ||
798 | mov ar.k5=r7;; | ||
799 | |||
800 | ld8 r3=[r2],10*8 | ||
801 | ld8 r5=[r4],10*8 | ||
802 | ld8 r7=[r6],10*8;; | ||
803 | mov ar.k6=r3 | ||
804 | mov ar.k7=r5 | ||
805 | ;; | ||
806 | |||
807 | ld8 r3=[r2],3*8 | ||
808 | ld8 r5=[r4],3*8 | ||
809 | ld8 r7=[r6],3*8;; | ||
810 | // mov ar.rsc=r3 | ||
811 | // mov ar.bsp=r5 // ar.bsp is read only | ||
812 | mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode | ||
813 | ;; | ||
814 | mov ar.bspstore=r7;; | ||
815 | |||
816 | ld8 r9=[r2],8*13;; | ||
817 | mov ar.rnat=r9 | ||
818 | |||
819 | mov ar.rsc=r3 | ||
820 | ld8 r3=[r2],8*4;; | ||
821 | mov ar.ccv=r3 | ||
822 | |||
823 | ld8 r3=[r2],8*4;; | ||
824 | mov ar.unat=r3 | ||
825 | |||
826 | ld8 r3=[r2],8*4;; | ||
827 | mov ar.fpsr=r3 | ||
828 | |||
829 | ld8 r3=[r2],160;; // 160 | ||
830 | // mov ar.itc=r3 | ||
831 | |||
832 | ld8 r3=[r2],8;; | ||
833 | mov ar.pfs=r3 | ||
834 | |||
835 | ld8 r3=[r2],8;; | ||
836 | mov ar.lc=r3 | ||
837 | |||
838 | ld8 r3=[r2];; | ||
839 | mov ar.ec=r3 | ||
840 | add r2=8*62,r2;; // padding | ||
841 | |||
842 | restore_RRs: | ||
843 | mov r5=ar.lc | ||
844 | mov ar.lc=0x08-1 | ||
845 | movl r4=0x00;; | ||
846 | cStRRr: | ||
847 | dep.z r7=r4,61,3 | ||
848 | ld8 r3=[r2],8;; | ||
849 | mov rr[r7]=r3 // what are its access privileges? | ||
850 | add r4=1,r4 | ||
851 | br.cloop.sptk.few cStRRr | ||
852 | ;; | ||
853 | mov ar.lc=r5 | ||
854 | ;; | ||
855 | end_os_mca_restore: | ||
856 | br ia64_os_mca_done_restore;; | ||
857 | 895 | ||
858 | //EndStub////////////////////////////////////////////////////////////////////// | 896 | //EndStub////////////////////////////////////////////////////////////////////// |
859 | 897 | ||
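ia64_state_restore above walks a single per-CPU MCA/INIT save area, moving one pointer from the switch_stack region to the pt_regs region and then to the SAL/OS state region by adding the difference of two area offsets. A minimal C sketch of that pointer arithmetic, using made-up values in place of the kernel's real MCA_*_OFFSET constants:

#include <assert.h>

enum {					/* illustrative values only */
	MCA_SWITCH_STACK_OFFSET	= 0x0000,
	MCA_PT_REGS_OFFSET	= 0x1000,
	MCA_SOS_OFFSET		= 0x2000,
};

int main(void)
{
	static char mca_area[0x3000];	/* stand-in for the per-CPU MCA/INIT area */
	char *regs = mca_area + MCA_SWITCH_STACK_OFFSET;

	/* add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs */
	regs += MCA_PT_REGS_OFFSET - MCA_SWITCH_STACK_OFFSET;
	assert(regs == mca_area + MCA_PT_REGS_OFFSET);

	/* add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs */
	regs += MCA_SOS_OFFSET - MCA_PT_REGS_OFFSET;
	assert(regs == mca_area + MCA_SOS_OFFSET);
	return 0;
}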
860 | 898 | ||
861 | // ok, the issue here is that we need to save state information so | 899 | //++ |
862 | // it can be used by the kernel debugger and show regs routines. | 900 | // Name: |
863 | // In order to do this, our best bet is to save the current state (plus | 901 | // ia64_new_stack() |
864 | // the state information obtained from the MIN_STATE_AREA) into a pt_regs | ||
865 | // format. This way we can pass it on in a usable format. | ||
866 | // | 902 | // |
867 | 903 | // Stub Description: | |
868 | // | 904 | // |
869 | // SAL to OS entry point for INIT on the monarch processor | 905 | // Switch to the MCA/INIT stack. |
870 | // This has been defined for registration purposes with SAL | ||
871 | // as a part of ia64_mca_init. | ||
872 | // | 906 | // |
873 | // When we get here, the following registers have been | 907 | // r2 contains the return address, r3 contains either |
874 | // set by the SAL for our use | 908 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. |
875 | // | 909 | // |
876 | // 1. GR1 = OS INIT GP | 910 | // On entry RBS is still on the original stack, this routine switches RBS |
877 | // 2. GR8 = PAL_PROC physical address | 911 | // to use the MCA/INIT stack. |
878 | // 3. GR9 = SAL_PROC physical address | ||
879 | // 4. GR10 = SAL GP (physical) | ||
880 | // 5. GR11 = Init Reason | ||
881 | // 0 = Received INIT for event other than crash dump switch | ||
882 | // 1 = Received wakeup at the end of an OS_MCA corrected machine check | ||
883 | 2 = Received INIT due to CrashDump switch assertion | ||
884 | // | 912 | // |
885 | // 6. GR12 = Return address to location within SAL_INIT procedure | 913 | // On entry, sos->pal_min_state is physical, on exit it is virtual. |
886 | 914 | // | |
915 | //-- | ||
887 | 916 | ||
888 | GLOBAL_ENTRY(ia64_monarch_init_handler) | 917 | ia64_new_stack: |
889 | .prologue | 918 | add regs=MCA_PT_REGS_OFFSET, r3 |
890 | // stash the information the SAL passed to os | 919 | add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3 |
891 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | 920 | mov b0=r2 // save return address |
921 | GET_IA64_MCA_DATA(temp1) | ||
922 | invala | ||
892 | ;; | 923 | ;; |
893 | SAVE_MIN_WITH_COVER | 924 | add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack |
925 | add regs=regs, temp1 // struct pt_regs on MCA or INIT stack | ||
894 | ;; | 926 | ;; |
895 | mov r8=cr.ifa | 927 | // Address of minstate area provided by PAL is physical, uncacheable. |
896 | mov r9=cr.isr | 928 | // Convert to Linux virtual address in region 6 for C code. |
897 | adds r3=8,r2 // set up second base pointer | 929 | ld8 ms=[temp2] // pal_min_state, physical |
898 | ;; | 930 | ;; |
899 | SAVE_REST | 931 | dep temp1=-1,ms,62,2 // set region 6 |
900 | 932 | mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET | |
901 | // ok, enough should be saved at this point to be dangerous, and supply | 933 | ;; |
902 | // information for a dump | 934 | st8 [temp2]=temp1 // pal_min_state, virtual |
903 | // We need to switch to Virtual mode before hitting the C functions. | ||
904 | 935 | ||
905 | movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN | 936 | add temp4=temp3, regs // start of bspstore on new stack |
906 | mov r3=psr // get the current psr, minimum enabled at this point | ||
907 | ;; | 937 | ;; |
908 | or r2=r2,r3 | 938 | mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack |
909 | ;; | 939 | ;; |
910 | movl r3=IVirtual_Switch | 940 | flushrs // must be first in group |
941 | br.sptk b0 | ||
942 | |||
943 | //EndStub////////////////////////////////////////////////////////////////////// | ||
944 | |||
945 | |||
946 | //++ | ||
947 | // Name: | ||
948 | // ia64_old_stack() | ||
949 | // | ||
950 | // Stub Description: | ||
951 | // | ||
952 | // Switch to the old stack. | ||
953 | // | ||
954 | // r2 contains the return address, r3 contains either | ||
955 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
956 | // | ||
957 | // On entry, pal_min_state is virtual, on exit it is physical. | ||
958 | // | ||
959 | // On entry RBS is on the MCA/INIT stack, this routine switches RBS | ||
960 | // back to the previous stack. | ||
961 | // | ||
962 | // The psr is set to all zeroes. SAL return requires either all zeroes or | ||
963 | // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this | ||
964 | // code does not perform correctly. | ||
965 | // | ||
966 | // The dirty registers at the time of the event were flushed to the | ||
967 | // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers | ||
968 | // before reverting to the previous bspstore. | ||
969 | //-- | ||
970 | |||
971 | ia64_old_stack: | ||
972 | add regs=MCA_PT_REGS_OFFSET, r3 | ||
973 | mov b0=r2 // save return address | ||
974 | GET_IA64_MCA_DATA(temp2) | ||
975 | LOAD_PHYSICAL(p0,temp1,1f) | ||
911 | ;; | 976 | ;; |
912 | mov cr.iip=r3 // short return to set the appropriate bits | 977 | mov cr.ipsr=r0 |
913 | mov cr.ipsr=r2 // need to do an rfi to set appropriate bits | 978 | mov cr.ifs=r0 |
979 | mov cr.iip=temp1 | ||
914 | ;; | 980 | ;; |
981 | invala | ||
915 | rfi | 982 | rfi |
983 | 1: | ||
984 | |||
985 | add regs=regs, temp2 // struct pt_regs on MCA or INIT stack | ||
916 | ;; | 986 | ;; |
917 | IVirtual_Switch: | 987 | add temp1=PT(LOADRS), regs |
918 | // | ||
919 | // We should now be running virtual | ||
920 | // | ||
921 | // Let's call the C handler to get the rest of the state info | ||
922 | // | ||
923 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | ||
924 | ;; | 988 | ;; |
925 | adds out0=16,sp // out0 = pointer to pt_regs | 989 | ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs |
990 | ;; | ||
991 | ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore | ||
992 | mov ar.rsc=temp2 | ||
993 | ;; | ||
994 | loadrs | ||
995 | ld8 temp4=[temp1] // restore ar.rnat | ||
996 | ;; | ||
997 | mov ar.bspstore=temp3 // back to old stack | ||
998 | ;; | ||
999 | mov ar.rnat=temp4 | ||
926 | ;; | 1000 | ;; |
927 | DO_SAVE_SWITCH_STACK | ||
928 | .body | ||
929 | adds out1=16,sp // out0 = pointer to switch_stack | ||
930 | 1001 | ||
931 | br.call.sptk.many rp=ia64_init_handler | 1002 | br.sptk b0 |
932 | .ret1: | 1003 | |
1004 | //EndStub////////////////////////////////////////////////////////////////////// | ||
933 | 1005 | ||
934 | return_from_init: | ||
935 | br.sptk return_from_init | ||
936 | END(ia64_monarch_init_handler) | ||
937 | 1006 | ||
1007 | //++ | ||
1008 | // Name: | ||
1009 | // ia64_set_kernel_registers() | ||
1010 | // | ||
1011 | // Stub Description: | ||
1012 | // | ||
1013 | // Set the registers that are required by the C code in order to run on an | ||
1014 | // MCA/INIT stack. | ||
938 | // | 1015 | // |
939 | // SAL to OS entry point for INIT on the slave processor | 1016 | // r2 contains the return address, r3 contains either |
940 | // This has been defined for registration purposes with SAL | 1017 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. |
941 | // as a part of ia64_mca_init. | ||
942 | // | 1018 | // |
1019 | //-- | ||
943 | 1020 | ||
944 | GLOBAL_ENTRY(ia64_slave_init_handler) | 1021 | ia64_set_kernel_registers: |
945 | 1: br.sptk 1b | 1022 | add temp3=MCA_SP_OFFSET, r3 |
946 | END(ia64_slave_init_handler) | 1023 | add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3 |
1024 | mov b0=r2 // save return address | ||
1025 | GET_IA64_MCA_DATA(temp1) | ||
1026 | ;; | ||
1027 | add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp | ||
1028 | add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack | ||
1029 | add r13=temp1, r3 // set current to start of MCA/INIT stack | ||
1030 | add r20=temp1, r3 // physical start of MCA/INIT stack | ||
1031 | ;; | ||
1032 | ld8 r1=[temp4] // OS GP from SAL OS state | ||
1033 | ;; | ||
1034 | DATA_PA_TO_VA(r1,temp1) | ||
1035 | DATA_PA_TO_VA(r12,temp2) | ||
1036 | DATA_PA_TO_VA(r13,temp3) | ||
1037 | ;; | ||
1038 | mov IA64_KR(CURRENT)=r13 | ||
1039 | |||
1040 | /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid | ||
1041 | * any dependencies on the algorithm in ia64_switch_to(), just purge | ||
1042 | * any existing CURRENT_STACK mapping and insert the new one. | ||
1043 | */ | ||
1044 | |||
1045 | mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK | ||
1046 | ;; | ||
1047 | shl r16=r16,IA64_GRANULE_SHIFT | ||
1048 | ;; | ||
1049 | dep r16=-1,r16,61,3 // virtual granule | ||
1050 | mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps | ||
1051 | ;; | ||
1052 | ptr.d r16,r18 | ||
1053 | ;; | ||
1054 | srlz.d | ||
1055 | |||
1056 | shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack | ||
1057 | movl r21=PAGE_KERNEL // page properties | ||
1058 | ;; | ||
1059 | mov IA64_KR(CURRENT_STACK)=r16 | ||
1060 | or r21=r20,r21 // construct PA | page properties | ||
1061 | ;; | ||
1062 | mov cr.itir=r18 | ||
1063 | mov cr.ifa=r13 | ||
1064 | mov r20=IA64_TR_CURRENT_STACK | ||
1065 | ;; | ||
1066 | itr.d dtr[r20]=r21 | ||
1067 | ;; | ||
1068 | srlz.d | ||
1069 | |||
1070 | br.sptk b0 | ||
1071 | |||
1072 | //EndStub////////////////////////////////////////////////////////////////////// | ||
1073 | |||
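Both ia64_state_restore and ia64_set_kernel_registers rewire IA64_TR_CURRENT_STACK the same way: purge whatever granule IA64_KR(CURRENT_STACK) currently maps, then insert a dtr entry whose value is the new stack's physical granule base OR'd with the PAGE_KERNEL properties. A minimal sketch of that value construction, with placeholder constants standing in for IA64_GRANULE_SHIFT and PAGE_KERNEL:

#include <stdint.h>

#define GRANULE_SHIFT		16		/* placeholder, not the kernel's value */
#define PAGE_KERNEL_BITS	0x761ULL	/* placeholder attribute bits */

/* IA64_KR(CURRENT_STACK) holds a granule number; shifting it left gives the
 * physical base mapped by IA64_TR_CURRENT_STACK (shl r16=r16,IA64_GRANULE_SHIFT). */
static inline uint64_t granule_to_pa(uint64_t granule)
{
	return granule << GRANULE_SHIFT;
}

/* or r21=r20,r21: the dtr value is the physical base plus page properties. */
static inline uint64_t current_stack_dtr_value(uint64_t granule)
{
	return granule_to_pa(granule) | PAGE_KERNEL_BITS;
}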
1074 | #undef ms | ||
1075 | #undef regs | ||
1076 | #undef temp1 | ||
1077 | #undef temp2 | ||
1078 | #undef temp3 | ||
1079 | #undef temp4 | ||
1080 | |||
1081 | |||
1082 | // Support function for mca.c, it is here to avoid using inline asm. Given the | ||
1083 | // address of an rnat slot, if that address is below the current ar.bspstore | ||
1084 | // then return the contents of that slot, otherwise return the contents of | ||
1085 | // ar.rnat. | ||
1086 | GLOBAL_ENTRY(ia64_get_rnat) | ||
1087 | alloc r14=ar.pfs,1,0,0,0 | ||
1088 | mov ar.rsc=0 | ||
1089 | ;; | ||
1090 | mov r14=ar.bspstore | ||
1091 | ;; | ||
1092 | cmp.lt p6,p7=in0,r14 | ||
1093 | ;; | ||
1094 | (p6) ld8 r8=[in0] | ||
1095 | (p7) mov r8=ar.rnat | ||
1096 | mov ar.rsc=3 | ||
1097 | br.ret.sptk.many rp | ||
1098 | END(ia64_get_rnat) | ||
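The comparison ia64_get_rnat performs maps directly onto C once both values are in hand; read_ar_rnat() below is a hypothetical accessor for the live ar.rnat register, not a real kernel interface:

#include <stdint.h>

extern uint64_t read_ar_rnat(void);	/* hypothetical helper */

uint64_t get_rnat(const uint64_t *rnat_slot, const uint64_t *bspstore)
{
	/* A slot below ar.bspstore has already been spilled to memory and
	 * holds valid NaT bits; otherwise the live register must be used. */
	if (rnat_slot < bspstore)
		return *rnat_slot;
	return read_ar_rnat();
}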
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index abc0113a821d..f081c60ab206 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c | |||
@@ -4,6 +4,8 @@ | |||
4 | * | 4 | * |
5 | * Copyright (C) 2004 FUJITSU LIMITED | 5 | * Copyright (C) 2004 FUJITSU LIMITED |
6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) | 6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) |
7 | * Copyright (C) 2005 Silicon Graphics, Inc | ||
8 | * Copyright (C) 2005 Keith Owens <kaos@sgi.com> | ||
7 | */ | 9 | */ |
8 | #include <linux/config.h> | 10 | #include <linux/config.h> |
9 | #include <linux/types.h> | 11 | #include <linux/types.h> |
@@ -38,10 +40,6 @@ | |||
38 | /* max size of SAL error record (default) */ | 40 | /* max size of SAL error record (default) */ |
39 | static int sal_rec_max = 10000; | 41 | static int sal_rec_max = 10000; |
40 | 42 | ||
41 | /* from mca.c */ | ||
42 | static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state; | ||
43 | static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state; | ||
44 | |||
45 | /* from mca_drv_asm.S */ | 43 | /* from mca_drv_asm.S */ |
46 | extern void *mca_handler_bhhook(void); | 44 | extern void *mca_handler_bhhook(void); |
47 | 45 | ||
@@ -58,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE]; | |||
58 | static int num_page_isolate = 0; | 56 | static int num_page_isolate = 0; |
59 | 57 | ||
60 | typedef enum { | 58 | typedef enum { |
61 | ISOLATE_NG = 0, | 59 | ISOLATE_NG, |
62 | ISOLATE_OK = 1 | 60 | ISOLATE_OK, |
61 | ISOLATE_NONE | ||
63 | } isolate_status_t; | 62 | } isolate_status_t; |
64 | 63 | ||
65 | /* | 64 | /* |
@@ -76,7 +75,7 @@ static struct { | |||
76 | * @paddr: poisoned memory location | 75 | * @paddr: poisoned memory location |
77 | * | 76 | * |
78 | * Return value: | 77 | * Return value: |
79 | * ISOLATE_OK / ISOLATE_NG | 78 | * one of isolate_status_t, ISOLATE_OK/NG/NONE. |
80 | */ | 79 | */ |
81 | 80 | ||
82 | static isolate_status_t | 81 | static isolate_status_t |
@@ -86,23 +85,26 @@ mca_page_isolate(unsigned long paddr) | |||
86 | struct page *p; | 85 | struct page *p; |
87 | 86 | ||
88 | /* whether physical address is valid or not */ | 87 | /* whether physical address is valid or not */ |
89 | if ( !ia64_phys_addr_valid(paddr) ) | 88 | if (!ia64_phys_addr_valid(paddr)) |
90 | return ISOLATE_NG; | 89 | return ISOLATE_NONE; |
90 | |||
91 | if (!pfn_valid(paddr)) | ||
92 | return ISOLATE_NONE; | ||
91 | 93 | ||
92 | /* convert physical address to physical page number */ | 94 | /* convert physical address to physical page number */ |
93 | p = pfn_to_page(paddr>>PAGE_SHIFT); | 95 | p = pfn_to_page(paddr>>PAGE_SHIFT); |
94 | 96 | ||
95 | /* check whether a page number has already been registered or not */ | 97 | /* check whether a page number has already been registered or not */ |
96 | for( i = 0; i < num_page_isolate; i++ ) | 98 | for (i = 0; i < num_page_isolate; i++) |
97 | if( page_isolate[i] == p ) | 99 | if (page_isolate[i] == p) |
98 | return ISOLATE_OK; /* already listed */ | 100 | return ISOLATE_OK; /* already listed */ |
99 | 101 | ||
100 | /* limitation check */ | 102 | /* limitation check */ |
101 | if( num_page_isolate == MAX_PAGE_ISOLATE ) | 103 | if (num_page_isolate == MAX_PAGE_ISOLATE) |
102 | return ISOLATE_NG; | 104 | return ISOLATE_NG; |
103 | 105 | ||
104 | /* kick pages having attribute 'SLAB' or 'Reserved' */ | 106 | /* kick pages having attribute 'SLAB' or 'Reserved' */ |
105 | if( PageSlab(p) || PageReserved(p) ) | 107 | if (PageSlab(p) || PageReserved(p)) |
106 | return ISOLATE_NG; | 108 | return ISOLATE_NG; |
107 | 109 | ||
108 | /* add attribute 'Reserved' and register the page */ | 110 | /* add attribute 'Reserved' and register the page */ |
@@ -124,10 +126,15 @@ mca_handler_bh(unsigned long paddr) | |||
124 | current->pid, current->comm); | 126 | current->pid, current->comm); |
125 | 127 | ||
126 | spin_lock(&mca_bh_lock); | 128 | spin_lock(&mca_bh_lock); |
127 | if (mca_page_isolate(paddr) == ISOLATE_OK) { | 129 | switch (mca_page_isolate(paddr)) { |
130 | case ISOLATE_OK: | ||
128 | printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr); | 131 | printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr); |
129 | } else { | 132 | break; |
133 | case ISOLATE_NG: | ||
130 | printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr); | 134 | printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr); |
135 | break; | ||
136 | default: | ||
137 | break; | ||
131 | } | 138 | } |
132 | spin_unlock(&mca_bh_lock); | 139 | spin_unlock(&mca_bh_lock); |
133 | 140 | ||
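With ISOLATE_NONE added, callers can tell "this address is not a managed page at all" apart from a genuine isolation failure, which is what the new switch in mca_handler_bh does. A stand-alone sketch of that three-way result, with a deliberately simplified validity check in place of ia64_phys_addr_valid()/pfn_valid():

#include <stdio.h>

typedef enum { ISOLATE_NG, ISOLATE_OK, ISOLATE_NONE } isolate_status_t;

/* Simplified stand-in: pretend addresses at or above 1GB are unmanaged and
 * odd addresses cannot be isolated. */
static isolate_status_t page_isolate_sketch(unsigned long paddr)
{
	if (paddr >= (1UL << 30))
		return ISOLATE_NONE;	/* not a page we manage at all */
	if (paddr & 1)
		return ISOLATE_NG;	/* isolation attempted but failed */
	return ISOLATE_OK;
}

void handler_sketch(unsigned long paddr)
{
	switch (page_isolate_sketch(paddr)) {
	case ISOLATE_OK:
		printf("Page isolation: ( %lx ) success.\n", paddr);
		break;
	case ISOLATE_NG:
		printf("Page isolation: ( %lx ) failure.\n", paddr);
		break;
	default:
		break;			/* ISOLATE_NONE: nothing to report */
	}
}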
@@ -141,10 +148,10 @@ mca_handler_bh(unsigned long paddr) | |||
141 | * @peidx: pointer to index of processor error section | 148 | * @peidx: pointer to index of processor error section |
142 | */ | 149 | */ |
143 | 150 | ||
144 | static void | 151 | static void |
145 | mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx) | 152 | mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx) |
146 | { | 153 | { |
147 | /* | 154 | /* |
148 | * calculate the start address of | 155 | * calculate the start address of |
149 | * "struct cpuid_info" and "sal_processor_static_info_t". | 156 | * "struct cpuid_info" and "sal_processor_static_info_t". |
150 | */ | 157 | */ |
@@ -166,7 +173,7 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx) | |||
166 | } | 173 | } |
167 | 174 | ||
168 | /** | 175 | /** |
169 | * mca_make_slidx - Make index of SAL error record | 176 | * mca_make_slidx - Make index of SAL error record |
170 | * @buffer: pointer to SAL error record | 177 | * @buffer: pointer to SAL error record |
171 | * @slidx: pointer to index of SAL error record | 178 | * @slidx: pointer to index of SAL error record |
172 | * | 179 | * |
@@ -174,12 +181,12 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx) | |||
174 | * 1 if record has platform error / 0 if not | 181 | * 1 if record has platform error / 0 if not |
175 | */ | 182 | */ |
176 | #define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \ | 183 | #define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \ |
177 | { slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \ | 184 | {slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \ |
178 | hl->hdr = ptr; \ | 185 | hl->hdr = ptr; \ |
179 | list_add(&hl->list, &(sect)); \ | 186 | list_add(&hl->list, &(sect)); \ |
180 | slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; } | 187 | slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; } |
181 | 188 | ||
182 | static int | 189 | static int |
183 | mca_make_slidx(void *buffer, slidx_table_t *slidx) | 190 | mca_make_slidx(void *buffer, slidx_table_t *slidx) |
184 | { | 191 | { |
185 | int platform_err = 0; | 192 | int platform_err = 0; |
@@ -216,28 +223,36 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx) | |||
216 | sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos); | 223 | sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos); |
217 | if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) { | 224 | if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) { |
218 | LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp); | 225 | LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp); |
219 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) { | 226 | } else if (!efi_guidcmp(sp->guid, |
227 | SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) { | ||
220 | platform_err = 1; | 228 | platform_err = 1; |
221 | LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp); | 229 | LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp); |
222 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) { | 230 | } else if (!efi_guidcmp(sp->guid, |
231 | SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) { | ||
223 | platform_err = 1; | 232 | platform_err = 1; |
224 | LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp); | 233 | LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp); |
225 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) { | 234 | } else if (!efi_guidcmp(sp->guid, |
235 | SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) { | ||
226 | platform_err = 1; | 236 | platform_err = 1; |
227 | LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp); | 237 | LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp); |
228 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) { | 238 | } else if (!efi_guidcmp(sp->guid, |
239 | SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) { | ||
229 | platform_err = 1; | 240 | platform_err = 1; |
230 | LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp); | 241 | LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp); |
231 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) { | 242 | } else if (!efi_guidcmp(sp->guid, |
243 | SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) { | ||
232 | platform_err = 1; | 244 | platform_err = 1; |
233 | LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp); | 245 | LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp); |
234 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) { | 246 | } else if (!efi_guidcmp(sp->guid, |
247 | SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) { | ||
235 | platform_err = 1; | 248 | platform_err = 1; |
236 | LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp); | 249 | LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp); |
237 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) { | 250 | } else if (!efi_guidcmp(sp->guid, |
251 | SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) { | ||
238 | platform_err = 1; | 252 | platform_err = 1; |
239 | LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp); | 253 | LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp); |
240 | } else if (!efi_guidcmp(sp->guid, SAL_PLAT_BUS_ERR_SECT_GUID)) { | 254 | } else if (!efi_guidcmp(sp->guid, |
255 | SAL_PLAT_BUS_ERR_SECT_GUID)) { | ||
241 | platform_err = 1; | 256 | platform_err = 1; |
242 | LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp); | 257 | LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp); |
243 | } else { | 258 | } else { |
@@ -255,15 +270,16 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx) | |||
255 | * Return value: | 270 | * Return value: |
256 | * 0 on Success / -ENOMEM on Failure | 271 | * 0 on Success / -ENOMEM on Failure |
257 | */ | 272 | */ |
258 | static int | 273 | static int |
259 | init_record_index_pools(void) | 274 | init_record_index_pools(void) |
260 | { | 275 | { |
261 | int i; | 276 | int i; |
262 | int rec_max_size; /* Maximum size of SAL error records */ | 277 | int rec_max_size; /* Maximum size of SAL error records */ |
263 | int sect_min_size; /* Minimum size of SAL error sections */ | 278 | int sect_min_size; /* Minimum size of SAL error sections */ |
264 | /* minimum size table of each section */ | 279 | /* minimum size table of each section */ |
265 | static int sal_log_sect_min_sizes[] = { | 280 | static int sal_log_sect_min_sizes[] = { |
266 | sizeof(sal_log_processor_info_t) + sizeof(sal_processor_static_info_t), | 281 | sizeof(sal_log_processor_info_t) |
282 | + sizeof(sal_processor_static_info_t), | ||
267 | sizeof(sal_log_mem_dev_err_info_t), | 283 | sizeof(sal_log_mem_dev_err_info_t), |
268 | sizeof(sal_log_sel_dev_err_info_t), | 284 | sizeof(sal_log_sel_dev_err_info_t), |
269 | sizeof(sal_log_pci_bus_err_info_t), | 285 | sizeof(sal_log_pci_bus_err_info_t), |
@@ -296,7 +312,8 @@ init_record_index_pools(void) | |||
296 | 312 | ||
297 | /* - 3 - */ | 313 | /* - 3 - */ |
298 | slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; | 314 | slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; |
299 | slidx_pool.buffer = (slidx_list_t *) kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL); | 315 | slidx_pool.buffer = (slidx_list_t *) |
316 | kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL); | ||
300 | 317 | ||
301 | return slidx_pool.buffer ? 0 : -ENOMEM; | 318 | return slidx_pool.buffer ? 0 : -ENOMEM; |
302 | } | 319 | } |
@@ -310,24 +327,27 @@ init_record_index_pools(void) | |||
310 | * is_mca_global - Check whether this MCA is global or not | 327 | * is_mca_global - Check whether this MCA is global or not |
311 | * @peidx: pointer of index of processor error section | 328 | * @peidx: pointer of index of processor error section |
312 | * @pbci: pointer to pal_bus_check_info_t | 329 | * @pbci: pointer to pal_bus_check_info_t |
330 | * @sos: pointer to hand off struct between SAL and OS | ||
313 | * | 331 | * |
314 | * Return value: | 332 | * Return value: |
315 | * MCA_IS_LOCAL / MCA_IS_GLOBAL | 333 | * MCA_IS_LOCAL / MCA_IS_GLOBAL |
316 | */ | 334 | */ |
317 | 335 | ||
318 | static mca_type_t | 336 | static mca_type_t |
319 | is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 337 | is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
338 | struct ia64_sal_os_state *sos) | ||
320 | { | 339 | { |
321 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 340 | pal_processor_state_info_t *psp = |
341 | (pal_processor_state_info_t*)peidx_psp(peidx); | ||
322 | 342 | ||
323 | /* | 343 | /* |
324 | * PAL can request a rendezvous, if the MCA has a global scope. | 344 | * PAL can request a rendezvous, if the MCA has a global scope. |
325 | * If "rz_always" flag is set, SAL requests MCA rendezvous | 345 | * If "rz_always" flag is set, SAL requests MCA rendezvous |
326 | * in spite of global MCA. | 346 | * in spite of global MCA. |
327 | * Therefore it is local MCA when rendezvous has not been requested. | 347 | * Therefore it is local MCA when rendezvous has not been requested. |
328 | * Failed to rendezvous, the system must be down. | 348 | * Failed to rendezvous, the system must be down. |
329 | */ | 349 | */ |
330 | switch (sal_to_os_handoff_state->imsto_rendez_state) { | 350 | switch (sos->rv_rc) { |
331 | case -1: /* SAL rendezvous unsuccessful */ | 351 | case -1: /* SAL rendezvous unsuccessful */ |
332 | return MCA_IS_GLOBAL; | 352 | return MCA_IS_GLOBAL; |
333 | case 0: /* SAL rendezvous not required */ | 353 | case 0: /* SAL rendezvous not required */ |
@@ -382,13 +402,16 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | |||
382 | * @slidx: pointer of index of SAL error record | 402 | * @slidx: pointer of index of SAL error record |
383 | * @peidx: pointer of index of processor error section | 403 | * @peidx: pointer of index of processor error section |
384 | * @pbci: pointer of pal_bus_check_info | 404 | * @pbci: pointer of pal_bus_check_info |
405 | * @sos: pointer to hand off struct between SAL and OS | ||
385 | * | 406 | * |
386 | * Return value: | 407 | * Return value: |
387 | * 1 on Success / 0 on Failure | 408 | * 1 on Success / 0 on Failure |
388 | */ | 409 | */ |
389 | 410 | ||
390 | static int | 411 | static int |
391 | recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 412 | recover_from_read_error(slidx_table_t *slidx, |
413 | peidx_table_t *peidx, pal_bus_check_info_t *pbci, | ||
414 | struct ia64_sal_os_state *sos) | ||
392 | { | 415 | { |
393 | sal_log_mod_error_info_t *smei; | 416 | sal_log_mod_error_info_t *smei; |
394 | pal_min_state_area_t *pmsa; | 417 | pal_min_state_area_t *pmsa; |
@@ -426,7 +449,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec | |||
426 | * setup for resume to bottom half of MCA, | 449 | * setup for resume to bottom half of MCA, |
427 | * "mca_handler_bhhook" | 450 | * "mca_handler_bhhook" |
428 | */ | 451 | */ |
429 | pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61)); | 452 | pmsa = sos->pal_min_state; |
430 | /* pass to bhhook as 1st argument (gr8) */ | 453 | /* pass to bhhook as 1st argument (gr8) */ |
431 | pmsa->pmsa_gr[8-1] = smei->target_identifier; | 454 | pmsa->pmsa_gr[8-1] = smei->target_identifier; |
432 | /* set interrupted return address (but no use) */ | 455 | /* set interrupted return address (but no use) */ |
@@ -453,23 +476,28 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec | |||
453 | * @slidx: pointer of index of SAL error record | 476 | * @slidx: pointer of index of SAL error record |
454 | * @peidx: pointer of index of processor error section | 477 | * @peidx: pointer of index of processor error section |
455 | * @pbci: pointer of pal_bus_check_info | 478 | * @pbci: pointer of pal_bus_check_info |
479 | * @sos: pointer to hand off struct between SAL and OS | ||
456 | * | 480 | * |
457 | * Return value: | 481 | * Return value: |
458 | * 1 on Success / 0 on Failure | 482 | * 1 on Success / 0 on Failure |
459 | */ | 483 | */ |
460 | 484 | ||
461 | static int | 485 | static int |
462 | recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 486 | recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, |
487 | pal_bus_check_info_t *pbci, | ||
488 | struct ia64_sal_os_state *sos) | ||
463 | { | 489 | { |
464 | int status = 0; | 490 | int status = 0; |
465 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 491 | pal_processor_state_info_t *psp = |
492 | (pal_processor_state_info_t*)peidx_psp(peidx); | ||
466 | 493 | ||
467 | if (psp->bc && pbci->eb && pbci->bsi == 0) { | 494 | if (psp->bc && pbci->eb && pbci->bsi == 0) { |
468 | switch(pbci->type) { | 495 | switch(pbci->type) { |
469 | case 1: /* partial read */ | 496 | case 1: /* partial read */ |
470 | case 3: /* full line(cpu) read */ | 497 | case 3: /* full line(cpu) read */ |
471 | case 9: /* I/O space read */ | 498 | case 9: /* I/O space read */ |
472 | status = recover_from_read_error(slidx, peidx, pbci); | 499 | status = recover_from_read_error(slidx, peidx, pbci, |
500 | sos); | ||
473 | break; | 501 | break; |
474 | case 0: /* unknown */ | 502 | case 0: /* unknown */ |
475 | case 2: /* partial write */ | 503 | case 2: /* partial write */ |
@@ -480,7 +508,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_ | |||
480 | case 8: /* write coalescing transactions */ | 508 | case 8: /* write coalescing transactions */ |
481 | case 10: /* I/O space write */ | 509 | case 10: /* I/O space write */ |
482 | case 11: /* inter-processor interrupt message(IPI) */ | 510 | case 11: /* inter-processor interrupt message(IPI) */ |
483 | case 12: /* interrupt acknowledge or external task priority cycle */ | 511 | case 12: /* interrupt acknowledge or |
512 | external task priority cycle */ | ||
484 | default: | 513 | default: |
485 | break; | 514 | break; |
486 | } | 515 | } |
@@ -495,6 +524,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_ | |||
495 | * @slidx: pointer of index of SAL error record | 524 | * @slidx: pointer of index of SAL error record |
496 | * @peidx: pointer of index of processor error section | 525 | * @peidx: pointer of index of processor error section |
497 | * @pbci: pointer of pal_bus_check_info | 526 | * @pbci: pointer of pal_bus_check_info |
527 | * @sos: pointer to hand off struct between SAL and OS | ||
498 | * | 528 | * |
499 | * Return value: | 529 | * Return value: |
500 | * 1 on Success / 0 on Failure | 530 | * 1 on Success / 0 on Failure |
@@ -508,14 +538,17 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_ | |||
508 | */ | 538 | */ |
509 | 539 | ||
510 | static int | 540 | static int |
511 | recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 541 | recover_from_processor_error(int platform, slidx_table_t *slidx, |
542 | peidx_table_t *peidx, pal_bus_check_info_t *pbci, | ||
543 | struct ia64_sal_os_state *sos) | ||
512 | { | 544 | { |
513 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 545 | pal_processor_state_info_t *psp = |
546 | (pal_processor_state_info_t*)peidx_psp(peidx); | ||
514 | 547 | ||
515 | /* | 548 | /* |
516 | * We cannot recover errors with other than bus_check. | 549 | * We cannot recover errors with other than bus_check. |
517 | */ | 550 | */ |
518 | if (psp->cc || psp->rc || psp->uc) | 551 | if (psp->cc || psp->rc || psp->uc) |
519 | return 0; | 552 | return 0; |
520 | 553 | ||
521 | /* | 554 | /* |
@@ -544,10 +577,10 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t * | |||
544 | * (e.g. a load from poisoned memory) | 577 | * (e.g. a load from poisoned memory) |
545 | * This means "there are some platform errors". | 578 | * This means "there are some platform errors". |
546 | */ | 579 | */ |
547 | if (platform) | 580 | if (platform) |
548 | return recover_from_platform_error(slidx, peidx, pbci); | 581 | return recover_from_platform_error(slidx, peidx, pbci, sos); |
549 | /* | 582 | /* |
550 | * On account of strange SAL error record, we cannot recover. | 583 | * On account of strange SAL error record, we cannot recover. |
551 | */ | 584 | */ |
552 | return 0; | 585 | return 0; |
553 | } | 586 | } |
@@ -555,15 +588,14 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t * | |||
555 | /** | 588 | /** |
556 | * mca_try_to_recover - Try to recover from MCA | 589 | * mca_try_to_recover - Try to recover from MCA |
557 | * @rec: pointer to a SAL error record | 590 | * @rec: pointer to a SAL error record |
591 | * @sos: pointer to hand off struct between SAL and OS | ||
558 | * | 592 | * |
559 | * Return value: | 593 | * Return value: |
560 | * 1 on Success / 0 on Failure | 594 | * 1 on Success / 0 on Failure |
561 | */ | 595 | */ |
562 | 596 | ||
563 | static int | 597 | static int |
564 | mca_try_to_recover(void *rec, | 598 | mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos) |
565 | ia64_mca_sal_to_os_state_t *sal_to_os_state, | ||
566 | ia64_mca_os_to_sal_state_t *os_to_sal_state) | ||
567 | { | 599 | { |
568 | int platform_err; | 600 | int platform_err; |
569 | int n_proc_err; | 601 | int n_proc_err; |
@@ -571,10 +603,6 @@ mca_try_to_recover(void *rec, | |||
571 | peidx_table_t peidx; | 603 | peidx_table_t peidx; |
572 | pal_bus_check_info_t pbci; | 604 | pal_bus_check_info_t pbci; |
573 | 605 | ||
574 | /* handoff state from/to mca.c */ | ||
575 | sal_to_os_handoff_state = sal_to_os_state; | ||
576 | os_to_sal_handoff_state = os_to_sal_state; | ||
577 | |||
578 | /* Make index of SAL error record */ | 606 | /* Make index of SAL error record */ |
579 | platform_err = mca_make_slidx(rec, &slidx); | 607 | platform_err = mca_make_slidx(rec, &slidx); |
580 | 608 | ||
@@ -591,17 +619,19 @@ mca_try_to_recover(void *rec, | |||
591 | } | 619 | } |
592 | 620 | ||
593 | /* Make index of processor error section */ | 621 | /* Make index of processor error section */ |
594 | mca_make_peidx((sal_log_processor_info_t*)slidx_first_entry(&slidx.proc_err)->hdr, &peidx); | 622 | mca_make_peidx((sal_log_processor_info_t*) |
623 | slidx_first_entry(&slidx.proc_err)->hdr, &peidx); | ||
595 | 624 | ||
596 | /* Extract Processor BUS_CHECK[0] */ | 625 | /* Extract Processor BUS_CHECK[0] */ |
597 | *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); | 626 | *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); |
598 | 627 | ||
599 | /* Check whether MCA is global or not */ | 628 | /* Check whether MCA is global or not */ |
600 | if (is_mca_global(&peidx, &pbci)) | 629 | if (is_mca_global(&peidx, &pbci, sos)) |
601 | return 0; | 630 | return 0; |
602 | 631 | ||
603 | /* Try to recover a processor error */ | 632 | /* Try to recover a processor error */ |
604 | return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci); | 633 | return recover_from_processor_error(platform_err, &slidx, &peidx, |
634 | &pbci, sos); | ||
605 | } | 635 | } |
606 | 636 | ||
607 | /* | 637 | /* |
@@ -614,7 +644,7 @@ int __init mca_external_handler_init(void) | |||
614 | return -ENOMEM; | 644 | return -ENOMEM; |
615 | 645 | ||
616 | /* register external mca handlers */ | 646 | /* register external mca handlers */ |
617 | if (ia64_reg_MCA_extension(mca_try_to_recover)){ | 647 | if (ia64_reg_MCA_extension(mca_try_to_recover)) { |
618 | printk(KERN_ERR "ia64_reg_MCA_extension failed.\n"); | 648 | printk(KERN_ERR "ia64_reg_MCA_extension failed.\n"); |
619 | kfree(slidx_pool.buffer); | 649 | kfree(slidx_pool.buffer); |
620 | return -EFAULT; | 650 | return -EFAULT; |
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h index 0227b761f2c4..e2f6fa1e0ef6 100644 --- a/arch/ia64/kernel/mca_drv.h +++ b/arch/ia64/kernel/mca_drv.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) | 6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) |
7 | */ | 7 | */ |
8 | /* | 8 | /* |
9 | * Processor error section: | 9 | * Processor error section: |
10 | * | 10 | * |
11 | * +-sal_log_processor_info_t *info-------------+ | 11 | * +-sal_log_processor_info_t *info-------------+ |
12 | * | sal_log_section_hdr_t header; | | 12 | * | sal_log_section_hdr_t header; | |
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S index 2d7e0217638d..3f298ee4d00c 100644 --- a/arch/ia64/kernel/mca_drv_asm.S +++ b/arch/ia64/kernel/mca_drv_asm.S | |||
@@ -13,45 +13,45 @@ | |||
13 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
14 | 14 | ||
15 | GLOBAL_ENTRY(mca_handler_bhhook) | 15 | GLOBAL_ENTRY(mca_handler_bhhook) |
16 | invala // clear RSE ? | 16 | invala // clear RSE ? |
17 | ;; // | 17 | ;; |
18 | cover // | 18 | cover |
19 | ;; // | 19 | ;; |
20 | clrrrb // | 20 | clrrrb |
21 | ;; | 21 | ;; |
22 | alloc r16=ar.pfs,0,2,1,0 // make a new frame | 22 | alloc r16=ar.pfs,0,2,1,0 // make a new frame |
23 | ;; | 23 | ;; |
24 | mov ar.rsc=0 | 24 | mov ar.rsc=0 |
25 | ;; | 25 | ;; |
26 | mov r13=IA64_KR(CURRENT) // current task pointer | 26 | mov r13=IA64_KR(CURRENT) // current task pointer |
27 | ;; | 27 | ;; |
28 | mov r2=r13 | 28 | mov r2=r13 |
29 | ;; | 29 | ;; |
30 | addl r22=IA64_RBS_OFFSET,r2 | 30 | addl r22=IA64_RBS_OFFSET,r2 |
31 | ;; | 31 | ;; |
32 | mov ar.bspstore=r22 | 32 | mov ar.bspstore=r22 |
33 | ;; | 33 | ;; |
34 | addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 | 34 | addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 |
35 | ;; | 35 | ;; |
36 | adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 | 36 | adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 |
37 | ;; | 37 | ;; |
38 | st1 [r2]=r0 // clear current->thread.on_ustack flag | 38 | st1 [r2]=r0 // clear current->thread.on_ustack flag |
39 | mov loc0=r16 | 39 | mov loc0=r16 |
40 | movl loc1=mca_handler_bh // recovery C function | 40 | movl loc1=mca_handler_bh // recovery C function |
41 | ;; | 41 | ;; |
42 | mov out0=r8 // poisoned address | 42 | mov out0=r8 // poisoned address |
43 | mov b6=loc1 | 43 | mov b6=loc1 |
44 | ;; | 44 | ;; |
45 | mov loc1=rp | 45 | mov loc1=rp |
46 | ;; | 46 | ;; |
47 | ssm psr.i | 47 | ssm psr.i |
48 | ;; | 48 | ;; |
49 | br.call.sptk.many rp=b6 // does not return ... | 49 | br.call.sptk.many rp=b6 // does not return ... |
50 | ;; | 50 | ;; |
51 | mov ar.pfs=loc0 | 51 | mov ar.pfs=loc0 |
52 | mov rp=loc1 | 52 | mov rp=loc1 |
53 | ;; | 53 | ;; |
54 | mov r8=r0 | 54 | mov r8=r0 |
55 | br.ret.sptk.many rp | 55 | br.ret.sptk.many rp |
56 | ;; | 56 | ;; |
57 | END(mca_handler_bhhook) | 57 | END(mca_handler_bhhook) |
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index f6d8a010d99b..85ed54179afa 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h | |||
@@ -5,73 +5,6 @@ | |||
5 | #include "entry.h" | 5 | #include "entry.h" |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * For ivt.s we want to access the stack virtually so we don't have to disable translation | ||
9 | * on interrupts. | ||
10 | * | ||
11 | * On entry: | ||
12 | * r1: pointer to current task (ar.k6) | ||
13 | */ | ||
14 | #define MINSTATE_START_SAVE_MIN_VIRT \ | ||
15 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ | ||
16 | ;; \ | ||
17 | (pUStk) mov.m r24=ar.rnat; \ | ||
18 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ | ||
19 | (pKStk) mov r1=sp; /* get sp */ \ | ||
20 | ;; \ | ||
21 | (pUStk) lfetch.fault.excl.nt1 [r22]; \ | ||
22 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
23 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
24 | ;; \ | ||
25 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
26 | (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ | ||
27 | ;; \ | ||
28 | (pUStk) mov r18=ar.bsp; \ | ||
29 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ | ||
30 | |||
31 | #define MINSTATE_END_SAVE_MIN_VIRT \ | ||
32 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ | ||
33 | ;; | ||
34 | |||
35 | /* | ||
36 | * For mca_asm.S we want to access the stack physically since the state is saved before we | ||
37 | * go virtual and don't want to destroy the iip or ipsr. | ||
38 | */ | ||
39 | #define MINSTATE_START_SAVE_MIN_PHYS \ | ||
40 | (pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \ | ||
41 | (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \ | ||
42 | (pKStk) ld8 r3 = [r3];; \ | ||
43 | (pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \ | ||
44 | (pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \ | ||
45 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ | ||
46 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \ | ||
47 | ;; \ | ||
48 | (pUStk) mov r24=ar.rnat; \ | ||
49 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
50 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
51 | (pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \ | ||
52 | ;; \ | ||
53 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
54 | ;; \ | ||
55 | (pUStk) mov r18=ar.bsp; \ | ||
56 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ | ||
57 | |||
58 | #define MINSTATE_END_SAVE_MIN_PHYS \ | ||
59 | dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \ | ||
60 | ;; | ||
61 | |||
62 | #ifdef MINSTATE_VIRT | ||
63 | # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT) | ||
64 | # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT | ||
65 | # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT | ||
66 | #endif | ||
67 | |||
68 | #ifdef MINSTATE_PHYS | ||
69 | # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg | ||
70 | # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS | ||
71 | # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS | ||
72 | #endif | ||
73 | |||
74 | /* | ||
75 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | 8 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves |
76 | * the minimum state necessary that allows us to turn psr.ic back | 9 | * the minimum state necessary that allows us to turn psr.ic back |
77 | * on. | 10 | * on. |
@@ -97,7 +30,7 @@ | |||
97 | * we can pass interruption state as arguments to a handler. | 30 | * we can pass interruption state as arguments to a handler. |
98 | */ | 31 | */ |
99 | #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ | 32 | #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ |
100 | MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ | 33 | mov r16=IA64_KR(CURRENT); /* M */ \ |
101 | mov r27=ar.rsc; /* M */ \ | 34 | mov r27=ar.rsc; /* M */ \ |
102 | mov r20=r1; /* A */ \ | 35 | mov r20=r1; /* A */ \ |
103 | mov r25=ar.unat; /* M */ \ | 36 | mov r25=ar.unat; /* M */ \ |
@@ -118,7 +51,21 @@ | |||
118 | SAVE_IFS; \ | 51 | SAVE_IFS; \ |
119 | cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ | 52 | cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ |
120 | ;; \ | 53 | ;; \ |
121 | MINSTATE_START_SAVE_MIN \ | 54 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ |
55 | ;; \ | ||
56 | (pUStk) mov.m r24=ar.rnat; \ | ||
57 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ | ||
58 | (pKStk) mov r1=sp; /* get sp */ \ | ||
59 | ;; \ | ||
60 | (pUStk) lfetch.fault.excl.nt1 [r22]; \ | ||
61 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
62 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
63 | ;; \ | ||
64 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
65 | (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ | ||
66 | ;; \ | ||
67 | (pUStk) mov r18=ar.bsp; \ | ||
68 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ | ||
122 | adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ | 69 | adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ |
123 | adds r16=PT(CR_IPSR),r1; \ | 70 | adds r16=PT(CR_IPSR),r1; \ |
124 | ;; \ | 71 | ;; \ |
@@ -181,7 +128,8 @@ | |||
181 | EXTRA; \ | 128 | EXTRA; \ |
182 | movl r1=__gp; /* establish kernel global pointer */ \ | 129 | movl r1=__gp; /* establish kernel global pointer */ \ |
183 | ;; \ | 130 | ;; \ |
184 | MINSTATE_END_SAVE_MIN | 131 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ |
132 | ;; | ||
185 | 133 | ||
186 | /* | 134 | /* |
187 | * SAVE_REST saves the remainder of pt_regs (with psr.ic on). | 135 | * SAVE_REST saves the remainder of pt_regs (with psr.ic on). |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 25e7c8344564..89faa603c6be 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c | |||
@@ -307,11 +307,9 @@ vm_info(char *page) | |||
307 | 307 | ||
308 | if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { | 308 | if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { |
309 | printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); | 309 | printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); |
310 | return 0; | 310 | } else { |
311 | } | ||
312 | 311 | ||
313 | 312 | p += sprintf(p, | |
314 | p += sprintf(p, | ||
315 | "Physical Address Space : %d bits\n" | 313 | "Physical Address Space : %d bits\n" |
316 | "Virtual Address Space : %d bits\n" | 314 | "Virtual Address Space : %d bits\n" |
317 | "Protection Key Registers(PKR) : %d\n" | 315 | "Protection Key Registers(PKR) : %d\n" |
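The vm_info() rework in this and the following hunk stops returning 0 as soon as a single PAL query fails; each block of output is now printed only when its own query succeeds, so one failure no longer discards the information already gathered. A small sketch of the control-flow change, with hypothetical query functions in place of the PAL calls:

#include <stdio.h>

/* Hypothetical stand-ins for the PAL queries vm_info() makes. */
static long query_summary(int *out)    { *out = 50; return 0; }
static long query_page_sizes(int *out) { *out = 0;  return -1; }	/* fails */

int vm_info_sketch(void)
{
	int val, n = 0;

	if (query_summary(&val) != 0)
		fprintf(stderr, "query_summary failed\n");
	else
		n += printf("Virtual Address Space : %d bits\n", val);

	if (query_page_sizes(&val) != 0)	/* a failure no longer aborts */
		fprintf(stderr, "query_page_sizes failed\n");
	else
		n += printf("TLB insertable page sizes : %d\n", val);

	return n;
}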
@@ -319,92 +317,99 @@ vm_info(char *page) | |||
319 | "Hash Tag ID : 0x%x\n" | 317 | "Hash Tag ID : 0x%x\n" |
320 | "Size of RR.rid : %d\n", | 318 | "Size of RR.rid : %d\n", |
321 | vm_info_1.pal_vm_info_1_s.phys_add_size, | 319 | vm_info_1.pal_vm_info_1_s.phys_add_size, |
322 | vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1, | 320 | vm_info_2.pal_vm_info_2_s.impl_va_msb+1, |
323 | vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id, | 321 | vm_info_1.pal_vm_info_1_s.max_pkr+1, |
322 | vm_info_1.pal_vm_info_1_s.key_size, | ||
323 | vm_info_1.pal_vm_info_1_s.hash_tag_id, | ||
324 | vm_info_2.pal_vm_info_2_s.rid_size); | 324 | vm_info_2.pal_vm_info_2_s.rid_size); |
325 | } | ||
325 | 326 | ||
326 | if (ia64_pal_mem_attrib(&attrib) != 0) | 327 | if (ia64_pal_mem_attrib(&attrib) == 0) { |
327 | return 0; | 328 | p += sprintf(p, "Supported memory attributes : "); |
328 | 329 | sep = ""; | |
329 | p += sprintf(p, "Supported memory attributes : "); | 330 | for (i = 0; i < 8; i++) { |
330 | sep = ""; | 331 | if (attrib & (1 << i)) { |
331 | for (i = 0; i < 8; i++) { | 332 | p += sprintf(p, "%s%s", sep, mem_attrib[i]); |
332 | if (attrib & (1 << i)) { | 333 | sep = ", "; |
333 | p += sprintf(p, "%s%s", sep, mem_attrib[i]); | 334 | } |
334 | sep = ", "; | ||
335 | } | 335 | } |
336 | p += sprintf(p, "\n"); | ||
336 | } | 337 | } |
337 | p += sprintf(p, "\n"); | ||
338 | 338 | ||
339 | if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { | 339 | if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { |
340 | printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); | 340 | printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); |
341 | return 0; | 341 | } else { |
342 | } | ||
343 | |||
344 | p += sprintf(p, | ||
345 | "\nTLB walker : %simplemented\n" | ||
346 | "Number of DTR : %d\n" | ||
347 | "Number of ITR : %d\n" | ||
348 | "TLB insertable page sizes : ", | ||
349 | vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", | ||
350 | vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, | ||
351 | vm_info_1.pal_vm_info_1_s.max_itr_entry+1); | ||
352 | 342 | ||
343 | p += sprintf(p, | ||
344 | "\nTLB walker : %simplemented\n" | ||
345 | "Number of DTR : %d\n" | ||
346 | "Number of ITR : %d\n" | ||
347 | "TLB insertable page sizes : ", | ||
348 | vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", | ||
349 | vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, | ||
350 | vm_info_1.pal_vm_info_1_s.max_itr_entry+1); | ||
353 | 351 | ||
354 | p = bitvector_process(p, tr_pages); | ||
355 | 352 | ||
356 | p += sprintf(p, "\nTLB purgeable page sizes : "); | 353 | p = bitvector_process(p, tr_pages); |
357 | 354 | ||
358 | p = bitvector_process(p, vw_pages); | 355 | p += sprintf(p, "\nTLB purgeable page sizes : "); |
359 | 356 | ||
357 | p = bitvector_process(p, vw_pages); | ||
358 | } | ||
360 | if ((status=ia64_get_ptce(&ptce)) != 0) { | 359 | if ((status=ia64_get_ptce(&ptce)) != 0) { |
361 | printk(KERN_ERR "ia64_get_ptce=%ld\n", status); | 360 | printk(KERN_ERR "ia64_get_ptce=%ld\n", status); |
362 | return 0; | 361 | } else { |
363 | } | 362 | p += sprintf(p, |
364 | |||
365 | p += sprintf(p, | ||
366 | "\nPurge base address : 0x%016lx\n" | 363 | "\nPurge base address : 0x%016lx\n" |
367 | "Purge outer loop count : %d\n" | 364 | "Purge outer loop count : %d\n" |
368 | "Purge inner loop count : %d\n" | 365 | "Purge inner loop count : %d\n" |
369 | "Purge outer loop stride : %d\n" | 366 | "Purge outer loop stride : %d\n" |
370 | "Purge inner loop stride : %d\n", | 367 | "Purge inner loop stride : %d\n", |
371 | ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]); | 368 | ptce.base, ptce.count[0], ptce.count[1], |
369 | ptce.stride[0], ptce.stride[1]); | ||
372 | 370 | ||
373 | p += sprintf(p, | 371 | p += sprintf(p, |
374 | "TC Levels : %d\n" | 372 | "TC Levels : %d\n" |
375 | "Unique TC(s) : %d\n", | 373 | "Unique TC(s) : %d\n", |
376 | vm_info_1.pal_vm_info_1_s.num_tc_levels, | 374 | vm_info_1.pal_vm_info_1_s.num_tc_levels, |
377 | vm_info_1.pal_vm_info_1_s.max_unique_tcs); | 375 | vm_info_1.pal_vm_info_1_s.max_unique_tcs); |
378 | 376 | ||
379 | for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { | 377 | for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { |
380 | for (j=2; j>0 ; j--) { | 378 | for (j=2; j>0 ; j--) { |
381 | tc_pages = 0; /* just in case */ | 379 | tc_pages = 0; /* just in case */ |
382 | 380 | ||
383 | 381 | ||
384 | /* even without unification, some levels may not be present */ | 382 | /* even without unification, some levels may not be present */ |
385 | if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { | 383 | if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { |
386 | continue; | 384 | continue; |
387 | } | 385 | } |
388 | 386 | ||
389 | p += sprintf(p, | 387 | p += sprintf(p, |
390 | "\n%s Translation Cache Level %d:\n" | 388 | "\n%s Translation Cache Level %d:\n" |
391 | "\tHash sets : %d\n" | 389 | "\tHash sets : %d\n" |
392 | "\tAssociativity : %d\n" | 390 | "\tAssociativity : %d\n" |
393 | "\tNumber of entries : %d\n" | 391 | "\tNumber of entries : %d\n" |
394 | "\tFlags : ", | 392 | "\tFlags : ", |
395 | cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets, | 393 | cache_types[j+tc_info.tc_unified], i+1, |
396 | tc_info.tc_associativity, tc_info.tc_num_entries); | 394 | tc_info.tc_num_sets, |
395 | tc_info.tc_associativity, | ||
396 | tc_info.tc_num_entries); | ||
397 | 397 | ||
398 | if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized "); | 398 | if (tc_info.tc_pf) |
399 | if (tc_info.tc_unified) p += sprintf(p, "Unified "); | 399 | p += sprintf(p, "PreferredPageSizeOptimized "); |
400 | if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction"); | 400 | if (tc_info.tc_unified) |
401 | p += sprintf(p, "Unified "); | ||
402 | if (tc_info.tc_reduce_tr) | ||
403 | p += sprintf(p, "TCReduction"); | ||
401 | 404 | ||
402 | p += sprintf(p, "\n\tSupported page sizes: "); | 405 | p += sprintf(p, "\n\tSupported page sizes: "); |
403 | 406 | ||
404 | p = bitvector_process(p, tc_pages); | 407 | p = bitvector_process(p, tc_pages); |
405 | 408 | ||
406 | /* when unified, data (j=2) is enough */ | 409 | /* when unified, data (j=2) is enough */ |
407 | if (tc_info.tc_unified) break; | 410 | if (tc_info.tc_unified) |
411 | break; | ||
412 | } | ||
408 | } | 413 | } |
409 | } | 414 | } |
410 | p += sprintf(p, "\n"); | 415 | p += sprintf(p, "\n"); |
@@ -440,14 +445,14 @@ register_info(char *page) | |||
440 | p += sprintf(p, "\n"); | 445 | p += sprintf(p, "\n"); |
441 | } | 446 | } |
442 | 447 | ||
443 | if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0; | 448 | if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) { |
444 | 449 | ||
445 | p += sprintf(p, | 450 | p += sprintf(p, |
446 | "RSE stacked physical registers : %ld\n" | 451 | "RSE stacked physical registers : %ld\n" |
447 | "RSE load/store hints : %ld (%s)\n", | 452 | "RSE load/store hints : %ld (%s)\n", |
448 | phys_stacked, hints.ph_data, | 453 | phys_stacked, hints.ph_data, |
449 | hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); | 454 | hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); |
450 | 455 | } | |
451 | if (ia64_pal_debug_info(&iregs, &dregs)) | 456 | if (ia64_pal_debug_info(&iregs, &dregs)) |
452 | return 0; | 457 | return 0; |
453 | 458 | ||
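The vm_info() and register_info() rework above follows a single pattern: a failed PAL call no longer aborts the whole /proc output with an early "return 0", it only skips its own block so the later sections still print. A condensed sketch of that shape, using a placeholder PAL call and field that are not from this file:

        if ((status = ia64_pal_example_call(&info)) != 0) {
                printk(KERN_ERR "ia64_pal_example_call=%ld\n", status);
        } else {
                p += sprintf(p, "Example field                 : %d\n",
                             info.example_field);
        }
        /* subsequent blocks run regardless of whether the call above failed */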
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f1201ac8a116..d71731ee5b61 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/pagemap.h> | 38 | #include <linux/pagemap.h> |
39 | #include <linux/mount.h> | 39 | #include <linux/mount.h> |
40 | #include <linux/bitops.h> | 40 | #include <linux/bitops.h> |
41 | #include <linux/rcupdate.h> | ||
41 | 42 | ||
42 | #include <asm/errno.h> | 43 | #include <asm/errno.h> |
43 | #include <asm/intrinsics.h> | 44 | #include <asm/intrinsics.h> |
@@ -496,7 +497,7 @@ typedef struct { | |||
496 | static pfm_stats_t pfm_stats[NR_CPUS]; | 497 | static pfm_stats_t pfm_stats[NR_CPUS]; |
497 | static pfm_session_t pfm_sessions; /* global sessions information */ | 498 | static pfm_session_t pfm_sessions; /* global sessions information */ |
498 | 499 | ||
499 | static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED; | 500 | static DEFINE_SPINLOCK(pfm_alt_install_check); |
500 | static pfm_intr_handler_desc_t *pfm_alt_intr_handler; | 501 | static pfm_intr_handler_desc_t *pfm_alt_intr_handler; |
501 | 502 | ||
502 | static struct proc_dir_entry *perfmon_dir; | 503 | static struct proc_dir_entry *perfmon_dir; |
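The perfmon.c hunk above replaces the deprecated SPIN_LOCK_UNLOCKED static initializer with the DEFINE_SPINLOCK() macro. A minimal before/after sketch with a hypothetical lock name:

        /* old style: open-coded static initializer */
        static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

        /* new style, as used above: declares and initializes in one macro */
        static DEFINE_SPINLOCK(example_lock);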
@@ -573,7 +574,7 @@ pfm_protect_ctx_ctxsw(pfm_context_t *x) | |||
573 | return 0UL; | 574 | return 0UL; |
574 | } | 575 | } |
575 | 576 | ||
576 | static inline unsigned long | 577 | static inline void |
577 | pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f) | 578 | pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f) |
578 | { | 579 | { |
579 | spin_unlock(&(x)->ctx_lock); | 580 | spin_unlock(&(x)->ctx_lock); |
@@ -2217,15 +2218,18 @@ static void | |||
2217 | pfm_free_fd(int fd, struct file *file) | 2218 | pfm_free_fd(int fd, struct file *file) |
2218 | { | 2219 | { |
2219 | struct files_struct *files = current->files; | 2220 | struct files_struct *files = current->files; |
2221 | struct fdtable *fdt; | ||
2220 | 2222 | ||
2221 | /* | 2223 | /* |
2222 | * there is no fd_uninstall(), so we do it here | 2224 | * there is no fd_uninstall(), so we do it here |
2223 | */ | 2225 | */ |
2224 | spin_lock(&files->file_lock); | 2226 | spin_lock(&files->file_lock); |
2225 | files->fd[fd] = NULL; | 2227 | fdt = files_fdtable(files); |
2228 | rcu_assign_pointer(fdt->fd[fd], NULL); | ||
2226 | spin_unlock(&files->file_lock); | 2229 | spin_unlock(&files->file_lock); |
2227 | 2230 | ||
2228 | if (file) put_filp(file); | 2231 | if (file) |
2232 | put_filp(file); | ||
2229 | put_unused_fd(fd); | 2233 | put_unused_fd(fd); |
2230 | } | 2234 | } |
2231 | 2235 | ||
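The pfm_free_fd() change stops writing files->fd[] directly and instead goes through the RCU-managed fdtable. A minimal sketch of that uninstall pattern, assuming the files_fdtable()/rcu_assign_pointer() interfaces used in the hunk; the helper name is hypothetical:

        static void example_fd_uninstall(struct files_struct *files, int fd)
        {
                struct fdtable *fdt;

                spin_lock(&files->file_lock);
                fdt = files_fdtable(files);             /* fetch the RCU-protected descriptor table */
                rcu_assign_pointer(fdt->fd[fd], NULL);  /* clear the slot so lock-free readers see NULL */
                spin_unlock(&files->file_lock);
        }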
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 6f0cc7a6634e..ca68e6e44a72 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
@@ -22,6 +22,11 @@ | |||
22 | * | 22 | * |
23 | * Dec 5 2004 kaos@sgi.com | 23 | * Dec 5 2004 kaos@sgi.com |
24 | * Standardize which records are cleared automatically. | 24 | * Standardize which records are cleared automatically. |
25 | * | ||
26 | * Aug 18 2005 kaos@sgi.com | ||
27 | * mca.c may not pass a buffer; a NULL buffer just indicates that a new | ||
28 | * record is available in SAL. | ||
29 | * Replace some NR_CPUS by cpus_online, for hotplug cpu. | ||
25 | */ | 30 | */ |
26 | 31 | ||
27 | #include <linux/types.h> | 32 | #include <linux/types.h> |
@@ -193,7 +198,7 @@ shift1_data_saved (struct salinfo_data *data, int shift) | |||
193 | * The buffer passed from mca.c points to the output from ia64_log_get. This is | 198 | * The buffer passed from mca.c points to the output from ia64_log_get. This is |
194 | * a persistent buffer but its contents can change between the interrupt and | 199 | * a persistent buffer but its contents can change between the interrupt and |
195 | * when user space processes the record. Save the record id to identify | 200 | * when user space processes the record. Save the record id to identify |
196 | * changes. | 201 | * changes. If the buffer is NULL then just update the bitmap. |
197 | */ | 202 | */ |
198 | void | 203 | void |
199 | salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) | 204 | salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) |
@@ -206,27 +211,29 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) | |||
206 | 211 | ||
207 | BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); | 212 | BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); |
208 | 213 | ||
209 | if (irqsafe) | 214 | if (buffer) { |
210 | spin_lock_irqsave(&data_saved_lock, flags); | 215 | if (irqsafe) |
211 | for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { | 216 | spin_lock_irqsave(&data_saved_lock, flags); |
212 | if (!data_saved->buffer) | 217 | for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { |
213 | break; | 218 | if (!data_saved->buffer) |
214 | } | 219 | break; |
215 | if (i == saved_size) { | 220 | } |
216 | if (!data->saved_num) { | 221 | if (i == saved_size) { |
217 | shift1_data_saved(data, 0); | 222 | if (!data->saved_num) { |
218 | data_saved = data->data_saved + saved_size - 1; | 223 | shift1_data_saved(data, 0); |
219 | } else | 224 | data_saved = data->data_saved + saved_size - 1; |
220 | data_saved = NULL; | 225 | } else |
221 | } | 226 | data_saved = NULL; |
222 | if (data_saved) { | 227 | } |
223 | data_saved->cpu = smp_processor_id(); | 228 | if (data_saved) { |
224 | data_saved->id = ((sal_log_record_header_t *)buffer)->id; | 229 | data_saved->cpu = smp_processor_id(); |
225 | data_saved->size = size; | 230 | data_saved->id = ((sal_log_record_header_t *)buffer)->id; |
226 | data_saved->buffer = buffer; | 231 | data_saved->size = size; |
232 | data_saved->buffer = buffer; | ||
233 | } | ||
234 | if (irqsafe) | ||
235 | spin_unlock_irqrestore(&data_saved_lock, flags); | ||
227 | } | 236 | } |
228 | if (irqsafe) | ||
229 | spin_unlock_irqrestore(&data_saved_lock, flags); | ||
230 | 237 | ||
231 | if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) { | 238 | if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) { |
232 | if (irqsafe) | 239 | if (irqsafe) |
@@ -244,7 +251,7 @@ salinfo_timeout_check(struct salinfo_data *data) | |||
244 | int i; | 251 | int i; |
245 | if (!data->open) | 252 | if (!data->open) |
246 | return; | 253 | return; |
247 | for (i = 0; i < NR_CPUS; ++i) { | 254 | for_each_online_cpu(i) { |
248 | if (test_bit(i, &data->cpu_event)) { | 255 | if (test_bit(i, &data->cpu_event)) { |
249 | /* double up() is not a problem, user space will see no | 256 | /* double up() is not a problem, user space will see no |
250 | * records for the additional "events". | 257 | * records for the additional "events". |
@@ -291,7 +298,7 @@ retry: | |||
291 | 298 | ||
292 | n = data->cpu_check; | 299 | n = data->cpu_check; |
293 | for (i = 0; i < NR_CPUS; i++) { | 300 | for (i = 0; i < NR_CPUS; i++) { |
294 | if (test_bit(n, &data->cpu_event)) { | 301 | if (test_bit(n, &data->cpu_event) && cpu_online(n)) { |
295 | cpu = n; | 302 | cpu = n; |
296 | break; | 303 | break; |
297 | } | 304 | } |
@@ -585,11 +592,10 @@ salinfo_init(void) | |||
585 | 592 | ||
586 | /* we missed any events before now */ | 593 | /* we missed any events before now */ |
587 | online = 0; | 594 | online = 0; |
588 | for (j = 0; j < NR_CPUS; j++) | 595 | for_each_online_cpu(j) { |
589 | if (cpu_online(j)) { | 596 | set_bit(j, &data->cpu_event); |
590 | set_bit(j, &data->cpu_event); | 597 | ++online; |
591 | ++online; | 598 | } |
592 | } | ||
593 | sema_init(&data->sem, online); | 599 | sema_init(&data->sem, online); |
594 | 600 | ||
595 | *sdir++ = dir; | 601 | *sdir++ = dir; |
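The salinfo hunks above either replace open-coded 0..NR_CPUS scans with hotplug-aware iteration or add an explicit cpu_online() check. A small sketch of the before/after idiom, with a hypothetical counter:

        int j, online = 0;

        /* old: walk every possible CPU slot and test each one by hand */
        for (j = 0; j < NR_CPUS; j++)
                if (cpu_online(j))
                        online++;

        /* new: visit only CPUs that are currently online */
        for_each_online_cpu(j)
                online++;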
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 3e9b797e6588..f95fd2766634 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -276,28 +276,31 @@ find_initrd (void) | |||
276 | static void __init | 276 | static void __init |
277 | io_port_init (void) | 277 | io_port_init (void) |
278 | { | 278 | { |
279 | extern unsigned long ia64_iobase; | ||
280 | unsigned long phys_iobase; | 279 | unsigned long phys_iobase; |
281 | 280 | ||
282 | /* | 281 | /* |
283 | * Set `iobase' to the appropriate address in region 6 (uncached access range). | 282 | * Set `iobase' based on the EFI memory map or, failing that, the |
283 | * value firmware left in ar.k0. | ||
284 | * | 284 | * |
285 | * The EFI memory map is the "preferred" location to get the I/O port space base, | 285 | * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute |
286 | * rather than relying on AR.KR0. This should become clearer in future SAL | 286 | * the port's virtual address, so ia32_load_state() loads it with a |
287 | * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is | 287 | * user virtual address. But in ia64 mode, glibc uses the |
288 | * found in the memory map. | 288 | * *physical* address in ar.k0 to mmap the appropriate area from |
289 | * /dev/mem, and the inX()/outX() interfaces use MMIO. In both | ||
290 | * cases, user-mode can only use the legacy 0-64K I/O port space. | ||
291 | * | ||
292 | * ar.k0 is not involved in kernel I/O port accesses, which can use | ||
293 | * any of the I/O port spaces and are done via MMIO using the | ||
294 | * virtual mmio_base from the appropriate io_space[]. | ||
289 | */ | 295 | */ |
290 | phys_iobase = efi_get_iobase(); | 296 | phys_iobase = efi_get_iobase(); |
291 | if (phys_iobase) | 297 | if (!phys_iobase) { |
292 | /* set AR.KR0 since this is all we use it for anyway */ | ||
293 | ia64_set_kr(IA64_KR_IO_BASE, phys_iobase); | ||
294 | else { | ||
295 | phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); | 298 | phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); |
296 | printk(KERN_INFO "No I/O port range found in EFI memory map, falling back " | 299 | printk(KERN_INFO "No I/O port range found in EFI memory map, " |
297 | "to AR.KR0\n"); | 300 | "falling back to AR.KR0 (0x%lx)\n", phys_iobase); |
298 | printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase); | ||
299 | } | 301 | } |
300 | ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); | 302 | ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); |
303 | ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); | ||
301 | 304 | ||
302 | /* setup legacy IO port space */ | 305 | /* setup legacy IO port space */ |
303 | io_space[0].mmio_base = ia64_iobase; | 306 | io_space[0].mmio_base = ia64_iobase; |
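Stripped of the diff markup, the base-selection logic that io_port_init() ends up with reduces to the following sketch (same interfaces as in the hunk above; the informational printk is elided):

        unsigned long phys_iobase;

        phys_iobase = efi_get_iobase();                         /* preferred: I/O port range from the EFI memory map */
        if (!phys_iobase)
                phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);     /* fallback: whatever firmware left in ar.k0 */

        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);  /* uncached kernel mapping used by the in*()/out*() helpers */
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));        /* publish the physical base for user-level mappings */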
@@ -416,7 +419,7 @@ setup_arch (char **cmdline_p) | |||
416 | if (early_console_setup(*cmdline_p) == 0) | 419 | if (early_console_setup(*cmdline_p) == 0) |
417 | mark_bsp_online(); | 420 | mark_bsp_online(); |
418 | 421 | ||
419 | #ifdef CONFIG_ACPI_BOOT | 422 | #ifdef CONFIG_ACPI |
420 | /* Initialize the ACPI boot-time table parser */ | 423 | /* Initialize the ACPI boot-time table parser */ |
421 | acpi_table_init(); | 424 | acpi_table_init(); |
422 | # ifdef CONFIG_ACPI_NUMA | 425 | # ifdef CONFIG_ACPI_NUMA |
@@ -452,7 +455,7 @@ setup_arch (char **cmdline_p) | |||
452 | 455 | ||
453 | cpu_init(); /* initialize the bootstrap CPU */ | 456 | cpu_init(); /* initialize the bootstrap CPU */ |
454 | 457 | ||
455 | #ifdef CONFIG_ACPI_BOOT | 458 | #ifdef CONFIG_ACPI |
456 | acpi_boot_init(); | 459 | acpi_boot_init(); |
457 | #endif | 460 | #endif |
458 | 461 | ||
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 92ff46ad21e2..706b7734e191 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c | |||
@@ -36,7 +36,7 @@ int arch_register_cpu(int num) | |||
36 | parent = &sysfs_nodes[cpu_to_node(num)]; | 36 | parent = &sysfs_nodes[cpu_to_node(num)]; |
37 | #endif /* CONFIG_NUMA */ | 37 | #endif /* CONFIG_NUMA */ |
38 | 38 | ||
39 | #ifdef CONFIG_ACPI_BOOT | 39 | #ifdef CONFIG_ACPI |
40 | /* | 40 | /* |
41 | * If CPEI cannot be re-targeted, and this is | 41 | * If CPEI cannot be re-targeted, and this is |
42 | * the CPEI target, then don't create the control file | 42 | * the CPEI target, then don't create the control file |
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 4440c8343fa4..f970359e7edf 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/vt_kern.h> /* For unblank_screen() */ | 15 | #include <linux/vt_kern.h> /* For unblank_screen() */ |
16 | #include <linux/module.h> /* for EXPORT_SYMBOL */ | 16 | #include <linux/module.h> /* for EXPORT_SYMBOL */ |
17 | #include <linux/hardirq.h> | 17 | #include <linux/hardirq.h> |
18 | #include <linux/kprobes.h> | ||
18 | 19 | ||
19 | #include <asm/fpswa.h> | 20 | #include <asm/fpswa.h> |
20 | #include <asm/ia32.h> | 21 | #include <asm/ia32.h> |
@@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err) | |||
122 | } | 123 | } |
123 | 124 | ||
124 | void | 125 | void |
125 | ia64_bad_break (unsigned long break_num, struct pt_regs *regs) | 126 | __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) |
126 | { | 127 | { |
127 | siginfo_t siginfo; | 128 | siginfo_t siginfo; |
128 | int sig, code; | 129 | int sig, code; |
@@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3, | |||
444 | return rv; | 445 | return rv; |
445 | } | 446 | } |
446 | 447 | ||
447 | void | 448 | void __kprobes |
448 | ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, | 449 | ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, |
449 | unsigned long iim, unsigned long itir, long arg5, long arg6, | 450 | unsigned long iim, unsigned long itir, long arg5, long arg6, |
450 | long arg7, struct pt_regs regs) | 451 | long arg7, struct pt_regs regs) |
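The __kprobes annotations above work together with the KPROBES_TEXT entry added to vmlinux.lds.S at the end of this patch: the attribute moves the function into the .kprobes.text section, and the linker script gathers that section into the kernel text so the kprobes core can recognize (and refuse to probe) those addresses. Roughly, paraphrasing the generic definitions of that era rather than anything in this patch:

        /* from <linux/kprobes.h>, approximately */
        #define __kprobes       __attribute__((__section__(".kprobes.text")))

        /* usage, as in traps.c above */
        void __kprobes ia64_bad_break(unsigned long break_num, struct pt_regs *regs);

        /* KPROBES_TEXT in the linker script then collects *(.kprobes.text)
         * between __kprobes_text_start and __kprobes_text_end, the range the
         * kprobes core checks before planting a probe. */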
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index 3288be47bc75..93d5a3b41f69 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c | |||
@@ -2020,28 +2020,6 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t, | |||
2020 | } | 2020 | } |
2021 | 2021 | ||
2022 | void | 2022 | void |
2023 | unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, | ||
2024 | struct pt_regs *pt, struct switch_stack *sw) | ||
2025 | { | ||
2026 | unsigned long sof; | ||
2027 | |||
2028 | init_frame_info(info, t, sw, pt->r12); | ||
2029 | info->cfm_loc = &pt->cr_ifs; | ||
2030 | info->unat_loc = &pt->ar_unat; | ||
2031 | info->pfs_loc = &pt->ar_pfs; | ||
2032 | sof = *info->cfm_loc & 0x7f; | ||
2033 | info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof); | ||
2034 | info->ip = pt->cr_iip + ia64_psr(pt)->ri; | ||
2035 | info->pt = (unsigned long) pt; | ||
2036 | UNW_DPRINT(3, "unwind.%s:\n" | ||
2037 | " bsp 0x%lx\n" | ||
2038 | " sof 0x%lx\n" | ||
2039 | " ip 0x%lx\n", | ||
2040 | __FUNCTION__, info->bsp, sof, info->ip); | ||
2041 | find_save_locs(info); | ||
2042 | } | ||
2043 | |||
2044 | void | ||
2045 | unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) | 2023 | unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) |
2046 | { | 2024 | { |
2047 | unsigned long sol; | 2025 | unsigned long sol; |
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index a676e79e0681..30d8564e9603 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -48,6 +48,7 @@ SECTIONS | |||
48 | *(.text) | 48 | *(.text) |
49 | SCHED_TEXT | 49 | SCHED_TEXT |
50 | LOCK_TEXT | 50 | LOCK_TEXT |
51 | KPROBES_TEXT | ||
51 | *(.gnu.linkonce.t*) | 52 | *(.gnu.linkonce.t*) |
52 | } | 53 | } |
53 | .text2 : AT(ADDR(.text2) - LOAD_OFFSET) | 54 | .text2 : AT(ADDR(.text2) - LOAD_OFFSET) |