author    Tony Luck <tony.luck@intel.com>   2005-09-08 17:27:13 -0400
committer Tony Luck <tony.luck@intel.com>   2005-09-08 17:27:13 -0400
commit    344a076110f4ecb16ea6d286b63be696604982ed (patch)
tree      def6e229efdb6ee91b631b6695bf7f9ace8e2719 /arch/ia64/kernel
parent    9b17e7e74e767d8a494a74c3c459aeecd1e08c5f (diff)
parent    1b11d78cf87a7014f96e5b7fa2e1233cc8081a00 (diff)
[IA64] Manual merge fix for 3 files

	arch/ia64/Kconfig
	arch/ia64/kernel/acpi.c
	include/asm-ia64/irq.h

Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel')
 -rw-r--r--  arch/ia64/kernel/Makefile          2
 -rw-r--r--  arch/ia64/kernel/acpi-ext.c       37
 -rw-r--r--  arch/ia64/kernel/acpi.c          326
 -rw-r--r--  arch/ia64/kernel/domain.c        396
 -rw-r--r--  arch/ia64/kernel/iosapic.c        22
 -rw-r--r--  arch/ia64/kernel/irq.c            39
 -rw-r--r--  arch/ia64/kernel/jprobes.S         1
 -rw-r--r--  arch/ia64/kernel/kprobes.c       124
 -rw-r--r--  arch/ia64/kernel/setup.c           4
 -rw-r--r--  arch/ia64/kernel/topology.c        2
 -rw-r--r--  arch/ia64/kernel/traps.c           5
 -rw-r--r--  arch/ia64/kernel/vmlinux.lds.S     1
12 files changed, 312 insertions, 647 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b242594be55b..307514f7a282 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
 obj-$(CONFIG_IOSAPIC)		+= iosapic.o
 obj-$(CONFIG_MODULES)		+= module.o
-obj-$(CONFIG_SMP)		+= smp.o smpboot.o domain.o
+obj-$(CONFIG_SMP)		+= smp.o smpboot.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_PERFMON)		+= perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)	+= cyclone.o
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 2623df5e2633..13a5b3b49bf8 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -17,20 +17,20 @@
 #include <asm/acpi-ext.h>
 
 struct acpi_vendor_descriptor {
 	u8 guid_id;
 	efi_guid_t guid;
 };
 
 struct acpi_vendor_info {
 	struct acpi_vendor_descriptor *descriptor;
 	u8 *data;
 	u32 length;
 };
 
 acpi_status
 acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 {
-	struct acpi_vendor_info *info = (struct acpi_vendor_info *) context;
+	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
 	struct acpi_resource_vendor *vendor;
 	struct acpi_vendor_descriptor *descriptor;
 	u32 length;
@@ -38,8 +38,8 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	if (resource->id != ACPI_RSTYPE_VENDOR)
 		return AE_OK;
 
-	vendor = (struct acpi_resource_vendor *) &resource->data;
-	descriptor = (struct acpi_vendor_descriptor *) vendor->reserved;
+	vendor = (struct acpi_resource_vendor *)&resource->data;
+	descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
 	if (vendor->length <= sizeof(*info->descriptor) ||
 	    descriptor->guid_id != info->descriptor->guid_id ||
 	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
@@ -50,21 +50,24 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	if (!info->data)
 		return AE_NO_MEMORY;
 
-	memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length);
+	memcpy(info->data,
+	       vendor->reserved + sizeof(struct acpi_vendor_descriptor),
+	       length);
 	info->length = length;
 	return AE_CTRL_TERMINATE;
 }
 
 acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
-			  u8 **data, u32 *length)
+acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
+			  u8 ** data, u32 * length)
 {
 	struct acpi_vendor_info info;
 
 	info.descriptor = id;
 	info.data = NULL;
 
-	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info);
+	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
+			    &info);
 	if (!info.data)
 		return AE_NOT_FOUND;
 
@@ -75,17 +78,19 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
 
 struct acpi_vendor_descriptor hp_ccsr_descriptor = {
 	.guid_id = 2,
-	.guid = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
+	.guid =
+	    EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
+		     0x37, 0x0e, 0xad)
 };
 
-acpi_status
-hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
 {
 	acpi_status status;
 	u8 *data;
 	u32 length;
 
-	status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+	status =
+	    acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
 
 	if (ACPI_FAILURE(status) || length != 16)
 		return AE_NOT_FOUND;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 66970d7b6c87..28a4529fdd60 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -74,12 +74,11 @@ unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
 #define MAX_SAPICS 256
-u16 ia64_acpiid_to_sapicid[MAX_SAPICS] =
-	{ [0 ... MAX_SAPICS - 1] = -1 };
+u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
+
 EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
 
-const char *
-acpi_get_sysname (void)
+const char *acpi_get_sysname(void)
 {
 #ifdef CONFIG_IA64_GENERIC
 	unsigned long rsdp_phys;
@@ -89,27 +88,29 @@ acpi_get_sysname (void)
 
 	rsdp_phys = acpi_find_rsdp();
 	if (!rsdp_phys) {
-		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 RSDP not found, default to \"dig\"\n");
 		return "dig";
 	}
 
-	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
+	rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
 	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
-		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
 		return "dig";
 	}
 
-	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
+	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
 	hdr = &xsdt->header;
 	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
-		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
 		return "dig";
 	}
 
 	if (!strcmp(hdr->oem_id, "HP")) {
 		return "hpzx1";
-	}
-	else if (!strcmp(hdr->oem_id, "SGI")) {
+	} else if (!strcmp(hdr->oem_id, "SGI")) {
 		return "sn2";
 	}
 
@@ -131,7 +132,7 @@ acpi_get_sysname (void)
 #endif
 }
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 
 #define ACPI_MAX_PLATFORM_INTERRUPTS	256
 
@@ -146,8 +147,7 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
  * Interrupt routing API for device drivers.  Provides interrupt vector for
  * a generic platform event.  Currently only CPEI is implemented.
  */
-int
-acpi_request_vector (u32 int_type)
+int acpi_request_vector(u32 int_type)
 {
 	int vector = -1;
 
@@ -155,12 +155,12 @@ acpi_request_vector (u32 int_type)
 		/* corrected platform error interrupt */
 		vector = platform_intr_list[int_type];
 	} else
-		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
+		printk(KERN_ERR
+		       "acpi_request_vector(): invalid interrupt type\n");
 	return vector;
 }
 
-char *
-__acpi_map_table (unsigned long phys_addr, unsigned long size)
+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
 {
 	return __va(phys_addr);
 }
@@ -169,19 +169,18 @@ __acpi_map_table (unsigned long phys_addr, unsigned long size)
     Boot-time Table Parsing
    -------------------------------------------------------------------------- */
 
 static int total_cpus __initdata;
 static int available_cpus __initdata;
-struct acpi_table_madt * acpi_madt __initdata;
+struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
-
 
 static int __init
-acpi_parse_lapic_addr_ovr (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
+			  const unsigned long end)
 {
 	struct acpi_table_lapic_addr_ovr *lapic;
 
-	lapic = (struct acpi_table_lapic_addr_ovr *) header;
+	lapic = (struct acpi_table_lapic_addr_ovr *)header;
 
 	if (BAD_MADT_ENTRY(lapic, end))
 		return -EINVAL;
@@ -193,22 +192,23 @@ acpi_parse_lapic_addr_ovr (
 	return 0;
 }
 
-
 static int __init
-acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_lsapic *lsapic;
 
-	lsapic = (struct acpi_table_lsapic *) header;
+	lsapic = (struct acpi_table_lsapic *)header;
 
 	if (BAD_MADT_ENTRY(lsapic, end))
 		return -EINVAL;
 
 	if (lsapic->flags.enabled) {
 #ifdef CONFIG_SMP
-		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    (lsapic->id << 8) | lsapic->eid;
 #endif
-		ia64_acpiid_to_sapicid[lsapic->acpi_id] = (lsapic->id << 8) | lsapic->eid;
+		ia64_acpiid_to_sapicid[lsapic->acpi_id] =
+		    (lsapic->id << 8) | lsapic->eid;
 		++available_cpus;
 	}
 
@@ -216,13 +216,12 @@ acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-
 static int __init
-acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_lapic_nmi *lacpi_nmi;
 
-	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
+	lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
 
 	if (BAD_MADT_ENTRY(lacpi_nmi, end))
 		return -EINVAL;
@@ -231,13 +230,12 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-
 static int __init
-acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_iosapic *iosapic;
 
-	iosapic = (struct acpi_table_iosapic *) header;
+	iosapic = (struct acpi_table_iosapic *)header;
 
 	if (BAD_MADT_ENTRY(iosapic, end))
 		return -EINVAL;
@@ -245,15 +243,14 @@ acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
 	return iosapic_init(iosapic->address, iosapic->global_irq_base);
 }
 
-
 static int __init
-acpi_parse_plat_int_src (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_plat_int_src(acpi_table_entry_header * header,
+			const unsigned long end)
 {
 	struct acpi_table_plat_int_src *plintsrc;
 	int vector;
 
-	plintsrc = (struct acpi_table_plat_int_src *) header;
+	plintsrc = (struct acpi_table_plat_int_src *)header;
 
 	if (BAD_MADT_ENTRY(plintsrc, end))
 		return -EINVAL;
@@ -267,8 +264,12 @@ acpi_parse_plat_int_src (
 						plintsrc->iosapic_vector,
 						plintsrc->eid,
 						plintsrc->id,
-						(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-						(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+						(plintsrc->flags.polarity ==
+						 1) ? IOSAPIC_POL_HIGH :
+						IOSAPIC_POL_LOW,
+						(plintsrc->flags.trigger ==
+						 1) ? IOSAPIC_EDGE :
+						IOSAPIC_LEVEL);
 
 	platform_intr_list[plintsrc->type] = vector;
 	if (acpi_madt_rev > 1) {
@@ -283,7 +284,6 @@ acpi_parse_plat_int_src (
 	return 0;
 }
 
-
 unsigned int can_cpei_retarget(void)
 {
 	extern int cpe_vector;
@@ -322,29 +322,30 @@ unsigned int get_cpei_target_cpu(void)
 }
 
 static int __init
-acpi_parse_int_src_ovr (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_int_src_ovr(acpi_table_entry_header * header,
+		       const unsigned long end)
 {
 	struct acpi_table_int_src_ovr *p;
 
-	p = (struct acpi_table_int_src_ovr *) header;
+	p = (struct acpi_table_int_src_ovr *)header;
 
 	if (BAD_MADT_ENTRY(p, end))
 		return -EINVAL;
 
 	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
-				 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-				 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+				 (p->flags.polarity ==
+				  1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+				 (p->flags.trigger ==
+				  1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 	return 0;
 }
 
-
 static int __init
-acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_nmi_src *nmi_src;
 
-	nmi_src = (struct acpi_table_nmi_src*) header;
+	nmi_src = (struct acpi_table_nmi_src *)header;
 
 	if (BAD_MADT_ENTRY(nmi_src, end))
 		return -EINVAL;
@@ -353,11 +354,9 @@ acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-static void __init
-acpi_madt_oem_check (char *oem_id, char *oem_table_id)
+static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
-	if (!strncmp(oem_id, "IBM", 3) &&
-	    (!strncmp(oem_table_id, "SERMOW", 6))) {
+	if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
 
 		/*
 		 * Unfortunately ITC_DRIFT is not yet part of the
@@ -370,19 +369,18 @@ acpi_madt_oem_check (char *oem_id, char *oem_table_id)
 	}
 }
 
-static int __init
-acpi_parse_madt (unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	if (!phys_addr || !size)
 		return -EINVAL;
 
-	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
+	acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
 
 	acpi_madt_rev = acpi_madt->header.revision;
 
 	/* remember the value for reference after free_initmem() */
 #ifdef CONFIG_ITANIUM
 	has_8259 = 1;		/* Firmware on old Itanium systems is broken */
 #else
 	has_8259 = acpi_madt->flags.pcat_compat;
 #endif
@@ -396,19 +394,18 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
 
 	acpi_madt_oem_check(acpi_madt->header.oem_id,
 			    acpi_madt->header.oem_table_id);
 
 	return 0;
 }
 
-
 #ifdef CONFIG_ACPI_NUMA
 
 #undef SLIT_DEBUG
 
 #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
 
 static int __initdata srat_num_cpus;	/* number of cpus */
 static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
@@ -421,15 +418,15 @@ static struct acpi_table_slit __initdata *slit_table;
  * ACPI 2.0 SLIT (System Locality Information Table)
  * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
  */
-void __init
-acpi_numa_slit_init (struct acpi_table_slit *slit)
+void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
 	u32 len;
 
 	len = sizeof(struct acpi_table_header) + 8
 	    + slit->localities * slit->localities;
 	if (slit->header.length != len) {
-		printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
+		printk(KERN_ERR
+		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 		       len, slit->header.length);
 		memset(numa_slit, 10, sizeof(numa_slit));
 		return;
@@ -438,19 +435,20 @@ acpi_numa_slit_init (struct acpi_table_slit *slit)
 }
 
 void __init
-acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 {
 	/* record this node in proximity bitmap */
 	pxm_bit_set(pa->proximity_domain);
 
-	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
+	node_cpuid[srat_num_cpus].phys_id =
+	    (pa->apic_id << 8) | (pa->lsapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
 	srat_num_cpus++;
 }
 
 void __init
-acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 {
 	unsigned long paddr, size;
 	u8 pxm;
@@ -487,8 +485,7 @@ acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
 	num_node_memblks++;
 }
 
-void __init
-acpi_numa_arch_fixup (void)
+void __init acpi_numa_arch_fixup(void)
 {
 	int i, j, node_from, node_to;
 
@@ -534,21 +531,24 @@ acpi_numa_arch_fixup (void)
 	for (i = 0; i < srat_num_cpus; i++)
 		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
 
-	printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes());
-	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);
+	printk(KERN_INFO "Number of logical nodes in system = %d\n",
+	       num_online_nodes());
+	printk(KERN_INFO "Number of memory chunks in system = %d\n",
+	       num_node_memblks);
 
-	if (!slit_table) return;
+	if (!slit_table)
+		return;
 	memset(numa_slit, -1, sizeof(numa_slit));
-	for (i=0; i<slit_table->localities; i++) {
+	for (i = 0; i < slit_table->localities; i++) {
 		if (!pxm_bit_test(i))
 			continue;
 		node_from = pxm_to_nid_map[i];
-		for (j=0; j<slit_table->localities; j++) {
+		for (j = 0; j < slit_table->localities; j++) {
 			if (!pxm_bit_test(j))
 				continue;
 			node_to = pxm_to_nid_map[j];
 			node_distance(node_from, node_to) =
-				slit_table->entry[i*slit_table->localities + j];
+			    slit_table->entry[i * slit_table->localities + j];
 		}
 	}
 
@@ -556,34 +556,41 @@ acpi_numa_arch_fixup (void)
556 printk("ACPI 2.0 SLIT locality table:\n"); 556 printk("ACPI 2.0 SLIT locality table:\n");
557 for_each_online_node(i) { 557 for_each_online_node(i) {
558 for_each_online_node(j) 558 for_each_online_node(j)
559 printk("%03d ", node_distance(i,j)); 559 printk("%03d ", node_distance(i, j));
560 printk("\n"); 560 printk("\n");
561 } 561 }
562#endif 562#endif
563} 563}
564#endif /* CONFIG_ACPI_NUMA */ 564#endif /* CONFIG_ACPI_NUMA */
565 565
566unsigned int 566/*
567acpi_register_gsi (u32 gsi, int edge_level, int active_high_low) 567 * success: return IRQ number (>=0)
568 * failure: return < 0
569 */
570int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
568{ 571{
569 if (has_8259 && gsi < 16) 572 if (has_8259 && gsi < 16)
570 return isa_irq_to_vector(gsi); 573 return isa_irq_to_vector(gsi);
571 574
572 return iosapic_register_intr(gsi, 575 return iosapic_register_intr(gsi,
573 (active_high_low == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, 576 (active_high_low ==
574 (edge_level == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); 577 ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
578 IOSAPIC_POL_LOW,
579 (edge_level ==
580 ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
581 IOSAPIC_LEVEL);
575} 582}
583
576EXPORT_SYMBOL(acpi_register_gsi); 584EXPORT_SYMBOL(acpi_register_gsi);
577 585
578void 586void acpi_unregister_gsi(u32 gsi)
579acpi_unregister_gsi (u32 gsi)
580{ 587{
581 iosapic_unregister_intr(gsi); 588 iosapic_unregister_intr(gsi);
582} 589}
590
583EXPORT_SYMBOL(acpi_unregister_gsi); 591EXPORT_SYMBOL(acpi_unregister_gsi);
584 592
585static int __init 593static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
586acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
587{ 594{
588 struct acpi_table_header *fadt_header; 595 struct acpi_table_header *fadt_header;
589 struct fadt_descriptor_rev2 *fadt; 596 struct fadt_descriptor_rev2 *fadt;
@@ -591,11 +598,11 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 	if (!phys_addr || !size)
 		return -EINVAL;
 
-	fadt_header = (struct acpi_table_header *) __va(phys_addr);
+	fadt_header = (struct acpi_table_header *)__va(phys_addr);
 	if (fadt_header->revision != 3)
 		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
 
-	fadt = (struct fadt_descriptor_rev2 *) fadt_header;
+	fadt = (struct fadt_descriptor_rev2 *)fadt_header;
 
 	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
 		acpi_kbd_controller_present = 0;
@@ -607,22 +614,19 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 	return 0;
 }
 
-
-unsigned long __init
-acpi_find_rsdp (void)
+unsigned long __init acpi_find_rsdp(void)
 {
 	unsigned long rsdp_phys = 0;
 
 	if (efi.acpi20)
 		rsdp_phys = __pa(efi.acpi20);
 	else if (efi.acpi)
-		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
+		printk(KERN_WARNING PREFIX
+		       "v1.0/r0.71 tables no longer supported\n");
 	return rsdp_phys;
 }
 
-
-int __init
-acpi_boot_init (void)
+int __init acpi_boot_init(void)
 {
 
 	/*
@@ -640,31 +644,43 @@ acpi_boot_init (void)
 
 	/* Local APIC */
 
-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
-		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing LAPIC address override entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
-		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
+	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
+	    < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no LAPIC entries\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
+	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 
 	/* I/O APIC */
 
-	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
-		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no IOSAPIC entries\n");
 
 	/* System-Level Interrupt Routing */
 
-	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
-		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
+	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing platform interrupt source entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
-		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing interrupt source overrides entry\n");
 
 	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
       skip_madt:
 
 	/*
 	 * FADT says whether a legacy keyboard controller is present.
@@ -679,8 +695,9 @@ acpi_boot_init (void)
 	if (available_cpus == 0) {
 		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
 		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
-		available_cpus = 1; /* We've got at least one of these, no? */
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
 	}
 	smp_boot_data.cpu_count = available_cpus;
 
@@ -689,8 +706,10 @@ acpi_boot_init (void)
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
-			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
-				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
+			if (smp_boot_data.cpu_phys_id[cpu] !=
+			    hard_smp_processor_id())
+				node_cpuid[i++].phys_id =
+				    smp_boot_data.cpu_phys_id[cpu];
 	}
 # endif
 #endif
@@ -698,12 +717,12 @@ acpi_boot_init (void)
 	build_cpu_to_node_map();
 #endif
 	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
 	return 0;
 }
 
-int
-acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
 	int vector;
 
@@ -724,11 +743,10 @@ acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 static
-int
-acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
+int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int pxm_id;
 
 	pxm_id = acpi_get_pxm(handle);
 
@@ -736,31 +754,28 @@ acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 	 * Assuming that the container driver would have set the proximity
 	 * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
 	 */
-	node_cpuid[cpu].nid = (pxm_id < 0) ? 0:
-		pxm_to_nid_map[pxm_id];
+	node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
 
 	node_cpuid[cpu].phys_id = physid;
 #endif
-	return(0);
+	return (0);
 }
 
-
-int
-acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_table_lsapic *lsapic;
 	cpumask_t tmp_map;
 	long physid;
 	int cpu;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
 
 	if (!buffer.length || !buffer.pointer)
 		return -EINVAL;
 
 	obj = buffer.pointer;
 	if (obj->type != ACPI_TYPE_BUFFER ||
 	    obj->buffer.length < sizeof(*lsapic)) {
@@ -776,7 +791,7 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu)
 		return -EINVAL;
 	}
 
-	physid = ((lsapic->id <<8) | (lsapic->eid));
+	physid = ((lsapic->id << 8) | (lsapic->eid));
 
 	acpi_os_free(buffer.pointer);
 	buffer.length = ACPI_ALLOCATE_BUFFER;
@@ -784,50 +799,49 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu)
 
 	cpus_complement(tmp_map, cpu_present_map);
 	cpu = first_cpu(tmp_map);
-	if(cpu >= NR_CPUS)
+	if (cpu >= NR_CPUS)
 		return -EINVAL;
 
 	acpi_map_cpu2node(handle, cpu, physid);
 
 	cpu_set(cpu, cpu_present_map);
 	ia64_cpu_to_sapicid[cpu] = physid;
 	ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
 
 	*pcpu = cpu;
-	return(0);
+	return (0);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
 
+EXPORT_SYMBOL(acpi_map_lsapic);
 
-int
-acpi_unmap_lsapic(int cpu)
+int acpi_unmap_lsapic(int cpu)
 {
 	int i;
 
-	for (i=0; i<MAX_SAPICS; i++) {
+	for (i = 0; i < MAX_SAPICS; i++) {
 		if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
 			ia64_acpiid_to_sapicid[i] = -1;
 			break;
 		}
 	}
 	ia64_cpu_to_sapicid[cpu] = -1;
-	cpu_clear(cpu,cpu_present_map);
+	cpu_clear(cpu, cpu_present_map);
 
 #ifdef CONFIG_ACPI_NUMA
 	/* NUMA specific cleanup's */
 #endif
 
-	return(0);
+	return (0);
 }
+
 EXPORT_SYMBOL(acpi_unmap_lsapic);
 #endif				/* CONFIG_ACPI_HOTPLUG_CPU */
-
 
 #ifdef CONFIG_ACPI_NUMA
 acpi_status __devinit
-acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
+acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 {
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_table_iosapic *iosapic;
 	unsigned int gsi_base;
@@ -876,10 +890,9 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 	map_iosapic_to_node(gsi_base, node);
 	return AE_OK;
 }
 #endif				/* CONFIG_NUMA */
 
-int
-acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base)
+int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
 	int err;
 
@@ -888,17 +901,18 @@ acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base)
 
 #if CONFIG_ACPI_NUMA
 	acpi_map_iosapic(handle, 0, NULL, NULL);
 #endif				/* CONFIG_ACPI_NUMA */
 
 	return 0;
 }
+
 EXPORT_SYMBOL(acpi_register_ioapic);
 
-int
-acpi_unregister_ioapic (acpi_handle handle, u32 gsi_base)
+int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 {
 	return iosapic_remove(gsi_base);
 }
+
 EXPORT_SYMBOL(acpi_unregister_ioapic);
 
-#endif /* CONFIG_ACPI_BOOT */
+#endif				/* CONFIG_ACPI */
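
Note: apart from Lindent reformatting and the CONFIG_ACPI_BOOT -> CONFIG_ACPI rename, the one interface change in acpi.c is acpi_register_gsi(): it now returns int — an IRQ number (>= 0) on success or a negative value on failure — instead of unsigned int, so the vector-exhaustion error introduced in iosapic.c below can propagate instead of panicking. A hedged caller sketch (variable names are illustrative, not from this patch):

	int irq;

	irq = acpi_register_gsi(gsi, edge_level, active_high_low);
	if (irq < 0)
		return irq;	/* e.g. -ENOSPC: out of interrupt vectors */
	/* only now is it safe to hand 'irq' to request_irq() */
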
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
deleted file mode 100644
index bbb8efe126b7..000000000000
--- a/arch/ia64/kernel/domain.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * arch/ia64/kernel/domain.c
- * Architecture specific sched-domains builder.
- *
- * Copyright (C) 2004 Jesse Barnes
- * Copyright (C) 2004 Silicon Graphics, Inc.
- */
-
-#include <linux/sched.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/cpumask.h>
-#include <linux/init.h>
-#include <linux/topology.h>
-#include <linux/nodemask.h>
-
-#define SD_NODES_PER_DOMAIN 16
-
-#ifdef CONFIG_NUMA
-/**
- * find_next_best_node - find the next node to include in a sched_domain
- * @node: node whose sched_domain we're building
- * @used_nodes: nodes already in the sched_domain
- *
- * Find the next node to include in a given scheduling domain.  Simply
- * finds the closest node not already in the @used_nodes map.
- *
- * Should use nodemask_t.
- */
-static int find_next_best_node(int node, unsigned long *used_nodes)
-{
-	int i, n, val, min_val, best_node = 0;
-
-	min_val = INT_MAX;
-
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
-
-		if (!nr_cpus_node(n))
-			continue;
-
-		/* Skip already used nodes */
-		if (test_bit(n, used_nodes))
-			continue;
-
-		/* Simple min distance search */
-		val = node_distance(node, n);
-
-		if (val < min_val) {
-			min_val = val;
-			best_node = n;
-		}
-	}
-
-	set_bit(best_node, used_nodes);
-	return best_node;
-}
-
-/**
- * sched_domain_node_span - get a cpumask for a node's sched_domain
- * @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
- *
- * Given a node, construct a good cpumask for its sched_domain to span.  It
- * should be one that prevents unnecessary balancing, but also spreads tasks
- * out optimally.
- */
-static cpumask_t sched_domain_node_span(int node)
-{
-	int i;
-	cpumask_t span, nodemask;
-	DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
-
-	cpus_clear(span);
-	bitmap_zero(used_nodes, MAX_NUMNODES);
-
-	nodemask = node_to_cpumask(node);
-	cpus_or(span, span, nodemask);
-	set_bit(node, used_nodes);
-
-	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
-		int next_node = find_next_best_node(node, used_nodes);
-		nodemask = node_to_cpumask(next_node);
-		cpus_or(span, span, nodemask);
-	}
-
-	return span;
-}
-#endif
-
-/*
- * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
- * can switch it on easily if needed.
- */
-#ifdef CONFIG_SCHED_SMT
-static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
-static struct sched_group sched_group_cpus[NR_CPUS];
-static int cpu_to_cpu_group(int cpu)
-{
-	return cpu;
-}
-#endif
-
-static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
-static int cpu_to_phys_group(int cpu)
-{
-#ifdef CONFIG_SCHED_SMT
-	return first_cpu(cpu_sibling_map[cpu]);
-#else
-	return cpu;
-#endif
-}
-
-#ifdef CONFIG_NUMA
-/*
- * The init_sched_build_groups can't handle what we want to do with node
- * groups, so roll our own. Now each node has its own list of groups which
- * gets dynamically allocated.
- */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group *sched_group_nodes[MAX_NUMNODES];
-
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
-static struct sched_group sched_group_allnodes[MAX_NUMNODES];
-
-static int cpu_to_allnodes_group(int cpu)
-{
-	return cpu_to_node(cpu);
-}
-#endif
-
-/*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
-void build_sched_domains(const cpumask_t *cpu_map)
-{
-	int i;
-
-	/*
-	 * Set up domains for cpus specified by the cpu_map.
-	 */
-	for_each_cpu_mask(i, *cpu_map) {
-		int group;
-		struct sched_domain *sd = NULL, *p;
-		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
-
-		cpus_and(nodemask, nodemask, *cpu_map);
-
-#ifdef CONFIG_NUMA
-		if (num_online_cpus()
-				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
-			sd = &per_cpu(allnodes_domains, i);
-			*sd = SD_ALLNODES_INIT;
-			sd->span = *cpu_map;
-			group = cpu_to_allnodes_group(i);
-			sd->groups = &sched_group_allnodes[group];
-			p = sd;
-		} else
-			p = NULL;
-
-		sd = &per_cpu(node_domains, i);
-		*sd = SD_NODE_INIT;
-		sd->span = sched_domain_node_span(cpu_to_node(i));
-		sd->parent = p;
-		cpus_and(sd->span, sd->span, *cpu_map);
-#endif
-
-		p = sd;
-		sd = &per_cpu(phys_domains, i);
-		group = cpu_to_phys_group(i);
-		*sd = SD_CPU_INIT;
-		sd->span = nodemask;
-		sd->parent = p;
-		sd->groups = &sched_group_phys[group];
-
-#ifdef CONFIG_SCHED_SMT
-		p = sd;
-		sd = &per_cpu(cpu_domains, i);
-		group = cpu_to_cpu_group(i);
-		*sd = SD_SIBLING_INIT;
-		sd->span = cpu_sibling_map[i];
-		cpus_and(sd->span, sd->span, *cpu_map);
-		sd->parent = p;
-		sd->groups = &sched_group_cpus[group];
-#endif
-	}
-
-#ifdef CONFIG_SCHED_SMT
-	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
-		cpumask_t this_sibling_map = cpu_sibling_map[i];
-		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
-		if (i != first_cpu(this_sibling_map))
-			continue;
-
-		init_sched_build_groups(sched_group_cpus, this_sibling_map,
-						&cpu_to_cpu_group);
-	}
-#endif
-
-	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		cpumask_t nodemask = node_to_cpumask(i);
-
-		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask))
-			continue;
-
-		init_sched_build_groups(sched_group_phys, nodemask,
-						&cpu_to_phys_group);
-	}
-
-#ifdef CONFIG_NUMA
-	init_sched_build_groups(sched_group_allnodes, *cpu_map,
-				&cpu_to_allnodes_group);
-
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		/* Set up node groups */
-		struct sched_group *sg, *prev;
-		cpumask_t nodemask = node_to_cpumask(i);
-		cpumask_t domainspan;
-		cpumask_t covered = CPU_MASK_NONE;
-		int j;
-
-		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask))
-			continue;
-
-		domainspan = sched_domain_node_span(i);
-		cpus_and(domainspan, domainspan, *cpu_map);
-
-		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
-		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, nodemask) {
-			struct sched_domain *sd;
-			sd = &per_cpu(node_domains, j);
-			sd->groups = sg;
-			if (sd->groups == NULL) {
-				/* Turn off balancing if we have no groups */
-				sd->flags = 0;
-			}
-		}
-		if (!sg) {
-			printk(KERN_WARNING
-			"Can not alloc domain group for node %d\n", i);
-			continue;
-		}
-		sg->cpu_power = 0;
-		sg->cpumask = nodemask;
-		cpus_or(covered, covered, nodemask);
-		prev = sg;
-
-		for (j = 0; j < MAX_NUMNODES; j++) {
-			cpumask_t tmp, notcovered;
-			int n = (i + j) % MAX_NUMNODES;
-
-			cpus_complement(notcovered, covered);
-			cpus_and(tmp, notcovered, *cpu_map);
-			cpus_and(tmp, tmp, domainspan);
-			if (cpus_empty(tmp))
-				break;
-
-			nodemask = node_to_cpumask(n);
-			cpus_and(tmp, tmp, nodemask);
-			if (cpus_empty(tmp))
-				continue;
-
-			sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
-			if (!sg) {
-				printk(KERN_WARNING
-				"Can not alloc domain group for node %d\n", j);
-				break;
-			}
-			sg->cpu_power = 0;
-			sg->cpumask = tmp;
-			cpus_or(covered, covered, tmp);
-			prev->next = sg;
-			prev = sg;
-		}
-		prev->next = sched_group_nodes[i];
-	}
-#endif
-
-	/* Calculate CPU power for physical packages and nodes */
-	for_each_cpu_mask(i, *cpu_map) {
-		int power;
-		struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
-		sd = &per_cpu(cpu_domains, i);
-		power = SCHED_LOAD_SCALE;
-		sd->groups->cpu_power = power;
-#endif
-
-		sd = &per_cpu(phys_domains, i);
-		power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-				(cpus_weight(sd->groups->cpumask)-1) / 10;
-		sd->groups->cpu_power = power;
-
-#ifdef CONFIG_NUMA
-		sd = &per_cpu(allnodes_domains, i);
-		if (sd->groups) {
-			power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-				(cpus_weight(sd->groups->cpumask)-1) / 10;
-			sd->groups->cpu_power = power;
-		}
-#endif
-	}
-
-#ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		struct sched_group *sg = sched_group_nodes[i];
-		int j;
-
-		if (sg == NULL)
-			continue;
-next_sg:
-		for_each_cpu_mask(j, sg->cpumask) {
-			struct sched_domain *sd;
-			int power;
-
-			sd = &per_cpu(phys_domains, j);
-			if (j != first_cpu(sd->groups->cpumask)) {
-				/*
-				 * Only add "power" once for each
-				 * physical package.
-				 */
-				continue;
-			}
-			power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-				(cpus_weight(sd->groups->cpumask)-1) / 10;
-
-			sg->cpu_power += power;
-		}
-		sg = sg->next;
-		if (sg != sched_group_nodes[i])
-			goto next_sg;
-	}
-#endif
-
-	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
-		struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
-		sd = &per_cpu(cpu_domains, i);
-#else
-		sd = &per_cpu(phys_domains, i);
-#endif
-		cpu_attach_domain(sd, i);
-	}
-}
-/*
- * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
- */
-void arch_init_sched_domains(const cpumask_t *cpu_map)
-{
-	cpumask_t cpu_default_map;
-
-	/*
-	 * Setup mask for cpus without special case scheduling requirements.
-	 * For now this just excludes isolated cpus, but could be used to
-	 * exclude other special cases in the future.
-	 */
-	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
-
-	build_sched_domains(&cpu_default_map);
-}
-
-void arch_destroy_sched_domains(const cpumask_t *cpu_map)
-{
-#ifdef CONFIG_NUMA
-	int i;
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		cpumask_t nodemask = node_to_cpumask(i);
-		struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
-		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask))
-			continue;
-
-		if (sg == NULL)
-			continue;
-		sg = sg->next;
-next_sg:
-		oldsg = sg;
-		sg = sg->next;
-		kfree(oldsg);
-		if (oldsg != sched_group_nodes[i])
-			goto next_sg;
-		sched_group_nodes[i] = NULL;
-	}
-#endif
-}
-
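
Note: domain.c is removed outright (and dropped from the Makefile above) because the generic scheduler now builds sched domains itself; the ia64 copy had drifted out of sync. The core idea it implemented — grow a node's domain span by repeatedly taking the nearest not-yet-used node by SLIT distance — is condensed below for reference, reusing names from the deleted find_next_best_node():

	/* Greedy step: pick the closest unused node to 'node' (sketch). */
	int n, best_node = 0, min_val = INT_MAX;

	for (n = 0; n < MAX_NUMNODES; n++) {
		if (!nr_cpus_node(n) || test_bit(n, used_nodes))
			continue;	/* skip empty or already-spanned nodes */
		if (node_distance(node, n) < min_val) {
			min_val = node_distance(node, n);
			best_node = n;
		}
	}
	set_bit(best_node, used_nodes);
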
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c0c3f55b0231..574084f343fa 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -561,7 +561,7 @@ static inline int vector_is_shared (int vector)
 	return (iosapic_intr_info[vector].count > 1);
 }
 
-static void
+static int
 register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
 {
@@ -576,7 +576,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	index = find_iosapic(gsi);
 	if (index < 0) {
 		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi);
-		return;
+		return -ENODEV;
 	}
 
 	iosapic_address = iosapic_lists[index].addr;
@@ -587,7 +587,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		rte = iosapic_alloc_rte();
 		if (!rte) {
 			printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
-			return;
+			return -ENOMEM;
 		}
 
 		rte_index = gsi - gsi_base;
@@ -603,7 +603,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		struct iosapic_intr_info *info = &iosapic_intr_info[vector];
 		if (info->trigger != trigger || info->polarity != polarity) {
 			printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
-			return;
+			return -EINVAL;
 		}
 	}
 
@@ -623,6 +623,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		       __FUNCTION__, vector, idesc->handler->typename, irq_type->typename);
 		idesc->handler = irq_type;
 	}
+	return 0;
 }
 
 static unsigned int
@@ -710,7 +711,7 @@ int
 iosapic_register_intr (unsigned int gsi,
 		       unsigned long polarity, unsigned long trigger)
 {
-	int vector, mask = 1;
+	int vector, mask = 1, err;
 	unsigned int dest;
 	unsigned long flags;
 	struct iosapic_rte_info *rte;
@@ -737,8 +738,8 @@ again:
 		vector = assign_irq_vector(AUTO_ASSIGN);
 		if (vector < 0) {
 			vector = iosapic_find_sharable_vector(trigger, polarity);
 			if (vector < 0)
-				panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+				return -ENOSPC;
 		}
 
 	spin_lock_irqsave(&irq_descp(vector)->lock, flags);
@@ -753,8 +754,13 @@ again:
 	}
 
 	dest = get_target_cpu(gsi, vector);
-	register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
-		      polarity, trigger);
+	err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
+			    polarity, trigger);
+	if (err < 0) {
+		spin_unlock(&iosapic_lock);
+		spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+		return err;
+	}
 
 	/*
 	 * If the vector is shared and already unmasked for
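
Note: the iosapic.c change turns the void register_intr() into one that returns -ENODEV/-ENOMEM/-EINVAL, and replaces the out-of-vectors panic() in iosapic_register_intr() with -ENOSPC. The error path added above releases both locks in reverse acquisition order before returning. The unwind pattern in miniature (lock and helper names are illustrative):

	spin_lock_irqsave(&desc_lock, flags);	/* outer lock */
	spin_lock(&iosapic_lock);		/* inner lock */

	err = do_setup();
	if (err < 0) {
		spin_unlock(&iosapic_lock);			/* inner first */
		spin_unlock_irqrestore(&desc_lock, flags);	/* then outer */
		return err;	/* propagate instead of panicking */
	}
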
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 28f2aadc38d0..205d98028261 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -91,23 +91,8 @@ skip:
 }
 
 #ifdef CONFIG_SMP
-/*
- * This is updated when the user sets irq affinity via /proc
- */
-static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
-
 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 
-/*
- * Arch specific routine for deferred write to iosapic rte to reprogram
- * intr destination.
- */
-void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
-{
-	pending_irq_cpumask[irq] = mask_val;
-}
-
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	cpumask_t mask = CPU_MASK_NONE;
@@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 
 	if (irq < NR_IRQS) {
 		irq_affinity[irq] = mask;
+		set_irq_info(irq, mask);
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
-
-
-void move_irq(int irq)
-{
-	/* note - we hold desc->lock */
-	cpumask_t tmp;
-	irq_desc_t *desc = irq_descp(irq);
-	int redir = test_bit(irq, pending_irq_redir);
-
-	if (unlikely(!desc->handler->set_affinity))
-		return;
-
-	if (!cpus_empty(pending_irq_cpumask[irq])) {
-		cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
-		if (unlikely(!cpus_empty(tmp))) {
-			desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
-						    pending_irq_cpumask[irq]);
-		}
-		cpus_clear(pending_irq_cpumask[irq]);
-	}
-}
-
-
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
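
Note: the irq.c hunks delete the ia64-private deferred-affinity machinery (pending_irq_cpumask[], pending_irq_redir[], proc_set_irq_affinity(), move_irq()) now that the generic IRQ layer provides it, and add a set_irq_info() call so the generic layer learns the mask chosen by set_irq_affinity_info(). The deferred-write idea itself is unchanged — record the requested mask from a /proc write, apply it later from a safe interrupt-path context — as this sketch shows (names are illustrative, not the generic-layer API):

	static cpumask_t pending_mask[NR_IRQS];

	void record_affinity(unsigned int irq, cpumask_t mask)
	{
		pending_mask[irq] = mask;	/* step 1: remember only */
	}

	void apply_affinity(unsigned int irq)	/* step 2: from the IRQ path */
	{
		cpumask_t online;

		cpus_and(online, pending_mask[irq], cpu_online_map);
		if (!cpus_empty(online))
			irq_descp(irq)->handler->set_affinity(irq, online);
		cpus_clear(pending_mask[irq]);
	}
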
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index b7fa3ccd2b0f..2323377e3695 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -49,6 +49,7 @@
 	/*
 	 * void jprobe_break(void)
 	 */
+	.section .kprobes.text, "ax"
 ENTRY(jprobe_break)
 	break.m 0x80300
 END(jprobe_break)
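
Note: moving jprobe_break into the .kprobes.text section (and, in kprobes.c below, tagging the probe-handling helpers __kprobes) exists so that code which runs while a probe fires can never itself be probed — recursing into a probe handler would be fatal. Grouping such code into one section reduces the safety test to an address-range check, roughly as below (the bounds symbols follow the usual linker-script convention and are an assumption here):

	extern char __kprobes_text_start[], __kprobes_text_end[];

	static int in_kprobes_text(unsigned long addr)
	{
		return addr >= (unsigned long)__kprobes_text_start &&
		       addr < (unsigned long)__kprobes_text_end;
	}
	/* probe registration would reject any addr for which this is true */
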
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 884f5cd27d8a..471086b808a4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -87,12 +87,25 @@ static enum instruction_type bundle_encoding[32][3] = {
  * is IP relative instruction and update the kprobe
  * inst flag accordingly
  */
-static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
-		unsigned long kprobe_inst, struct kprobe *p)
+static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
+					      uint major_opcode,
+					      unsigned long kprobe_inst,
+					      struct kprobe *p)
 {
 	p->ainsn.inst_flag = 0;
 	p->ainsn.target_br_reg = 0;
 
+	/* Check for Break instruction
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 32:35 X3 to be zero
+	 */
+	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
+		/* is a break instruction */
+		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
+		return;
+	}
+
 	if (bundle_encoding[template][slot] == B) {
 		switch (major_opcode) {
 		case INDIRECT_CALL_OPCODE:
@@ -126,8 +139,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode
  * Returns 0 if supported
  * Returns -EINVAL if unsupported
  */
-static int unsupported_inst(uint template, uint slot, uint major_opcode,
-		unsigned long kprobe_inst, struct kprobe *p)
+static int __kprobes unsupported_inst(uint template, uint slot,
+				      uint major_opcode,
+				      unsigned long kprobe_inst,
+				      struct kprobe *p)
 {
 	unsigned long addr = (unsigned long)p->addr;
 
@@ -168,8 +183,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode,
  * on which we are inserting kprobe is cmp instruction
  * with ctype as unc.
  */
-static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
-unsigned long kprobe_inst)
+static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
+					    uint major_opcode,
+					    unsigned long kprobe_inst)
 {
 	cmp_inst_t cmp_inst;
 	uint ctype_unc = 0;
@@ -201,8 +217,10 @@ out:
  * In this function we override the bundle with
  * the break instruction at the given slot.
  */
-static void prepare_break_inst(uint template, uint slot, uint major_opcode,
-		unsigned long kprobe_inst, struct kprobe *p)
+static void __kprobes prepare_break_inst(uint template, uint slot,
+					 uint major_opcode,
+					 unsigned long kprobe_inst,
+					 struct kprobe *p)
 {
 	unsigned long break_inst = BREAK_INST;
 	bundle_t *bundle = &p->ainsn.insn.bundle;
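prepare_break_inst() writes BREAK_INST into the chosen slot of the saved copy of the bundle. Slot 1 straddles the two 64-bit quads of an IA-64 bundle, so its 41 bits are written in two parts; a sketch of that patching, assuming the quad0/quad1 bitfield layout of bundle_t used elsewhere in this file:

	/* Sketch of the slot patching inside prepare_break_inst(); slot 1
	 * spans quad0 (18 bits from bit 46) and quad1 (remaining 23 bits). */
	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64 - 46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}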
@@ -271,7 +289,8 @@ static inline int in_ivt_functions(unsigned long addr)
 	    && addr < (unsigned long)__end_ivt_text);
 }
 
-static int valid_kprobe_addr(int template, int slot, unsigned long addr)
+static int __kprobes valid_kprobe_addr(int template, int slot,
+				       unsigned long addr)
 {
 	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
 		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
@@ -323,7 +342,7 @@ static void kretprobe_trampoline(void)
  * - cleanup by marking the instance as unused
  * - long jump back to the original return address
  */
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
@@ -381,7 +400,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+				      struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri;
 
@@ -399,7 +419,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
 	}
 }
 
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
 	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
@@ -430,7 +450,7 @@ int arch_prepare_kprobe(struct kprobe *p)
 	return 0;
 }
 
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;
@@ -439,7 +459,7 @@ void arch_arm_kprobe(struct kprobe *p)
 	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
 
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;
@@ -449,7 +469,7 @@ void arch_disarm_kprobe(struct kprobe *p)
 	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 }
 
@@ -461,7 +481,7 @@ void arch_remove_kprobe(struct kprobe *p)
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
  */
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
 	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
@@ -528,13 +548,16 @@ turn_ss_off:
 	ia64_psr(regs)->ss = 0;
 }
 
-static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
 	unsigned long slot = (unsigned long)p->addr & 0xf;
 
-	/* Update instruction pointer (IIP) and slot number (IPSR.ri) */
-	regs->cr_iip = bundle_addr & ~0xFULL;
+	/* single step inline if break instruction */
+	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
+		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
+	else
+		regs->cr_iip = bundle_addr & ~0xFULL;
 
 	if (slot > 2)
 		slot = 0;
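prepare_ss() now single-steps a pre-existing break "inline", at its original address, rather than stepping the saved copy of the bundle: a break re-executes identically in place, so no out-of-line copy is needed. INST_FLAG_BREAK_INST joins the existing fixup flags kept in p->ainsn.inst_flag; in include/asm-ia64/kprobes.h they are presumably defined along these lines (values assumed):

	/* sketch, values assumed: per-instruction flags for the saved copy */
	#define INST_FLAG_FIX_RELATIVE_IP_ADDR	1	/* IP-relative target needs fixup */
	#define INST_FLAG_FIX_BRANCH_REG	2	/* branch register needs fixup */
	#define INST_FLAG_BREAK_INST		4	/* probe sits on a break instruction */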
@@ -545,7 +568,39 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
 	ia64_psr(regs)->ss = 1;
 }
 
-static int pre_kprobes_handler(struct die_args *args)
+static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
+{
+	unsigned int slot = ia64_psr(regs)->ri;
+	unsigned int template, major_opcode;
+	unsigned long kprobe_inst;
+	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
+	bundle_t bundle;
+
+	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
+	template = bundle.quad0.template;
+
+	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
+
+	/* Get the kprobe instruction at the given slot */
+	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
+
+	/* For a break instruction,
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 33:35 X3 to be zero
+	 */
+	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
+		/* Not a break instruction */
+		return 0;
+	}
+
+	/* Is a break instruction */
+	return 1;
+}
+
+static int __kprobes pre_kprobes_handler(struct die_args *args)
 {
 	struct kprobe *p;
 	int ret = 0;
549{ 604{
550 struct kprobe *p; 605 struct kprobe *p;
551 int ret = 0; 606 int ret = 0;
@@ -558,7 +613,9 @@ static int pre_kprobes_handler(struct die_args *args)
 	if (kprobe_running()) {
 		p = get_kprobe(addr);
 		if (p) {
-			if (kprobe_status == KPROBE_HIT_SS) {
+			if ((kprobe_status == KPROBE_HIT_SS) &&
+			    (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
+				ia64_psr(regs)->ss = 0;
 				unlock_kprobes();
 				goto no_kprobe;
 			}
@@ -592,6 +649,19 @@ static int pre_kprobes_handler(struct die_args *args)
 	p = get_kprobe(addr);
 	if (!p) {
 		unlock_kprobes();
+		if (!is_ia64_break_inst(regs)) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it. Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address. In either case, no further
+			 * handling of this interrupt is appropriate.
+			 */
+			ret = 1;
+		}
+
+		/* Not one of our breaks, let the kernel handle it */
 		goto no_kprobe;
 	}
 
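With this hunk, a break that reaches pre_kprobes_handler() without a registered kprobe is classified: if the bundle at cr_iip is no longer a break, another CPU removed the probe after the trap was taken and the exception is swallowed (ret = 1); a genuine foreign break still falls through to the kernel. The caller turns that return value into a notifier verdict, roughly:

	/* Sketch of how the return value is consumed by the DIE_BREAK case
	 * in kprobe_exceptions_notify() (next hunk); the wrapper name is
	 * hypothetical. Nonzero means the break was ours: stop the chain. */
	static int handle_die_break(struct die_args *args)
	{
		if (pre_kprobes_handler(args))
			return NOTIFY_STOP;	/* break consumed by kprobes */
		return NOTIFY_DONE;		/* let the kernel handle it */
	}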
@@ -616,7 +686,7 @@ no_kprobe:
 	return ret;
 }
 
-static int post_kprobes_handler(struct pt_regs *regs)
+static int __kprobes post_kprobes_handler(struct pt_regs *regs)
 {
 	if (!kprobe_running())
 		return 0;
@@ -641,7 +711,7 @@ out:
 	return 1;
 }
 
-static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	if (!kprobe_running())
 		return 0;
@@ -659,8 +729,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 	return 0;
 }
 
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
-		void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
 {
 	struct die_args *args = (struct die_args *)data;
 	switch(val) {
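kprobe_exceptions_notify() is the arch entry point invoked from the die-notifier chain on breaks, single-step traps and faults. The generic layer hooks it up once at init; the registration lives in kernel/kprobes.c and looks roughly like this (sketch, exact init function name assumed):

	/* sketch: how the notifier above is wired into the die chain */
	static struct notifier_block kprobe_exceptions_nb = {
		.notifier_call	= kprobe_exceptions_notify,
		.priority	= 0x7fffffff,	/* must run before other die handlers */
	};

	static int __init init_kprobes(void)
	{
		return register_die_notifier(&kprobe_exceptions_nb);
	}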
@@ -681,7 +751,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 	return NOTIFY_DONE;
 }
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
@@ -703,7 +773,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	*regs = jprobe_saved_regs;
 	return 1;
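setjmp_pre_handler() and longjmp_break_handler() implement jprobes: the pre-handler redirects execution to the user's mirror function through the ia64 function descriptor (struct fnptr), and the mirror returns via jprobe_return() and this longjmp handler. A minimal hypothetical user; jdo_fork, the printk text and the registration step are assumptions for illustration:

	/* Hypothetical jprobe user. The mirror must match the signature of
	 * the probed function and must end in jprobe_return(). */
	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
			     struct pt_regs *regs, unsigned long stack_size,
			     int __user *parent_tidptr, int __user *child_tidptr)
	{
		printk(KERN_INFO "do_fork: clone_flags=%lx\n", clone_flags);
		jprobe_return();	/* never falls through */
		return 0;
	}

	static struct jprobe my_jprobe = {
		.entry = (kprobe_opcode_t *)jdo_fork,
	};

	/* in module init: set my_jprobe.kp.addr to do_fork's address,
	 * then register_jprobe(&my_jprobe); */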
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 84f89da7c640..1f5c26dbe705 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -384,7 +384,7 @@ setup_arch (char **cmdline_p)
 	if (early_console_setup(*cmdline_p) == 0)
 		mark_bsp_online();
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 	/* Initialize the ACPI boot-time table parser */
 	acpi_table_init();
 # ifdef CONFIG_ACPI_NUMA
@@ -420,7 +420,7 @@ setup_arch (char **cmdline_p)
 
 	cpu_init();	/* initialize the bootstrap CPU */
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 	acpi_boot_init();
 #endif
 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 92ff46ad21e2..706b7734e191 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,7 +36,7 @@ int arch_register_cpu(int num)
 	parent = &sysfs_nodes[cpu_to_node(num)];
 #endif /* CONFIG_NUMA */
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 	/*
 	 * If CPEI cannot be re-targetted, and this is
 	 * CPEI target, then dont create the control file
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 4440c8343fa4..f970359e7edf 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 #include <linux/module.h>       /* for EXPORT_SYMBOL */
 #include <linux/hardirq.h>
+#include <linux/kprobes.h>
 
 #include <asm/fpswa.h>
 #include <asm/ia32.h>
@@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err)
 }
 
 void
-ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
+__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
 {
 	siginfo_t siginfo;
 	int sig, code;
@@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
 	return rv;
 }
 
-void
+void __kprobes
 ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 	    unsigned long iim, unsigned long itir, long arg5, long arg6,
 	    long arg7, struct pt_regs regs)
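ia64_bad_break() and ia64_fault() sit on the very break/fault path that kprobes depends on, so they are moved into .kprobes.text; the generic registration code can then refuse probes anywhere in that range and avoid recursive traps. A sketch of that range check, with the section bounds coming from the KPROBES_TEXT hunk below (the declarations and the exact placement in register_kprobe() are assumed):

	/* sketch: rejecting probes on the probe-handling code itself */
	extern char __kprobes_text_start[], __kprobes_text_end[];

	static int in_kprobes_functions(unsigned long addr)
	{
		return addr >= (unsigned long)__kprobes_text_start &&
		       addr <  (unsigned long)__kprobes_text_end;
	}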
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index a676e79e0681..30d8564e9603 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -48,6 +48,7 @@ SECTIONS
 	*(.text)
 	SCHED_TEXT
 	LOCK_TEXT
+	KPROBES_TEXT
 	*(.gnu.linkonce.t*)
     }
   .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
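KPROBES_TEXT collects .kprobes.text between start/end markers inside the kernel's text output section, giving the range check above its bounds. In include/asm-generic/vmlinux.lds.h of this era it expands along these lines (assumed sketch):

	/* sketch, definition assumed: emit .kprobes.text with bound symbols */
	#define KPROBES_TEXT					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;	\
		*(.kprobes.text)				\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;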