author    Ingo Molnar <mingo@elte.hu>  2009-03-26 16:39:17 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-03-27 12:28:43 -0400
commit    6e15cf04860074ad032e88c306bea656bbdd0f22
tree      c346383bb7563e8d66b2f4a502f875b259c34870 /drivers
parent    be0ea69674ed95e1e98cb3687a241badc756d228
parent    60db56422043aaa455ac7f858ce23c273220f9d9
Merge branch 'core/percpu' into percpu-cpumask-x86-for-linus-2
Conflicts:
arch/parisc/kernel/irq.c
arch/x86/include/asm/fixmap_64.h
arch/x86/include/asm/setup.h
kernel/irq/handle.c
Semantic merge:
arch/x86/include/asm/fixmap.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers')
32 files changed, 335 insertions, 188 deletions
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index c3e841f3cde9..ab0aff3c7d6a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -365,7 +365,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id) | |||
365 | 365 | ||
366 | /******************************************************************************* | 366 | /******************************************************************************* |
367 | * | 367 | * |
368 | * FUNCTION: acpi_get_table | 368 | * FUNCTION: acpi_get_table_with_size |
369 | * | 369 | * |
370 | * PARAMETERS: Signature - ACPI signature of needed table | 370 | * PARAMETERS: Signature - ACPI signature of needed table |
371 | * Instance - Which instance (for SSDTs) | 371 | * Instance - Which instance (for SSDTs) |
@@ -377,8 +377,9 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id) | |||
377 | * | 377 | * |
378 | *****************************************************************************/ | 378 | *****************************************************************************/ |
379 | acpi_status | 379 | acpi_status |
380 | acpi_get_table(char *signature, | 380 | acpi_get_table_with_size(char *signature, |
381 | u32 instance, struct acpi_table_header **out_table) | 381 | u32 instance, struct acpi_table_header **out_table, |
382 | acpi_size *tbl_size) | ||
382 | { | 383 | { |
383 | u32 i; | 384 | u32 i; |
384 | u32 j; | 385 | u32 j; |
@@ -408,6 +409,7 @@ acpi_get_table(char *signature, | |||
408 | acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]); | 409 | acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]); |
409 | if (ACPI_SUCCESS(status)) { | 410 | if (ACPI_SUCCESS(status)) { |
410 | *out_table = acpi_gbl_root_table_list.tables[i].pointer; | 411 | *out_table = acpi_gbl_root_table_list.tables[i].pointer; |
412 | *tbl_size = acpi_gbl_root_table_list.tables[i].length; | ||
411 | } | 413 | } |
412 | 414 | ||
413 | if (!acpi_gbl_permanent_mmap) { | 415 | if (!acpi_gbl_permanent_mmap) { |
@@ -420,6 +422,15 @@ acpi_get_table(char *signature, | |||
420 | return (AE_NOT_FOUND); | 422 | return (AE_NOT_FOUND); |
421 | } | 423 | } |
422 | 424 | ||
425 | acpi_status | ||
426 | acpi_get_table(char *signature, | ||
427 | u32 instance, struct acpi_table_header **out_table) | ||
428 | { | ||
429 | acpi_size tbl_size; | ||
430 | |||
431 | return acpi_get_table_with_size(signature, | ||
432 | instance, out_table, &tbl_size); | ||
433 | } | ||
423 | ACPI_EXPORT_SYMBOL(acpi_get_table) | 434 | ACPI_EXPORT_SYMBOL(acpi_get_table) |
424 | 435 | ||
425 | /******************************************************************************* | 436 | /******************************************************************************* |
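As a reading aid, a minimal sketch (not part of this commit) of how the new acpi_get_table_with_size(), together with early_acpi_os_unmap_memory() added to drivers/acpi/osl.c further down, is meant to be used during early boot; the function name and error codes below are illustrative:

/* Illustrative early-boot caller; assumes <linux/acpi.h> is included. */
static int __init example_find_madt(void)
{
        struct acpi_table_header *table = NULL;
        acpi_size tbl_size;
        acpi_status status;

        status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
        if (ACPI_FAILURE(status) || !table)
                return -ENODEV;

        /* ... read what is needed from the mapped table ... */

        /* Drop the early mapping; a no-op once acpi_gbl_permanent_mmap is set. */
        early_acpi_os_unmap_memory(table, tbl_size);
        return 0;
}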
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 1e35f342957c..eb8980d67368 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -272,14 +272,21 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | |||
272 | } | 272 | } |
273 | EXPORT_SYMBOL_GPL(acpi_os_map_memory); | 273 | EXPORT_SYMBOL_GPL(acpi_os_map_memory); |
274 | 274 | ||
275 | void acpi_os_unmap_memory(void __iomem * virt, acpi_size size) | 275 | void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) |
276 | { | 276 | { |
277 | if (acpi_gbl_permanent_mmap) { | 277 | if (acpi_gbl_permanent_mmap) |
278 | iounmap(virt); | 278 | iounmap(virt); |
279 | } | 279 | else |
280 | __acpi_unmap_table(virt, size); | ||
280 | } | 281 | } |
281 | EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); | 282 | EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); |
282 | 283 | ||
284 | void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | ||
285 | { | ||
286 | if (!acpi_gbl_permanent_mmap) | ||
287 | __acpi_unmap_table(virt, size); | ||
288 | } | ||
289 | |||
283 | #ifdef ACPI_FUTURE_USAGE | 290 | #ifdef ACPI_FUTURE_USAGE |
284 | acpi_status | 291 | acpi_status |
285 | acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) | 292 | acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 9cc769b587ff..68fd3d292799 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -516,12 +516,12 @@ int acpi_processor_preregister_performance( | |||
516 | continue; | 516 | continue; |
517 | } | 517 | } |
518 | 518 | ||
519 | if (!performance || !percpu_ptr(performance, i)) { | 519 | if (!performance || !per_cpu_ptr(performance, i)) { |
520 | retval = -EINVAL; | 520 | retval = -EINVAL; |
521 | continue; | 521 | continue; |
522 | } | 522 | } |
523 | 523 | ||
524 | pr->performance = percpu_ptr(performance, i); | 524 | pr->performance = per_cpu_ptr(performance, i); |
525 | cpumask_set_cpu(i, pr->performance->shared_cpu_map); | 525 | cpumask_set_cpu(i, pr->performance->shared_cpu_map); |
526 | if (acpi_processor_get_psd(pr)) { | 526 | if (acpi_processor_get_psd(pr)) { |
527 | retval = -EINVAL; | 527 | retval = -EINVAL; |
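The hunks above are a pure rename from the old percpu_ptr() helper to per_cpu_ptr(). For readers unfamiliar with the API, a hedged sketch of the usual allocation/access pairing (variable and function names are illustrative):

/* Dynamic per-CPU data: one struct per possible CPU. */
static struct acpi_processor_performance *example_perf;

static int __init example_alloc(void)
{
        int cpu;

        example_perf = alloc_percpu(struct acpi_processor_performance);
        if (!example_perf)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct acpi_processor_performance *p =
                        per_cpu_ptr(example_perf, cpu);
                /* per-CPU initialisation of *p goes here */
        }
        return 0;
}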
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a8852952fac4..fec1ae36d431 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -181,14 +181,15 @@ acpi_table_parse_entries(char *id, | |||
181 | struct acpi_subtable_header *entry; | 181 | struct acpi_subtable_header *entry; |
182 | unsigned int count = 0; | 182 | unsigned int count = 0; |
183 | unsigned long table_end; | 183 | unsigned long table_end; |
184 | acpi_size tbl_size; | ||
184 | 185 | ||
185 | if (!handler) | 186 | if (!handler) |
186 | return -EINVAL; | 187 | return -EINVAL; |
187 | 188 | ||
188 | if (strncmp(id, ACPI_SIG_MADT, 4) == 0) | 189 | if (strncmp(id, ACPI_SIG_MADT, 4) == 0) |
189 | acpi_get_table(id, acpi_apic_instance, &table_header); | 190 | acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size); |
190 | else | 191 | else |
191 | acpi_get_table(id, 0, &table_header); | 192 | acpi_get_table_with_size(id, 0, &table_header, &tbl_size); |
192 | 193 | ||
193 | if (!table_header) { | 194 | if (!table_header) { |
194 | printk(KERN_WARNING PREFIX "%4.4s not present\n", id); | 195 | printk(KERN_WARNING PREFIX "%4.4s not present\n", id); |
@@ -206,8 +207,10 @@ acpi_table_parse_entries(char *id, | |||
206 | table_end) { | 207 | table_end) { |
207 | if (entry->type == entry_id | 208 | if (entry->type == entry_id |
208 | && (!max_entries || count++ < max_entries)) | 209 | && (!max_entries || count++ < max_entries)) |
209 | if (handler(entry, table_end)) | 210 | if (handler(entry, table_end)) { |
211 | early_acpi_os_unmap_memory((char *)table_header, tbl_size); | ||
210 | return -EINVAL; | 212 | return -EINVAL; |
213 | } | ||
211 | 214 | ||
212 | entry = (struct acpi_subtable_header *) | 215 | entry = (struct acpi_subtable_header *) |
213 | ((unsigned long)entry + entry->length); | 216 | ((unsigned long)entry + entry->length); |
@@ -217,6 +220,7 @@ acpi_table_parse_entries(char *id, | |||
217 | "%i found\n", id, entry_id, count - max_entries, count); | 220 | "%i found\n", id, entry_id, count - max_entries, count); |
218 | } | 221 | } |
219 | 222 | ||
223 | early_acpi_os_unmap_memory((char *)table_header, tbl_size); | ||
220 | return count; | 224 | return count; |
221 | } | 225 | } |
222 | 226 | ||
@@ -241,17 +245,19 @@ acpi_table_parse_madt(enum acpi_madt_type id, | |||
241 | int __init acpi_table_parse(char *id, acpi_table_handler handler) | 245 | int __init acpi_table_parse(char *id, acpi_table_handler handler) |
242 | { | 246 | { |
243 | struct acpi_table_header *table = NULL; | 247 | struct acpi_table_header *table = NULL; |
248 | acpi_size tbl_size; | ||
244 | 249 | ||
245 | if (!handler) | 250 | if (!handler) |
246 | return -EINVAL; | 251 | return -EINVAL; |
247 | 252 | ||
248 | if (strncmp(id, ACPI_SIG_MADT, 4) == 0) | 253 | if (strncmp(id, ACPI_SIG_MADT, 4) == 0) |
249 | acpi_get_table(id, acpi_apic_instance, &table); | 254 | acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size); |
250 | else | 255 | else |
251 | acpi_get_table(id, 0, &table); | 256 | acpi_get_table_with_size(id, 0, &table, &tbl_size); |
252 | 257 | ||
253 | if (table) { | 258 | if (table) { |
254 | handler(table); | 259 | handler(table); |
260 | early_acpi_os_unmap_memory(table, tbl_size); | ||
255 | return 0; | 261 | return 0; |
256 | } else | 262 | } else |
257 | return 1; | 263 | return 1; |
@@ -265,8 +271,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler) | |||
265 | static void __init check_multiple_madt(void) | 271 | static void __init check_multiple_madt(void) |
266 | { | 272 | { |
267 | struct acpi_table_header *table = NULL; | 273 | struct acpi_table_header *table = NULL; |
274 | acpi_size tbl_size; | ||
268 | 275 | ||
269 | acpi_get_table(ACPI_SIG_MADT, 2, &table); | 276 | acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size); |
270 | if (table) { | 277 | if (table) { |
271 | printk(KERN_WARNING PREFIX | 278 | printk(KERN_WARNING PREFIX |
272 | "BIOS bug: multiple APIC/MADT found," | 279 | "BIOS bug: multiple APIC/MADT found," |
@@ -275,6 +282,7 @@ static void __init check_multiple_madt(void) | |||
275 | "If \"acpi_apic_instance=%d\" works better, " | 282 | "If \"acpi_apic_instance=%d\" works better, " |
276 | "notify linux-acpi@vger.kernel.org\n", | 283 | "notify linux-acpi@vger.kernel.org\n", |
277 | acpi_apic_instance ? 0 : 2); | 284 | acpi_apic_instance ? 0 : 2); |
285 | early_acpi_os_unmap_memory(table, tbl_size); | ||
278 | 286 | ||
279 | } else | 287 | } else |
280 | acpi_apic_instance = 0; | 288 | acpi_apic_instance = 0; |
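Net effect of the drivers/acpi/tables.c changes: the table handed to a handler is unmapped again before acpi_table_parse() returns, so handlers must not cache the pointer. A short illustrative handler (name and printout are examples, not from this patch):

/* Copy or print what is needed while the early mapping is still live. */
static int __init example_madt_handler(struct acpi_table_header *table)
{
        printk(KERN_INFO "MADT revision %u, length %u\n",
               table->revision, table->length);
        return 0;
}

static int __init example_scan_madt(void)
{
        return acpi_table_parse(ACPI_SIG_MADT, example_madt_handler);
}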
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 719ee5c1c8d9..5b257a57bc57 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -107,7 +107,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); | |||
107 | /* | 107 | /* |
108 | * Print cpu online, possible, present, and system maps | 108 | * Print cpu online, possible, present, and system maps |
109 | */ | 109 | */ |
110 | static ssize_t print_cpus_map(char *buf, cpumask_t *map) | 110 | static ssize_t print_cpus_map(char *buf, const struct cpumask *map) |
111 | { | 111 | { |
112 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); | 112 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); |
113 | 113 | ||
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index a778fb52b11f..bf6b13206d00 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -31,7 +31,10 @@ | |||
31 | #include <linux/hardirq.h> | 31 | #include <linux/hardirq.h> |
32 | #include <linux/topology.h> | 32 | #include <linux/topology.h> |
33 | 33 | ||
34 | #define define_one_ro(_name) \ | 34 | #define define_one_ro_named(_name, _func) \ |
35 | static SYSDEV_ATTR(_name, 0444, _func, NULL) | ||
36 | |||
37 | #define define_one_ro(_name) \ | ||
35 | static SYSDEV_ATTR(_name, 0444, show_##_name, NULL) | 38 | static SYSDEV_ATTR(_name, 0444, show_##_name, NULL) |
36 | 39 | ||
37 | #define define_id_show_func(name) \ | 40 | #define define_id_show_func(name) \ |
@@ -42,8 +45,8 @@ static ssize_t show_##name(struct sys_device *dev, \ | |||
42 | return sprintf(buf, "%d\n", topology_##name(cpu)); \ | 45 | return sprintf(buf, "%d\n", topology_##name(cpu)); \ |
43 | } | 46 | } |
44 | 47 | ||
45 | #if defined(topology_thread_siblings) || defined(topology_core_siblings) | 48 | #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) |
46 | static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf) | 49 | static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf) |
47 | { | 50 | { |
48 | ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; | 51 | ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; |
49 | int n = 0; | 52 | int n = 0; |
@@ -65,7 +68,7 @@ static ssize_t show_##name(struct sys_device *dev, \ | |||
65 | struct sysdev_attribute *attr, char *buf) \ | 68 | struct sysdev_attribute *attr, char *buf) \ |
66 | { \ | 69 | { \ |
67 | unsigned int cpu = dev->id; \ | 70 | unsigned int cpu = dev->id; \ |
68 | return show_cpumap(0, &(topology_##name(cpu)), buf); \ | 71 | return show_cpumap(0, topology_##name(cpu), buf); \ |
69 | } | 72 | } |
70 | 73 | ||
71 | #define define_siblings_show_list(name) \ | 74 | #define define_siblings_show_list(name) \ |
@@ -74,7 +77,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \ | |||
74 | char *buf) \ | 77 | char *buf) \ |
75 | { \ | 78 | { \ |
76 | unsigned int cpu = dev->id; \ | 79 | unsigned int cpu = dev->id; \ |
77 | return show_cpumap(1, &(topology_##name(cpu)), buf); \ | 80 | return show_cpumap(1, topology_##name(cpu), buf); \ |
78 | } | 81 | } |
79 | 82 | ||
80 | #else | 83 | #else |
@@ -82,9 +85,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \ | |||
82 | static ssize_t show_##name(struct sys_device *dev, \ | 85 | static ssize_t show_##name(struct sys_device *dev, \ |
83 | struct sysdev_attribute *attr, char *buf) \ | 86 | struct sysdev_attribute *attr, char *buf) \ |
84 | { \ | 87 | { \ |
85 | unsigned int cpu = dev->id; \ | 88 | return show_cpumap(0, topology_##name(dev->id), buf); \ |
86 | cpumask_t mask = topology_##name(cpu); \ | ||
87 | return show_cpumap(0, &mask, buf); \ | ||
88 | } | 89 | } |
89 | 90 | ||
90 | #define define_siblings_show_list(name) \ | 91 | #define define_siblings_show_list(name) \ |
@@ -92,9 +93,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \ | |||
92 | struct sysdev_attribute *attr, \ | 93 | struct sysdev_attribute *attr, \ |
93 | char *buf) \ | 94 | char *buf) \ |
94 | { \ | 95 | { \ |
95 | unsigned int cpu = dev->id; \ | 96 | return show_cpumap(1, topology_##name(dev->id), buf); \ |
96 | cpumask_t mask = topology_##name(cpu); \ | ||
97 | return show_cpumap(1, &mask, buf); \ | ||
98 | } | 97 | } |
99 | #endif | 98 | #endif |
100 | 99 | ||
@@ -107,13 +106,13 @@ define_one_ro(physical_package_id); | |||
107 | define_id_show_func(core_id); | 106 | define_id_show_func(core_id); |
108 | define_one_ro(core_id); | 107 | define_one_ro(core_id); |
109 | 108 | ||
110 | define_siblings_show_func(thread_siblings); | 109 | define_siblings_show_func(thread_cpumask); |
111 | define_one_ro(thread_siblings); | 110 | define_one_ro_named(thread_siblings, show_thread_cpumask); |
112 | define_one_ro(thread_siblings_list); | 111 | define_one_ro_named(thread_siblings_list, show_thread_cpumask_list); |
113 | 112 | ||
114 | define_siblings_show_func(core_siblings); | 113 | define_siblings_show_func(core_cpumask); |
115 | define_one_ro(core_siblings); | 114 | define_one_ro_named(core_siblings, show_core_cpumask); |
116 | define_one_ro(core_siblings_list); | 115 | define_one_ro_named(core_siblings_list, show_core_cpumask_list); |
117 | 116 | ||
118 | static struct attribute *default_attrs[] = { | 117 | static struct attribute *default_attrs[] = { |
119 | &attr_physical_package_id.attr, | 118 | &attr_physical_package_id.attr, |
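For readers following the topology.c macro rework: define_one_ro_named() decouples the sysfs attribute name from the show function, so the user-visible files keep their old names while the functions follow the new topology_*_cpumask() helpers. Roughly (hand-expanded, so an approximation of the preprocessor output):

/* define_siblings_show_func(thread_cpumask) produces, among others: */
static ssize_t show_thread_cpumask(struct sys_device *dev,
                                   struct sysdev_attribute *attr, char *buf)
{
        unsigned int cpu = dev->id;
        return show_cpumap(0, topology_thread_cpumask(cpu), buf);
}

/* define_one_ro_named(thread_siblings, show_thread_cpumask) then yields: */
static SYSDEV_ATTR(thread_siblings, 0444, show_thread_cpumask, NULL);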
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index e1129fad96dd..ee19b6e8fcb4 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE, | |||
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | #ifndef CONFIG_X86_64 | 145 | #ifndef CONFIG_X86_64 |
146 | #include "mach_timer.h" | 146 | #include <asm/mach_timer.h> |
147 | #define PMTMR_EXPECTED_RATE \ | 147 | #define PMTMR_EXPECTED_RATE \ |
148 | ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10)) | 148 | ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10)) |
149 | /* | 149 | /* |
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 1bde303b970b..8615059a8729 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -7,7 +7,7 @@ | |||
7 | #include <asm/pgtable.h> | 7 | #include <asm/pgtable.h> |
8 | #include <asm/io.h> | 8 | #include <asm/io.h> |
9 | 9 | ||
10 | #include "mach_timer.h" | 10 | #include <asm/mach_timer.h> |
11 | 11 | ||
12 | #define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */ | 12 | #define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */ |
13 | #define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */ | 13 | #define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */ |
diff --git a/drivers/eisa/Kconfig b/drivers/eisa/Kconfig
index c0646576cf47..2705284f6223 100644
--- a/drivers/eisa/Kconfig
+++ b/drivers/eisa/Kconfig
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | config EISA_VLB_PRIMING | 4 | config EISA_VLB_PRIMING |
5 | bool "Vesa Local Bus priming" | 5 | bool "Vesa Local Bus priming" |
6 | depends on X86_PC && EISA | 6 | depends on X86 && EISA |
7 | default n | 7 | default n |
8 | ---help--- | 8 | ---help--- |
9 | Activate this option if your system contains a Vesa Local | 9 | Activate this option if your system contains a Vesa Local |
@@ -24,11 +24,11 @@ config EISA_PCI_EISA | |||
24 | When in doubt, say Y. | 24 | When in doubt, say Y. |
25 | 25 | ||
26 | # Using EISA_VIRTUAL_ROOT on something other than an Alpha or | 26 | # Using EISA_VIRTUAL_ROOT on something other than an Alpha or |
27 | # an X86_PC may lead to crashes... | 27 | # an X86 may lead to crashes... |
28 | 28 | ||
29 | config EISA_VIRTUAL_ROOT | 29 | config EISA_VIRTUAL_ROOT |
30 | bool "EISA virtual root device" | 30 | bool "EISA virtual root device" |
31 | depends on EISA && (ALPHA || X86_PC) | 31 | depends on EISA && (ALPHA || X86) |
32 | default y | 32 | default y |
33 | ---help--- | 33 | ---help--- |
34 | Activate this option if your system only have EISA bus | 34 | Activate this option if your system only have EISA bus |
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 777fba48d2d3..3009e0171e54 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -244,7 +244,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev, | |||
244 | */ | 244 | */ |
245 | int dcdbas_smi_request(struct smi_cmd *smi_cmd) | 245 | int dcdbas_smi_request(struct smi_cmd *smi_cmd) |
246 | { | 246 | { |
247 | cpumask_t old_mask; | 247 | cpumask_var_t old_mask; |
248 | int ret = 0; | 248 | int ret = 0; |
249 | 249 | ||
250 | if (smi_cmd->magic != SMI_CMD_MAGIC) { | 250 | if (smi_cmd->magic != SMI_CMD_MAGIC) { |
@@ -254,8 +254,11 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) | |||
254 | } | 254 | } |
255 | 255 | ||
256 | /* SMI requires CPU 0 */ | 256 | /* SMI requires CPU 0 */ |
257 | old_mask = current->cpus_allowed; | 257 | if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) |
258 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); | 258 | return -ENOMEM; |
259 | |||
260 | cpumask_copy(old_mask, ¤t->cpus_allowed); | ||
261 | set_cpus_allowed_ptr(current, cpumask_of(0)); | ||
259 | if (smp_processor_id() != 0) { | 262 | if (smp_processor_id() != 0) { |
260 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", | 263 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", |
261 | __func__); | 264 | __func__); |
@@ -275,7 +278,8 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) | |||
275 | ); | 278 | ); |
276 | 279 | ||
277 | out: | 280 | out: |
278 | set_cpus_allowed_ptr(current, &old_mask); | 281 | set_cpus_allowed_ptr(current, old_mask); |
282 | free_cpumask_var(old_mask); | ||
279 | return ret; | 283 | return ret; |
280 | } | 284 | } |
281 | 285 | ||
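The dcdbas change is an instance of a pattern that recurs in this merge: cpumask_t values are no longer copied on the stack, so the old affinity is saved in a heap-allocated cpumask_var_t instead. A condensed standalone sketch (function name and the -EBUSY error are illustrative):

/* Temporarily pin the current task to CPU 0, then restore its affinity. */
static int example_run_on_cpu0(void)
{
        cpumask_var_t old_mask;
        int ret = 0;

        if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(old_mask, &current->cpus_allowed);
        set_cpus_allowed_ptr(current, cpumask_of(0));
        if (smp_processor_id() != 0) {
                ret = -EBUSY;
                goto out;
        }

        /* ... work that must execute on CPU 0 ... */

out:
        set_cpus_allowed_ptr(current, old_mask);
        free_cpumask_var(old_mask);
        return ret;
}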
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 3ab3e4a41d67..7b7ddc2d51c9 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -938,8 +938,8 @@ static int __init ibft_init(void) | |||
938 | return -ENOMEM; | 938 | return -ENOMEM; |
939 | 939 | ||
940 | if (ibft_addr) { | 940 | if (ibft_addr) { |
941 | printk(KERN_INFO "iBFT detected at 0x%lx.\n", | 941 | printk(KERN_INFO "iBFT detected at 0x%llx.\n", |
942 | virt_to_phys((void *)ibft_addr)); | 942 | (u64)virt_to_phys((void *)ibft_addr)); |
943 | 943 | ||
944 | rc = ibft_check_device(); | 944 | rc = ibft_check_device(); |
945 | if (rc) | 945 | if (rc) |
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 8df849f66830..b756f043a5f4 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -678,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request, | |||
678 | *start = &buf[offset]; | 678 | *start = &buf[offset]; |
679 | *eof = 0; | 679 | *eof = 0; |
680 | 680 | ||
681 | DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", | 681 | DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n", |
682 | atomic_read(&dev->vma_count), | 682 | atomic_read(&dev->vma_count), |
683 | high_memory, virt_to_phys(high_memory)); | 683 | high_memory, (u64)virt_to_phys(high_memory)); |
684 | list_for_each_entry(pt, &dev->vmalist, head) { | 684 | list_for_each_entry(pt, &dev->vmalist, head) { |
685 | if (!(vma = pt->vma)) | 685 | if (!(vma = pt->vma)) |
686 | continue; | 686 | continue; |
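This hunk, like the iscsi_ibft, ne3210, falcon and arlan ones, makes the same printk format fix: on x86 virt_to_phys() now returns phys_addr_t, which can be 64 bits wide (e.g. with PAE), so the value is widened explicitly and printed with %llx. In isolation, the idiom is simply:

/* Portable printing of a physical address across 32/64-bit configs. */
static void example_print_phys(void *vaddr)
{
        printk(KERN_DEBUG "phys = 0x%llx\n", (u64)virt_to_phys(vaddr));
}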
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 35561689ff38..ea2638b41982 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -13,11 +13,11 @@ menuconfig INPUT_KEYBOARD | |||
13 | if INPUT_KEYBOARD | 13 | if INPUT_KEYBOARD |
14 | 14 | ||
15 | config KEYBOARD_ATKBD | 15 | config KEYBOARD_ATKBD |
16 | tristate "AT keyboard" if EMBEDDED || !X86_PC | 16 | tristate "AT keyboard" if EMBEDDED || !X86 |
17 | default y | 17 | default y |
18 | select SERIO | 18 | select SERIO |
19 | select SERIO_LIBPS2 | 19 | select SERIO_LIBPS2 |
20 | select SERIO_I8042 if X86_PC | 20 | select SERIO_I8042 if X86 |
21 | select SERIO_GSCPS2 if GSC | 21 | select SERIO_GSCPS2 if GSC |
22 | help | 22 | help |
23 | Say Y here if you want to use a standard AT or PS/2 keyboard. Usually | 23 | Say Y here if you want to use a standard AT or PS/2 keyboard. Usually |
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 9705f3a00a3d..4f38e6f7dfdd 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2 | |||
17 | default y | 17 | default y |
18 | select SERIO | 18 | select SERIO |
19 | select SERIO_LIBPS2 | 19 | select SERIO_LIBPS2 |
20 | select SERIO_I8042 if X86_PC | 20 | select SERIO_I8042 if X86 |
21 | select SERIO_GSCPS2 if GSC | 21 | select SERIO_GSCPS2 if GSC |
22 | help | 22 | help |
23 | Say Y here if you have a PS/2 mouse connected to your system. This | 23 | Say Y here if you have a PS/2 mouse connected to your system. This |
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 76f2b36881c3..a3d3cbab359a 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@ | |||
1 | config LGUEST | 1 | config LGUEST |
2 | tristate "Linux hypervisor example code" | 2 | tristate "Linux hypervisor example code" |
3 | depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !X86_VOYAGER | 3 | depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX |
4 | select HVC_DRIVER | 4 | select HVC_DRIVER |
5 | ---help--- | 5 | ---help--- |
6 | This is a very simple module which allows you to run | 6 | This is a very simple module which allows you to run |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c64e6798878a..1c484084ed4f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -162,7 +162,7 @@ config ENCLOSURE_SERVICES | |||
162 | config SGI_XP | 162 | config SGI_XP |
163 | tristate "Support communication between SGI SSIs" | 163 | tristate "Support communication between SGI SSIs" |
164 | depends on NET | 164 | depends on NET |
165 | depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_64) && SMP | 165 | depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP |
166 | select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 | 166 | select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 |
167 | select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 | 167 | select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 |
168 | select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP | 168 | select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP |
@@ -189,7 +189,7 @@ config HP_ILO | |||
189 | 189 | ||
190 | config SGI_GRU | 190 | config SGI_GRU |
191 | tristate "SGI GRU driver" | 191 | tristate "SGI GRU driver" |
192 | depends on (X86_64 || IA64_SGI_UV || IA64_GENERIC) && SMP | 192 | depends on (X86_UV || IA64_SGI_UV || IA64_GENERIC) && SMP |
193 | default n | 193 | default n |
194 | select MMU_NOTIFIER | 194 | select MMU_NOTIFIER |
195 | ---help--- | 195 | ---help--- |
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 650983806392..c67e4e8bd62c 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -36,23 +36,11 @@ | |||
36 | #include <linux/interrupt.h> | 36 | #include <linux/interrupt.h> |
37 | #include <linux/proc_fs.h> | 37 | #include <linux/proc_fs.h> |
38 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
39 | #include <asm/uv/uv.h> | ||
39 | #include "gru.h" | 40 | #include "gru.h" |
40 | #include "grulib.h" | 41 | #include "grulib.h" |
41 | #include "grutables.h" | 42 | #include "grutables.h" |
42 | 43 | ||
43 | #if defined CONFIG_X86_64 | ||
44 | #include <asm/genapic.h> | ||
45 | #include <asm/irq.h> | ||
46 | #define IS_UV() is_uv_system() | ||
47 | #elif defined CONFIG_IA64 | ||
48 | #include <asm/system.h> | ||
49 | #include <asm/sn/simulator.h> | ||
50 | /* temp support for running on hardware simulator */ | ||
51 | #define IS_UV() IS_MEDUSA() || ia64_platform_is("uv") | ||
52 | #else | ||
53 | #define IS_UV() 0 | ||
54 | #endif | ||
55 | |||
56 | #include <asm/uv/uv_hub.h> | 44 | #include <asm/uv/uv_hub.h> |
57 | #include <asm/uv/uv_mmrs.h> | 45 | #include <asm/uv/uv_mmrs.h> |
58 | 46 | ||
@@ -381,7 +369,7 @@ static int __init gru_init(void) | |||
381 | char id[10]; | 369 | char id[10]; |
382 | void *gru_start_vaddr; | 370 | void *gru_start_vaddr; |
383 | 371 | ||
384 | if (!IS_UV()) | 372 | if (!is_uv_system()) |
385 | return 0; | 373 | return 0; |
386 | 374 | ||
387 | #if defined CONFIG_IA64 | 375 | #if defined CONFIG_IA64 |
@@ -451,7 +439,7 @@ static void __exit gru_exit(void) | |||
451 | int order = get_order(sizeof(struct gru_state) * | 439 | int order = get_order(sizeof(struct gru_state) * |
452 | GRU_CHIPLETS_PER_BLADE); | 440 | GRU_CHIPLETS_PER_BLADE); |
453 | 441 | ||
454 | if (!IS_UV()) | 442 | if (!is_uv_system()) |
455 | return; | 443 | return; |
456 | 444 | ||
457 | for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) | 445 | for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) |
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 7b4cbd5e03e9..2275126cb334 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -15,19 +15,19 @@ | |||
15 | 15 | ||
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | 17 | ||
18 | #ifdef CONFIG_IA64 | 18 | #if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV |
19 | #include <asm/uv/uv.h> | ||
20 | #define is_uv() is_uv_system() | ||
21 | #endif | ||
22 | |||
23 | #ifndef is_uv | ||
24 | #define is_uv() 0 | ||
25 | #endif | ||
26 | |||
27 | #if defined CONFIG_IA64 | ||
19 | #include <asm/system.h> | 28 | #include <asm/system.h> |
20 | #include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */ | 29 | #include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */ |
21 | #define is_shub() ia64_platform_is("sn2") | 30 | #define is_shub() ia64_platform_is("sn2") |
22 | #ifdef CONFIG_IA64_SGI_UV | ||
23 | #define is_uv() ia64_platform_is("uv") | ||
24 | #else | ||
25 | #define is_uv() 0 | ||
26 | #endif | ||
27 | #endif | ||
28 | #ifdef CONFIG_X86_64 | ||
29 | #include <asm/genapic.h> | ||
30 | #define is_uv() is_uv_system() | ||
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #ifndef is_shub1 | 33 | #ifndef is_shub1 |
@@ -42,10 +42,6 @@ | |||
42 | #define is_shub() 0 | 42 | #define is_shub() 0 |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #ifndef is_uv | ||
46 | #define is_uv() 0 | ||
47 | #endif | ||
48 | |||
49 | #ifdef USE_DBUG_ON | 45 | #ifdef USE_DBUG_ON |
50 | #define DBUG_ON(condition) BUG_ON(condition) | 46 | #define DBUG_ON(condition) BUG_ON(condition) |
51 | #else | 47 | #else |
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 89218f7cfaa7..6576170de962 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -318,7 +318,7 @@ xpc_hb_checker(void *ignore) | |||
318 | 318 | ||
319 | /* this thread was marked active by xpc_hb_init() */ | 319 | /* this thread was marked active by xpc_hb_init() */ |
320 | 320 | ||
321 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU)); | 321 | set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU)); |
322 | 322 | ||
323 | /* set our heartbeating to other partitions into motion */ | 323 | /* set our heartbeating to other partitions into motion */ |
324 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); | 324 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8b12e6e109d3..2ff88791cebc 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -273,7 +273,7 @@ config MTD_NAND_CAFE | |||
273 | 273 | ||
274 | config MTD_NAND_CS553X | 274 | config MTD_NAND_CS553X |
275 | tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)" | 275 | tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)" |
276 | depends on X86_32 && (X86_PC || X86_GENERICARCH) | 276 | depends on X86_32 |
277 | help | 277 | help |
278 | The CS553x companion chips for the AMD Geode processor | 278 | The CS553x companion chips for the AMD Geode processor |
279 | include NAND flash controllers with built-in hardware ECC | 279 | include NAND flash controllers with built-in hardware ECC |
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index fac43fd6fc87..6a843f7350ab 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -150,7 +150,8 @@ static int __init ne3210_eisa_probe (struct device *device) | |||
150 | if (phys_mem < virt_to_phys(high_memory)) { | 150 | if (phys_mem < virt_to_phys(high_memory)) { |
151 | printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); | 151 | printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); |
152 | printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n"); | 152 | printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n"); |
153 | printk(KERN_CRIT "ne3210.c: or to an address above 0x%lx.\n", virt_to_phys(high_memory)); | 153 | printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n", |
154 | (u64)virt_to_phys(high_memory)); | ||
154 | printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); | 155 | printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); |
155 | retval = -EINVAL; | 156 | retval = -EINVAL; |
156 | goto out3; | 157 | goto out3; |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 6eff9ca6c6c8..00c23b1babca 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -894,20 +894,27 @@ static void efx_fini_io(struct efx_nic *efx) | |||
894 | * interrupts across them. */ | 894 | * interrupts across them. */ |
895 | static int efx_wanted_rx_queues(void) | 895 | static int efx_wanted_rx_queues(void) |
896 | { | 896 | { |
897 | cpumask_t core_mask; | 897 | cpumask_var_t core_mask; |
898 | int count; | 898 | int count; |
899 | int cpu; | 899 | int cpu; |
900 | 900 | ||
901 | cpus_clear(core_mask); | 901 | if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) { |
902 | printk(KERN_WARNING | ||
903 | "efx.c: allocation failure, irq balancing hobbled\n"); | ||
904 | return 1; | ||
905 | } | ||
906 | |||
907 | cpumask_clear(core_mask); | ||
902 | count = 0; | 908 | count = 0; |
903 | for_each_online_cpu(cpu) { | 909 | for_each_online_cpu(cpu) { |
904 | if (!cpu_isset(cpu, core_mask)) { | 910 | if (!cpumask_test_cpu(cpu, core_mask)) { |
905 | ++count; | 911 | ++count; |
906 | cpus_or(core_mask, core_mask, | 912 | cpumask_or(core_mask, core_mask, |
907 | topology_core_siblings(cpu)); | 913 | topology_core_cpumask(cpu)); |
908 | } | 914 | } |
909 | } | 915 | } |
910 | 916 | ||
917 | free_cpumask_var(core_mask); | ||
911 | return count; | 918 | return count; |
912 | } | 919 | } |
913 | 920 | ||
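The efx.c loop above now counts one wanted RX queue per physical core using a heap-allocated mask. The same counting idiom as a standalone sketch (helper name is illustrative; the fallback value of 1 mirrors the driver's behaviour on allocation failure):

/* Count distinct physical cores among the online CPUs. */
static int example_count_online_cores(void)
{
        cpumask_var_t seen;
        int cpu, count = 0;

        if (!alloc_cpumask_var(&seen, GFP_KERNEL))
                return 1;       /* degrade gracefully on allocation failure */

        cpumask_clear(seen);
        for_each_online_cpu(cpu) {
                if (!cpumask_test_cpu(cpu, seen)) {
                        ++count;
                        cpumask_or(seen, seen, topology_core_cpumask(cpu));
                }
        }

        free_cpumask_var(seen);
        return count;
}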
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 23a1b148d5b2..d4629ab2c614 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -340,10 +340,10 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx, | |||
340 | nic_data->next_buffer_table += buffer->entries; | 340 | nic_data->next_buffer_table += buffer->entries; |
341 | 341 | ||
342 | EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " | 342 | EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " |
343 | "(virt %p phys %lx)\n", buffer->index, | 343 | "(virt %p phys %llx)\n", buffer->index, |
344 | buffer->index + buffer->entries - 1, | 344 | buffer->index + buffer->entries - 1, |
345 | (unsigned long long)buffer->dma_addr, len, | 345 | (u64)buffer->dma_addr, len, |
346 | buffer->addr, virt_to_phys(buffer->addr)); | 346 | buffer->addr, (u64)virt_to_phys(buffer->addr)); |
347 | 347 | ||
348 | return 0; | 348 | return 0; |
349 | } | 349 | } |
@@ -355,10 +355,10 @@ static void falcon_free_special_buffer(struct efx_nic *efx, | |||
355 | return; | 355 | return; |
356 | 356 | ||
357 | EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " | 357 | EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " |
358 | "(virt %p phys %lx)\n", buffer->index, | 358 | "(virt %p phys %llx)\n", buffer->index, |
359 | buffer->index + buffer->entries - 1, | 359 | buffer->index + buffer->entries - 1, |
360 | (unsigned long long)buffer->dma_addr, buffer->len, | 360 | (u64)buffer->dma_addr, buffer->len, |
361 | buffer->addr, virt_to_phys(buffer->addr)); | 361 | buffer->addr, (u64)virt_to_phys(buffer->addr)); |
362 | 362 | ||
363 | pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, | 363 | pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, |
364 | buffer->dma_addr); | 364 | buffer->dma_addr); |
@@ -2357,10 +2357,10 @@ int falcon_probe_port(struct efx_nic *efx) | |||
2357 | FALCON_MAC_STATS_SIZE); | 2357 | FALCON_MAC_STATS_SIZE); |
2358 | if (rc) | 2358 | if (rc) |
2359 | return rc; | 2359 | return rc; |
2360 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n", | 2360 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n", |
2361 | (unsigned long long)efx->stats_buffer.dma_addr, | 2361 | (u64)efx->stats_buffer.dma_addr, |
2362 | efx->stats_buffer.addr, | 2362 | efx->stats_buffer.addr, |
2363 | virt_to_phys(efx->stats_buffer.addr)); | 2363 | (u64)virt_to_phys(efx->stats_buffer.addr)); |
2364 | 2364 | ||
2365 | return 0; | 2365 | return 0; |
2366 | } | 2366 | } |
@@ -2935,9 +2935,9 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2935 | goto fail4; | 2935 | goto fail4; |
2936 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | 2936 | BUG_ON(efx->irq_status.dma_addr & 0x0f); |
2937 | 2937 | ||
2938 | EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n", | 2938 | EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", |
2939 | (unsigned long long)efx->irq_status.dma_addr, | 2939 | (u64)efx->irq_status.dma_addr, |
2940 | efx->irq_status.addr, virt_to_phys(efx->irq_status.addr)); | 2940 | efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr)); |
2941 | 2941 | ||
2942 | falcon_probe_spi_devices(efx); | 2942 | falcon_probe_spi_devices(efx); |
2943 | 2943 | ||
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index bfca15da6f0f..14c11656e82c 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1082,8 +1082,8 @@ static int __init arlan_probe_here(struct net_device *dev, | |||
1082 | if (arlan_check_fingerprint(memaddr)) | 1082 | if (arlan_check_fingerprint(memaddr)) |
1083 | return -ENODEV; | 1083 | return -ENODEV; |
1084 | 1084 | ||
1085 | printk(KERN_NOTICE "%s: Arlan found at %x, \n ", dev->name, | 1085 | printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name, |
1086 | (int) virt_to_phys((void*)memaddr)); | 1086 | (u64) virt_to_phys((void*)memaddr)); |
1087 | 1087 | ||
1088 | ap->card = (void *) memaddr; | 1088 | ap->card = (void *) memaddr; |
1089 | dev->mem_start = memaddr; | 1089 | dev->mem_start = memaddr; |
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 9da5a4b81133..c3ea5fa7d05a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | static LIST_HEAD(dying_tasks); | 39 | static LIST_HEAD(dying_tasks); |
40 | static LIST_HEAD(dead_tasks); | 40 | static LIST_HEAD(dead_tasks); |
41 | static cpumask_t marked_cpus = CPU_MASK_NONE; | 41 | static cpumask_var_t marked_cpus; |
42 | static DEFINE_SPINLOCK(task_mortuary); | 42 | static DEFINE_SPINLOCK(task_mortuary); |
43 | static void process_task_mortuary(void); | 43 | static void process_task_mortuary(void); |
44 | 44 | ||
@@ -456,10 +456,10 @@ static void mark_done(int cpu) | |||
456 | { | 456 | { |
457 | int i; | 457 | int i; |
458 | 458 | ||
459 | cpu_set(cpu, marked_cpus); | 459 | cpumask_set_cpu(cpu, marked_cpus); |
460 | 460 | ||
461 | for_each_online_cpu(i) { | 461 | for_each_online_cpu(i) { |
462 | if (!cpu_isset(i, marked_cpus)) | 462 | if (!cpumask_test_cpu(i, marked_cpus)) |
463 | return; | 463 | return; |
464 | } | 464 | } |
465 | 465 | ||
@@ -468,7 +468,7 @@ static void mark_done(int cpu) | |||
468 | */ | 468 | */ |
469 | process_task_mortuary(); | 469 | process_task_mortuary(); |
470 | 470 | ||
471 | cpus_clear(marked_cpus); | 471 | cpumask_clear(marked_cpus); |
472 | } | 472 | } |
473 | 473 | ||
474 | 474 | ||
@@ -565,6 +565,20 @@ void sync_buffer(int cpu) | |||
565 | mutex_unlock(&buffer_mutex); | 565 | mutex_unlock(&buffer_mutex); |
566 | } | 566 | } |
567 | 567 | ||
568 | int __init buffer_sync_init(void) | ||
569 | { | ||
570 | if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL)) | ||
571 | return -ENOMEM; | ||
572 | |||
573 | cpumask_clear(marked_cpus); | ||
574 | return 0; | ||
575 | } | ||
576 | |||
577 | void __exit buffer_sync_cleanup(void) | ||
578 | { | ||
579 | free_cpumask_var(marked_cpus); | ||
580 | } | ||
581 | |||
568 | /* The function can be used to add a buffer worth of data directly to | 582 | /* The function can be used to add a buffer worth of data directly to |
569 | * the kernel buffer. The buffer is assumed to be a circular buffer. | 583 | * the kernel buffer. The buffer is assumed to be a circular buffer. |
570 | * Take the entries from index start and end at index end, wrapping | 584 | * Take the entries from index start and end at index end, wrapping |
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
index 3110732c1835..0ebf5db62679 100644
--- a/drivers/oprofile/buffer_sync.h
+++ b/drivers/oprofile/buffer_sync.h
@@ -19,4 +19,8 @@ void sync_stop(void); | |||
19 | /* sync the given CPU's buffer */ | 19 | /* sync the given CPU's buffer */ |
20 | void sync_buffer(int cpu); | 20 | void sync_buffer(int cpu); |
21 | 21 | ||
22 | /* initialize/destroy the buffer system. */ | ||
23 | int buffer_sync_init(void); | ||
24 | void buffer_sync_cleanup(void); | ||
25 | |||
22 | #endif /* OPROFILE_BUFFER_SYNC_H */ | 26 | #endif /* OPROFILE_BUFFER_SYNC_H */ |
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 3cffce90f82a..ced39f602292 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -183,6 +183,10 @@ static int __init oprofile_init(void) | |||
183 | { | 183 | { |
184 | int err; | 184 | int err; |
185 | 185 | ||
186 | err = buffer_sync_init(); | ||
187 | if (err) | ||
188 | return err; | ||
189 | |||
186 | err = oprofile_arch_init(&oprofile_ops); | 190 | err = oprofile_arch_init(&oprofile_ops); |
187 | 191 | ||
188 | if (err < 0 || timer) { | 192 | if (err < 0 || timer) { |
@@ -191,8 +195,10 @@ static int __init oprofile_init(void) | |||
191 | } | 195 | } |
192 | 196 | ||
193 | err = oprofilefs_register(); | 197 | err = oprofilefs_register(); |
194 | if (err) | 198 | if (err) { |
195 | oprofile_arch_exit(); | 199 | oprofile_arch_exit(); |
200 | buffer_sync_cleanup(); | ||
201 | } | ||
196 | 202 | ||
197 | return err; | 203 | return err; |
198 | } | 204 | } |
@@ -202,6 +208,7 @@ static void __exit oprofile_exit(void) | |||
202 | { | 208 | { |
203 | oprofilefs_unregister(); | 209 | oprofilefs_unregister(); |
204 | oprofile_arch_exit(); | 210 | oprofile_arch_exit(); |
211 | buffer_sync_cleanup(); | ||
205 | } | 212 | } |
206 | 213 | ||
207 | 214 | ||
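The ordering that falls out of the oprofile changes: buffer_sync_init() allocates the marked_cpus mask before anything else, and both the failure path of oprofile_init() and oprofile_exit() release it. Condensed, with the architecture and timer hooks elided, so this is only a sketch of the lifecycle, not the full functions:

static int __init example_init(void)
{
        int err;

        err = buffer_sync_init();       /* allocate marked_cpus first */
        if (err)
                return err;

        /* ... architecture and oprofilefs setup elided ... */

        err = oprofilefs_register();
        if (err)
                buffer_sync_cleanup();  /* unwind the allocation on failure */

        return err;
}

static void __exit example_exit(void)
{
        oprofilefs_unregister();
        buffer_sync_cleanup();          /* free marked_cpus last */
}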
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 26c536b51c5a..5f333403c2ea 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -42,6 +42,7 @@ | |||
42 | LIST_HEAD(dmar_drhd_units); | 42 | LIST_HEAD(dmar_drhd_units); |
43 | 43 | ||
44 | static struct acpi_table_header * __initdata dmar_tbl; | 44 | static struct acpi_table_header * __initdata dmar_tbl; |
45 | static acpi_size dmar_tbl_size; | ||
45 | 46 | ||
46 | static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) | 47 | static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) |
47 | { | 48 | { |
@@ -288,8 +289,9 @@ static int __init dmar_table_detect(void) | |||
288 | acpi_status status = AE_OK; | 289 | acpi_status status = AE_OK; |
289 | 290 | ||
290 | /* if we could find DMAR table, then there are DMAR devices */ | 291 | /* if we could find DMAR table, then there are DMAR devices */ |
291 | status = acpi_get_table(ACPI_SIG_DMAR, 0, | 292 | status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0, |
292 | (struct acpi_table_header **)&dmar_tbl); | 293 | (struct acpi_table_header **)&dmar_tbl, |
294 | &dmar_tbl_size); | ||
293 | 295 | ||
294 | if (ACPI_SUCCESS(status) && !dmar_tbl) { | 296 | if (ACPI_SUCCESS(status) && !dmar_tbl) { |
295 | printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); | 297 | printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); |
@@ -489,6 +491,7 @@ void __init detect_intel_iommu(void) | |||
489 | iommu_detected = 1; | 491 | iommu_detected = 1; |
490 | #endif | 492 | #endif |
491 | } | 493 | } |
494 | early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); | ||
492 | dmar_tbl = NULL; | 495 | dmar_tbl = NULL; |
493 | } | 496 | } |
494 | 497 | ||
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index b721c2fbe8f5..9d07a05d26f1 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/irq.h> | 6 | #include <linux/irq.h> |
7 | #include <asm/io_apic.h> | 7 | #include <asm/io_apic.h> |
8 | #include <asm/smp.h> | 8 | #include <asm/smp.h> |
9 | #include <asm/cpu.h> | ||
9 | #include <linux/intel-iommu.h> | 10 | #include <linux/intel-iommu.h> |
10 | #include "intr_remapping.h" | 11 | #include "intr_remapping.h" |
11 | 12 | ||
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index bf92802f2bbe..36e221beedcd 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
39 | 39 | ||
40 | #include <asm/mach-rdc321x/rdc321x_defs.h> | 40 | #include <asm/rdc321x_defs.h> |
41 | 41 | ||
42 | #define RDC_WDT_MASK 0x80000000 /* Mask */ | 42 | #define RDC_WDT_MASK 0x80000000 /* Mask */ |
43 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ | 43 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index eb0dfdeaa949..30963af5dba0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -26,9 +26,11 @@ | |||
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/string.h> | 28 | #include <linux/string.h> |
29 | #include <linux/bootmem.h> | ||
29 | 30 | ||
30 | #include <asm/ptrace.h> | 31 | #include <asm/ptrace.h> |
31 | #include <asm/irq.h> | 32 | #include <asm/irq.h> |
33 | #include <asm/idle.h> | ||
32 | #include <asm/sync_bitops.h> | 34 | #include <asm/sync_bitops.h> |
33 | #include <asm/xen/hypercall.h> | 35 | #include <asm/xen/hypercall.h> |
34 | #include <asm/xen/hypervisor.h> | 36 | #include <asm/xen/hypervisor.h> |
@@ -50,36 +52,55 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; | |||
50 | /* IRQ <-> IPI mapping */ | 52 | /* IRQ <-> IPI mapping */ |
51 | static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; | 53 | static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; |
52 | 54 | ||
53 | /* Packed IRQ information: binding type, sub-type index, and event channel. */ | 55 | /* Interrupt types. */ |
54 | struct packed_irq | 56 | enum xen_irq_type { |
55 | { | 57 | IRQT_UNBOUND = 0, |
56 | unsigned short evtchn; | ||
57 | unsigned char index; | ||
58 | unsigned char type; | ||
59 | }; | ||
60 | |||
61 | static struct packed_irq irq_info[NR_IRQS]; | ||
62 | |||
63 | /* Binding types. */ | ||
64 | enum { | ||
65 | IRQT_UNBOUND, | ||
66 | IRQT_PIRQ, | 58 | IRQT_PIRQ, |
67 | IRQT_VIRQ, | 59 | IRQT_VIRQ, |
68 | IRQT_IPI, | 60 | IRQT_IPI, |
69 | IRQT_EVTCHN | 61 | IRQT_EVTCHN |
70 | }; | 62 | }; |
71 | 63 | ||
72 | /* Convenient shorthand for packed representation of an unbound IRQ. */ | 64 | /* |
73 | #define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0) | 65 | * Packed IRQ information: |
66 | * type - enum xen_irq_type | ||
67 | * event channel - irq->event channel mapping | ||
68 | * cpu - cpu this event channel is bound to | ||
69 | * index - type-specific information: | ||
70 | * PIRQ - vector, with MSB being "needs EIO" | ||
71 | * VIRQ - virq number | ||
72 | * IPI - IPI vector | ||
73 | * EVTCHN - | ||
74 | */ | ||
75 | struct irq_info | ||
76 | { | ||
77 | enum xen_irq_type type; /* type */ | ||
78 | unsigned short evtchn; /* event channel */ | ||
79 | unsigned short cpu; /* cpu bound */ | ||
80 | |||
81 | union { | ||
82 | unsigned short virq; | ||
83 | enum ipi_vector ipi; | ||
84 | struct { | ||
85 | unsigned short gsi; | ||
86 | unsigned short vector; | ||
87 | } pirq; | ||
88 | } u; | ||
89 | }; | ||
90 | |||
91 | static struct irq_info irq_info[NR_IRQS]; | ||
74 | 92 | ||
75 | static int evtchn_to_irq[NR_EVENT_CHANNELS] = { | 93 | static int evtchn_to_irq[NR_EVENT_CHANNELS] = { |
76 | [0 ... NR_EVENT_CHANNELS-1] = -1 | 94 | [0 ... NR_EVENT_CHANNELS-1] = -1 |
77 | }; | 95 | }; |
78 | static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; | 96 | struct cpu_evtchn_s { |
79 | static u8 cpu_evtchn[NR_EVENT_CHANNELS]; | 97 | unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG]; |
80 | 98 | }; | |
81 | /* Reference counts for bindings to IRQs. */ | 99 | static struct cpu_evtchn_s *cpu_evtchn_mask_p; |
82 | static int irq_bindcount[NR_IRQS]; | 100 | static inline unsigned long *cpu_evtchn_mask(int cpu) |
101 | { | ||
102 | return cpu_evtchn_mask_p[cpu].bits; | ||
103 | } | ||
83 | 104 | ||
84 | /* Xen will never allocate port zero for any purpose. */ | 105 | /* Xen will never allocate port zero for any purpose. */ |
85 | #define VALID_EVTCHN(chn) ((chn) != 0) | 106 | #define VALID_EVTCHN(chn) ((chn) != 0) |
@@ -87,27 +108,108 @@ static int irq_bindcount[NR_IRQS]; | |||
87 | static struct irq_chip xen_dynamic_chip; | 108 | static struct irq_chip xen_dynamic_chip; |
88 | 109 | ||
89 | /* Constructor for packed IRQ information. */ | 110 | /* Constructor for packed IRQ information. */ |
90 | static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn) | 111 | static struct irq_info mk_unbound_info(void) |
112 | { | ||
113 | return (struct irq_info) { .type = IRQT_UNBOUND }; | ||
114 | } | ||
115 | |||
116 | static struct irq_info mk_evtchn_info(unsigned short evtchn) | ||
117 | { | ||
118 | return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn, | ||
119 | .cpu = 0 }; | ||
120 | } | ||
121 | |||
122 | static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi) | ||
91 | { | 123 | { |
92 | return (struct packed_irq) { evtchn, index, type }; | 124 | return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn, |
125 | .cpu = 0, .u.ipi = ipi }; | ||
126 | } | ||
127 | |||
128 | static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq) | ||
129 | { | ||
130 | return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn, | ||
131 | .cpu = 0, .u.virq = virq }; | ||
132 | } | ||
133 | |||
134 | static struct irq_info mk_pirq_info(unsigned short evtchn, | ||
135 | unsigned short gsi, unsigned short vector) | ||
136 | { | ||
137 | return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn, | ||
138 | .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } }; | ||
93 | } | 139 | } |
94 | 140 | ||
95 | /* | 141 | /* |
96 | * Accessors for packed IRQ information. | 142 | * Accessors for packed IRQ information. |
97 | */ | 143 | */ |
98 | static inline unsigned int evtchn_from_irq(int irq) | 144 | static struct irq_info *info_for_irq(unsigned irq) |
145 | { | ||
146 | return &irq_info[irq]; | ||
147 | } | ||
148 | |||
149 | static unsigned int evtchn_from_irq(unsigned irq) | ||
99 | { | 150 | { |
100 | return irq_info[irq].evtchn; | 151 | return info_for_irq(irq)->evtchn; |
101 | } | 152 | } |
102 | 153 | ||
103 | static inline unsigned int index_from_irq(int irq) | 154 | static enum ipi_vector ipi_from_irq(unsigned irq) |
104 | { | 155 | { |
105 | return irq_info[irq].index; | 156 | struct irq_info *info = info_for_irq(irq); |
157 | |||
158 | BUG_ON(info == NULL); | ||
159 | BUG_ON(info->type != IRQT_IPI); | ||
160 | |||
161 | return info->u.ipi; | ||
106 | } | 162 | } |
107 | 163 | ||
108 | static inline unsigned int type_from_irq(int irq) | 164 | static unsigned virq_from_irq(unsigned irq) |
109 | { | 165 | { |
110 | return irq_info[irq].type; | 166 | struct irq_info *info = info_for_irq(irq); |
167 | |||
168 | BUG_ON(info == NULL); | ||
169 | BUG_ON(info->type != IRQT_VIRQ); | ||
170 | |||
171 | return info->u.virq; | ||
172 | } | ||
173 | |||
174 | static unsigned gsi_from_irq(unsigned irq) | ||
175 | { | ||
176 | struct irq_info *info = info_for_irq(irq); | ||
177 | |||
178 | BUG_ON(info == NULL); | ||
179 | BUG_ON(info->type != IRQT_PIRQ); | ||
180 | |||
181 | return info->u.pirq.gsi; | ||
182 | } | ||
183 | |||
184 | static unsigned vector_from_irq(unsigned irq) | ||
185 | { | ||
186 | struct irq_info *info = info_for_irq(irq); | ||
187 | |||
188 | BUG_ON(info == NULL); | ||
189 | BUG_ON(info->type != IRQT_PIRQ); | ||
190 | |||
191 | return info->u.pirq.vector; | ||
192 | } | ||
193 | |||
194 | static enum xen_irq_type type_from_irq(unsigned irq) | ||
195 | { | ||
196 | return info_for_irq(irq)->type; | ||
197 | } | ||
198 | |||
199 | static unsigned cpu_from_irq(unsigned irq) | ||
200 | { | ||
201 | return info_for_irq(irq)->cpu; | ||
202 | } | ||
203 | |||
204 | static unsigned int cpu_from_evtchn(unsigned int evtchn) | ||
205 | { | ||
206 | int irq = evtchn_to_irq[evtchn]; | ||
207 | unsigned ret = 0; | ||
208 | |||
209 | if (irq != -1) | ||
210 | ret = cpu_from_irq(irq); | ||
211 | |||
212 | return ret; | ||
111 | } | 213 | } |
112 | 214 | ||
113 | static inline unsigned long active_evtchns(unsigned int cpu, | 215 | static inline unsigned long active_evtchns(unsigned int cpu, |
@@ -115,7 +217,7 @@ static inline unsigned long active_evtchns(unsigned int cpu, | |||
115 | unsigned int idx) | 217 | unsigned int idx) |
116 | { | 218 | { |
117 | return (sh->evtchn_pending[idx] & | 219 | return (sh->evtchn_pending[idx] & |
118 | cpu_evtchn_mask[cpu][idx] & | 220 | cpu_evtchn_mask(cpu)[idx] & |
119 | ~sh->evtchn_mask[idx]); | 221 | ~sh->evtchn_mask[idx]); |
120 | } | 222 | } |
121 | 223 | ||
@@ -125,13 +227,13 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
125 | 227 | ||
126 | BUG_ON(irq == -1); | 228 | BUG_ON(irq == -1); |
127 | #ifdef CONFIG_SMP | 229 | #ifdef CONFIG_SMP |
128 | irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu); | 230 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); |
129 | #endif | 231 | #endif |
130 | 232 | ||
131 | __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]); | 233 | __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); |
132 | __set_bit(chn, cpu_evtchn_mask[cpu]); | 234 | __set_bit(chn, cpu_evtchn_mask(cpu)); |
133 | 235 | ||
134 | cpu_evtchn[chn] = cpu; | 236 | irq_info[irq].cpu = cpu; |
135 | } | 237 | } |
136 | 238 | ||
137 | static void init_evtchn_cpu_bindings(void) | 239 | static void init_evtchn_cpu_bindings(void) |
@@ -142,17 +244,11 @@ static void init_evtchn_cpu_bindings(void) | |||
142 | 244 | ||
143 | /* By default all event channels notify CPU#0. */ | 245 | /* By default all event channels notify CPU#0. */ |
144 | for_each_irq_desc(i, desc) { | 246 | for_each_irq_desc(i, desc) { |
145 | desc->affinity = cpumask_of_cpu(0); | 247 | cpumask_copy(desc->affinity, cpumask_of(0)); |
146 | } | 248 | } |
147 | #endif | 249 | #endif |
148 | 250 | ||
149 | memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); | 251 | memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0))); |
150 | memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); | ||
151 | } | ||
152 | |||
153 | static inline unsigned int cpu_from_evtchn(unsigned int evtchn) | ||
154 | { | ||
155 | return cpu_evtchn[evtchn]; | ||
156 | } | 252 | } |
157 | 253 | ||
158 | static inline void clear_evtchn(int port) | 254 | static inline void clear_evtchn(int port) |
@@ -232,9 +328,8 @@ static int find_unbound_irq(void) | |||
232 | int irq; | 328 | int irq; |
233 | struct irq_desc *desc; | 329 | struct irq_desc *desc; |
234 | 330 | ||
235 | /* Only allocate from dynirq range */ | ||
236 | for (irq = 0; irq < nr_irqs; irq++) | 331 | for (irq = 0; irq < nr_irqs; irq++) |
237 | if (irq_bindcount[irq] == 0) | 332 | if (irq_info[irq].type == IRQT_UNBOUND) |
238 | break; | 333 | break; |
239 | 334 | ||
240 | if (irq == nr_irqs) | 335 | if (irq == nr_irqs) |
@@ -244,6 +339,8 @@ static int find_unbound_irq(void) | |||
244 | if (WARN_ON(desc == NULL)) | 339 | if (WARN_ON(desc == NULL)) |
245 | return -1; | 340 | return -1; |
246 | 341 | ||
342 | dynamic_irq_init(irq); | ||
343 | |||
247 | return irq; | 344 | return irq; |
248 | } | 345 | } |
249 | 346 | ||
@@ -258,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
258 | if (irq == -1) { | 355 | if (irq == -1) { |
259 | irq = find_unbound_irq(); | 356 | irq = find_unbound_irq(); |
260 | 357 | ||
261 | dynamic_irq_init(irq); | ||
262 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 358 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
263 | handle_level_irq, "event"); | 359 | handle_level_irq, "event"); |
264 | 360 | ||
265 | evtchn_to_irq[evtchn] = irq; | 361 | evtchn_to_irq[evtchn] = irq; |
266 | irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); | 362 | irq_info[irq] = mk_evtchn_info(evtchn); |
267 | } | 363 | } |
268 | 364 | ||
269 | irq_bindcount[irq]++; | ||
270 | |||
271 | spin_unlock(&irq_mapping_update_lock); | 365 | spin_unlock(&irq_mapping_update_lock); |
272 | 366 | ||
273 | return irq; | 367 | return irq; |
@@ -282,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
282 | spin_lock(&irq_mapping_update_lock); | 376 | spin_lock(&irq_mapping_update_lock); |
283 | 377 | ||
284 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | 378 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; |
379 | |||
285 | if (irq == -1) { | 380 | if (irq == -1) { |
286 | irq = find_unbound_irq(); | 381 | irq = find_unbound_irq(); |
287 | if (irq < 0) | 382 | if (irq < 0) |
288 | goto out; | 383 | goto out; |
289 | 384 | ||
290 | dynamic_irq_init(irq); | ||
291 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 385 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
292 | handle_level_irq, "ipi"); | 386 | handle_level_irq, "ipi"); |
293 | 387 | ||
@@ -298,15 +392,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
298 | evtchn = bind_ipi.port; | 392 | evtchn = bind_ipi.port; |
299 | 393 | ||
300 | evtchn_to_irq[evtchn] = irq; | 394 | evtchn_to_irq[evtchn] = irq; |
301 | irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); | 395 | irq_info[irq] = mk_ipi_info(evtchn, ipi); |
302 | |||
303 | per_cpu(ipi_to_irq, cpu)[ipi] = irq; | 396 | per_cpu(ipi_to_irq, cpu)[ipi] = irq; |
304 | 397 | ||
305 | bind_evtchn_to_cpu(evtchn, cpu); | 398 | bind_evtchn_to_cpu(evtchn, cpu); |
306 | } | 399 | } |
307 | 400 | ||
308 | irq_bindcount[irq]++; | ||
309 | |||
310 | out: | 401 | out: |
311 | spin_unlock(&irq_mapping_update_lock); | 402 | spin_unlock(&irq_mapping_update_lock); |
312 | return irq; | 403 | return irq; |
@@ -332,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
332 | 423 | ||
333 | irq = find_unbound_irq(); | 424 | irq = find_unbound_irq(); |
334 | 425 | ||
335 | dynamic_irq_init(irq); | ||
336 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 426 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
337 | handle_level_irq, "virq"); | 427 | handle_level_irq, "virq"); |
338 | 428 | ||
339 | evtchn_to_irq[evtchn] = irq; | 429 | evtchn_to_irq[evtchn] = irq; |
340 | irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); | 430 | irq_info[irq] = mk_virq_info(evtchn, virq); |
341 | 431 | ||
342 | per_cpu(virq_to_irq, cpu)[virq] = irq; | 432 | per_cpu(virq_to_irq, cpu)[virq] = irq; |
343 | 433 | ||
344 | bind_evtchn_to_cpu(evtchn, cpu); | 434 | bind_evtchn_to_cpu(evtchn, cpu); |
345 | } | 435 | } |
346 | 436 | ||
347 | irq_bindcount[irq]++; | ||
348 | |||
349 | spin_unlock(&irq_mapping_update_lock); | 437 | spin_unlock(&irq_mapping_update_lock); |
350 | 438 | ||
351 | return irq; | 439 | return irq; |
@@ -358,7 +446,7 @@ static void unbind_from_irq(unsigned int irq) | |||
358 | 446 | ||
359 | spin_lock(&irq_mapping_update_lock); | 447 | spin_lock(&irq_mapping_update_lock); |
360 | 448 | ||
361 | if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) { | 449 | if (VALID_EVTCHN(evtchn)) { |
362 | close.port = evtchn; | 450 | close.port = evtchn; |
363 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) | 451 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) |
364 | BUG(); | 452 | BUG(); |
@@ -366,11 +454,11 @@ static void unbind_from_irq(unsigned int irq) | |||
366 | switch (type_from_irq(irq)) { | 454 | switch (type_from_irq(irq)) { |
367 | case IRQT_VIRQ: | 455 | case IRQT_VIRQ: |
368 | per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) | 456 | per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) |
369 | [index_from_irq(irq)] = -1; | 457 | [virq_from_irq(irq)] = -1; |
370 | break; | 458 | break; |
371 | case IRQT_IPI: | 459 | case IRQT_IPI: |
372 | per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) | 460 | per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) |
373 | [index_from_irq(irq)] = -1; | 461 | [ipi_from_irq(irq)] = -1; |
374 | break; | 462 | break; |
375 | default: | 463 | default: |
376 | break; | 464 | break; |
@@ -380,7 +468,7 @@ static void unbind_from_irq(unsigned int irq) | |||
380 | bind_evtchn_to_cpu(evtchn, 0); | 468 | bind_evtchn_to_cpu(evtchn, 0); |
381 | 469 | ||
382 | evtchn_to_irq[evtchn] = -1; | 470 | evtchn_to_irq[evtchn] = -1; |
383 | irq_info[irq] = IRQ_UNBOUND; | 471 | irq_info[irq] = mk_unbound_info(); |
384 | 472 | ||
385 | dynamic_irq_cleanup(irq); | 473 | dynamic_irq_cleanup(irq); |
386 | } | 474 | } |
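`type_from_irq()`, `virq_from_irq()`, `ipi_from_irq()` and the reworked `cpu_from_evtchn()` used in this hunk replace the old untyped `index_from_irq()` and `cpu_evtchn[]` lookups. Their bodies are not shown in this excerpt; against the illustrative `struct irq_info` sketched above they would reduce to simple field reads, roughly:

```c
/* Sketch, matching the illustrative struct irq_info layout above. */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned virq_from_irq(unsigned irq)
{
	BUG_ON(type_from_irq(irq) != IRQT_VIRQ);
	return info_for_irq(irq)->u.virq;
}

static unsigned ipi_from_irq(unsigned irq)
{
	BUG_ON(type_from_irq(irq) != IRQT_IPI);
	return info_for_irq(irq)->u.ipi;
}

/* Look the channel up through its irq instead of the removed cpu_evtchn[]. */
static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];

	return irq != -1 ? info_for_irq(irq)->cpu : 0;
}
```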
@@ -498,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
498 | for(i = 0; i < NR_EVENT_CHANNELS; i++) { | 586 | for(i = 0; i < NR_EVENT_CHANNELS; i++) { |
499 | if (sync_test_bit(i, sh->evtchn_pending)) { | 587 | if (sync_test_bit(i, sh->evtchn_pending)) { |
500 | printk(" %d: event %d -> irq %d\n", | 588 | printk(" %d: event %d -> irq %d\n", |
501 | cpu_evtchn[i], i, | 589 | cpu_from_evtchn(i), i, |
502 | evtchn_to_irq[i]); | 590 | evtchn_to_irq[i]); |
503 | } | 591 | } |
504 | } | 592 | } |
505 | 593 | ||
@@ -508,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
508 | return IRQ_HANDLED; | 596 | return IRQ_HANDLED; |
509 | } | 597 | } |
510 | 598 | ||
511 | |||
512 | /* | 599 | /* |
513 | * Search the CPUs pending events bitmasks. For each one found, map | 600 | * Search the CPUs pending events bitmasks. For each one found, map |
514 | * the event number to an irq, and feed it into do_IRQ() for | 601 | * the event number to an irq, and feed it into do_IRQ() for |
@@ -521,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
521 | void xen_evtchn_do_upcall(struct pt_regs *regs) | 608 | void xen_evtchn_do_upcall(struct pt_regs *regs) |
522 | { | 609 | { |
523 | int cpu = get_cpu(); | 610 | int cpu = get_cpu(); |
611 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
524 | struct shared_info *s = HYPERVISOR_shared_info; | 612 | struct shared_info *s = HYPERVISOR_shared_info; |
525 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | 613 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); |
526 | static DEFINE_PER_CPU(unsigned, nesting_count); | 614 | static DEFINE_PER_CPU(unsigned, nesting_count); |
527 | unsigned count; | 615 | unsigned count; |
528 | 616 | ||
617 | exit_idle(); | ||
618 | irq_enter(); | ||
619 | |||
529 | do { | 620 | do { |
530 | unsigned long pending_words; | 621 | unsigned long pending_words; |
531 | 622 | ||
@@ -550,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) | |||
550 | int irq = evtchn_to_irq[port]; | 641 | int irq = evtchn_to_irq[port]; |
551 | 642 | ||
552 | if (irq != -1) | 643 | if (irq != -1) |
553 | xen_do_IRQ(irq, regs); | 644 | handle_irq(irq, regs); |
554 | } | 645 | } |
555 | } | 646 | } |
556 | 647 | ||
@@ -561,12 +652,17 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) | |||
561 | } while(count != 1); | 652 | } while(count != 1); |
562 | 653 | ||
563 | out: | 654 | out: |
655 | irq_exit(); | ||
656 | set_irq_regs(old_regs); | ||
657 | |||
564 | put_cpu(); | 658 | put_cpu(); |
565 | } | 659 | } |
566 | 660 | ||
567 | /* Rebind a new event channel to an existing irq. */ | 661 | /* Rebind a new event channel to an existing irq. */ |
568 | void rebind_evtchn_irq(int evtchn, int irq) | 662 | void rebind_evtchn_irq(int evtchn, int irq) |
569 | { | 663 | { |
664 | struct irq_info *info = info_for_irq(irq); | ||
665 | |||
570 | /* Make sure the irq is masked, since the new event channel | 666 | /* Make sure the irq is masked, since the new event channel |
571 | will also be masked. */ | 667 | will also be masked. */ |
572 | disable_irq(irq); | 668 | disable_irq(irq); |
@@ -576,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
576 | /* After resume the irq<->evtchn mappings are all cleared out */ | 672 | /* After resume the irq<->evtchn mappings are all cleared out */ |
577 | BUG_ON(evtchn_to_irq[evtchn] != -1); | 673 | BUG_ON(evtchn_to_irq[evtchn] != -1); |
578 | /* Expect irq to have been bound before, | 674 | /* Expect irq to have been bound before, |
579 | so the bindcount should be non-0 */ | 675 | so there should be a proper type */ |
580 | BUG_ON(irq_bindcount[irq] == 0); | 676 | BUG_ON(info->type == IRQT_UNBOUND); |
581 | 677 | ||
582 | evtchn_to_irq[evtchn] = irq; | 678 | evtchn_to_irq[evtchn] = irq; |
583 | irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); | 679 | irq_info[irq] = mk_evtchn_info(evtchn); |
584 | 680 | ||
585 | spin_unlock(&irq_mapping_update_lock); | 681 | spin_unlock(&irq_mapping_update_lock); |
586 | 682 | ||
@@ -690,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu) | |||
690 | if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) | 786 | if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) |
691 | continue; | 787 | continue; |
692 | 788 | ||
693 | BUG_ON(irq_info[irq].type != IRQT_VIRQ); | 789 | BUG_ON(virq_from_irq(irq) != virq); |
694 | BUG_ON(irq_info[irq].index != virq); | ||
695 | 790 | ||
696 | /* Get a new binding from Xen. */ | 791 | /* Get a new binding from Xen. */ |
697 | bind_virq.virq = virq; | 792 | bind_virq.virq = virq; |
@@ -703,7 +798,7 @@ static void restore_cpu_virqs(unsigned int cpu) | |||
703 | 798 | ||
704 | /* Record the new mapping. */ | 799 | /* Record the new mapping. */ |
705 | evtchn_to_irq[evtchn] = irq; | 800 | evtchn_to_irq[evtchn] = irq; |
706 | irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); | 801 | irq_info[irq] = mk_virq_info(evtchn, virq); |
707 | bind_evtchn_to_cpu(evtchn, cpu); | 802 | bind_evtchn_to_cpu(evtchn, cpu); |
708 | 803 | ||
709 | /* Ready for use. */ | 804 | /* Ready for use. */ |
@@ -720,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu) | |||
720 | if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) | 815 | if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) |
721 | continue; | 816 | continue; |
722 | 817 | ||
723 | BUG_ON(irq_info[irq].type != IRQT_IPI); | 818 | BUG_ON(ipi_from_irq(irq) != ipi); |
724 | BUG_ON(irq_info[irq].index != ipi); | ||
725 | 819 | ||
726 | /* Get a new binding from Xen. */ | 820 | /* Get a new binding from Xen. */ |
727 | bind_ipi.vcpu = cpu; | 821 | bind_ipi.vcpu = cpu; |
@@ -732,7 +826,7 @@ static void restore_cpu_ipis(unsigned int cpu) | |||
732 | 826 | ||
733 | /* Record the new mapping. */ | 827 | /* Record the new mapping. */ |
734 | evtchn_to_irq[evtchn] = irq; | 828 | evtchn_to_irq[evtchn] = irq; |
735 | irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); | 829 | irq_info[irq] = mk_ipi_info(evtchn, ipi); |
736 | bind_evtchn_to_cpu(evtchn, cpu); | 830 | bind_evtchn_to_cpu(evtchn, cpu); |
737 | 831 | ||
738 | /* Ready for use. */ | 832 | /* Ready for use. */ |
@@ -812,8 +906,11 @@ void xen_irq_resume(void) | |||
812 | 906 | ||
813 | static struct irq_chip xen_dynamic_chip __read_mostly = { | 907 | static struct irq_chip xen_dynamic_chip __read_mostly = { |
814 | .name = "xen-dyn", | 908 | .name = "xen-dyn", |
909 | |||
910 | .disable = disable_dynirq, | ||
815 | .mask = disable_dynirq, | 911 | .mask = disable_dynirq, |
816 | .unmask = enable_dynirq, | 912 | .unmask = enable_dynirq, |
913 | |||
817 | .ack = ack_dynirq, | 914 | .ack = ack_dynirq, |
818 | .set_affinity = set_affinity_irq, | 915 | .set_affinity = set_affinity_irq, |
819 | .retrigger = retrigger_dynirq, | 916 | .retrigger = retrigger_dynirq, |
@@ -822,6 +919,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { | |||
822 | void __init xen_init_IRQ(void) | 919 | void __init xen_init_IRQ(void) |
823 | { | 920 | { |
824 | int i; | 921 | int i; |
922 | size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s); | ||
923 | |||
924 | cpu_evtchn_mask_p = alloc_bootmem(size); | ||
925 | BUG_ON(cpu_evtchn_mask_p == NULL); | ||
825 | 926 | ||
826 | init_evtchn_cpu_bindings(); | 927 | init_evtchn_cpu_bindings(); |
827 | 928 | ||
@@ -829,9 +930,5 @@ void __init xen_init_IRQ(void) | |||
829 | for (i = 0; i < NR_EVENT_CHANNELS; i++) | 930 | for (i = 0; i < NR_EVENT_CHANNELS; i++) |
830 | mask_evtchn(i); | 931 | mask_evtchn(i); |
831 | 932 | ||
832 | /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ | ||
833 | for (i = 0; i < nr_irqs; i++) | ||
834 | irq_bindcount[i] = 0; | ||
835 | |||
836 | irq_ctx_init(smp_processor_id()); | 933 | irq_ctx_init(smp_processor_id()); |
837 | } | 934 | } |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 56892a142ee2..3ccd348d112d 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -108,7 +108,7 @@ static void do_suspend(void) | |||
108 | /* XXX use normal device tree? */ | 108 | /* XXX use normal device tree? */ |
109 | xenbus_suspend(); | 109 | xenbus_suspend(); |
110 | 110 | ||
111 | err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0)); | 111 | err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); |
112 | if (err) { | 112 | if (err) { |
113 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); | 113 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); |
114 | goto out; | 114 | goto out; |
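Both files also pick up the newer cpumask API visible in the hunks above: instead of assigning the struct-valued `cpumask_of_cpu(0)`, the code copies from the read-only mask returned by `cpumask_of(0)` (for `desc->affinity`) or passes that pointer straight to `stop_machine()`. A minimal illustration of the idiom follows; the helper name here is hypothetical and not part of the patch:

```c
#include <linux/cpumask.h>

/* Hypothetical helper, only to contrast the old and new idioms. */
static void point_mask_at_cpu0(struct cpumask *dst)
{
	/* old style:  *dst = cpumask_of_cpu(0);      (struct assignment) */
	/* new style: copy from the const mask that cpumask_of() returns  */
	cpumask_copy(dst, cpumask_of(0));
}
```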