Diffstat (limited to 'drivers')
33 files changed, 739 insertions, 410 deletions
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index c3e841f3cde9..ab0aff3c7d6a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -365,7 +365,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_table
+ * FUNCTION:    acpi_get_table_with_size
  *
  * PARAMETERS:  Signature           - ACPI signature of needed table
  *              Instance            - Which instance (for SSDTs)
@@ -377,8 +377,9 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
  *
  *****************************************************************************/
 acpi_status
-acpi_get_table(char *signature,
-               u32 instance, struct acpi_table_header **out_table)
+acpi_get_table_with_size(char *signature,
+               u32 instance, struct acpi_table_header **out_table,
+               acpi_size *tbl_size)
 {
         u32 i;
         u32 j;
@@ -408,6 +409,7 @@ acpi_get_table(char *signature,
                 acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
         if (ACPI_SUCCESS(status)) {
                 *out_table = acpi_gbl_root_table_list.tables[i].pointer;
+                *tbl_size = acpi_gbl_root_table_list.tables[i].length;
         }
 
         if (!acpi_gbl_permanent_mmap) {
@@ -420,6 +422,15 @@ acpi_get_table(char *signature,
         return (AE_NOT_FOUND);
 }
 
+acpi_status
+acpi_get_table(char *signature,
+               u32 instance, struct acpi_table_header **out_table)
+{
+        acpi_size tbl_size;
+
+        return acpi_get_table_with_size(signature,
+                        instance, out_table, &tbl_size);
+}
 ACPI_EXPORT_SYMBOL(acpi_get_table)
 
 /*******************************************************************************
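The split lets callers that run before acpi_gbl_permanent_mmap is set learn the mapped length and drop the temporary mapping again. A minimal sketch of the intended call pattern, paired with the early_acpi_os_unmap_memory() helper added in drivers/acpi/osl.c below; the function name and the choice of ACPI_SIG_MCFG are illustrative, not part of the commit:

    #include <linux/acpi.h>

    static int __init example_table_probe(void)
    {
            struct acpi_table_header *table = NULL;
            acpi_size tbl_size;
            acpi_status status;

            status = acpi_get_table_with_size(ACPI_SIG_MCFG, 0, &table, &tbl_size);
            if (ACPI_FAILURE(status) || !table)
                    return -ENODEV;

            /* ... walk the table ... */

            /* Before acpi_gbl_permanent_mmap is set, the table lives in a
             * temporary early-boot mapping that must be torn down with the
             * size reported alongside it. */
            early_acpi_os_unmap_memory((void __iomem *)table, tbl_size);
            return 0;
    }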
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 1e35f342957c..eb8980d67368 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -272,14 +272,21 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
-void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
+void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 {
-        if (acpi_gbl_permanent_mmap) {
+        if (acpi_gbl_permanent_mmap)
                 iounmap(virt);
-        }
+        else
+                __acpi_unmap_table(virt, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
+void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
+{
+        if (!acpi_gbl_permanent_mmap)
+                __acpi_unmap_table(virt, size);
+}
+
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 9cc769b587ff..68fd3d292799 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -516,12 +516,12 @@ int acpi_processor_preregister_performance(
                         continue;
                 }
 
-                if (!performance || !percpu_ptr(performance, i)) {
+                if (!performance || !per_cpu_ptr(performance, i)) {
                         retval = -EINVAL;
                         continue;
                 }
 
-                pr->performance = percpu_ptr(performance, i);
+                pr->performance = per_cpu_ptr(performance, i);
                 cpumask_set_cpu(i, pr->performance->shared_cpu_map);
                 if (acpi_processor_get_psd(pr)) {
                         retval = -EINVAL;
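percpu_ptr() was the accessor for dynamically allocated per-cpu data; this series renames it to per_cpu_ptr(). A short sketch of the surviving API, with illustrative names:

    #include <linux/percpu.h>

    struct perf_count_sketch { unsigned long transitions; };
    static struct perf_count_sketch *counters;  /* from alloc_percpu() */

    static void count_transition(int cpu)
    {
            /* per_cpu_ptr() resolves the given CPU's copy of the object */
            per_cpu_ptr(counters, cpu)->transitions++;
    }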
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a8852952fac4..fec1ae36d431 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -181,14 +181,15 @@ acpi_table_parse_entries(char *id,
         struct acpi_subtable_header *entry;
         unsigned int count = 0;
         unsigned long table_end;
+        acpi_size tbl_size;
 
         if (!handler)
                 return -EINVAL;
 
         if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-                acpi_get_table(id, acpi_apic_instance, &table_header);
+                acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size);
         else
-                acpi_get_table(id, 0, &table_header);
+                acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
 
         if (!table_header) {
                 printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
@@ -206,8 +207,10 @@ acpi_table_parse_entries(char *id,
                        table_end) {
                 if (entry->type == entry_id
                     && (!max_entries || count++ < max_entries))
-                        if (handler(entry, table_end))
+                        if (handler(entry, table_end)) {
+                                early_acpi_os_unmap_memory((char *)table_header, tbl_size);
                                 return -EINVAL;
+                        }
 
                 entry = (struct acpi_subtable_header *)
                     ((unsigned long)entry + entry->length);
@@ -217,6 +220,7 @@ acpi_table_parse_entries(char *id,
                        "%i found\n", id, entry_id, count - max_entries, count);
         }
 
+        early_acpi_os_unmap_memory((char *)table_header, tbl_size);
         return count;
 }
 
@@ -241,17 +245,19 @@ acpi_table_parse_madt(enum acpi_madt_type id,
 int __init acpi_table_parse(char *id, acpi_table_handler handler)
 {
         struct acpi_table_header *table = NULL;
+        acpi_size tbl_size;
 
         if (!handler)
                 return -EINVAL;
 
         if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-                acpi_get_table(id, acpi_apic_instance, &table);
+                acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
         else
-                acpi_get_table(id, 0, &table);
+                acpi_get_table_with_size(id, 0, &table, &tbl_size);
 
         if (table) {
                 handler(table);
+                early_acpi_os_unmap_memory(table, tbl_size);
                 return 0;
         } else
                 return 1;
@@ -265,8 +271,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
 static void __init check_multiple_madt(void)
 {
         struct acpi_table_header *table = NULL;
+        acpi_size tbl_size;
 
-        acpi_get_table(ACPI_SIG_MADT, 2, &table);
+        acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
         if (table) {
                 printk(KERN_WARNING PREFIX
                        "BIOS bug: multiple APIC/MADT found,"
@@ -275,6 +282,7 @@ static void __init check_multiple_madt(void)
                        "If \"acpi_apic_instance=%d\" works better, "
                        "notify linux-acpi@vger.kernel.org\n",
                        acpi_apic_instance ? 0 : 2);
+                early_acpi_os_unmap_memory(table, tbl_size);
 
         } else
                 acpi_apic_instance = 0;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 719ee5c1c8d9..5b257a57bc57 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -107,7 +107,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
 /*
  * Print cpu online, possible, present, and system maps
  */
-static ssize_t print_cpus_map(char *buf, cpumask_t *map)
+static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 {
         int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
 
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index a778fb52b11f..bf6b13206d00 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -31,7 +31,10 @@
 #include <linux/hardirq.h>
 #include <linux/topology.h>
 
-#define define_one_ro(_name) \
+#define define_one_ro_named(_name, _func) \
+static SYSDEV_ATTR(_name, 0444, _func, NULL)
+
+#define define_one_ro(_name) \
 static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_id_show_func(name) \
@@ -42,8 +45,8 @@ static ssize_t show_##name(struct sys_device *dev, \
         return sprintf(buf, "%d\n", topology_##name(cpu)); \
 }
 
-#if defined(topology_thread_siblings) || defined(topology_core_siblings)
-static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
 {
         ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
         int n = 0;
@@ -65,7 +68,7 @@ static ssize_t show_##name(struct sys_device *dev, \
                            struct sysdev_attribute *attr, char *buf) \
 { \
         unsigned int cpu = dev->id; \
-        return show_cpumap(0, &(topology_##name(cpu)), buf); \
+        return show_cpumap(0, topology_##name(cpu), buf); \
 }
 
 #define define_siblings_show_list(name) \
@@ -74,7 +77,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
                                   char *buf) \
 { \
         unsigned int cpu = dev->id; \
-        return show_cpumap(1, &(topology_##name(cpu)), buf); \
+        return show_cpumap(1, topology_##name(cpu), buf); \
 }
 
 #else
@@ -82,9 +85,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
 static ssize_t show_##name(struct sys_device *dev, \
                            struct sysdev_attribute *attr, char *buf) \
 { \
-        unsigned int cpu = dev->id; \
-        cpumask_t mask = topology_##name(cpu); \
-        return show_cpumap(0, &mask, buf); \
+        return show_cpumap(0, topology_##name(dev->id), buf); \
 }
 
 #define define_siblings_show_list(name) \
@@ -92,9 +93,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
                                   struct sysdev_attribute *attr, \
                                   char *buf) \
 { \
-        unsigned int cpu = dev->id; \
-        cpumask_t mask = topology_##name(cpu); \
-        return show_cpumap(1, &mask, buf); \
+        return show_cpumap(1, topology_##name(dev->id), buf); \
 }
 #endif
 
@@ -107,13 +106,13 @@ define_one_ro(physical_package_id);
 define_id_show_func(core_id);
 define_one_ro(core_id);
 
-define_siblings_show_func(thread_siblings);
-define_one_ro(thread_siblings);
-define_one_ro(thread_siblings_list);
+define_siblings_show_func(thread_cpumask);
+define_one_ro_named(thread_siblings, show_thread_cpumask);
+define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
 
-define_siblings_show_func(core_siblings);
-define_one_ro(core_siblings);
-define_one_ro(core_siblings_list);
+define_siblings_show_func(core_cpumask);
+define_one_ro_named(core_siblings, show_core_cpumask);
+define_one_ro_named(core_siblings_list, show_core_cpumask_list);
 
 static struct attribute *default_attrs[] = {
         &attr_physical_package_id.attr,
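The old topology_thread_siblings()/topology_core_siblings() accessors returned a cpumask_t by value; their *_cpumask() replacements return const struct cpumask *, so nothing NR_CPUS-sized is copied onto the stack. A standalone sketch of the new shape (the function name is illustrative):

    #include <linux/topology.h>
    #include <linux/cpumask.h>

    static ssize_t sketch_core_siblings_list(unsigned int cpu, char *buf, int len)
    {
            /* old: cpumask_t mask = topology_core_siblings(cpu);  (stack copy)
             * new: borrow a pointer into the topology data instead */
            const struct cpumask *mask = topology_core_cpumask(cpu);

            return cpulist_scnprintf(buf, len, mask);
    }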
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index e1129fad96dd..ee19b6e8fcb4 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
 #endif
 
 #ifndef CONFIG_X86_64
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 #define PMTMR_EXPECTED_RATE \
   ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
 /*
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 1bde303b970b..8615059a8729 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -7,7 +7,7 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 
 #define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */
 #define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */
diff --git a/drivers/eisa/Kconfig b/drivers/eisa/Kconfig
index c0646576cf47..2705284f6223 100644
--- a/drivers/eisa/Kconfig
+++ b/drivers/eisa/Kconfig
@@ -3,7 +3,7 @@
 #
 config EISA_VLB_PRIMING
         bool "Vesa Local Bus priming"
-        depends on X86_PC && EISA
+        depends on X86 && EISA
         default n
         ---help---
           Activate this option if your system contains a Vesa Local
@@ -24,11 +24,11 @@ config EISA_PCI_EISA
           When in doubt, say Y.
 
 # Using EISA_VIRTUAL_ROOT on something other than an Alpha or
-# an X86_PC may lead to crashes...
+# an X86 may lead to crashes...
 
 config EISA_VIRTUAL_ROOT
         bool "EISA virtual root device"
-        depends on EISA && (ALPHA || X86_PC)
+        depends on EISA && (ALPHA || X86)
         default y
         ---help---
           Activate this option if your system only have EISA bus
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 777fba48d2d3..3009e0171e54 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -244,7 +244,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
  */
 int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 {
-        cpumask_t old_mask;
+        cpumask_var_t old_mask;
         int ret = 0;
 
         if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -254,8 +254,11 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
         }
 
         /* SMI requires CPU 0 */
-        old_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+        if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+                return -ENOMEM;
+
+        cpumask_copy(old_mask, &current->cpus_allowed);
+        set_cpus_allowed_ptr(current, cpumask_of(0));
         if (smp_processor_id() != 0) {
                 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                         __func__);
@@ -275,7 +278,8 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
         );
 
 out:
-        set_cpus_allowed_ptr(current, &old_mask);
+        set_cpus_allowed_ptr(current, old_mask);
+        free_cpumask_var(old_mask);
         return ret;
 }
 
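The same pattern recurs in several drivers in this series: a cpumask_t local is too large for the stack once NR_CPUS reaches 4096, so the mask moves off-stack via cpumask_var_t. The save/pin/restore shape as a self-contained sketch; run_on_cpu0() and the -EBUSY fallback are illustrative, not from the patch:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    static int run_on_cpu0(void (*fn)(void))
    {
            cpumask_var_t old_mask;
            int ret = 0;

            if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(old_mask, &current->cpus_allowed);  /* save */
            set_cpus_allowed_ptr(current, cpumask_of(0));    /* pin to CPU 0 */

            if (smp_processor_id() == 0)
                    fn();
            else
                    ret = -EBUSY;

            set_cpus_allowed_ptr(current, old_mask);         /* restore */
            free_cpumask_var(old_mask);
            return ret;
    }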
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 3ab3e4a41d67..7b7ddc2d51c9 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -938,8 +938,8 @@ static int __init ibft_init(void)
                 return -ENOMEM;
 
         if (ibft_addr) {
-                printk(KERN_INFO "iBFT detected at 0x%lx.\n",
-                       virt_to_phys((void *)ibft_addr));
+                printk(KERN_INFO "iBFT detected at 0x%llx.\n",
+                       (u64)virt_to_phys((void *)ibft_addr));
 
                 rc = ibft_check_device();
                 if (rc)
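This and the following %lx to %llx conversions share one motivation: virt_to_phys() now returns phys_addr_t, which is 64-bit when X86_PAE is enabled, so the value no longer fits a long on 32-bit kernels. Casting to u64 keeps a single format string correct on every configuration; a sketch with an illustrative helper name:

    #include <linux/kernel.h>
    #include <asm/io.h>  /* virt_to_phys() */

    static void report_phys(const char *what, void *virt)
    {
            /* phys_addr_t may be 32 or 64 bit depending on the platform;
             * the u64 cast makes one %llx format string valid everywhere */
            printk(KERN_INFO "%s at 0x%llx\n", what, (u64)virt_to_phys(virt));
    }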
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 8df849f66830..b756f043a5f4 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -678,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
         *start = &buf[offset];
         *eof = 0;
 
-        DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+        DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
                        atomic_read(&dev->vma_count),
-                       high_memory, virt_to_phys(high_memory));
+                       high_memory, (u64)virt_to_phys(high_memory));
         list_for_each_entry(pt, &dev->vmalist, head) {
                 if (!(vma = pt->vma))
                         continue;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 35561689ff38..ea2638b41982 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -13,11 +13,11 @@ menuconfig INPUT_KEYBOARD
 if INPUT_KEYBOARD
 
 config KEYBOARD_ATKBD
-        tristate "AT keyboard" if EMBEDDED || !X86_PC
+        tristate "AT keyboard" if EMBEDDED || !X86
         default y
         select SERIO
         select SERIO_LIBPS2
-        select SERIO_I8042 if X86_PC
+        select SERIO_I8042 if X86
         select SERIO_GSCPS2 if GSC
         help
           Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 9705f3a00a3d..4f38e6f7dfdd 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
         default y
         select SERIO
         select SERIO_LIBPS2
-        select SERIO_I8042 if X86_PC
+        select SERIO_I8042 if X86
         select SERIO_GSCPS2 if GSC
         help
           Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 76f2b36881c3..a3d3cbab359a 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
 config LGUEST
         tristate "Linux hypervisor example code"
-        depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !X86_VOYAGER
+        depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX
         select HVC_DRIVER
         ---help---
           This is a very simple module which allows you to run
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c64e6798878a..1c484084ed4f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -162,7 +162,7 @@ config ENCLOSURE_SERVICES
 config SGI_XP
         tristate "Support communication between SGI SSIs"
         depends on NET
-        depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_64) && SMP
+        depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
         select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
         select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
         select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP
@@ -189,7 +189,7 @@ config HP_ILO
 
 config SGI_GRU
         tristate "SGI GRU driver"
-        depends on (X86_64 || IA64_SGI_UV || IA64_GENERIC) && SMP
+        depends on (X86_UV || IA64_SGI_UV || IA64_GENERIC) && SMP
         default n
         select MMU_NOTIFIER
         ---help---
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 650983806392..c67e4e8bd62c 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -36,23 +36,11 @@
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
+#include <asm/uv/uv.h>
 #include "gru.h"
 #include "grulib.h"
 #include "grutables.h"
 
-#if defined CONFIG_X86_64
-#include <asm/genapic.h>
-#include <asm/irq.h>
-#define IS_UV() is_uv_system()
-#elif defined CONFIG_IA64
-#include <asm/system.h>
-#include <asm/sn/simulator.h>
-/* temp support for running on hardware simulator */
-#define IS_UV() IS_MEDUSA() || ia64_platform_is("uv")
-#else
-#define IS_UV() 0
-#endif
-
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_mmrs.h>
 
@@ -381,7 +369,7 @@ static int __init gru_init(void)
         char id[10];
         void *gru_start_vaddr;
 
-        if (!IS_UV())
+        if (!is_uv_system())
                 return 0;
 
 #if defined CONFIG_IA64
@@ -451,7 +439,7 @@ static void __exit gru_exit(void)
         int order = get_order(sizeof(struct gru_state) *
                         GRU_CHIPLETS_PER_BLADE);
 
-        if (!IS_UV())
+        if (!is_uv_system())
                 return;
 
         for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 7b4cbd5e03e9..2275126cb334 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -15,19 +15,19 @@
 
 #include <linux/mutex.h>
 
-#ifdef CONFIG_IA64
+#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV
+#include <asm/uv/uv.h>
+#define is_uv() is_uv_system()
+#endif
+
+#ifndef is_uv
+#define is_uv() 0
+#endif
+
+#if defined CONFIG_IA64
 #include <asm/system.h>
 #include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
 #define is_shub() ia64_platform_is("sn2")
-#ifdef CONFIG_IA64_SGI_UV
-#define is_uv() ia64_platform_is("uv")
-#else
-#define is_uv() 0
-#endif
-#endif
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define is_uv() is_uv_system()
 #endif
 
 #ifndef is_shub1
@@ -42,10 +42,6 @@
 #define is_shub() 0
 #endif
 
-#ifndef is_uv
-#define is_uv() 0
-#endif
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition) BUG_ON(condition)
 #else
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 89218f7cfaa7..6576170de962 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -318,7 +318,7 @@ xpc_hb_checker(void *ignore)
 
         /* this thread was marked active by xpc_hb_init() */
 
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
+        set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
 
         /* set our heartbeating to other partitions into motion */
         xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8b12e6e109d3..2ff88791cebc 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -273,7 +273,7 @@ config MTD_NAND_CAFE
 
 config MTD_NAND_CS553X
         tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
-        depends on X86_32 && (X86_PC || X86_GENERICARCH)
+        depends on X86_32
         help
           The CS553x companion chips for the AMD Geode processor
           include NAND flash controllers with built-in hardware ECC
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index fac43fd6fc87..6a843f7350ab 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -150,7 +150,8 @@ static int __init ne3210_eisa_probe (struct device *device)
         if (phys_mem < virt_to_phys(high_memory)) {
                 printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
                 printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
-                printk(KERN_CRIT "ne3210.c: or to an address above 0x%lx.\n", virt_to_phys(high_memory));
+                printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
+                        (u64)virt_to_phys(high_memory));
                 printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
                 retval = -EINVAL;
                 goto out3;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ab0e09bf154d..847e9bb0098f 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -854,20 +854,27 @@ static void efx_fini_io(struct efx_nic *efx)
  * interrupts across them. */
 static int efx_wanted_rx_queues(void)
 {
-        cpumask_t core_mask;
+        cpumask_var_t core_mask;
         int count;
         int cpu;
 
-        cpus_clear(core_mask);
+        if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+                printk(KERN_WARNING
+                       "efx.c: allocation failure, irq balancing hobbled\n");
+                return 1;
+        }
+
+        cpumask_clear(core_mask);
         count = 0;
         for_each_online_cpu(cpu) {
-                if (!cpu_isset(cpu, core_mask)) {
+                if (!cpumask_test_cpu(cpu, core_mask)) {
                         ++count;
-                        cpus_or(core_mask, core_mask,
-                                topology_core_siblings(cpu));
+                        cpumask_or(core_mask, core_mask,
+                                   topology_core_cpumask(cpu));
                 }
         }
 
+        free_cpumask_var(core_mask);
         return count;
 }
 
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d5378e60fcdd..064307c2277e 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -338,10 +338,10 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
         nic_data->next_buffer_table += buffer->entries;
 
         EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
-                "(virt %p phys %lx)\n", buffer->index,
+                "(virt %p phys %llx)\n", buffer->index,
                 buffer->index + buffer->entries - 1,
-                (unsigned long long)buffer->dma_addr, len,
-                buffer->addr, virt_to_phys(buffer->addr));
+                (u64)buffer->dma_addr, len,
+                buffer->addr, (u64)virt_to_phys(buffer->addr));
 
         return 0;
 }
@@ -353,10 +353,10 @@ static void falcon_free_special_buffer(struct efx_nic *efx,
                 return;
 
         EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
-                "(virt %p phys %lx)\n", buffer->index,
+                "(virt %p phys %llx)\n", buffer->index,
                 buffer->index + buffer->entries - 1,
-                (unsigned long long)buffer->dma_addr, buffer->len,
-                buffer->addr, virt_to_phys(buffer->addr));
+                (u64)buffer->dma_addr, buffer->len,
+                buffer->addr, (u64)virt_to_phys(buffer->addr));
 
         pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
                             buffer->dma_addr);
@@ -2343,10 +2343,10 @@ int falcon_probe_port(struct efx_nic *efx)
                                FALCON_MAC_STATS_SIZE);
         if (rc)
                 return rc;
-        EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
-                (unsigned long long)efx->stats_buffer.dma_addr,
+        EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
+                (u64)efx->stats_buffer.dma_addr,
                 efx->stats_buffer.addr,
-                virt_to_phys(efx->stats_buffer.addr));
+                (u64)virt_to_phys(efx->stats_buffer.addr));
 
         return 0;
 }
@@ -2921,9 +2921,9 @@ int falcon_probe_nic(struct efx_nic *efx)
                 goto fail4;
         BUG_ON(efx->irq_status.dma_addr & 0x0f);
 
-        EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
-                (unsigned long long)efx->irq_status.dma_addr,
-                efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
+        EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
+                (u64)efx->irq_status.dma_addr,
+                efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
 
         falcon_probe_spi_devices(efx);
 
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index bfca15da6f0f..14c11656e82c 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1082,8 +1082,8 @@ static int __init arlan_probe_here(struct net_device *dev,
         if (arlan_check_fingerprint(memaddr))
                 return -ENODEV;
 
-        printk(KERN_NOTICE "%s: Arlan found at %x, \n ", dev->name,
-               (int) virt_to_phys((void*)memaddr));
+        printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name,
+               (u64) virt_to_phys((void*)memaddr));
 
         ap->card = (void *) memaddr;
         dev->mem_start = memaddr;
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 9da5a4b81133..c3ea5fa7d05a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -38,7 +38,7 @@
 
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
@@ -456,10 +456,10 @@ static void mark_done(int cpu)
 {
         int i;
 
-        cpu_set(cpu, marked_cpus);
+        cpumask_set_cpu(cpu, marked_cpus);
 
         for_each_online_cpu(i) {
-                if (!cpu_isset(i, marked_cpus))
+                if (!cpumask_test_cpu(i, marked_cpus))
                         return;
         }
 
@@ -468,7 +468,7 @@ static void mark_done(int cpu)
          */
         process_task_mortuary();
 
-        cpus_clear(marked_cpus);
+        cpumask_clear(marked_cpus);
 }
 
 
@@ -565,6 +565,20 @@ void sync_buffer(int cpu)
         mutex_unlock(&buffer_mutex);
 }
 
+int __init buffer_sync_init(void)
+{
+        if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+                return -ENOMEM;
+
+        cpumask_clear(marked_cpus);
+        return 0;
+}
+
+void __exit buffer_sync_cleanup(void)
+{
+        free_cpumask_var(marked_cpus);
+}
+
 /* The function can be used to add a buffer worth of data directly to
  * the kernel buffer. The buffer is assumed to be a circular buffer.
  * Take the entries from index start and end at index end, wrapping
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
index 3110732c1835..0ebf5db62679 100644
--- a/drivers/oprofile/buffer_sync.h
+++ b/drivers/oprofile/buffer_sync.h
@@ -19,4 +19,8 @@ void sync_stop(void);
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
+/* initialize/destroy the buffer system. */
+int buffer_sync_init(void);
+void buffer_sync_cleanup(void);
+
 #endif /* OPROFILE_BUFFER_SYNC_H */
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 3cffce90f82a..ced39f602292 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -183,6 +183,10 @@ static int __init oprofile_init(void)
 {
         int err;
 
+        err = buffer_sync_init();
+        if (err)
+                return err;
+
         err = oprofile_arch_init(&oprofile_ops);
 
         if (err < 0 || timer) {
@@ -191,8 +195,10 @@ static int __init oprofile_init(void)
         }
 
         err = oprofilefs_register();
-        if (err)
+        if (err) {
                 oprofile_arch_exit();
+                buffer_sync_cleanup();
+        }
 
         return err;
 }
@@ -202,6 +208,7 @@ static void __exit oprofile_exit(void)
 {
         oprofilefs_unregister();
         oprofile_arch_exit();
+        buffer_sync_cleanup();
 }
 
 
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 26c536b51c5a..d313039e2fdf 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -31,6 +31,8 @@
 #include <linux/iova.h>
 #include <linux/intel-iommu.h>
 #include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
 
 #undef PREFIX
 #define PREFIX "DMAR:"
@@ -42,6 +44,7 @@
 LIST_HEAD(dmar_drhd_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
 
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
@@ -288,8 +291,9 @@ static int __init dmar_table_detect(void)
         acpi_status status = AE_OK;
 
         /* if we could find DMAR table, then there are DMAR devices */
-        status = acpi_get_table(ACPI_SIG_DMAR, 0,
-                                (struct acpi_table_header **)&dmar_tbl);
+        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
+                                (struct acpi_table_header **)&dmar_tbl,
+                                &dmar_tbl_size);
 
         if (ACPI_SUCCESS(status) && !dmar_tbl) {
                 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -489,6 +493,7 @@ void __init detect_intel_iommu(void)
                 iommu_detected = 1;
 #endif
         }
+        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
         dmar_tbl = NULL;
 }
 
@@ -506,6 +511,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
                 return -ENOMEM;
 
         iommu->seq_id = iommu_allocated++;
+        sprintf (iommu->name, "dmar%d", iommu->seq_id);
 
         iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
         if (!iommu->reg) {
@@ -748,6 +754,42 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 }
 
 /*
+ * Disable Queued Invalidation interface.
+ */
+void dmar_disable_qi(struct intel_iommu *iommu)
+{
+        unsigned long flags;
+        u32 sts;
+        cycles_t start_time = get_cycles();
+
+        if (!ecap_qis(iommu->ecap))
+                return;
+
+        spin_lock_irqsave(&iommu->register_lock, flags);
+
+        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+        if (!(sts & DMA_GSTS_QIES))
+                goto end;
+
+        /*
+         * Give a chance to HW to complete the pending invalidation requests.
+         */
+        while ((readl(iommu->reg + DMAR_IQT_REG) !=
+                readl(iommu->reg + DMAR_IQH_REG)) &&
+                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
+                cpu_relax();
+
+        iommu->gcmd &= ~DMA_GCMD_QIE;
+
+        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+                      !(sts & DMA_GSTS_QIES), sts);
+end:
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
@@ -767,20 +809,20 @@ int dmar_enable_qi(struct intel_iommu *iommu)
         if (iommu->qi)
                 return 0;
 
-        iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
+        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
         if (!iommu->qi)
                 return -ENOMEM;
 
         qi = iommu->qi;
 
-        qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
+        qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
         if (!qi->desc) {
                 kfree(qi);
                 iommu->qi = 0;
                 return -ENOMEM;
         }
 
-        qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
+        qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
         if (!qi->desc_status) {
                 free_page((unsigned long) qi->desc);
                 kfree(qi);
@@ -809,3 +851,254 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
         return 0;
 }
+
+/* iommu interrupt handling. Most stuff are MSI-like. */
+
+enum faulttype {
+        DMA_REMAP,
+        INTR_REMAP,
+        UNKNOWN,
+};
+
+static const char *dma_remap_fault_reasons[] =
+{
+        "Software",
+        "Present bit in root entry is clear",
+        "Present bit in context entry is clear",
+        "Invalid context entry",
+        "Access beyond MGAW",
+        "PTE Write access is not set",
+        "PTE Read access is not set",
+        "Next page table ptr is invalid",
+        "Root table address invalid",
+        "Context table ptr is invalid",
+        "non-zero reserved fields in RTP",
+        "non-zero reserved fields in CTP",
+        "non-zero reserved fields in PTE",
+};
+
+static const char *intr_remap_fault_reasons[] =
+{
+        "Detected reserved fields in the decoded interrupt-remapped request",
+        "Interrupt index exceeded the interrupt-remapping table size",
+        "Present field in the IRTE entry is clear",
+        "Error accessing interrupt-remapping table pointed by IRTA_REG",
+        "Detected reserved fields in the IRTE entry",
+        "Blocked a compatibility format interrupt request",
+        "Blocked an interrupt request due to source-id verification failure",
+};
+
+#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
+
+const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+{
+        if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
+                                     ARRAY_SIZE(intr_remap_fault_reasons))) {
+                *fault_type = INTR_REMAP;
+                return intr_remap_fault_reasons[fault_reason - 0x20];
+        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
+                *fault_type = DMA_REMAP;
+                return dma_remap_fault_reasons[fault_reason];
+        } else {
+                *fault_type = UNKNOWN;
+                return "Unknown";
+        }
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+        struct intel_iommu *iommu = get_irq_data(irq);
+        unsigned long flag;
+
+        /* unmask it */
+        spin_lock_irqsave(&iommu->register_lock, flag);
+        writel(0, iommu->reg + DMAR_FECTL_REG);
+        /* Read a reg to force flush the post write */
+        readl(iommu->reg + DMAR_FECTL_REG);
+        spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+        unsigned long flag;
+        struct intel_iommu *iommu = get_irq_data(irq);
+
+        /* mask it */
+        spin_lock_irqsave(&iommu->register_lock, flag);
+        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+        /* Read a reg to force flush the post write */
+        readl(iommu->reg + DMAR_FECTL_REG);
+        spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+        struct intel_iommu *iommu = get_irq_data(irq);
+        unsigned long flag;
+
+        spin_lock_irqsave(&iommu->register_lock, flag);
+        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+        spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+        struct intel_iommu *iommu = get_irq_data(irq);
+        unsigned long flag;
+
+        spin_lock_irqsave(&iommu->register_lock, flag);
+        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+        spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+                u8 fault_reason, u16 source_id, unsigned long long addr)
+{
+        const char *reason;
+        int fault_type;
+
+        reason = dmar_get_fault_reason(fault_reason, &fault_type);
+
+        if (fault_type == INTR_REMAP)
+                printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+                       "fault index %llx\n"
+                       "INTR-REMAP:[fault reason %02d] %s\n",
+                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+                       PCI_FUNC(source_id & 0xFF), addr >> 48,
+                       fault_reason, reason);
+        else
+                printk(KERN_ERR
+                       "DMAR:[%s] Request device [%02x:%02x.%d] "
+                       "fault addr %llx \n"
+                       "DMAR:[fault reason %02d] %s\n",
+                       (type ? "DMA Read" : "DMA Write"),
+                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+        return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+irqreturn_t dmar_fault(int irq, void *dev_id)
+{
+        struct intel_iommu *iommu = dev_id;
+        int reg, fault_index;
+        u32 fault_status;
+        unsigned long flag;
+
+        spin_lock_irqsave(&iommu->register_lock, flag);
+        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+        if (fault_status)
+                printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+                       fault_status);
+
+        /* TBD: ignore advanced fault log currently */
+        if (!(fault_status & DMA_FSTS_PPF))
+                goto clear_rest;
+
+        fault_index = dma_fsts_fault_record_index(fault_status);
+        reg = cap_fault_reg_offset(iommu->cap);
+        while (1) {
+                u8 fault_reason;
+                u16 source_id;
+                u64 guest_addr;
+                int type;
+                u32 data;
+
+                /* highest 32 bits */
+                data = readl(iommu->reg + reg +
+                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
+                if (!(data & DMA_FRCD_F))
+                        break;
+
+                fault_reason = dma_frcd_fault_reason(data);
+                type = dma_frcd_type(data);
+
+                data = readl(iommu->reg + reg +
+                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
+                source_id = dma_frcd_source_id(data);
+
+                guest_addr = dmar_readq(iommu->reg + reg +
+                                fault_index * PRIMARY_FAULT_REG_LEN);
+                guest_addr = dma_frcd_page_addr(guest_addr);
+                /* clear the fault */
+                writel(DMA_FRCD_F, iommu->reg + reg +
+                        fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+                spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+                dmar_fault_do_one(iommu, type, fault_reason,
+                                source_id, guest_addr);
+
+                fault_index++;
+                if (fault_index > cap_num_fault_regs(iommu->cap))
+                        fault_index = 0;
+                spin_lock_irqsave(&iommu->register_lock, flag);
+        }
+clear_rest:
+        /* clear all the other faults */
+        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+        writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+
+        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+        int irq, ret;
+
+        /*
+         * Check if the fault interrupt is already initialized.
+         */
+        if (iommu->irq)
+                return 0;
+
+        irq = create_irq();
+        if (!irq) {
+                printk(KERN_ERR "IOMMU: no free vectors\n");
+                return -EINVAL;
+        }
+
+        set_irq_data(irq, iommu);
+        iommu->irq = irq;
+
+        ret = arch_setup_dmar_msi(irq);
+        if (ret) {
+                set_irq_data(irq, NULL);
+                iommu->irq = 0;
+                destroy_irq(irq);
+                return 0;
+        }
+
+        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+        if (ret)
+                printk(KERN_ERR "IOMMU: can't request irq\n");
+        return ret;
+}
+
+int __init enable_drhd_fault_handling(void)
+{
+        struct dmar_drhd_unit *drhd;
+
+        /*
+         * Enable fault control interrupt.
+         */
+        for_each_drhd_unit(drhd) {
+                int ret;
+                struct intel_iommu *iommu = drhd->iommu;
+                ret = dmar_set_interrupt(iommu);
+
+                if (ret) {
+                        printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
+                               " interrupt, ret %d\n",
+                               (unsigned long long)drhd->reg_base_addr, ret);
+                        return -1;
+                }
+        }
+
+        return 0;
+}
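dmar_get_fault_reason() gains a fault-type out-parameter so one lookup serves both the DMA-remapping and interrupt-remapping reason tables. A sketch of a caller, using the enum and tables added above; report_fault() itself is illustrative:

    static void report_fault(u8 fault_reason, u64 addr)
    {
            int fault_type;
            const char *reason = dmar_get_fault_reason(fault_reason, &fault_type);

            /* interrupt-remapping faults record the IRTE index in the
             * upper bits of the address field; DMA faults record the
             * faulting page address itself */
            if (fault_type == INTR_REMAP)
                    printk(KERN_ERR "INTR-REMAP fault index %llx: %s\n",
                           addr >> 48, reason);
            else
                    printk(KERN_ERR "DMA fault addr %llx: %s\n",
                           addr, reason);
    }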
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f3f686581a90..ef167b8b047d 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1004,194 +1004,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
         return 0;
 }
 
-/* iommu interrupt handling. Most stuff are MSI-like. */
-
-static const char *fault_reason_strings[] =
-{
-        "Software",
-        "Present bit in root entry is clear",
-        "Present bit in context entry is clear",
-        "Invalid context entry",
-        "Access beyond MGAW",
-        "PTE Write access is not set",
-        "PTE Read access is not set",
-        "Next page table ptr is invalid",
-        "Root table address invalid",
-        "Context table ptr is invalid",
-        "non-zero reserved fields in RTP",
-        "non-zero reserved fields in CTP",
-        "non-zero reserved fields in PTE",
-};
-#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
-
-const char *dmar_get_fault_reason(u8 fault_reason)
-{
-        if (fault_reason > MAX_FAULT_REASON_IDX)
-                return "Unknown";
-        else
-                return fault_reason_strings[fault_reason];
-}
-
-void dmar_msi_unmask(unsigned int irq)
-{
-        struct intel_iommu *iommu = get_irq_data(irq);
-        unsigned long flag;
-
-        /* unmask it */
-        spin_lock_irqsave(&iommu->register_lock, flag);
-        writel(0, iommu->reg + DMAR_FECTL_REG);
-        /* Read a reg to force flush the post write */
-        readl(iommu->reg + DMAR_FECTL_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_mask(unsigned int irq)
-{
-        unsigned long flag;
-        struct intel_iommu *iommu = get_irq_data(irq);
-
-        /* mask it */
-        spin_lock_irqsave(&iommu->register_lock, flag);
-        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
-        /* Read a reg to force flush the post write */
-        readl(iommu->reg + DMAR_FECTL_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_write(int irq, struct msi_msg *msg)
-{
-        struct intel_iommu *iommu = get_irq_data(irq);
-        unsigned long flag;
-
-        spin_lock_irqsave(&iommu->register_lock, flag);
-        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
-        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
-        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-void dmar_msi_read(int irq, struct msi_msg *msg)
-{
-        struct intel_iommu *iommu = get_irq_data(irq);
-        unsigned long flag;
-
-        spin_lock_irqsave(&iommu->register_lock, flag);
-        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
-        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
-        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
-}
-
-static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
-                u8 fault_reason, u16 source_id, unsigned long long addr)
-{
-        const char *reason;
-
-        reason = dmar_get_fault_reason(fault_reason);
-
-        printk(KERN_ERR
-                "DMAR:[%s] Request device [%02x:%02x.%d] "
-                "fault addr %llx \n"
-                "DMAR:[fault reason %02d] %s\n",
-                (type ? "DMA Read" : "DMA Write"),
-                (source_id >> 8), PCI_SLOT(source_id & 0xFF),
-                PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
-        return 0;
-}
-
-#define PRIMARY_FAULT_REG_LEN (16)
-static irqreturn_t iommu_page_fault(int irq, void *dev_id)
-{
-        struct intel_iommu *iommu = dev_id;
-        int reg, fault_index;
-        u32 fault_status;
-        unsigned long flag;
-
-        spin_lock_irqsave(&iommu->register_lock, flag);
-        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-
-        /* TBD: ignore advanced fault log currently */
-        if (!(fault_status & DMA_FSTS_PPF))
-                goto clear_overflow;
-
-        fault_index = dma_fsts_fault_record_index(fault_status);
1118 | reg = cap_fault_reg_offset(iommu->cap); | ||
1119 | while (1) { | ||
1120 | u8 fault_reason; | ||
1121 | u16 source_id; | ||
1122 | u64 guest_addr; | ||
1123 | int type; | ||
1124 | u32 data; | ||
1125 | |||
1126 | /* highest 32 bits */ | ||
1127 | data = readl(iommu->reg + reg + | ||
1128 | fault_index * PRIMARY_FAULT_REG_LEN + 12); | ||
1129 | if (!(data & DMA_FRCD_F)) | ||
1130 | break; | ||
1131 | |||
1132 | fault_reason = dma_frcd_fault_reason(data); | ||
1133 | type = dma_frcd_type(data); | ||
1134 | |||
1135 | data = readl(iommu->reg + reg + | ||
1136 | fault_index * PRIMARY_FAULT_REG_LEN + 8); | ||
1137 | source_id = dma_frcd_source_id(data); | ||
1138 | |||
1139 | guest_addr = dmar_readq(iommu->reg + reg + | ||
1140 | fault_index * PRIMARY_FAULT_REG_LEN); | ||
1141 | guest_addr = dma_frcd_page_addr(guest_addr); | ||
1142 | /* clear the fault */ | ||
1143 | writel(DMA_FRCD_F, iommu->reg + reg + | ||
1144 | fault_index * PRIMARY_FAULT_REG_LEN + 12); | ||
1145 | |||
1146 | spin_unlock_irqrestore(&iommu->register_lock, flag); | ||
1147 | |||
1148 | iommu_page_fault_do_one(iommu, type, fault_reason, | ||
1149 | source_id, guest_addr); | ||
1150 | |||
1151 | fault_index++; | ||
1152 | if (fault_index > cap_num_fault_regs(iommu->cap)) | ||
1153 | fault_index = 0; | ||
1154 | spin_lock_irqsave(&iommu->register_lock, flag); | ||
1155 | } | ||
1156 | clear_overflow: | ||
1157 | /* clear primary fault overflow */ | ||
1158 | fault_status = readl(iommu->reg + DMAR_FSTS_REG); | ||
1159 | if (fault_status & DMA_FSTS_PFO) | ||
1160 | writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG); | ||
1161 | |||
1162 | spin_unlock_irqrestore(&iommu->register_lock, flag); | ||
1163 | return IRQ_HANDLED; | ||
1164 | } | ||
1165 | |||
1166 | int dmar_set_interrupt(struct intel_iommu *iommu) | ||
1167 | { | ||
1168 | int irq, ret; | ||
1169 | |||
1170 | irq = create_irq(); | ||
1171 | if (!irq) { | ||
1172 | printk(KERN_ERR "IOMMU: no free vectors\n"); | ||
1173 | return -EINVAL; | ||
1174 | } | ||
1175 | |||
1176 | set_irq_data(irq, iommu); | ||
1177 | iommu->irq = irq; | ||
1178 | |||
1179 | ret = arch_setup_dmar_msi(irq); | ||
1180 | if (ret) { | ||
1181 | set_irq_data(irq, NULL); | ||
1182 | iommu->irq = 0; | ||
1183 | destroy_irq(irq); | ||
1184 | return 0; | ||
1185 | } | ||
1186 | |||
1187 | /* Force fault register is cleared */ | ||
1188 | iommu_page_fault(irq, iommu); | ||
1189 | |||
1190 | ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu); | ||
1191 | if (ret) | ||
1192 | printk(KERN_ERR "IOMMU: can't request irq\n"); | ||
1193 | return ret; | ||
1194 | } | ||
1195 | 1007 | ||
1196 | static int iommu_init_domains(struct intel_iommu *iommu) | 1008 | static int iommu_init_domains(struct intel_iommu *iommu) |
1197 | { | 1009 | { |
@@ -1987,7 +1799,7 @@ static int __init init_dmars(void) | |||
1987 | struct dmar_rmrr_unit *rmrr; | 1799 | struct dmar_rmrr_unit *rmrr; |
1988 | struct pci_dev *pdev; | 1800 | struct pci_dev *pdev; |
1989 | struct intel_iommu *iommu; | 1801 | struct intel_iommu *iommu; |
1990 | int i, ret, unit = 0; | 1802 | int i, ret; |
1991 | 1803 | ||
1992 | /* | 1804 | /* |
1993 | * for each drhd | 1805 | * for each drhd |
@@ -2043,11 +1855,40 @@ static int __init init_dmars(void) | |||
2043 | } | 1855 | } |
2044 | } | 1856 | } |
2045 | 1857 | ||
1858 | /* | ||
1859 | * Start from a sane iommu hardware state. | ||
1860 | */ | ||
1861 | for_each_drhd_unit(drhd) { | ||
1862 | if (drhd->ignored) | ||
1863 | continue; | ||
1864 | |||
1865 | iommu = drhd->iommu; | ||
1866 | |||
1867 | /* | ||
1868 | * If the queued invalidation is already initialized by us | ||
1869 | * (for example, while enabling interrupt-remapping) then | ||
1871 | * things are already rolling from a sane state. | ||
1871 | */ | ||
1872 | if (iommu->qi) | ||
1873 | continue; | ||
1874 | |||
1875 | /* | ||
1876 | * Clear any previous faults. | ||
1877 | */ | ||
1878 | dmar_fault(-1, iommu); | ||
1879 | /* | ||
1880 | * Disable queued invalidation if supported and already enabled | ||
1881 | * before OS handover. | ||
1882 | */ | ||
1883 | dmar_disable_qi(iommu); | ||
1884 | } | ||
1885 | |||
2046 | for_each_drhd_unit(drhd) { | 1886 | for_each_drhd_unit(drhd) { |
2047 | if (drhd->ignored) | 1887 | if (drhd->ignored) |
2048 | continue; | 1888 | continue; |
2049 | 1889 | ||
2050 | iommu = drhd->iommu; | 1890 | iommu = drhd->iommu; |
1891 | |||
2051 | if (dmar_enable_qi(iommu)) { | 1892 | if (dmar_enable_qi(iommu)) { |
2052 | /* | 1893 | /* |
2053 | * Queued Invalidate not enabled, use Register Based | 1894 | * Queued Invalidate not enabled, use Register Based |
@@ -2109,7 +1950,6 @@ static int __init init_dmars(void) | |||
2109 | if (drhd->ignored) | 1950 | if (drhd->ignored) |
2110 | continue; | 1951 | continue; |
2111 | iommu = drhd->iommu; | 1952 | iommu = drhd->iommu; |
2112 | sprintf (iommu->name, "dmar%d", unit++); | ||
2113 | 1953 | ||
2114 | iommu_flush_write_buffer(iommu); | 1954 | iommu_flush_write_buffer(iommu); |
2115 | 1955 | ||
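
The reset loop added above exists because firmware, or a previous kernel entered via kexec, may leave faults latched and queued invalidation enabled behind the OS's back. The iommu->qi test keeps the reset idempotent: if enable_intr_remapping() already ran, that path (see intr_remapping.c below) performed the same hygiene. A minimal sketch of the per-unit reset, condensed from the hunk:

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		if (iommu->qi)
			continue;		/* already reset while enabling intr-remap */
		dmar_fault(-1, iommu);		/* ack faults left over from handover */
		dmar_disable_qi(iommu);		/* stop any firmware-enabled QI */
	}

The sprintf() of iommu->name is dropped because dmar_set_interrupt() in dmar.c (above) already passes iommu->name to request_irq(), so the name now has to be valid earlier, presumably assigned where the unit is allocated in dmar.c.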
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 45effc5726c0..bc5b6976f918 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/irq.h> | 6 | #include <linux/irq.h> |
7 | #include <asm/io_apic.h> | 7 | #include <asm/io_apic.h> |
8 | #include <asm/smp.h> | 8 | #include <asm/smp.h> |
9 | #include <asm/cpu.h> | ||
9 | #include <linux/intel-iommu.h> | 10 | #include <linux/intel-iommu.h> |
10 | #include "intr_remapping.h" | 11 | #include "intr_remapping.h" |
11 | 12 | ||
@@ -116,21 +117,22 @@ int get_irte(int irq, struct irte *entry) | |||
116 | { | 117 | { |
117 | int index; | 118 | int index; |
118 | struct irq_2_iommu *irq_iommu; | 119 | struct irq_2_iommu *irq_iommu; |
120 | unsigned long flags; | ||
119 | 121 | ||
120 | if (!entry) | 122 | if (!entry) |
121 | return -1; | 123 | return -1; |
122 | 124 | ||
123 | spin_lock(&irq_2_ir_lock); | 125 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
124 | irq_iommu = valid_irq_2_iommu(irq); | 126 | irq_iommu = valid_irq_2_iommu(irq); |
125 | if (!irq_iommu) { | 127 | if (!irq_iommu) { |
126 | spin_unlock(&irq_2_ir_lock); | 128 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
127 | return -1; | 129 | return -1; |
128 | } | 130 | } |
129 | 131 | ||
130 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 132 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
131 | *entry = *(irq_iommu->iommu->ir_table->base + index); | 133 | *entry = *(irq_iommu->iommu->ir_table->base + index); |
132 | 134 | ||
133 | spin_unlock(&irq_2_ir_lock); | 135 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
134 | return 0; | 136 | return 0; |
135 | } | 137 | } |
136 | 138 | ||
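
Every irq_2_ir_lock critical section in this file switches to the irqsave variants. The motivation, sketched below, is the classic self-deadlock that plain spin_lock() permits once a lock can be reached from hard-interrupt context (an assumption consistent with these routines now running during irq setup and teardown):

	/* With plain spin_lock(), all on one CPU: */
	spin_lock(&irq_2_ir_lock);	/* process context takes the lock...   */
					/* ...an interrupt fires here, and its */
					/* handler spins on the same lock      */
					/* forever: the owner never runs again */

	/* The irqsave form masks local interrupts for the critical section: */
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	/* ... touch the irq_2_iommu mapping ... */
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);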
@@ -140,6 +142,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
140 | struct irq_2_iommu *irq_iommu; | 142 | struct irq_2_iommu *irq_iommu; |
141 | u16 index, start_index; | 143 | u16 index, start_index; |
142 | unsigned int mask = 0; | 144 | unsigned int mask = 0; |
145 | unsigned long flags; | ||
143 | int i; | 146 | int i; |
144 | 147 | ||
145 | if (!count) | 148 | if (!count) |
@@ -169,7 +172,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
169 | return -1; | 172 | return -1; |
170 | } | 173 | } |
171 | 174 | ||
172 | spin_lock(&irq_2_ir_lock); | 175 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
173 | do { | 176 | do { |
174 | for (i = index; i < index + count; i++) | 177 | for (i = index; i < index + count; i++) |
175 | if (table->base[i].present) | 178 | if (table->base[i].present) |
@@ -181,7 +184,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
181 | index = (index + count) % INTR_REMAP_TABLE_ENTRIES; | 184 | index = (index + count) % INTR_REMAP_TABLE_ENTRIES; |
182 | 185 | ||
183 | if (index == start_index) { | 186 | if (index == start_index) { |
184 | spin_unlock(&irq_2_ir_lock); | 187 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
185 | printk(KERN_ERR "can't allocate an IRTE\n"); | 188 | printk(KERN_ERR "can't allocate an IRTE\n"); |
186 | return -1; | 189 | return -1; |
187 | } | 190 | } |
@@ -192,7 +195,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
192 | 195 | ||
193 | irq_iommu = irq_2_iommu_alloc(irq); | 196 | irq_iommu = irq_2_iommu_alloc(irq); |
194 | if (!irq_iommu) { | 197 | if (!irq_iommu) { |
195 | spin_unlock(&irq_2_ir_lock); | 198 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
196 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | 199 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); |
197 | return -1; | 200 | return -1; |
198 | } | 201 | } |
@@ -202,7 +205,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
202 | irq_iommu->sub_handle = 0; | 205 | irq_iommu->sub_handle = 0; |
203 | irq_iommu->irte_mask = mask; | 206 | irq_iommu->irte_mask = mask; |
204 | 207 | ||
205 | spin_unlock(&irq_2_ir_lock); | 208 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
206 | 209 | ||
207 | return index; | 210 | return index; |
208 | } | 211 | } |
@@ -222,30 +225,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) | |||
222 | { | 225 | { |
223 | int index; | 226 | int index; |
224 | struct irq_2_iommu *irq_iommu; | 227 | struct irq_2_iommu *irq_iommu; |
228 | unsigned long flags; | ||
225 | 229 | ||
226 | spin_lock(&irq_2_ir_lock); | 230 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
227 | irq_iommu = valid_irq_2_iommu(irq); | 231 | irq_iommu = valid_irq_2_iommu(irq); |
228 | if (!irq_iommu) { | 232 | if (!irq_iommu) { |
229 | spin_unlock(&irq_2_ir_lock); | 233 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
230 | return -1; | 234 | return -1; |
231 | } | 235 | } |
232 | 236 | ||
233 | *sub_handle = irq_iommu->sub_handle; | 237 | *sub_handle = irq_iommu->sub_handle; |
234 | index = irq_iommu->irte_index; | 238 | index = irq_iommu->irte_index; |
235 | spin_unlock(&irq_2_ir_lock); | 239 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
236 | return index; | 240 | return index; |
237 | } | 241 | } |
238 | 242 | ||
239 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | 243 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) |
240 | { | 244 | { |
241 | struct irq_2_iommu *irq_iommu; | 245 | struct irq_2_iommu *irq_iommu; |
246 | unsigned long flags; | ||
242 | 247 | ||
243 | spin_lock(&irq_2_ir_lock); | 248 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
244 | 249 | ||
245 | irq_iommu = irq_2_iommu_alloc(irq); | 250 | irq_iommu = irq_2_iommu_alloc(irq); |
246 | 251 | ||
247 | if (!irq_iommu) { | 252 | if (!irq_iommu) { |
248 | spin_unlock(&irq_2_ir_lock); | 253 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
249 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | 254 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); |
250 | return -1; | 255 | return -1; |
251 | } | 256 | } |
@@ -255,7 +260,7 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
255 | irq_iommu->sub_handle = subhandle; | 260 | irq_iommu->sub_handle = subhandle; |
256 | irq_iommu->irte_mask = 0; | 261 | irq_iommu->irte_mask = 0; |
257 | 262 | ||
258 | spin_unlock(&irq_2_ir_lock); | 263 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
259 | 264 | ||
260 | return 0; | 265 | return 0; |
261 | } | 266 | } |
@@ -263,11 +268,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
263 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | 268 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) |
264 | { | 269 | { |
265 | struct irq_2_iommu *irq_iommu; | 270 | struct irq_2_iommu *irq_iommu; |
271 | unsigned long flags; | ||
266 | 272 | ||
267 | spin_lock(&irq_2_ir_lock); | 273 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
268 | irq_iommu = valid_irq_2_iommu(irq); | 274 | irq_iommu = valid_irq_2_iommu(irq); |
269 | if (!irq_iommu) { | 275 | if (!irq_iommu) { |
270 | spin_unlock(&irq_2_ir_lock); | 276 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
271 | return -1; | 277 | return -1; |
272 | } | 278 | } |
273 | 279 | ||
@@ -276,7 +282,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | |||
276 | irq_iommu->sub_handle = 0; | 282 | irq_iommu->sub_handle = 0; |
277 | irq_2_iommu(irq)->irte_mask = 0; | 283 | irq_2_iommu(irq)->irte_mask = 0; |
278 | 284 | ||
279 | spin_unlock(&irq_2_ir_lock); | 285 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
280 | 286 | ||
281 | return 0; | 287 | return 0; |
282 | } | 288 | } |
@@ -288,11 +294,12 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
288 | struct irte *irte; | 294 | struct irte *irte; |
289 | struct intel_iommu *iommu; | 295 | struct intel_iommu *iommu; |
290 | struct irq_2_iommu *irq_iommu; | 296 | struct irq_2_iommu *irq_iommu; |
297 | unsigned long flags; | ||
291 | 298 | ||
292 | spin_lock(&irq_2_ir_lock); | 299 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
293 | irq_iommu = valid_irq_2_iommu(irq); | 300 | irq_iommu = valid_irq_2_iommu(irq); |
294 | if (!irq_iommu) { | 301 | if (!irq_iommu) { |
295 | spin_unlock(&irq_2_ir_lock); | 302 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
296 | return -1; | 303 | return -1; |
297 | } | 304 | } |
298 | 305 | ||
@@ -301,11 +308,11 @@ int modify_irte(int irq, struct irte *irte_modified) | |||
301 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 308 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
302 | irte = &iommu->ir_table->base[index]; | 309 | irte = &iommu->ir_table->base[index]; |
303 | 310 | ||
304 | set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); | 311 | set_64bit((unsigned long *)irte, irte_modified->low); |
305 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); | 312 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
306 | 313 | ||
307 | rc = qi_flush_iec(iommu, index, 0); | 314 | rc = qi_flush_iec(iommu, index, 0); |
308 | spin_unlock(&irq_2_ir_lock); | 315 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
309 | 316 | ||
310 | return rc; | 317 | return rc; |
311 | } | 318 | } |
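
The dropped "| (1 << 1)" used to OR a flag into every updated entry. In a VT-d interrupt remapping table entry, bit 0 of the low qword is Present and bit 1 is FPD (fault processing disable), so the old code unconditionally suppressed fault reporting on modified entries; modify_irte() now writes the caller's value verbatim. For orientation (positions per the VT-d specification, not definitions from this patch):

	#define IRTE_PRESENT	(1ULL << 0)	/* entry is valid */
	#define IRTE_FPD	(1ULL << 1)	/* suppress fault reporting for it */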
@@ -316,11 +323,12 @@ int flush_irte(int irq) | |||
316 | int index; | 323 | int index; |
317 | struct intel_iommu *iommu; | 324 | struct intel_iommu *iommu; |
318 | struct irq_2_iommu *irq_iommu; | 325 | struct irq_2_iommu *irq_iommu; |
326 | unsigned long flags; | ||
319 | 327 | ||
320 | spin_lock(&irq_2_ir_lock); | 328 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
321 | irq_iommu = valid_irq_2_iommu(irq); | 329 | irq_iommu = valid_irq_2_iommu(irq); |
322 | if (!irq_iommu) { | 330 | if (!irq_iommu) { |
323 | spin_unlock(&irq_2_ir_lock); | 331 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
324 | return -1; | 332 | return -1; |
325 | } | 333 | } |
326 | 334 | ||
@@ -329,7 +337,7 @@ int flush_irte(int irq) | |||
329 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | 337 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
330 | 338 | ||
331 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | 339 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); |
332 | spin_unlock(&irq_2_ir_lock); | 340 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
333 | 341 | ||
334 | return rc; | 342 | return rc; |
335 | } | 343 | } |
@@ -362,11 +370,12 @@ int free_irte(int irq) | |||
362 | struct irte *irte; | 370 | struct irte *irte; |
363 | struct intel_iommu *iommu; | 371 | struct intel_iommu *iommu; |
364 | struct irq_2_iommu *irq_iommu; | 372 | struct irq_2_iommu *irq_iommu; |
373 | unsigned long flags; | ||
365 | 374 | ||
366 | spin_lock(&irq_2_ir_lock); | 375 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
367 | irq_iommu = valid_irq_2_iommu(irq); | 376 | irq_iommu = valid_irq_2_iommu(irq); |
368 | if (!irq_iommu) { | 377 | if (!irq_iommu) { |
369 | spin_unlock(&irq_2_ir_lock); | 378 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
370 | return -1; | 379 | return -1; |
371 | } | 380 | } |
372 | 381 | ||
@@ -377,7 +386,7 @@ int free_irte(int irq) | |||
377 | 386 | ||
378 | if (!irq_iommu->sub_handle) { | 387 | if (!irq_iommu->sub_handle) { |
379 | for (i = 0; i < (1 << irq_iommu->irte_mask); i++) | 388 | for (i = 0; i < (1 << irq_iommu->irte_mask); i++) |
380 | set_64bit((unsigned long *)irte, 0); | 389 | set_64bit((unsigned long *)(irte + i), 0); |
381 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); | 390 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); |
382 | } | 391 | } |
383 | 392 | ||
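
The small fix above matters: an irq allocated with a non-zero irte_mask owns a block of 1 << mask consecutive IRTEs, and the old loop cleared the base entry irte over and over while leaving the rest of the block present. The corrected teardown, sketched:

	int i;

	for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
		set_64bit((unsigned long *)(irte + i), 0);	/* clear every entry */
	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);	/* then flush the block */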
@@ -386,7 +395,7 @@ int free_irte(int irq) | |||
386 | irq_iommu->sub_handle = 0; | 395 | irq_iommu->sub_handle = 0; |
387 | irq_iommu->irte_mask = 0; | 396 | irq_iommu->irte_mask = 0; |
388 | 397 | ||
389 | spin_unlock(&irq_2_ir_lock); | 398 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
390 | 399 | ||
391 | return rc; | 400 | return rc; |
392 | } | 401 | } |
@@ -438,12 +447,12 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode) | |||
438 | struct page *pages; | 447 | struct page *pages; |
439 | 448 | ||
440 | ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), | 449 | ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), |
441 | GFP_KERNEL); | 450 | GFP_ATOMIC); |
442 | 451 | ||
443 | if (!iommu->ir_table) | 452 | if (!iommu->ir_table) |
444 | return -ENOMEM; | 453 | return -ENOMEM; |
445 | 454 | ||
446 | pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); | 455 | pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); |
447 | 456 | ||
448 | if (!pages) { | 457 | if (!pages) { |
449 | printk(KERN_ERR "failed to allocate pages of order %d\n", | 458 | printk(KERN_ERR "failed to allocate pages of order %d\n", |
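
GFP_KERNEL allocations may sleep, and setup_intr_remapping() can now be reached with interrupts disabled (enable_intr_remapping() runs early in boot, presumably before scheduling is safe), where sleeping is illegal; hence GFP_ATOMIC. The general rule, sketched for illustration only (real callers must know their context rather than probe for it):

	void *p;

	if (irqs_disabled())
		p = kzalloc(size, GFP_ATOMIC);	/* must not sleep; may fail under pressure */
	else
		p = kzalloc(size, GFP_KERNEL);	/* may block waiting on reclaim */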
@@ -458,11 +467,55 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode) | |||
458 | return 0; | 467 | return 0; |
459 | } | 468 | } |
460 | 469 | ||
470 | /* | ||
471 | * Disable Interrupt Remapping. | ||
472 | */ | ||
473 | static void disable_intr_remapping(struct intel_iommu *iommu) | ||
474 | { | ||
475 | unsigned long flags; | ||
476 | u32 sts; | ||
477 | |||
478 | if (!ecap_ir_support(iommu->ecap)) | ||
479 | return; | ||
480 | |||
481 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
482 | |||
483 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); | ||
484 | if (!(sts & DMA_GSTS_IRES)) | ||
485 | goto end; | ||
486 | |||
487 | iommu->gcmd &= ~DMA_GCMD_IRE; | ||
488 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); | ||
489 | |||
490 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | ||
491 | readl, !(sts & DMA_GSTS_IRES), sts); | ||
492 | |||
493 | end: | ||
494 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
495 | } | ||
496 | |||
461 | int __init enable_intr_remapping(int eim) | 497 | int __init enable_intr_remapping(int eim) |
462 | { | 498 | { |
463 | struct dmar_drhd_unit *drhd; | 499 | struct dmar_drhd_unit *drhd; |
464 | int setup = 0; | 500 | int setup = 0; |
465 | 501 | ||
502 | for_each_drhd_unit(drhd) { | ||
503 | struct intel_iommu *iommu = drhd->iommu; | ||
504 | |||
505 | /* | ||
506 | * Clear previous faults. | ||
507 | */ | ||
508 | dmar_fault(-1, iommu); | ||
509 | |||
510 | /* | ||
511 | * Disable intr remapping and queued invalidation, if already | ||
512 | * enabled prior to OS handover. | ||
513 | */ | ||
514 | disable_intr_remapping(iommu); | ||
515 | |||
516 | dmar_disable_qi(iommu); | ||
517 | } | ||
518 | |||
466 | /* | 519 | /* |
467 | * check for the Interrupt-remapping support | 520 | * check for the Interrupt-remapping support |
468 | */ | 521 | */ |
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index bf92802f2bbe..36e221beedcd 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
39 | 39 | ||
40 | #include <asm/mach-rdc321x/rdc321x_defs.h> | 40 | #include <asm/rdc321x_defs.h> |
41 | 41 | ||
42 | #define RDC_WDT_MASK 0x80000000 /* Mask */ | 42 | #define RDC_WDT_MASK 0x80000000 /* Mask */ |
43 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ | 43 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index eb0dfdeaa949..30963af5dba0 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -26,9 +26,11 @@ | |||
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/string.h> | 28 | #include <linux/string.h> |
29 | #include <linux/bootmem.h> | ||
29 | 30 | ||
30 | #include <asm/ptrace.h> | 31 | #include <asm/ptrace.h> |
31 | #include <asm/irq.h> | 32 | #include <asm/irq.h> |
33 | #include <asm/idle.h> | ||
32 | #include <asm/sync_bitops.h> | 34 | #include <asm/sync_bitops.h> |
33 | #include <asm/xen/hypercall.h> | 35 | #include <asm/xen/hypercall.h> |
34 | #include <asm/xen/hypervisor.h> | 36 | #include <asm/xen/hypervisor.h> |
@@ -50,36 +52,55 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; | |||
50 | /* IRQ <-> IPI mapping */ | 52 | /* IRQ <-> IPI mapping */ |
51 | static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; | 53 | static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; |
52 | 54 | ||
53 | /* Packed IRQ information: binding type, sub-type index, and event channel. */ | 55 | /* Interrupt types. */ |
54 | struct packed_irq | 56 | enum xen_irq_type { |
55 | { | 57 | IRQT_UNBOUND = 0, |
56 | unsigned short evtchn; | ||
57 | unsigned char index; | ||
58 | unsigned char type; | ||
59 | }; | ||
60 | |||
61 | static struct packed_irq irq_info[NR_IRQS]; | ||
62 | |||
63 | /* Binding types. */ | ||
64 | enum { | ||
65 | IRQT_UNBOUND, | ||
66 | IRQT_PIRQ, | 58 | IRQT_PIRQ, |
67 | IRQT_VIRQ, | 59 | IRQT_VIRQ, |
68 | IRQT_IPI, | 60 | IRQT_IPI, |
69 | IRQT_EVTCHN | 61 | IRQT_EVTCHN |
70 | }; | 62 | }; |
71 | 63 | ||
72 | /* Convenient shorthand for packed representation of an unbound IRQ. */ | 64 | /* |
73 | #define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0) | 65 | * Packed IRQ information: |
66 | * type - enum xen_irq_type | ||
67 | * event channel - irq->event channel mapping | ||
68 | * cpu - cpu this event channel is bound to | ||
69 | * index - type-specific information: | ||
70 | * PIRQ - vector, with MSB being "needs EOI" | ||
71 | * VIRQ - virq number | ||
72 | * IPI - IPI vector | ||
73 | * EVTCHN - | ||
74 | */ | ||
75 | struct irq_info | ||
76 | { | ||
77 | enum xen_irq_type type; /* type */ | ||
78 | unsigned short evtchn; /* event channel */ | ||
79 | unsigned short cpu; /* cpu bound */ | ||
80 | |||
81 | union { | ||
82 | unsigned short virq; | ||
83 | enum ipi_vector ipi; | ||
84 | struct { | ||
85 | unsigned short gsi; | ||
86 | unsigned short vector; | ||
87 | } pirq; | ||
88 | } u; | ||
89 | }; | ||
90 | |||
91 | static struct irq_info irq_info[NR_IRQS]; | ||
74 | 92 | ||
75 | static int evtchn_to_irq[NR_EVENT_CHANNELS] = { | 93 | static int evtchn_to_irq[NR_EVENT_CHANNELS] = { |
76 | [0 ... NR_EVENT_CHANNELS-1] = -1 | 94 | [0 ... NR_EVENT_CHANNELS-1] = -1 |
77 | }; | 95 | }; |
78 | static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; | 96 | struct cpu_evtchn_s { |
79 | static u8 cpu_evtchn[NR_EVENT_CHANNELS]; | 97 | unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG]; |
80 | 98 | }; | |
81 | /* Reference counts for bindings to IRQs. */ | 99 | static struct cpu_evtchn_s *cpu_evtchn_mask_p; |
82 | static int irq_bindcount[NR_IRQS]; | 100 | static inline unsigned long *cpu_evtchn_mask(int cpu) |
101 | { | ||
102 | return cpu_evtchn_mask_p[cpu].bits; | ||
103 | } | ||
83 | 104 | ||
84 | /* Xen will never allocate port zero for any purpose. */ | 105 | /* Xen will never allocate port zero for any purpose. */ |
85 | #define VALID_EVTCHN(chn) ((chn) != 0) | 106 | #define VALID_EVTCHN(chn) ((chn) != 0) |
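
The single tagged struct above replaces both the old packed_irq table and the separate cpu_evtchn[] array: type discriminates the union and the bound cpu now travels with the entry. Hypothetical usage, mirroring the mk_*_info() constructors in the next hunk:

	unsigned short evtchn = 3;	/* hypothetical event-channel port */
	struct irq_info info = mk_virq_info(evtchn, VIRQ_TIMER);

	switch (info.type) {
	case IRQT_VIRQ:
		printk(KERN_DEBUG "virq %u bound to evtchn %u on cpu %u\n",
		       info.u.virq, info.evtchn, info.cpu);
		break;
	default:
		break;
	}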
@@ -87,27 +108,108 @@ static int irq_bindcount[NR_IRQS]; | |||
87 | static struct irq_chip xen_dynamic_chip; | 108 | static struct irq_chip xen_dynamic_chip; |
88 | 109 | ||
89 | /* Constructor for packed IRQ information. */ | 110 | /* Constructor for packed IRQ information. */ |
90 | static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn) | 111 | static struct irq_info mk_unbound_info(void) |
112 | { | ||
113 | return (struct irq_info) { .type = IRQT_UNBOUND }; | ||
114 | } | ||
115 | |||
116 | static struct irq_info mk_evtchn_info(unsigned short evtchn) | ||
117 | { | ||
118 | return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn, | ||
119 | .cpu = 0 }; | ||
120 | } | ||
121 | |||
122 | static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi) | ||
91 | { | 123 | { |
92 | return (struct packed_irq) { evtchn, index, type }; | 124 | return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn, |
125 | .cpu = 0, .u.ipi = ipi }; | ||
126 | } | ||
127 | |||
128 | static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq) | ||
129 | { | ||
130 | return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn, | ||
131 | .cpu = 0, .u.virq = virq }; | ||
132 | } | ||
133 | |||
134 | static struct irq_info mk_pirq_info(unsigned short evtchn, | ||
135 | unsigned short gsi, unsigned short vector) | ||
136 | { | ||
137 | return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn, | ||
138 | .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } }; | ||
93 | } | 139 | } |
94 | 140 | ||
95 | /* | 141 | /* |
96 | * Accessors for packed IRQ information. | 142 | * Accessors for packed IRQ information. |
97 | */ | 143 | */ |
98 | static inline unsigned int evtchn_from_irq(int irq) | 144 | static struct irq_info *info_for_irq(unsigned irq) |
145 | { | ||
146 | return &irq_info[irq]; | ||
147 | } | ||
148 | |||
149 | static unsigned int evtchn_from_irq(unsigned irq) | ||
99 | { | 150 | { |
100 | return irq_info[irq].evtchn; | 151 | return info_for_irq(irq)->evtchn; |
101 | } | 152 | } |
102 | 153 | ||
103 | static inline unsigned int index_from_irq(int irq) | 154 | static enum ipi_vector ipi_from_irq(unsigned irq) |
104 | { | 155 | { |
105 | return irq_info[irq].index; | 156 | struct irq_info *info = info_for_irq(irq); |
157 | |||
158 | BUG_ON(info == NULL); | ||
159 | BUG_ON(info->type != IRQT_IPI); | ||
160 | |||
161 | return info->u.ipi; | ||
106 | } | 162 | } |
107 | 163 | ||
108 | static inline unsigned int type_from_irq(int irq) | 164 | static unsigned virq_from_irq(unsigned irq) |
109 | { | 165 | { |
110 | return irq_info[irq].type; | 166 | struct irq_info *info = info_for_irq(irq); |
167 | |||
168 | BUG_ON(info == NULL); | ||
169 | BUG_ON(info->type != IRQT_VIRQ); | ||
170 | |||
171 | return info->u.virq; | ||
172 | } | ||
173 | |||
174 | static unsigned gsi_from_irq(unsigned irq) | ||
175 | { | ||
176 | struct irq_info *info = info_for_irq(irq); | ||
177 | |||
178 | BUG_ON(info == NULL); | ||
179 | BUG_ON(info->type != IRQT_PIRQ); | ||
180 | |||
181 | return info->u.pirq.gsi; | ||
182 | } | ||
183 | |||
184 | static unsigned vector_from_irq(unsigned irq) | ||
185 | { | ||
186 | struct irq_info *info = info_for_irq(irq); | ||
187 | |||
188 | BUG_ON(info == NULL); | ||
189 | BUG_ON(info->type != IRQT_PIRQ); | ||
190 | |||
191 | return info->u.pirq.vector; | ||
192 | } | ||
193 | |||
194 | static enum xen_irq_type type_from_irq(unsigned irq) | ||
195 | { | ||
196 | return info_for_irq(irq)->type; | ||
197 | } | ||
198 | |||
199 | static unsigned cpu_from_irq(unsigned irq) | ||
200 | { | ||
201 | return info_for_irq(irq)->cpu; | ||
202 | } | ||
203 | |||
204 | static unsigned int cpu_from_evtchn(unsigned int evtchn) | ||
205 | { | ||
206 | int irq = evtchn_to_irq[evtchn]; | ||
207 | unsigned ret = 0; | ||
208 | |||
209 | if (irq != -1) | ||
210 | ret = cpu_from_irq(irq); | ||
211 | |||
212 | return ret; | ||
111 | } | 213 | } |
112 | 214 | ||
113 | static inline unsigned long active_evtchns(unsigned int cpu, | 215 | static inline unsigned long active_evtchns(unsigned int cpu, |
@@ -115,7 +217,7 @@ static inline unsigned long active_evtchns(unsigned int cpu, | |||
115 | unsigned int idx) | 217 | unsigned int idx) |
116 | { | 218 | { |
117 | return (sh->evtchn_pending[idx] & | 219 | return (sh->evtchn_pending[idx] & |
118 | cpu_evtchn_mask[cpu][idx] & | 220 | cpu_evtchn_mask(cpu)[idx] & |
119 | ~sh->evtchn_mask[idx]); | 221 | ~sh->evtchn_mask[idx]); |
120 | } | 222 | } |
121 | 223 | ||
@@ -125,13 +227,13 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
125 | 227 | ||
126 | BUG_ON(irq == -1); | 228 | BUG_ON(irq == -1); |
127 | #ifdef CONFIG_SMP | 229 | #ifdef CONFIG_SMP |
128 | irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu); | 230 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); |
129 | #endif | 231 | #endif |
130 | 232 | ||
131 | __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]); | 233 | __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); |
132 | __set_bit(chn, cpu_evtchn_mask[cpu]); | 234 | __set_bit(chn, cpu_evtchn_mask(cpu)); |
133 | 235 | ||
134 | cpu_evtchn[chn] = cpu; | 236 | irq_info[irq].cpu = cpu; |
135 | } | 237 | } |
136 | 238 | ||
137 | static void init_evtchn_cpu_bindings(void) | 239 | static void init_evtchn_cpu_bindings(void) |
@@ -142,17 +244,11 @@ static void init_evtchn_cpu_bindings(void) | |||
142 | 244 | ||
143 | /* By default all event channels notify CPU#0. */ | 245 | /* By default all event channels notify CPU#0. */ |
144 | for_each_irq_desc(i, desc) { | 246 | for_each_irq_desc(i, desc) { |
145 | desc->affinity = cpumask_of_cpu(0); | 247 | cpumask_copy(desc->affinity, cpumask_of(0)); |
146 | } | 248 | } |
147 | #endif | 249 | #endif |
148 | 250 | ||
149 | memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); | 251 | memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
150 | memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); | ||
151 | } | ||
152 | |||
153 | static inline unsigned int cpu_from_evtchn(unsigned int evtchn) | ||
154 | { | ||
155 | return cpu_evtchn[evtchn]; | ||
156 | } | 252 | } |
157 | 253 | ||
158 | static inline void clear_evtchn(int port) | 254 | static inline void clear_evtchn(int port) |
@@ -232,9 +328,8 @@ static int find_unbound_irq(void) | |||
232 | int irq; | 328 | int irq; |
233 | struct irq_desc *desc; | 329 | struct irq_desc *desc; |
234 | 330 | ||
235 | /* Only allocate from dynirq range */ | ||
236 | for (irq = 0; irq < nr_irqs; irq++) | 331 | for (irq = 0; irq < nr_irqs; irq++) |
237 | if (irq_bindcount[irq] == 0) | 332 | if (irq_info[irq].type == IRQT_UNBOUND) |
238 | break; | 333 | break; |
239 | 334 | ||
240 | if (irq == nr_irqs) | 335 | if (irq == nr_irqs) |
@@ -244,6 +339,8 @@ static int find_unbound_irq(void) | |||
244 | if (WARN_ON(desc == NULL)) | 339 | if (WARN_ON(desc == NULL)) |
245 | return -1; | 340 | return -1; |
246 | 341 | ||
342 | dynamic_irq_init(irq); | ||
343 | |||
247 | return irq; | 344 | return irq; |
248 | } | 345 | } |
249 | 346 | ||
@@ -258,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
258 | if (irq == -1) { | 355 | if (irq == -1) { |
259 | irq = find_unbound_irq(); | 356 | irq = find_unbound_irq(); |
260 | 357 | ||
261 | dynamic_irq_init(irq); | ||
262 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 358 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
263 | handle_level_irq, "event"); | 359 | handle_level_irq, "event"); |
264 | 360 | ||
265 | evtchn_to_irq[evtchn] = irq; | 361 | evtchn_to_irq[evtchn] = irq; |
266 | irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); | 362 | irq_info[irq] = mk_evtchn_info(evtchn); |
267 | } | 363 | } |
268 | 364 | ||
269 | irq_bindcount[irq]++; | ||
270 | |||
271 | spin_unlock(&irq_mapping_update_lock); | 365 | spin_unlock(&irq_mapping_update_lock); |
272 | 366 | ||
273 | return irq; | 367 | return irq; |
@@ -282,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
282 | spin_lock(&irq_mapping_update_lock); | 376 | spin_lock(&irq_mapping_update_lock); |
283 | 377 | ||
284 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | 378 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; |
379 | |||
285 | if (irq == -1) { | 380 | if (irq == -1) { |
286 | irq = find_unbound_irq(); | 381 | irq = find_unbound_irq(); |
287 | if (irq < 0) | 382 | if (irq < 0) |
288 | goto out; | 383 | goto out; |
289 | 384 | ||
290 | dynamic_irq_init(irq); | ||
291 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 385 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
292 | handle_level_irq, "ipi"); | 386 | handle_level_irq, "ipi"); |
293 | 387 | ||
@@ -298,15 +392,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
298 | evtchn = bind_ipi.port; | 392 | evtchn = bind_ipi.port; |
299 | 393 | ||
300 | evtchn_to_irq[evtchn] = irq; | 394 | evtchn_to_irq[evtchn] = irq; |
301 | irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); | 395 | irq_info[irq] = mk_ipi_info(evtchn, ipi); |
302 | |||
303 | per_cpu(ipi_to_irq, cpu)[ipi] = irq; | 396 | per_cpu(ipi_to_irq, cpu)[ipi] = irq; |
304 | 397 | ||
305 | bind_evtchn_to_cpu(evtchn, cpu); | 398 | bind_evtchn_to_cpu(evtchn, cpu); |
306 | } | 399 | } |
307 | 400 | ||
308 | irq_bindcount[irq]++; | ||
309 | |||
310 | out: | 401 | out: |
311 | spin_unlock(&irq_mapping_update_lock); | 402 | spin_unlock(&irq_mapping_update_lock); |
312 | return irq; | 403 | return irq; |
@@ -332,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
332 | 423 | ||
333 | irq = find_unbound_irq(); | 424 | irq = find_unbound_irq(); |
334 | 425 | ||
335 | dynamic_irq_init(irq); | ||
336 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 426 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
337 | handle_level_irq, "virq"); | 427 | handle_level_irq, "virq"); |
338 | 428 | ||
339 | evtchn_to_irq[evtchn] = irq; | 429 | evtchn_to_irq[evtchn] = irq; |
340 | irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); | 430 | irq_info[irq] = mk_virq_info(evtchn, virq); |
341 | 431 | ||
342 | per_cpu(virq_to_irq, cpu)[virq] = irq; | 432 | per_cpu(virq_to_irq, cpu)[virq] = irq; |
343 | 433 | ||
344 | bind_evtchn_to_cpu(evtchn, cpu); | 434 | bind_evtchn_to_cpu(evtchn, cpu); |
345 | } | 435 | } |
346 | 436 | ||
347 | irq_bindcount[irq]++; | ||
348 | |||
349 | spin_unlock(&irq_mapping_update_lock); | 437 | spin_unlock(&irq_mapping_update_lock); |
350 | 438 | ||
351 | return irq; | 439 | return irq; |
@@ -358,7 +446,7 @@ static void unbind_from_irq(unsigned int irq) | |||
358 | 446 | ||
359 | spin_lock(&irq_mapping_update_lock); | 447 | spin_lock(&irq_mapping_update_lock); |
360 | 448 | ||
361 | if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) { | 449 | if (VALID_EVTCHN(evtchn)) { |
362 | close.port = evtchn; | 450 | close.port = evtchn; |
363 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) | 451 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) |
364 | BUG(); | 452 | BUG(); |
@@ -366,11 +454,11 @@ static void unbind_from_irq(unsigned int irq) | |||
366 | switch (type_from_irq(irq)) { | 454 | switch (type_from_irq(irq)) { |
367 | case IRQT_VIRQ: | 455 | case IRQT_VIRQ: |
368 | per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) | 456 | per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) |
369 | [index_from_irq(irq)] = -1; | 457 | [virq_from_irq(irq)] = -1; |
370 | break; | 458 | break; |
371 | case IRQT_IPI: | 459 | case IRQT_IPI: |
372 | per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) | 460 | per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) |
373 | [index_from_irq(irq)] = -1; | 461 | [ipi_from_irq(irq)] = -1; |
374 | break; | 462 | break; |
375 | default: | 463 | default: |
376 | break; | 464 | break; |
@@ -380,7 +468,7 @@ static void unbind_from_irq(unsigned int irq) | |||
380 | bind_evtchn_to_cpu(evtchn, 0); | 468 | bind_evtchn_to_cpu(evtchn, 0); |
381 | 469 | ||
382 | evtchn_to_irq[evtchn] = -1; | 470 | evtchn_to_irq[evtchn] = -1; |
383 | irq_info[irq] = IRQ_UNBOUND; | 471 | irq_info[irq] = mk_unbound_info(); |
384 | 472 | ||
385 | dynamic_irq_cleanup(irq); | 473 | dynamic_irq_cleanup(irq); |
386 | } | 474 | } |
@@ -498,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
498 | for(i = 0; i < NR_EVENT_CHANNELS; i++) { | 586 | for(i = 0; i < NR_EVENT_CHANNELS; i++) { |
499 | if (sync_test_bit(i, sh->evtchn_pending)) { | 587 | if (sync_test_bit(i, sh->evtchn_pending)) { |
500 | printk(" %d: event %d -> irq %d\n", | 588 | printk(" %d: event %d -> irq %d\n", |
501 | cpu_evtchn[i], i, | 589 | cpu_from_evtchn(i), i, |
502 | evtchn_to_irq[i]); | 590 | evtchn_to_irq[i]); |
503 | } | 591 | } |
504 | } | 592 | } |
505 | 593 | ||
@@ -508,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
508 | return IRQ_HANDLED; | 596 | return IRQ_HANDLED; |
509 | } | 597 | } |
510 | 598 | ||
511 | |||
512 | /* | 599 | /* |
513 | * Search the CPUs pending events bitmasks. For each one found, map | 600 | * Search the CPUs pending events bitmasks. For each one found, map |
514 | * the event number to an irq, and feed it into do_IRQ() for | 601 | * the event number to an irq, and feed it into do_IRQ() for |
@@ -521,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
521 | void xen_evtchn_do_upcall(struct pt_regs *regs) | 608 | void xen_evtchn_do_upcall(struct pt_regs *regs) |
522 | { | 609 | { |
523 | int cpu = get_cpu(); | 610 | int cpu = get_cpu(); |
611 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
524 | struct shared_info *s = HYPERVISOR_shared_info; | 612 | struct shared_info *s = HYPERVISOR_shared_info; |
525 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | 613 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); |
526 | static DEFINE_PER_CPU(unsigned, nesting_count); | 614 | static DEFINE_PER_CPU(unsigned, nesting_count); |
527 | unsigned count; | 615 | unsigned count; |
528 | 616 | ||
617 | exit_idle(); | ||
618 | irq_enter(); | ||
619 | |||
529 | do { | 620 | do { |
530 | unsigned long pending_words; | 621 | unsigned long pending_words; |
531 | 622 | ||
@@ -550,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) | |||
550 | int irq = evtchn_to_irq[port]; | 641 | int irq = evtchn_to_irq[port]; |
551 | 642 | ||
552 | if (irq != -1) | 643 | if (irq != -1) |
553 | xen_do_IRQ(irq, regs); | 644 | handle_irq(irq, regs); |
554 | } | 645 | } |
555 | } | 646 | } |
556 | 647 | ||
@@ -561,12 +652,17 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) | |||
561 | } while(count != 1); | 652 | } while(count != 1); |
562 | 653 | ||
563 | out: | 654 | out: |
655 | irq_exit(); | ||
656 | set_irq_regs(old_regs); | ||
657 | |||
564 | put_cpu(); | 658 | put_cpu(); |
565 | } | 659 | } |
566 | 660 | ||
567 | /* Rebind a new event channel to an existing irq. */ | 661 | /* Rebind a new event channel to an existing irq. */ |
568 | void rebind_evtchn_irq(int evtchn, int irq) | 662 | void rebind_evtchn_irq(int evtchn, int irq) |
569 | { | 663 | { |
664 | struct irq_info *info = info_for_irq(irq); | ||
665 | |||
570 | /* Make sure the irq is masked, since the new event channel | 666 | /* Make sure the irq is masked, since the new event channel |
571 | will also be masked. */ | 667 | will also be masked. */ |
572 | disable_irq(irq); | 668 | disable_irq(irq); |
@@ -576,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
576 | /* After resume the irq<->evtchn mappings are all cleared out */ | 672 | /* After resume the irq<->evtchn mappings are all cleared out */ |
577 | BUG_ON(evtchn_to_irq[evtchn] != -1); | 673 | BUG_ON(evtchn_to_irq[evtchn] != -1); |
578 | /* Expect irq to have been bound before, | 674 | /* Expect irq to have been bound before, |
579 | so the bindcount should be non-0 */ | 675 | so there should be a proper type */ |
580 | BUG_ON(irq_bindcount[irq] == 0); | 676 | BUG_ON(info->type == IRQT_UNBOUND); |
581 | 677 | ||
582 | evtchn_to_irq[evtchn] = irq; | 678 | evtchn_to_irq[evtchn] = irq; |
583 | irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); | 679 | irq_info[irq] = mk_evtchn_info(evtchn); |
584 | 680 | ||
585 | spin_unlock(&irq_mapping_update_lock); | 681 | spin_unlock(&irq_mapping_update_lock); |
586 | 682 | ||
@@ -690,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu) | |||
690 | if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) | 786 | if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) |
691 | continue; | 787 | continue; |
692 | 788 | ||
693 | BUG_ON(irq_info[irq].type != IRQT_VIRQ); | 789 | BUG_ON(virq_from_irq(irq) != virq); |
694 | BUG_ON(irq_info[irq].index != virq); | ||
695 | 790 | ||
696 | /* Get a new binding from Xen. */ | 791 | /* Get a new binding from Xen. */ |
697 | bind_virq.virq = virq; | 792 | bind_virq.virq = virq; |
@@ -703,7 +798,7 @@ static void restore_cpu_virqs(unsigned int cpu) | |||
703 | 798 | ||
704 | /* Record the new mapping. */ | 799 | /* Record the new mapping. */ |
705 | evtchn_to_irq[evtchn] = irq; | 800 | evtchn_to_irq[evtchn] = irq; |
706 | irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); | 801 | irq_info[irq] = mk_virq_info(evtchn, virq); |
707 | bind_evtchn_to_cpu(evtchn, cpu); | 802 | bind_evtchn_to_cpu(evtchn, cpu); |
708 | 803 | ||
709 | /* Ready for use. */ | 804 | /* Ready for use. */ |
@@ -720,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu) | |||
720 | if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) | 815 | if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) |
721 | continue; | 816 | continue; |
722 | 817 | ||
723 | BUG_ON(irq_info[irq].type != IRQT_IPI); | 818 | BUG_ON(ipi_from_irq(irq) != ipi); |
724 | BUG_ON(irq_info[irq].index != ipi); | ||
725 | 819 | ||
726 | /* Get a new binding from Xen. */ | 820 | /* Get a new binding from Xen. */ |
727 | bind_ipi.vcpu = cpu; | 821 | bind_ipi.vcpu = cpu; |
@@ -732,7 +826,7 @@ static void restore_cpu_ipis(unsigned int cpu) | |||
732 | 826 | ||
733 | /* Record the new mapping. */ | 827 | /* Record the new mapping. */ |
734 | evtchn_to_irq[evtchn] = irq; | 828 | evtchn_to_irq[evtchn] = irq; |
735 | irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); | 829 | irq_info[irq] = mk_ipi_info(evtchn, ipi); |
736 | bind_evtchn_to_cpu(evtchn, cpu); | 830 | bind_evtchn_to_cpu(evtchn, cpu); |
737 | 831 | ||
738 | /* Ready for use. */ | 832 | /* Ready for use. */ |
@@ -812,8 +906,11 @@ void xen_irq_resume(void) | |||
812 | 906 | ||
813 | static struct irq_chip xen_dynamic_chip __read_mostly = { | 907 | static struct irq_chip xen_dynamic_chip __read_mostly = { |
814 | .name = "xen-dyn", | 908 | .name = "xen-dyn", |
909 | |||
910 | .disable = disable_dynirq, | ||
815 | .mask = disable_dynirq, | 911 | .mask = disable_dynirq, |
816 | .unmask = enable_dynirq, | 912 | .unmask = enable_dynirq, |
913 | |||
817 | .ack = ack_dynirq, | 914 | .ack = ack_dynirq, |
818 | .set_affinity = set_affinity_irq, | 915 | .set_affinity = set_affinity_irq, |
819 | .retrigger = retrigger_dynirq, | 916 | .retrigger = retrigger_dynirq, |
@@ -822,6 +919,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { | |||
822 | void __init xen_init_IRQ(void) | 919 | void __init xen_init_IRQ(void) |
823 | { | 920 | { |
824 | int i; | 921 | int i; |
922 | size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s); | ||
923 | |||
924 | cpu_evtchn_mask_p = alloc_bootmem(size); | ||
925 | BUG_ON(cpu_evtchn_mask_p == NULL); | ||
825 | 926 | ||
826 | init_evtchn_cpu_bindings(); | 927 | init_evtchn_cpu_bindings(); |
827 | 928 | ||
@@ -829,9 +930,5 @@ void __init xen_init_IRQ(void) | |||
829 | for (i = 0; i < NR_EVENT_CHANNELS; i++) | 930 | for (i = 0; i < NR_EVENT_CHANNELS; i++) |
830 | mask_evtchn(i); | 931 | mask_evtchn(i); |
831 | 932 | ||
832 | /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ | ||
833 | for (i = 0; i < nr_irqs; i++) | ||
834 | irq_bindcount[i] = 0; | ||
835 | |||
836 | irq_ctx_init(smp_processor_id()); | 933 | irq_ctx_init(smp_processor_id()); |
837 | } | 934 | } |
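
Beyond the data-structure rework, xen_evtchn_do_upcall() now brackets its work the way a native interrupt entry does, which is what allows it to feed events into the generic handle_irq() instead of the Xen-private xen_do_IRQ(). The shape of the handler after this patch, condensed:

	struct pt_regs *old_regs = set_irq_regs(regs);	/* publish regs for handlers */

	exit_idle();		/* account for leaving the idle loop, if idle */
	irq_enter();		/* enter hardirq context (preempt count, RCU) */

	/* ... scan the pending event-channel words, handle_irq() each port ... */

	irq_exit();		/* run pending softirqs if this was the outermost entry */
	set_irq_regs(old_regs);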
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 56892a142ee2..3ccd348d112d 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -108,7 +108,7 @@ static void do_suspend(void) | |||
108 | /* XXX use normal device tree? */ | 108 | /* XXX use normal device tree? */ |
109 | xenbus_suspend(); | 109 | xenbus_suspend(); |
110 | 110 | ||
111 | err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0)); | 111 | err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); |
112 | if (err) { | 112 | if (err) { |
113 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); | 113 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); |
114 | goto out; | 114 | goto out; |