author     Ingo Molnar <mingo@elte.hu>   2009-02-13 03:49:38 -0500
committer  Ingo Molnar <mingo@elte.hu>   2009-02-13 03:49:38 -0500
commit     b1864e9a1afef41709886072c6e6248def0386f4 (patch)
tree       2fe749209cf860c1dd10efd1bd2ad8df572bd66e /drivers
parent     e9c4ffb11f0b19005b5b9dc8481687a3637e5887 (diff)
parent     7032e8696726354d6180d8a2d17191f958cd93ae (diff)
Merge branch 'x86/core' into perfcounters/core
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/apic.c
	arch/x86/kernel/setup_percpu.c
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/tbxface.c      |  17
-rw-r--r--  drivers/acpi/osl.c                 |  11
-rw-r--r--  drivers/acpi/tables.c              |  20
-rw-r--r--  drivers/clocksource/acpi_pm.c      |   2
-rw-r--r--  drivers/clocksource/cyclone.c      |   2
-rw-r--r--  drivers/eisa/Kconfig               |   6
-rw-r--r--  drivers/firmware/iscsi_ibft.c      |   4
-rw-r--r--  drivers/gpu/drm/drm_proc.c         |   4
-rw-r--r--  drivers/input/keyboard/Kconfig     |   4
-rw-r--r--  drivers/input/mouse/Kconfig        |   2
-rw-r--r--  drivers/misc/sgi-gru/gru.h         |   2
-rw-r--r--  drivers/misc/sgi-gru/grufile.c     |  18
-rw-r--r--  drivers/misc/sgi-xp/xp.h           |  22
-rw-r--r--  drivers/mtd/nand/Kconfig           |   2
-rw-r--r--  drivers/net/ne3210.c               |   3
-rw-r--r--  drivers/net/sfc/falcon.c           |  24
-rw-r--r--  drivers/net/wireless/arlan-main.c  |   4
-rw-r--r--  drivers/pci/dmar.c                 |   7
-rw-r--r--  drivers/watchdog/rdc321x_wdt.c     |   2
-rw-r--r--  drivers/xen/events.c               | 227
20 files changed, 239 insertions, 144 deletions
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index c3e841f3cde..ab0aff3c7d6 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -365,7 +365,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_table
+ * FUNCTION:    acpi_get_table_with_size
  *
  * PARAMETERS:  Signature - ACPI signature of needed table
  *              Instance - Which instance (for SSDTs)
@@ -377,8 +377,9 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
  *
  *****************************************************************************/
 acpi_status
-acpi_get_table(char *signature,
-	       u32 instance, struct acpi_table_header **out_table)
+acpi_get_table_with_size(char *signature,
+	       u32 instance, struct acpi_table_header **out_table,
+	       acpi_size *tbl_size)
 {
 	u32 i;
 	u32 j;
@@ -408,6 +409,7 @@ acpi_get_table(char *signature,
 		    acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
 		if (ACPI_SUCCESS(status)) {
 			*out_table = acpi_gbl_root_table_list.tables[i].pointer;
+			*tbl_size = acpi_gbl_root_table_list.tables[i].length;
 		}
 
 		if (!acpi_gbl_permanent_mmap) {
@@ -420,6 +422,15 @@ acpi_get_table(char *signature,
 	return (AE_NOT_FOUND);
 }
 
+acpi_status
+acpi_get_table(char *signature,
+	       u32 instance, struct acpi_table_header **out_table)
+{
+	acpi_size tbl_size;
+
+	return acpi_get_table_with_size(signature,
+		       instance, out_table, &tbl_size);
+}
 ACPI_EXPORT_SYMBOL(acpi_get_table)
 
 /*******************************************************************************
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b3193ec0a2e..d1dd5160daa 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -274,12 +274,19 @@ EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
 {
-	if (acpi_gbl_permanent_mmap) {
+	if (acpi_gbl_permanent_mmap)
 		iounmap(virt);
-	}
+	else
+		__acpi_unmap_table(virt, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
+{
+	if (!acpi_gbl_permanent_mmap)
+		__acpi_unmap_table(virt, size);
+}
+
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
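
The pattern these two ACPI hunks introduce (and which drivers/acpi/tables.c and drivers/pci/dmar.c below adopt) is: map a table during early boot with acpi_get_table_with_size(), parse it, then drop the early fixmap mapping with early_acpi_os_unmap_memory(), which becomes a no-op once acpi_gbl_permanent_mmap is set. A minimal sketch of such a caller follows; the function name, chosen signature, and error handling are illustrative and not part of this patch:

#include <linux/init.h>
#include <linux/acpi.h>

/* Sketch only: pairing the two helpers added above. */
static int __init parse_one_table(void)
{
	struct acpi_table_header *table = NULL;
	acpi_size tbl_size;

	/* Map the table; tbl_size records how much was mapped early. */
	acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
	if (!table)
		return -ENODEV;

	/* ... walk the table while the mapping is live ... */

	/* Release the early mapping; harmless once the permanent mmap is up. */
	early_acpi_os_unmap_memory(table, tbl_size);
	return 0;
}
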
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a8852952fac..fec1ae36d43 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -181,14 +181,15 @@ acpi_table_parse_entries(char *id,
 	struct acpi_subtable_header *entry;
 	unsigned int count = 0;
 	unsigned long table_end;
+	acpi_size tbl_size;
 
 	if (!handler)
 		return -EINVAL;
 
 	if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-		acpi_get_table(id, acpi_apic_instance, &table_header);
+		acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size);
 	else
-		acpi_get_table(id, 0, &table_header);
+		acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
 
 	if (!table_header) {
 		printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
@@ -206,8 +207,10 @@ acpi_table_parse_entries(char *id,
 	       table_end) {
 		if (entry->type == entry_id
 		    && (!max_entries || count++ < max_entries))
-			if (handler(entry, table_end))
+			if (handler(entry, table_end)) {
+				early_acpi_os_unmap_memory((char *)table_header, tbl_size);
 				return -EINVAL;
+			}
 
 		entry = (struct acpi_subtable_header *)
 		    ((unsigned long)entry + entry->length);
@@ -217,6 +220,7 @@ acpi_table_parse_entries(char *id,
217 "%i found\n", id, entry_id, count - max_entries, count); 220 "%i found\n", id, entry_id, count - max_entries, count);
218 } 221 }
219 222
223 early_acpi_os_unmap_memory((char *)table_header, tbl_size);
220 return count; 224 return count;
221} 225}
222 226
@@ -241,17 +245,19 @@ acpi_table_parse_madt(enum acpi_madt_type id,
 int __init acpi_table_parse(char *id, acpi_table_handler handler)
 {
 	struct acpi_table_header *table = NULL;
+	acpi_size tbl_size;
 
 	if (!handler)
 		return -EINVAL;
 
 	if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-		acpi_get_table(id, acpi_apic_instance, &table);
+		acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
 	else
-		acpi_get_table(id, 0, &table);
+		acpi_get_table_with_size(id, 0, &table, &tbl_size);
 
 	if (table) {
 		handler(table);
+		early_acpi_os_unmap_memory(table, tbl_size);
 		return 0;
 	} else
 		return 1;
@@ -265,8 +271,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
 static void __init check_multiple_madt(void)
 {
 	struct acpi_table_header *table = NULL;
+	acpi_size tbl_size;
 
-	acpi_get_table(ACPI_SIG_MADT, 2, &table);
+	acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
 	if (table) {
 		printk(KERN_WARNING PREFIX
 		       "BIOS bug: multiple APIC/MADT found,"
@@ -275,6 +282,7 @@ static void __init check_multiple_madt(void)
275 "If \"acpi_apic_instance=%d\" works better, " 282 "If \"acpi_apic_instance=%d\" works better, "
276 "notify linux-acpi@vger.kernel.org\n", 283 "notify linux-acpi@vger.kernel.org\n",
277 acpi_apic_instance ? 0 : 2); 284 acpi_apic_instance ? 0 : 2);
285 early_acpi_os_unmap_memory(table, tbl_size);
278 286
279 } else 287 } else
280 acpi_apic_instance = 0; 288 acpi_apic_instance = 0;
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index e1129fad96d..ee19b6e8fcb 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
 #endif
 
 #ifndef CONFIG_X86_64
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 #define PMTMR_EXPECTED_RATE \
   ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
 /*
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 1bde303b970..8615059a872 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -7,7 +7,7 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 
 #define CYCLONE_CBAR_ADDR	0xFEB00CD0	/* base address ptr */
 #define CYCLONE_PMCC_OFFSET	0x51A0		/* offset to control register */
diff --git a/drivers/eisa/Kconfig b/drivers/eisa/Kconfig
index c0646576cf4..2705284f622 100644
--- a/drivers/eisa/Kconfig
+++ b/drivers/eisa/Kconfig
@@ -3,7 +3,7 @@
 #
 config EISA_VLB_PRIMING
 	bool "Vesa Local Bus priming"
-	depends on X86_PC && EISA
+	depends on X86 && EISA
 	default n
 	---help---
 	  Activate this option if your system contains a Vesa Local
@@ -24,11 +24,11 @@ config EISA_PCI_EISA
 	  When in doubt, say Y.
 
 # Using EISA_VIRTUAL_ROOT on something other than an Alpha or
-# an X86_PC may lead to crashes...
+# an X86 may lead to crashes...
 
 config EISA_VIRTUAL_ROOT
 	bool "EISA virtual root device"
-	depends on EISA && (ALPHA || X86_PC)
+	depends on EISA && (ALPHA || X86)
 	default y
 	---help---
 	  Activate this option if your system only have EISA bus
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 3ab3e4a41d6..7b7ddc2d51c 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -938,8 +938,8 @@ static int __init ibft_init(void)
 		return -ENOMEM;
 
 	if (ibft_addr) {
-		printk(KERN_INFO "iBFT detected at 0x%lx.\n",
-		       virt_to_phys((void *)ibft_addr));
+		printk(KERN_INFO "iBFT detected at 0x%llx.\n",
+		       (u64)virt_to_phys((void *)ibft_addr));
 
 		rc = ibft_check_device();
 		if (rc)
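
Several of the hunks that follow (drm_proc.c, ne3210.c, falcon.c, arlan-main.c) apply the same fix as this one: the value returned by virt_to_phys(), and DMA addresses, no longer fit an unsigned long on every configuration, so the code casts to u64 and prints with %llx. A one-function sketch of that idiom; report_phys() and its message are illustrative and not taken from the patch:

#include <linux/kernel.h>
#include <asm/io.h>

/* Sketch only: print a physical address portably on 32- and 64-bit builds. */
static void report_phys(void *addr)
{
	printk(KERN_INFO "buffer lives at physical 0x%llx\n",
	       (u64)virt_to_phys(addr));
}
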
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 8df849f6683..b756f043a5f 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -678,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
 	*start = &buf[offset];
 	*eof = 0;
 
-	DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+	DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
 		       atomic_read(&dev->vma_count),
-		       high_memory, virt_to_phys(high_memory));
+		       high_memory, (u64)virt_to_phys(high_memory));
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		if (!(vma = pt->vma))
 			continue;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 35561689ff3..ea2638b4198 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -13,11 +13,11 @@ menuconfig INPUT_KEYBOARD
 if INPUT_KEYBOARD
 
 config KEYBOARD_ATKBD
-	tristate "AT keyboard" if EMBEDDED || !X86_PC
+	tristate "AT keyboard" if EMBEDDED || !X86
 	default y
 	select SERIO
 	select SERIO_LIBPS2
-	select SERIO_I8042 if X86_PC
+	select SERIO_I8042 if X86
 	select SERIO_GSCPS2 if GSC
 	help
 	  Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 093c8c1bca7..9bef935ef19 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
 	default y
 	select SERIO
 	select SERIO_LIBPS2
-	select SERIO_I8042 if X86_PC
+	select SERIO_I8042 if X86
 	select SERIO_GSCPS2 if GSC
 	help
 	  Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
index 1b5f579df15..f93f03a9e6e 100644
--- a/drivers/misc/sgi-gru/gru.h
+++ b/drivers/misc/sgi-gru/gru.h
@@ -19,8 +19,6 @@
 #ifndef __GRU_H__
 #define __GRU_H__
 
-#include <asm/uv/uv.h>
-
 /*
  * GRU architectural definitions
  */
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 65098380639..c67e4e8bd62 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -36,23 +36,11 @@
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
+#include <asm/uv/uv.h>
 #include "gru.h"
 #include "grulib.h"
 #include "grutables.h"
 
-#if defined CONFIG_X86_64
-#include <asm/genapic.h>
-#include <asm/irq.h>
-#define IS_UV() is_uv_system()
-#elif defined CONFIG_IA64
-#include <asm/system.h>
-#include <asm/sn/simulator.h>
-/* temp support for running on hardware simulator */
-#define IS_UV() IS_MEDUSA() || ia64_platform_is("uv")
-#else
-#define IS_UV() 0
-#endif
-
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_mmrs.h>
 
@@ -381,7 +369,7 @@ static int __init gru_init(void)
 	char id[10];
 	void *gru_start_vaddr;
 
-	if (!IS_UV())
+	if (!is_uv_system())
 		return 0;
 
 #if defined CONFIG_IA64
@@ -451,7 +439,7 @@ static void __exit gru_exit(void)
 	int order = get_order(sizeof(struct gru_state) *
 			GRU_CHIPLETS_PER_BLADE);
 
-	if (!IS_UV())
+	if (!is_uv_system())
 		return;
 
 	for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 069ad3a1c2a..2275126cb33 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -15,21 +15,19 @@
 
 #include <linux/mutex.h>
 
+#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV
 #include <asm/uv/uv.h>
+#define is_uv()		is_uv_system()
+#endif
+
+#ifndef is_uv
+#define is_uv()		0
+#endif
 
-#ifdef CONFIG_IA64
+#if defined CONFIG_IA64
 #include <asm/system.h>
 #include <asm/sn/arch.h>	/* defines is_shub1() and is_shub2() */
 #define is_shub()	ia64_platform_is("sn2")
-#ifdef CONFIG_IA64_SGI_UV
-#define is_uv()		ia64_platform_is("uv")
-#else
-#define is_uv()		0
-#endif
-#endif
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define is_uv()		is_uv_system()
 #endif
 
 #ifndef is_shub1
@@ -44,10 +42,6 @@
 #define is_shub()	0
 #endif
 
-#ifndef is_uv
-#define is_uv()		0
-#endif
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)	BUG_ON(condition)
 #else
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8b12e6e109d..2ff88791ceb 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -273,7 +273,7 @@ config MTD_NAND_CAFE
 
 config MTD_NAND_CS553X
 	tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
-	depends on X86_32 && (X86_PC || X86_GENERICARCH)
+	depends on X86_32
 	help
 	  The CS553x companion chips for the AMD Geode processor
 	  include NAND flash controllers with built-in hardware ECC
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index fac43fd6fc8..6a843f7350a 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -150,7 +150,8 @@ static int __init ne3210_eisa_probe (struct device *device)
 	if (phys_mem < virt_to_phys(high_memory)) {
 		printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
 		printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
-		printk(KERN_CRIT "ne3210.c: or to an address above 0x%lx.\n", virt_to_phys(high_memory));
+		printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
+		       (u64)virt_to_phys(high_memory));
 		printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
 		retval = -EINVAL;
 		goto out3;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d5378e60fcd..064307c2277 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -338,10 +338,10 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
 	nic_data->next_buffer_table += buffer->entries;
 
 	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
-		"(virt %p phys %lx)\n", buffer->index,
+		"(virt %p phys %llx)\n", buffer->index,
 		buffer->index + buffer->entries - 1,
-		(unsigned long long)buffer->dma_addr, len,
-		buffer->addr, virt_to_phys(buffer->addr));
+		(u64)buffer->dma_addr, len,
+		buffer->addr, (u64)virt_to_phys(buffer->addr));
 
 	return 0;
 }
@@ -353,10 +353,10 @@ static void falcon_free_special_buffer(struct efx_nic *efx,
 		return;
 
 	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
-		"(virt %p phys %lx)\n", buffer->index,
+		"(virt %p phys %llx)\n", buffer->index,
 		buffer->index + buffer->entries - 1,
-		(unsigned long long)buffer->dma_addr, buffer->len,
-		buffer->addr, virt_to_phys(buffer->addr));
+		(u64)buffer->dma_addr, buffer->len,
+		buffer->addr, (u64)virt_to_phys(buffer->addr));
 
 	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
 			    buffer->dma_addr);
@@ -2343,10 +2343,10 @@ int falcon_probe_port(struct efx_nic *efx)
 			     FALCON_MAC_STATS_SIZE);
 	if (rc)
 		return rc;
-	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
-		(unsigned long long)efx->stats_buffer.dma_addr,
+	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
+		(u64)efx->stats_buffer.dma_addr,
 		efx->stats_buffer.addr,
-		virt_to_phys(efx->stats_buffer.addr));
+		(u64)virt_to_phys(efx->stats_buffer.addr));
 
 	return 0;
 }
@@ -2921,9 +2921,9 @@ int falcon_probe_nic(struct efx_nic *efx)
 		goto fail4;
 	BUG_ON(efx->irq_status.dma_addr & 0x0f);
 
-	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
-		(unsigned long long)efx->irq_status.dma_addr,
-		efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
+	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
+		(u64)efx->irq_status.dma_addr,
+		efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
 
 	falcon_probe_spi_devices(efx);
 
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index bfca15da6f0..14c11656e82 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1082,8 +1082,8 @@ static int __init arlan_probe_here(struct net_device *dev,
 	if (arlan_check_fingerprint(memaddr))
 		return -ENODEV;
 
-	printk(KERN_NOTICE "%s: Arlan found at %x, \n ", dev->name,
-	       (int) virt_to_phys((void*)memaddr));
+	printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name,
+	       (u64) virt_to_phys((void*)memaddr));
 
 	ap->card = (void *) memaddr;
 	dev->mem_start = memaddr;
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f5a662a50ac..519f5f91e76 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -42,6 +42,7 @@
 LIST_HEAD(dmar_drhd_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
 
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
@@ -288,8 +289,9 @@ static int __init dmar_table_detect(void)
 	acpi_status status = AE_OK;
 
 	/* if we could find DMAR table, then there are DMAR devices */
-	status = acpi_get_table(ACPI_SIG_DMAR, 0,
-				(struct acpi_table_header **)&dmar_tbl);
+	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
+				(struct acpi_table_header **)&dmar_tbl,
+				&dmar_tbl_size);
 
 	if (ACPI_SUCCESS(status) && !dmar_tbl) {
 		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -481,6 +483,7 @@ void __init detect_intel_iommu(void)
 		iommu_detected = 1;
 #endif
 	}
+	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
 }
 
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index bf92802f2bb..36e221beedc 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -37,7 +37,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 
-#include <asm/mach-rdc321x/rdc321x_defs.h>
+#include <asm/rdc321x_defs.h>
 
 #define RDC_WDT_MASK	0x80000000	/* Mask */
 #define RDC_WDT_EN	0x00800000	/* Enable bit */
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 3141e149d59..30963af5dba 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -30,6 +30,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -51,27 +52,43 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-/* Packed IRQ information: binding type, sub-type index, and event channel. */
-struct packed_irq
-{
-	unsigned short evtchn;
-	unsigned char index;
-	unsigned char type;
-};
-
-static struct packed_irq irq_info[NR_IRQS];
-
-/* Binding types. */
-enum {
-	IRQT_UNBOUND,
+/* Interrupt types. */
+enum xen_irq_type {
+	IRQT_UNBOUND = 0,
 	IRQT_PIRQ,
 	IRQT_VIRQ,
 	IRQT_IPI,
 	IRQT_EVTCHN
 };
 
-/* Convenient shorthand for packed representation of an unbound IRQ. */
-#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EIO"
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info
+{
+	enum xen_irq_type type;	/* type */
+	unsigned short evtchn;	/* event channel */
+	unsigned short cpu;	/* cpu bound */
+
+	union {
+		unsigned short virq;
+		enum ipi_vector ipi;
+		struct {
+			unsigned short gsi;
+			unsigned short vector;
+		} pirq;
+	} u;
+};
+
+static struct irq_info irq_info[NR_IRQS];
 
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1
@@ -84,10 +101,6 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 {
 	return cpu_evtchn_mask_p[cpu].bits;
 }
-static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
 
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
@@ -95,27 +108,108 @@ static int irq_bindcount[NR_IRQS];
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
-static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+static struct irq_info mk_unbound_info(void)
+{
+	return (struct irq_info) { .type = IRQT_UNBOUND };
+}
+
+static struct irq_info mk_evtchn_info(unsigned short evtchn)
+{
+	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
+			.cpu = 0 };
+}
+
+static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+{
+	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
+			.cpu = 0, .u.ipi = ipi };
+}
+
+static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
 {
-	return (struct packed_irq) { evtchn, index, type };
+	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
+			.cpu = 0, .u.virq = virq };
+}
+
+static struct irq_info mk_pirq_info(unsigned short evtchn,
+				    unsigned short gsi, unsigned short vector)
+{
+	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
+			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
 }
 
 /*
  * Accessors for packed IRQ information.
  */
-static inline unsigned int evtchn_from_irq(int irq)
+static struct irq_info *info_for_irq(unsigned irq)
 {
-	return irq_info[irq].evtchn;
+	return &irq_info[irq];
 }
 
-static inline unsigned int index_from_irq(int irq)
+static unsigned int evtchn_from_irq(unsigned irq)
 {
-	return irq_info[irq].index;
+	return info_for_irq(irq)->evtchn;
 }
 
-static inline unsigned int type_from_irq(int irq)
+static enum ipi_vector ipi_from_irq(unsigned irq)
 {
-	return irq_info[irq].type;
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_IPI);
+
+	return info->u.ipi;
+}
+
+static unsigned virq_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_VIRQ);
+
+	return info->u.virq;
+}
+
+static unsigned gsi_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	return info->u.pirq.gsi;
+}
+
+static unsigned vector_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	return info->u.pirq.vector;
+}
+
+static enum xen_irq_type type_from_irq(unsigned irq)
+{
+	return info_for_irq(irq)->type;
+}
+
+static unsigned cpu_from_irq(unsigned irq)
+{
+	return info_for_irq(irq)->cpu;
+}
+
+static unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+	int irq = evtchn_to_irq[evtchn];
+	unsigned ret = 0;
+
+	if (irq != -1)
+		ret = cpu_from_irq(irq);
+
+	return ret;
 }
 
 static inline unsigned long active_evtchns(unsigned int cpu,
@@ -136,10 +230,10 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-	__clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
+	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
 	__set_bit(chn, cpu_evtchn_mask(cpu));
 
-	cpu_evtchn[chn] = cpu;
+	irq_info[irq].cpu = cpu;
 }
 
 static void init_evtchn_cpu_bindings(void)
@@ -154,15 +248,9 @@ static void init_evtchn_cpu_bindings(void)
 	}
 #endif
 
-	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
 	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
-static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-{
-	return cpu_evtchn[evtchn];
-}
-
 static inline void clear_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
@@ -240,9 +328,8 @@ static int find_unbound_irq(void)
 	int irq;
 	struct irq_desc *desc;
 
-	/* Only allocate from dynirq range */
 	for (irq = 0; irq < nr_irqs; irq++)
-		if (irq_bindcount[irq] == 0)
+		if (irq_info[irq].type == IRQT_UNBOUND)
 			break;
 
 	if (irq == nr_irqs)
@@ -252,6 +339,8 @@ static int find_unbound_irq(void)
 	if (WARN_ON(desc == NULL))
 		return -1;
 
+	dynamic_irq_init(irq);
+
 	return irq;
 }
 
@@ -266,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	if (irq == -1) {
 		irq = find_unbound_irq();
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "event");
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+		irq_info[irq] = mk_evtchn_info(evtchn);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -290,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	spin_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
+
 	if (irq == -1) {
 		irq = find_unbound_irq();
 		if (irq < 0)
 			goto out;
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "ipi");
 
@@ -306,15 +392,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 		evtchn = bind_ipi.port;
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-
+		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
  out:
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
@@ -340,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
 		irq = find_unbound_irq();
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "virq");
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		irq_info[irq] = mk_virq_info(evtchn, virq);
 
 		per_cpu(virq_to_irq, cpu)[virq] = irq;
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -366,7 +446,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+	if (VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 			BUG();
@@ -374,11 +454,11 @@ static void unbind_from_irq(unsigned int irq)
 		switch (type_from_irq(irq)) {
 		case IRQT_VIRQ:
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-				[index_from_irq(irq)] = -1;
+				[virq_from_irq(irq)] = -1;
 			break;
 		case IRQT_IPI:
 			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-				[index_from_irq(irq)] = -1;
+				[ipi_from_irq(irq)] = -1;
 			break;
 		default:
 			break;
@@ -388,7 +468,7 @@ static void unbind_from_irq(unsigned int irq)
 		bind_evtchn_to_cpu(evtchn, 0);
 
 		evtchn_to_irq[evtchn] = -1;
-		irq_info[irq] = IRQ_UNBOUND;
+		irq_info[irq] = mk_unbound_info();
 
 		dynamic_irq_cleanup(irq);
 	}
@@ -506,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	for(i = 0; i < NR_EVENT_CHANNELS; i++) {
 		if (sync_test_bit(i, sh->evtchn_pending)) {
 			printk("  %d: event %d -> irq %d\n",
-			       cpu_evtchn[i], i,
+			       cpu_from_evtchn(i), i,
 			       evtchn_to_irq[i]);
 		}
 	}
 
@@ -516,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -529,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 void xen_evtchn_do_upcall(struct pt_regs *regs)
 {
 	int cpu = get_cpu();
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
 	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
+	exit_idle();
+	irq_enter();
+
 	do {
 		unsigned long pending_words;
 
@@ -558,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 				int irq = evtchn_to_irq[port];
 
 				if (irq != -1)
-					xen_do_IRQ(irq, regs);
+					handle_irq(irq, regs);
 			}
 		}
 
@@ -569,12 +652,17 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	} while(count != 1);
 
 out:
+	irq_exit();
+	set_irq_regs(old_regs);
+
 	put_cpu();
 }
 
 /* Rebind a new event channel to an existing irq. */
 void rebind_evtchn_irq(int evtchn, int irq)
 {
+	struct irq_info *info = info_for_irq(irq);
+
 	/* Make sure the irq is masked, since the new event channel
 	   will also be masked. */
 	disable_irq(irq);
@@ -584,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	/* After resume the irq<->evtchn mappings are all cleared out */
 	BUG_ON(evtchn_to_irq[evtchn] != -1);
 	/* Expect irq to have been bound before,
-	   so the bindcount should be non-0 */
-	BUG_ON(irq_bindcount[irq] == 0);
+	   so there should be a proper type */
+	BUG_ON(info->type == IRQT_UNBOUND);
 
 	evtchn_to_irq[evtchn] = irq;
-	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+	irq_info[irq] = mk_evtchn_info(evtchn);
 
 	spin_unlock(&irq_mapping_update_lock);
 
@@ -698,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
 			continue;
 
-		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
-		BUG_ON(irq_info[irq].index != virq);
+		BUG_ON(virq_from_irq(irq) != virq);
 
 		/* Get a new binding from Xen. */
 		bind_virq.virq = virq;
@@ -711,7 +798,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		irq_info[irq] = mk_virq_info(evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
@@ -728,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
 			continue;
 
-		BUG_ON(irq_info[irq].type != IRQT_IPI);
-		BUG_ON(irq_info[irq].index != ipi);
+		BUG_ON(ipi_from_irq(irq) != ipi);
 
 		/* Get a new binding from Xen. */
 		bind_ipi.vcpu = cpu;
@@ -740,7 +826,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
@@ -820,8 +906,11 @@ void xen_irq_resume(void)
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name		= "xen-dyn",
+
+	.disable	= disable_dynirq,
 	.mask		= disable_dynirq,
 	.unmask		= enable_dynirq,
+
 	.ack		= ack_dynirq,
 	.set_affinity	= set_affinity_irq,
 	.retrigger	= retrigger_dynirq,
@@ -841,9 +930,5 @@ void __init xen_init_IRQ(void)
 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
 		mask_evtchn(i);
 
-	/* Dynamic IRQ space is currently unbound.  Zero the refcnts. */
-	for (i = 0; i < nr_irqs; i++)
-		irq_bindcount[i] = 0;
-
 	irq_ctx_init(smp_processor_id());
 }
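
The xen/events.c rework above replaces the old packed_irq array plus the separate cpu_evtchn[] and irq_bindcount[] tables with a single tagged irq_info record per IRQ: a type enum, the event channel, the bound CPU, and a per-type union, built by small mk_*_info() constructors and read through accessors that BUG_ON() a type mismatch. A standalone userspace sketch of that pattern follows; the names and the assert() stand-in are illustrative, not Xen code:

#include <assert.h>
#include <stdio.h>

enum demo_irq_type { DEMO_UNBOUND = 0, DEMO_VIRQ, DEMO_IPI };

struct demo_irq_info {
	enum demo_irq_type type;	/* which member of u is valid */
	unsigned short evtchn;		/* event channel */
	unsigned short cpu;		/* cpu the channel is bound to */
	union {
		unsigned short virq;
		unsigned short ipi;
	} u;
};

/* Constructor: designated initializers keep every field explicit. */
static struct demo_irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct demo_irq_info){ .type = DEMO_VIRQ, .evtchn = evtchn,
				       .cpu = 0, .u.virq = virq };
}

/* Accessor: check the tag before touching the union (BUG_ON in the kernel). */
static unsigned virq_from_info(const struct demo_irq_info *info)
{
	assert(info->type == DEMO_VIRQ);
	return info->u.virq;
}

int main(void)
{
	struct demo_irq_info info = mk_virq_info(42, 3);
	printf("evtchn %u carries virq %u on cpu %u\n",
	       info.evtchn, virq_from_info(&info), info.cpu);
	return 0;
}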