Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig | 46
-rw-r--r--  arch/ia64/configs/generic_defconfig | 1
-rw-r--r--  arch/ia64/configs/gensparse_defconfig | 1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 3
-rw-r--r--  arch/ia64/hp/sim/hpsim_irq.c | 31
-rw-r--r--  arch/ia64/hp/sim/simscsi.c | 4
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 15
-rw-r--r--  arch/ia64/include/asm/acpi.h | 6
-rw-r--r--  arch/ia64/include/asm/bitops.h | 3
-rw-r--r--  arch/ia64/include/asm/compat.h | 208
-rw-r--r--  arch/ia64/include/asm/cputime.h | 6
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/ia64/include/asm/futex.h | 15
-rw-r--r--  arch/ia64/include/asm/hardirq.h | 11
-rw-r--r--  arch/ia64/include/asm/hw_irq.h | 3
-rw-r--r--  arch/ia64/include/asm/io.h | 5
-rw-r--r--  arch/ia64/include/asm/ioctls.h | 89
-rw-r--r--  arch/ia64/include/asm/iommu_table.h | 6
-rw-r--r--  arch/ia64/include/asm/irqflags.h | 94
-rw-r--r--  arch/ia64/include/asm/kvm_host.h | 4
-rw-r--r--  arch/ia64/include/asm/page.h | 1
-rw-r--r--  arch/ia64/include/asm/pal.h | 2
-rw-r--r--  arch/ia64/include/asm/perfmon.h | 2
-rw-r--r--  arch/ia64/include/asm/perfmon_default_smpl.h | 4
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 2
-rw-r--r--  arch/ia64/include/asm/processor.h | 5
-rw-r--r--  arch/ia64/include/asm/rwsem.h | 37
-rw-r--r--  arch/ia64/include/asm/siginfo.h | 1
-rw-r--r--  arch/ia64/include/asm/sn/bte.h | 2
-rw-r--r--  arch/ia64/include/asm/sn/shub_mmr.h | 2
-rw-r--r--  arch/ia64/include/asm/sn/shubio.h | 4
-rw-r--r--  arch/ia64/include/asm/system.h | 80
-rw-r--r--  arch/ia64/include/asm/thread_info.h | 14
-rw-r--r--  arch/ia64/include/asm/tlb.h | 66
-rw-r--r--  arch/ia64/include/asm/types.h | 3
-rw-r--r--  arch/ia64/include/asm/unistd.h | 8
-rw-r--r--  arch/ia64/include/asm/xen/hypercall.h | 2
-rw-r--r--  arch/ia64/kernel/Makefile | 1
-rw-r--r--  arch/ia64/kernel/acpi.c | 29
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 44
-rw-r--r--  arch/ia64/kernel/crash_dump.c | 3
-rw-r--r--  arch/ia64/kernel/cyclone.c | 8
-rw-r--r--  arch/ia64/kernel/efi.c | 1
-rw-r--r--  arch/ia64/kernel/entry.S | 6
-rw-r--r--  arch/ia64/kernel/iosapic.c | 165
-rw-r--r--  arch/ia64/kernel/irq.c | 73
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 15
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c | 23
-rw-r--r--  arch/ia64/kernel/mca.c | 47
-rw-r--r--  arch/ia64/kernel/msi_ia64.c | 53
-rw-r--r--  arch/ia64/kernel/palinfo.c | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 32
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c | 2
-rw-r--r--  arch/ia64/kernel/process.c | 6
-rw-r--r--  arch/ia64/kernel/ptrace.c | 3
-rw-r--r--  arch/ia64/kernel/salinfo.c | 4
-rw-r--r--  arch/ia64/kernel/setup.c | 22
-rw-r--r--  arch/ia64/kernel/smp.c | 13
-rw-r--r--  arch/ia64/kernel/smpboot.c | 16
-rw-r--r--  arch/ia64/kernel/stacktrace.c | 39
-rw-r--r--  arch/ia64/kernel/time.c | 32
-rw-r--r--  arch/ia64/kernel/topology.c | 2
-rw-r--r--  arch/ia64/kernel/unwind.c | 23
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/ia64/kvm/Makefile | 4
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 32
-rw-r--r--  arch/ia64/kvm/lapic.h | 1
-rw-r--r--  arch/ia64/kvm/mmio.c | 2
-rw-r--r--  arch/ia64/kvm/process.c | 2
-rw-r--r--  arch/ia64/kvm/vti.h | 26
-rw-r--r--  arch/ia64/lib/do_csum.S | 2
-rw-r--r--  arch/ia64/mm/contig.c | 12
-rw-r--r--  arch/ia64/mm/discontig.c | 12
-rw-r--r--  arch/ia64/mm/fault.c | 1
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 2
-rw-r--r--  arch/ia64/mm/init.c | 2
-rw-r--r--  arch/ia64/oprofile/backtrace.c | 2
-rw-r--r--  arch/ia64/sn/kernel/Makefile | 2
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 101
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c | 32
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 2
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile | 2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c | 1
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c | 42
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c | 6
-rw-r--r--  arch/ia64/sn/pci/Makefile | 2
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile | 2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c | 2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 2
-rw-r--r--  arch/ia64/uv/kernel/Makefile | 2
-rw-r--r--  arch/ia64/xen/irq_xen.c | 14
-rw-r--r--  arch/ia64/xen/suspend.c | 9
-rw-r--r--  arch/ia64/xen/time.c | 13
-rw-r--r--  arch/ia64/xen/xen_pv_ops.c | 5
94 files changed, 676 insertions, 1118 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ba22849ee3ec..38280ef4a2af 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "IA-64 Linux Kernel Configuration"
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -29,6 +22,11 @@ config IA64
 	select HAVE_KVM
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP
+	select IRQ_PER_CPU
+	select GENERIC_IRQ_SHOW
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -53,6 +51,9 @@ config MMU
 	bool
 	default y
 
+config ARCH_DMA_ADDR_T_64BIT
+	def_bool y
+
 config NEED_DMA_MAP_STATE
 	def_bool y
 
@@ -62,6 +63,9 @@ config NEED_SG_DMA_LENGTH
 config SWIOTLB
 	bool
 
+config STACKTRACE_SUPPORT
+	def_bool y
+
 config GENERIC_LOCKBREAK
 	def_bool n
 
@@ -74,10 +78,6 @@ config HUGETLB_PAGE_SIZE_VARIABLE
 	depends on HUGETLB_PAGE
 	default y
 
-config GENERIC_FIND_NEXT_BIT
-	bool
-	default y
-
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
@@ -410,11 +410,11 @@ config PERMIT_BSP_REMOVE
 	  support.
 
 config FORCE_CPEI_RETARGET
-	bool "Force assumption that CPEI can be re-targetted"
+	bool "Force assumption that CPEI can be re-targeted"
 	depends on PERMIT_BSP_REMOVE
 	default n
 	---help---
-	Say Y if you need to force the assumption that CPEI can be re-targetted to
+	Say Y if you need to force the assumption that CPEI can be re-targeted to
 	any cpu in the system. This hint is available via ACPI 3.0 specifications.
 	Tiger4 systems are capable of re-directing CPEI to any CPU other than BSP.
 	This option it useful to enable this feature on older BIOS's as well.
@@ -679,26 +679,6 @@ source "arch/ia64/kvm/Kconfig"
 
 source "lib/Kconfig"
 
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
-config GENERIC_PENDING_IRQ
-	bool
-	depends on GENERIC_HARDIRQS && SMP
-	default y
-
-config IRQ_PER_CPU
-	bool
-	default y
-
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
 
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 3ded8fe62759..1d7bca0a396d 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -233,3 +233,4 @@ CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=y
+CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 3a98b2dd58ac..b11fa880e4b6 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -208,3 +208,4 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_CRYPTO_MD5=y
+CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 4ce8d1358fee..80241fe03f50 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -37,6 +37,7 @@
 #include <linux/crash_dump.h>
 #include <linux/iommu-helper.h>
 #include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -1063,7 +1064,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 	/*
 	** Address does not fall w/in IOVA, must be bypassing
 	*/
-	DBG_BYPASS("sba_unmap_single_atttrs() bypass addr: 0x%lx\n",
+	DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
 		iova);
 
 #ifdef ENABLE_MARK_CLEAN
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index b272261d77cc..4bd9a63260ee 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -11,42 +11,41 @@
 #include <linux/irq.h>
 
 static unsigned int
-hpsim_irq_startup (unsigned int irq)
+hpsim_irq_startup(struct irq_data *data)
 {
 	return 0;
 }
 
 static void
-hpsim_irq_noop (unsigned int irq)
+hpsim_irq_noop(struct irq_data *data)
 {
 }
 
 static int
-hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
+hpsim_set_affinity_noop(struct irq_data *d, const struct cpumask *b, bool f)
 {
 	return 0;
 }
 
 static struct irq_chip irq_type_hp_sim = {
 	.name =			"hpsim",
-	.startup =		hpsim_irq_startup,
-	.shutdown =		hpsim_irq_noop,
-	.enable =		hpsim_irq_noop,
-	.disable =		hpsim_irq_noop,
-	.ack =			hpsim_irq_noop,
-	.end =			hpsim_irq_noop,
-	.set_affinity =		hpsim_set_affinity_noop,
+	.irq_startup =		hpsim_irq_startup,
+	.irq_shutdown =		hpsim_irq_noop,
+	.irq_enable =		hpsim_irq_noop,
+	.irq_disable =		hpsim_irq_noop,
+	.irq_ack =		hpsim_irq_noop,
+	.irq_set_affinity =	hpsim_set_affinity_noop,
 };
 
 void __init
 hpsim_irq_init (void)
 {
-	struct irq_desc *idesc;
 	int i;
 
-	for (i = 0; i < NR_IRQS; ++i) {
-		idesc = irq_desc + i;
-		if (idesc->chip == &no_irq_chip)
-			idesc->chip = &irq_type_hp_sim;
+	for_each_active_irq(i) {
+		struct irq_chip *chip = irq_get_chip(i);
+
+		if (chip == &no_irq_chip)
+			irq_set_chip(i, &irq_type_hp_sim);
 	}
 }
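The hunk above is the genirq conversion in miniature: callbacks move from taking a bare irq number to taking a struct irq_data, and the irq_desc array is no longer touched directly. A minimal sketch of the post-conversion driver pattern follows; the "example" names are invented for illustration and are not part of this patch.

    #include <linux/irq.h>

    /* Hedged sketch of the irq_data-based callback shape. The per-irq
     * cookie installed with irq_set_chip_data() comes back through the
     * irq_data argument rather than a global table lookup. */
    static void example_irq_mask(struct irq_data *data)
    {
            void __iomem *regs = irq_data_get_irq_chip_data(data);

            /* data->irq is the Linux irq number; write to 'regs' to mask */
            (void)regs;
    }

    static struct irq_chip example_chip = {
            .name           = "example",
            .irq_mask       = example_irq_mask,
            .irq_unmask     = example_irq_mask,
    };

    static void __init example_irq_setup(unsigned int irq, void __iomem *regs)
    {
            irq_set_chip_data(irq, regs);
            irq_set_chip_and_handler(irq, &example_chip, handle_level_irq);
    }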
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 3a078ad3aa44..331de723c676 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -202,7 +202,7 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
 }
 
 static int
-simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
 	unsigned int target_id = sc->device->id;
 	char fname[MAX_ROOT_LEN+16];
@@ -326,6 +326,8 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	return 0;
 }
 
+static DEF_SCSI_QCMD(simscsi_queuecommand)
+
 static int
 simscsi_host_reset (struct scsi_cmnd *sc)
 {
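For context: DEF_SCSI_QCMD comes from the SCSI host_lock pushdown. ->queuecommand no longer runs under the host lock, so legacy drivers keep a *_lck variant and let the macro generate the locked entry point. Roughly what it expands to, paraphrased from include/scsi/scsi_host.h of this era (a hedged sketch, not text from this patch):

    /* Approximate expansion of DEF_SCSI_QCMD(simscsi_queuecommand);
     * see include/scsi/scsi_host.h for the authoritative definition. */
    int simscsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
            unsigned long irq_flags;
            int rc;

            spin_lock_irqsave(shost->host_lock, irq_flags);
            rc = simscsi_queuecommand_lck(cmd, cmd->scsi_done);
            spin_unlock_irqrestore(shost->host_lock, irq_flags);
            return rc;
    }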
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 1e8d71ad93ef..bff0824cf8a4 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -390,12 +390,11 @@ static void rs_unthrottle(struct tty_struct * tty)
 }
 
 
-static int rs_ioctl(struct tty_struct *tty, struct file * file,
-		    unsigned int cmd, unsigned long arg)
+static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
 {
 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
 	    (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
-	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
+	    (cmd != TIOCMIWAIT)) {
 		if (tty->flags & (1 << TTY_IO_ERROR))
 		    return -EIO;
 	}
@@ -433,16 +432,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
 	case TIOCMIWAIT:
 		printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n");
 		return 0;
-	/*
-	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
-	 * Return: write counters to the user passed counter struct
-	 * NB: both 1->0 and 0->1 transitions are counted except for
-	 *     RI where only 0->1 is counted.
-	 */
-	case TIOCGICOUNT:
-		printk(KERN_INFO "rs_ioctl: TIOCGICOUNT called\n");
-		return 0;
-
 	case TIOCSERGWILD:
 	case TIOCSERSWILD:
 		/* "setserial -W" is called in Debian boot */
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 837dc82a013e..a06dfb13d518 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -128,9 +128,9 @@ static inline const char *acpi_get_sysname (void)
 int acpi_request_vector (u32 int_type);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
 
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
+/* Low-level suspend routine. */
+extern int acpi_suspend_lowlevel(void);
+
 extern unsigned long acpi_wakeup_address;
 
 /*
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 9da3df6f1a52..b76f7e009218 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -456,12 +456,11 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
 
 #ifdef __KERNEL__
 
-#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/le.h>
 
 #define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
 #define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
 
-#include <asm-generic/bitops/minix.h>
 #include <asm-generic/bitops/sched.h>
 
 #endif /* __KERNEL__ */
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
deleted file mode 100644
index 9301a2821615..000000000000
--- a/arch/ia64/include/asm/compat.h
+++ /dev/null
@@ -1,208 +0,0 @@
-#ifndef _ASM_IA64_COMPAT_H
-#define _ASM_IA64_COMPAT_H
-/*
- * Architecture specific compatibility types
- */
-#include <linux/types.h>
-
-#define COMPAT_USER_HZ		100
-#define COMPAT_UTS_MACHINE	"i686\0\0\0"
-
-typedef u32		compat_size_t;
-typedef s32		compat_ssize_t;
-typedef s32		compat_time_t;
-typedef s32		compat_clock_t;
-typedef s32		compat_key_t;
-typedef s32		compat_pid_t;
-typedef u16		__compat_uid_t;
-typedef u16		__compat_gid_t;
-typedef u32		__compat_uid32_t;
-typedef u32		__compat_gid32_t;
-typedef u16		compat_mode_t;
-typedef u32		compat_ino_t;
-typedef u16		compat_dev_t;
-typedef s32		compat_off_t;
-typedef s64		compat_loff_t;
-typedef u16		compat_nlink_t;
-typedef u16		compat_ipc_pid_t;
-typedef s32		compat_daddr_t;
-typedef u32		compat_caddr_t;
-typedef __kernel_fsid_t	compat_fsid_t;
-typedef s32		compat_timer_t;
-
-typedef s32		compat_int_t;
-typedef s32		compat_long_t;
-typedef s64 __attribute__((aligned(4))) compat_s64;
-typedef u32		compat_uint_t;
-typedef u32		compat_ulong_t;
-typedef u64 __attribute__((aligned(4))) compat_u64;
-
-struct compat_timespec {
-	compat_time_t	tv_sec;
-	s32		tv_nsec;
-};
-
-struct compat_timeval {
-	compat_time_t	tv_sec;
-	s32		tv_usec;
-};
-
-struct compat_stat {
-	compat_dev_t	st_dev;
-	u16		__pad1;
-	compat_ino_t	st_ino;
-	compat_mode_t	st_mode;
-	compat_nlink_t	st_nlink;
-	__compat_uid_t	st_uid;
-	__compat_gid_t	st_gid;
-	compat_dev_t	st_rdev;
-	u16		__pad2;
-	u32		st_size;
-	u32		st_blksize;
-	u32		st_blocks;
-	u32		st_atime;
-	u32		st_atime_nsec;
-	u32		st_mtime;
-	u32		st_mtime_nsec;
-	u32		st_ctime;
-	u32		st_ctime_nsec;
-	u32		__unused4;
-	u32		__unused5;
-};
-
-struct compat_flock {
-	short		l_type;
-	short		l_whence;
-	compat_off_t	l_start;
-	compat_off_t	l_len;
-	compat_pid_t	l_pid;
-};
-
-#define F_GETLK64	12
-#define F_SETLK64	13
-#define F_SETLKW64	14
-
-/*
- * IA32 uses 4 byte alignment for 64 bit quantities,
- * so we need to pack this structure.
- */
-struct compat_flock64 {
-	short		l_type;
-	short		l_whence;
-	compat_loff_t	l_start;
-	compat_loff_t	l_len;
-	compat_pid_t	l_pid;
-} __attribute__((packed));
-
-struct compat_statfs {
-	int		f_type;
-	int		f_bsize;
-	int		f_blocks;
-	int		f_bfree;
-	int		f_bavail;
-	int		f_files;
-	int		f_ffree;
-	compat_fsid_t	f_fsid;
-	int		f_namelen;	/* SunOS ignores this field. */
-	int		f_frsize;
-	int		f_spare[5];
-};
-
-#define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
-#define COMPAT_RLIM_INFINITY		0xffffffff
-
-typedef u32		compat_old_sigset_t;	/* at least 32 bits */
-
-#define _COMPAT_NSIG		64
-#define _COMPAT_NSIG_BPW	32
-
-typedef u32		compat_sigset_word;
-
-#define COMPAT_OFF_T_MAX	0x7fffffff
-#define COMPAT_LOFF_T_MAX	0x7fffffffffffffffL
-
-struct compat_ipc64_perm {
-	compat_key_t key;
-	__compat_uid32_t uid;
-	__compat_gid32_t gid;
-	__compat_uid32_t cuid;
-	__compat_gid32_t cgid;
-	unsigned short mode;
-	unsigned short __pad1;
-	unsigned short seq;
-	unsigned short __pad2;
-	compat_ulong_t unused1;
-	compat_ulong_t unused2;
-};
-
-struct compat_semid64_ds {
-	struct compat_ipc64_perm sem_perm;
-	compat_time_t  sem_otime;
-	compat_ulong_t __unused1;
-	compat_time_t  sem_ctime;
-	compat_ulong_t __unused2;
-	compat_ulong_t sem_nsems;
-	compat_ulong_t __unused3;
-	compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
-	struct compat_ipc64_perm msg_perm;
-	compat_time_t  msg_stime;
-	compat_ulong_t __unused1;
-	compat_time_t  msg_rtime;
-	compat_ulong_t __unused2;
-	compat_time_t  msg_ctime;
-	compat_ulong_t __unused3;
-	compat_ulong_t msg_cbytes;
-	compat_ulong_t msg_qnum;
-	compat_ulong_t msg_qbytes;
-	compat_pid_t   msg_lspid;
-	compat_pid_t   msg_lrpid;
-	compat_ulong_t __unused4;
-	compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
-	struct compat_ipc64_perm shm_perm;
-	compat_size_t  shm_segsz;
-	compat_time_t  shm_atime;
-	compat_ulong_t __unused1;
-	compat_time_t  shm_dtime;
-	compat_ulong_t __unused2;
-	compat_time_t  shm_ctime;
-	compat_ulong_t __unused3;
-	compat_pid_t   shm_cpid;
-	compat_pid_t   shm_lpid;
-	compat_ulong_t shm_nattch;
-	compat_ulong_t __unused4;
-	compat_ulong_t __unused5;
-};
-
-/*
- * A pointer passed in from user mode. This should not be used for syscall parameters,
- * just declare them as pointers because the syscall entry code will have appropriately
- * converted them already.
- */
-typedef	u32		compat_uptr_t;
-
-static inline void __user *
-compat_ptr (compat_uptr_t uptr)
-{
-	return (void __user *) (unsigned long) uptr;
-}
-
-static inline compat_uptr_t
-ptr_to_compat(void __user *uptr)
-{
-	return (u32)(unsigned long)uptr;
-}
-
-static __inline__ void __user *
-arch_compat_alloc_user_space (long len)
-{
-	struct pt_regs *regs = task_pt_regs(current);
-	return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
-}
-
-#endif /* _ASM_IA64_COMPAT_H */
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fa8a8594660..6073b187528a 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -56,10 +56,10 @@ typedef u64 cputime64_t;
 #define jiffies64_to_cputime64(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
 
 /*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
  */
-#define cputime_to_msecs(__ct)		((__ct) / NSEC_PER_MSEC)
-#define msecs_to_cputime(__msecs)	((__msecs) * NSEC_PER_MSEC)
+#define cputime_to_usecs(__ct)		((__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)	((__usecs) * NSEC_PER_USEC)
 
 /*
  * Convert cputime <-> seconds
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index a2e7368a0150..4336d080b241 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -12,6 +12,8 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
+#define DMA_ERROR_CODE 0
+
 extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index c7f0f062239c..8428525ddb22 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -46,7 +46,7 @@ do { \
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	{
-		register unsigned long r8 __asm ("r8");
+		register unsigned long r8 __asm ("r8") = 0;
+		unsigned long prev;
 		__asm__ __volatile__(
 			"	mf;;					\n"
 			"	mov ar.ccv=%3;;				\n"
 			"[1:]	cmpxchg4.acq %0=[%1],%2,ar.ccv		\n"
 			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
 			"[2:]"
-			: "=r" (r8)
+			: "=r" (prev)
 			: "r" (uaddr), "r" (newval),
 			  "rO" ((long) (unsigned) oldval)
 			: "memory");
+		*uval = prev;
 		return r8;
 	}
 }
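The reworked primitive separates fault reporting from value return: the function result is now only a fault status, while the value actually observed at uaddr comes back through *uval. A hedged caller sketch follows; the example function and its retry policy are invented for illustration, not taken from this patch.

    #include <asm/futex.h>

    /* Sketch of the new calling convention, mirroring how generic futex
     * code consumes it. */
    static int example_try_swap_user(u32 __user *uaddr, u32 expected, u32 desired)
    {
            u32 curval;
            int ret;

            /* ret: 0 or -EFAULT; curval: what was really found at *uaddr */
            ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, desired);
            if (ret)
                    return ret;
            return curval == expected ? 0 : -EAGAIN; /* invented retry policy */
    }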
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index d514cd9edb49..8fb7d33a661f 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -6,12 +6,6 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-#include <asm/processor.h>
-
 /*
  * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
  */
@@ -20,6 +14,11 @@
 
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
 
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index bf2e37493e04..a681d02cb324 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -151,9 +151,6 @@ static inline void ia64_native_resend_irq(unsigned int vector)
 /*
  * Default implementations for the irq-descriptor API:
  */
-
-extern struct irq_desc irq_desc[NR_IRQS];
-
 #ifndef CONFIG_IA64_GENERIC
 static inline ia64_vector __ia64_irq_to_vector(int irq)
 {
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index cc8335eb3110..e5a6c3530c6c 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -426,6 +426,11 @@ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size)
 extern void iounmap (volatile void __iomem *addr);
 extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
 extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+	return ioremap(phys_addr, size);
+}
+
 
 /*
  * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/ioctls.h b/arch/ia64/include/asm/ioctls.h
index b79c385114ef..f3aab5512e98 100644
--- a/arch/ia64/include/asm/ioctls.h
+++ b/arch/ia64/include/asm/ioctls.h
@@ -1,93 +1,6 @@
 #ifndef _ASM_IA64_IOCTLS_H
 #define _ASM_IA64_IOCTLS_H
 
-/*
- * Based on <asm-i386/ioctls.h>
- *
- * Modified 1998, 1999, 2002
- *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS		0x5401
-#define TCSETS		0x5402	/* Clashes with SNDCTL_TMR_START sound ioctl */
-#define TCSETSW		0x5403
-#define TCSETSF		0x5404
-#define TCGETA		0x5405
-#define TCSETA		0x5406
-#define TCSETAW		0x5407
-#define TCSETAF		0x5408
-#define TCSBRK		0x5409
-#define TCXONC		0x540A
-#define TCFLSH		0x540B
-#define TIOCEXCL	0x540C
-#define TIOCNXCL	0x540D
-#define TIOCSCTTY	0x540E
-#define TIOCGPGRP	0x540F
-#define TIOCSPGRP	0x5410
-#define TIOCOUTQ	0x5411
-#define TIOCSTI		0x5412
-#define TIOCGWINSZ	0x5413
-#define TIOCSWINSZ	0x5414
-#define TIOCMGET	0x5415
-#define TIOCMBIS	0x5416
-#define TIOCMBIC	0x5417
-#define TIOCMSET	0x5418
-#define TIOCGSOFTCAR	0x5419
-#define TIOCSSOFTCAR	0x541A
-#define FIONREAD	0x541B
-#define TIOCINQ		FIONREAD
-#define TIOCLINUX	0x541C
-#define TIOCCONS	0x541D
-#define TIOCGSERIAL	0x541E
-#define TIOCSSERIAL	0x541F
-#define TIOCPKT		0x5420
-#define FIONBIO		0x5421
-#define TIOCNOTTY	0x5422
-#define TIOCSETD	0x5423
-#define TIOCGETD	0x5424
-#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
-#define TIOCSBRK	0x5427	/* BSD compatibility */
-#define TIOCCBRK	0x5428	/* BSD compatibility */
-#define TIOCGSID	0x5429	/* Return the session ID of FD */
-#define TCGETS2		_IOR('T',0x2A, struct termios2)
-#define TCSETS2		_IOW('T',0x2B, struct termios2)
-#define TCSETSW2	_IOW('T',0x2C, struct termios2)
-#define TCSETSF2	_IOW('T',0x2D, struct termios2)
-#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
-#define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
-
-#define FIONCLEX	0x5450  /* these numbers need to be adjusted. */
-#define FIOCLEX		0x5451
-#define FIOASYNC	0x5452
-#define TIOCSERCONFIG	0x5453
-#define TIOCSERGWILD	0x5454
-#define TIOCSERSWILD	0x5455
-#define TIOCGLCKTRMIOS	0x5456
-#define TIOCSLCKTRMIOS	0x5457
-#define TIOCSERGSTRUCT	0x5458	/* For debugging only */
-#define TIOCSERGETLSR	0x5459	/* Get line status register */
-#define TIOCSERGETMULTI	0x545A	/* Get multiport config */
-#define TIOCSERSETMULTI	0x545B	/* Set multiport config */
-
-#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
-#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
-#define FIOQSIZE	0x5460
-
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-#define TIOCPKT_IOCTL		64
-
-#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
+#include <asm-generic/ioctls.h>
 
 #endif /* _ASM_IA64_IOCTLS_H */
diff --git a/arch/ia64/include/asm/iommu_table.h b/arch/ia64/include/asm/iommu_table.h
new file mode 100644
index 000000000000..92c8d36ae5ae
--- /dev/null
+++ b/arch/ia64/include/asm/iommu_table.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_IOMMU_TABLE_H
+#define _ASM_IA64_IOMMU_TABLE_H
+
+#define IOMMU_INIT_POST(_detect)
+
+#endif /* _ASM_IA64_IOMMU_TABLE_H */
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
new file mode 100644
index 000000000000..f82d6be2ecd2
--- /dev/null
+++ b/arch/ia64/include/asm/irqflags.h
@@ -0,0 +1,94 @@
+/*
+ * IRQ flags defines.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#ifndef _ASM_IA64_IRQFLAGS_H
+#define _ASM_IA64_IRQFLAGS_H
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+extern unsigned long last_cli_ip;
+static inline void arch_maybe_save_ip(unsigned long flags)
+{
+	if (flags & IA64_PSR_I)
+		last_cli_ip = ia64_getreg(_IA64_REG_IP);
+}
+#else
+#define arch_maybe_save_ip(flags) do {} while (0)
+#endif
+
+/*
+ * - clearing psr.i is implicitly serialized (visible by next insn)
+ * - setting psr.i requires data serialization
+ * - we need a stop-bit before reading PSR because we sometimes
+ *   write a floating-point register right before reading the PSR
+ *   and that writes to PSR.mfl
+ */
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	ia64_stop();
+#ifdef CONFIG_PARAVIRT
+	return ia64_get_psr_i();
+#else
+	return ia64_getreg(_IA64_REG_PSR);
+#endif
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags = arch_local_save_flags();
+
+	ia64_stop();
+	ia64_rsm(IA64_PSR_I);
+	arch_maybe_save_ip(flags);
+	return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+#ifdef CONFIG_IA64_DEBUG_IRQ
+	arch_local_irq_save();
+#else
+	ia64_stop();
+	ia64_rsm(IA64_PSR_I);
+#endif
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	ia64_stop();
+	ia64_ssm(IA64_PSR_I);
+	ia64_srlz_d();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+#ifdef CONFIG_IA64_DEBUG_IRQ
+	unsigned long old_psr = arch_local_save_flags();
+#endif
+	ia64_intrin_local_irq_restore(flags & IA64_PSR_I);
+	arch_maybe_save_ip(old_psr & ~flags);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+	return (flags & IA64_PSR_I) == 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_safe_halt(void)
+{
+	ia64_pal_halt_light();	/* PAL_HALT_LIGHT */
+}
+
+
+#endif /* _ASM_IA64_IRQFLAGS_H */
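These arch_* hooks are what the generic <linux/irqflags.h> wrappers bottom out in; this new file replaces the macros removed from asm/system.h further down. A small usage sketch, with an illustrative function name (not from this patch):

    #include <linux/irqflags.h>

    static void example_irq_safe_update(int *counter)
    {
            unsigned long flags;

            local_irq_save(flags);          /* -> arch_local_irq_save() */
            (*counter)++;                   /* no local interrupts here */
            local_irq_restore(flags);       /* -> arch_local_irq_restore() */
    }

On ia64 this ends up as an rsm/ssm of psr.i with the stop bits and data serialization the comment block above describes.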
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2f229e5de498..2689ee54a1c9 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -590,6 +590,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC 1
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 41b6d31110fd..961a16f43e6b 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -189,6 +189,7 @@ get_order (unsigned long size)
 # define pgprot_val(x)	((x).pgprot)
 
 # define __pte(x)	((pte_t) { (x) } )
+# define __pmd(x)	((pmd_t) { (x) } )
 # define __pgprot(x)	((pgprot_t) { (x) } )
 
 #else /* !STRICT_MM_TYPECHECKS */
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
index 6a292505b396..2e69284df8e7 100644
--- a/arch/ia64/include/asm/pal.h
+++ b/arch/ia64/include/asm/pal.h
@@ -1669,7 +1669,7 @@ typedef union pal_vp_info_u {
 } pal_vp_info_u_t;
 
 /*
- * Returns infomation about virtual processor features
+ * Returns information about virtual processor features
  */
 static inline s64
 ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
index 7f3333dd00e4..d551183fee90 100644
--- a/arch/ia64/include/asm/perfmon.h
+++ b/arch/ia64/include/asm/perfmon.h
@@ -7,7 +7,7 @@
 #define _ASM_IA64_PERFMON_H
 
 /*
- * perfmon comamnds supported on all CPU models
+ * perfmon commands supported on all CPU models
  */
 #define PFM_WRITE_PMCS		0x01
 #define PFM_WRITE_PMDS		0x02
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
index 74724b24c2b7..a2d560c67230 100644
--- a/arch/ia64/include/asm/perfmon_default_smpl.h
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -67,8 +67,8 @@ typedef struct {
 	unsigned long	ip;		/* where did the overflow interrupt happened */
 	unsigned long	tstamp;		/* ar.itc when entering perfmon intr. handler */
 
-	unsigned short	cpu;		/* cpu on which the overflow occured */
-	unsigned short	set;		/* event set active when overflow ocurred   */
+	unsigned short	cpu;		/* cpu on which the overflow occurred */
+	unsigned short	set;		/* event set active when overflow occurred   */
 	int		tgid;		/* thread group id (for NPTL, this is getpid()) */
 } pfm_default_smpl_entry_t;
 
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index c3286f42e501..1a97af31ef17 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
 
 /* atomic versions of the some PTE manipulations: */
 
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 348e44d08ce3..03afe7970748 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -717,8 +717,9 @@ prefetchw (const void *x)
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 215d5454c7d3..3027e7516d85 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -25,20 +25,8 @@
 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
 #endif
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
 #include <asm/intrinsics.h>
 
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-};
-
 #define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
 #define RWSEM_ACTIVE_BIAS		(1L)
 #define RWSEM_ACTIVE_MASK		(0xffffffffL)
@@ -46,26 +34,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void
-init_rwsem (struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
 /*
  * lock for reading
  */
@@ -174,9 +142,4 @@ __downgrade_write (struct rw_semaphore *sem)
 #define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
 #define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
index 118d42979003..c8fcaa2ac48f 100644
--- a/arch/ia64/include/asm/siginfo.h
+++ b/arch/ia64/include/asm/siginfo.h
@@ -62,6 +62,7 @@ typedef struct siginfo {
 		int _imm;		/* immediate value for "break" */
 		unsigned int _flags;	/* see below */
 		unsigned long _isr;	/* isr */
+		short _addr_lsb;	/* lsb of faulting address */
 	} _sigfault;
 
 	/* SIGPOLL */
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
index 96798d2da7c2..cc6c4dbf53af 100644
--- a/arch/ia64/include/asm/sn/bte.h
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -216,7 +216,7 @@ extern void bte_error_handler(unsigned long);
 	bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
 
 /*
- * The following is the prefered way of calling bte_unaligned_copy
+ * The following is the preferred way of calling bte_unaligned_copy
  * If the copy is fully cache line aligned, then bte_copy is
  * used instead.  Since bte_copy is inlined, this saves a call
  * stack.  NOTE: bte_copy is called synchronously and does block
diff --git a/arch/ia64/include/asm/sn/shub_mmr.h b/arch/ia64/include/asm/sn/shub_mmr.h
index 7de1d1d4b71a..a84d870f4294 100644
--- a/arch/ia64/include/asm/sn/shub_mmr.h
+++ b/arch/ia64/include/asm/sn/shub_mmr.h
@@ -459,7 +459,7 @@
 /* ==================================================================== */
 /* Some MMRs are functionally identical (or close enough) on both SHUB1 */
 /* and SHUB2 that it makes sense to define a geberic name for the MMR.  */
-/* It is acceptible to use (for example) SH_IPI_INT to reference the    */
+/* It is acceptable to use (for example) SH_IPI_INT to reference the    */
 /* the IPI MMR. The value of SH_IPI_INT is determined at runtime based  */
 /* on the type of the SHUB. Do not use these #defines in performance    */
 /* critical code or loops - there is a small performance penalty.       */
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index 6052422a22b3..ecb8a49476b6 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -1383,7 +1383,7 @@ typedef union ii_ibcr_u {
  * response is capture in IXSM and IXSS, and IXSS[VALID] is set. The   *
  * errant header is thereby captured, and no further spurious read     *
  * respones are captured until IXSS[VALID] is cleared by setting the   *
- * appropriate bit in IECLR.Everytime a spurious read response is      *
+ * appropriate bit in IECLR. Every time a spurious read response is    *
  * detected, the SPUR_RD bit of the PRB corresponding to the incoming  *
  * message's SIDN field is set. This always happens, regarless of      *
  * whether a header is captured. The programmer should check           *
@@ -2738,7 +2738,7 @@ typedef union ii_ippr_u {
 /************************************************************************
  *                                                                      *
  * The following defines which were not formed into structures are     *
- * probably indentical to another register, and the name of the        *
+ * probably identical to another register, and the name of the         *
  * register is provided against each of these registers. This          *
  * information needs to be checked carefully                           *
  *                                                                      *
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
index 9f342a574ce8..6cca30705d50 100644
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -107,87 +107,11 @@ extern struct ia64_boot_param {
  */
 #define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
 
-#define safe_halt()	ia64_pal_halt_light()	/* PAL_HALT_LIGHT */
-
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
  * affected by the rsm/ssm.
  */
-/* For spinlocks etc */
-
-/*
- * - clearing psr.i is implicitly serialized (visible by next insn)
- * - setting psr.i requires data serialization
- * - we need a stop-bit before reading PSR because we sometimes
- *   write a floating-point register right before reading the PSR
- *   and that writes to PSR.mfl
- */
-#ifdef CONFIG_PARAVIRT
-#define __local_save_flags()	ia64_get_psr_i()
-#else
-#define __local_save_flags()	ia64_getreg(_IA64_REG_PSR)
-#endif
-
-#define __local_irq_save(x)			\
-do {						\
-	ia64_stop();				\
-	(x) = __local_save_flags();		\
-	ia64_stop();				\
-	ia64_rsm(IA64_PSR_I);			\
-} while (0)
-
-#define __local_irq_disable()			\
-do {						\
-	ia64_stop();				\
-	ia64_rsm(IA64_PSR_I);			\
-} while (0)
-
-#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
-
-#ifdef CONFIG_IA64_DEBUG_IRQ
-
-  extern unsigned long last_cli_ip;
-
-# define __save_ip()		last_cli_ip = ia64_getreg(_IA64_REG_IP)
-
-# define local_irq_save(x)					\
-do {								\
-	unsigned long __psr;					\
-								\
-	__local_irq_save(__psr);				\
-	if (__psr & IA64_PSR_I)					\
-		__save_ip();					\
-	(x) = __psr;						\
-} while (0)
-
-# define local_irq_disable()	do { unsigned long __x; local_irq_save(__x); } while (0)
-
-# define local_irq_restore(x)					\
-do {								\
-	unsigned long __old_psr, __psr = (x);			\
-								\
-	local_save_flags(__old_psr);				\
-	__local_irq_restore(__psr);				\
-	if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I))	\
-		__save_ip();					\
-} while (0)
-
-#else /* !CONFIG_IA64_DEBUG_IRQ */
-# define local_irq_save(x)	__local_irq_save(x)
-# define local_irq_disable()	__local_irq_disable()
-# define local_irq_restore(x)	__local_irq_restore(x)
-#endif /* !CONFIG_IA64_DEBUG_IRQ */
-
-#define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags)	({ ia64_stop(); (flags) = __local_save_flags(); })
-
-#define irqs_disabled()				\
-({						\
-	unsigned long __ia64_id_flags;		\
-	local_save_flags(__ia64_id_flags);	\
-	(__ia64_id_flags & IA64_PSR_I) == 0;	\
-})
 
 #ifdef __KERNEL__
 
@@ -272,10 +196,6 @@ void cpu_idle_wait(void);
 
 void default_idle(void);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index b6a5ba2aca34..ff0cc84e7bcc 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -59,11 +59,12 @@ struct thread_info {
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_info_node(tsk, node)	\
+		((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()	((struct thread_info *) 0)
-#define alloc_thread_info(tsk)	((struct thread_info *) 0)
+#define alloc_thread_info_node(tsk, node)	((struct thread_info *) 0)
 #define task_thread_info(tsk)	((struct thread_info *) 0)
 #endif
 #define free_thread_info(ti)	/* nothing */
@@ -84,7 +85,14 @@ struct thread_info {
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define alloc_task_struct_node(node)						\
+({										\
+	struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP,	\
+					     KERNEL_STACK_SIZE_ORDER);		\
+	struct task_struct *ret = page ? page_address(page) : NULL;		\
+										\
+	ret;									\
+})
 #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
 #endif /* !__ASSEMBLY */
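The *_node variants exist so fork can place a child's kernel stack on a chosen NUMA node. A hedged sketch of the consuming side (names simplified; the real caller lives in kernel/fork.c and is not part of this patch):

    /* Illustrative only. On ia64 task_struct and thread_info share one
     * allocation, so both "allocations" resolve into the same pages. */
    static struct task_struct *example_alloc_task_on(int node)
    {
            struct task_struct *tsk = alloc_task_struct_node(node);
            struct thread_info *ti;

            if (!tsk)
                    return NULL;
            ti = alloc_thread_info_node(tsk, node);	/* pointer arithmetic only */
            (void)ti;
            return tsk;
    }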
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 23cce999eb1c..c3ffe3e54edc 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,27 @@
 #include <asm/machvec.h>
 
 #ifdef CONFIG_SMP
-# define FREE_PTE_NR		2048
 # define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
 #else
-# define FREE_PTE_NR		0
 # define tlb_fast_mode(tlb)	(1)
 #endif
 
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define IA64_GATHER_BUNDLE	8
+
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
-	struct page		*pages[FREE_PTE_NR];
+	struct page		**pages;
+	struct page		*local[IA64_GATHER_BUNDLE];
 };
 
 struct ia64_tr_entry {
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * Flush the TLB for address range START to END and, if not in fast mode, release the
  * freed pages that where gathered up to this point.
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 	}
 }
 
-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+{
 	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	/*
 	 * Use fast mode if only 1 CPU is online.
 	 *
@@ -172,7 +183,6 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
-	return tlb;
 }
 
 /*
@@ -180,7 +190,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
  * collected.
  */
 static inline void
-tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,7 +201,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
@@ -199,18 +210,33 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
199 * must be delayed until after the TLB has been flushed (see comments at the beginning of 210 * must be delayed until after the TLB has been flushed (see comments at the beginning of
200 * this file). 211 * this file).
201 */ 212 */
202static inline void 213static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
203tlb_remove_page (struct mmu_gather *tlb, struct page *page)
204{ 214{
205 tlb->need_flush = 1; 215 tlb->need_flush = 1;
206 216
207 if (tlb_fast_mode(tlb)) { 217 if (tlb_fast_mode(tlb)) {
208 free_page_and_swap_cache(page); 218 free_page_and_swap_cache(page);
209 return; 219 return 1; /* avoid calling tlb_flush_mmu */
210 } 220 }
221
222 if (!tlb->nr && tlb->pages == tlb->local)
223 __tlb_alloc_page(tlb);
224
211 tlb->pages[tlb->nr++] = page; 225 tlb->pages[tlb->nr++] = page;
212 if (tlb->nr >= FREE_PTE_NR) 226 VM_BUG_ON(tlb->nr > tlb->max);
213 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr); 227
228 return tlb->max - tlb->nr;
229}
230
231static inline void tlb_flush_mmu(struct mmu_gather *tlb)
232{
233 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
234}
235
236static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
237{
238 if (!__tlb_remove_page(tlb, page))
239 tlb_flush_mmu(tlb);
214} 240}
215 241
216/* 242/*
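
The tlb.h hunks above convert ia64 to the reworked mmu_gather API: tlb_gather_mmu() no longer hands back the per-CPU mmu_gathers structure via get_cpu_var(), it initializes a gather supplied by the caller, seeded with the small on-struct tlb->local array that __tlb_alloc_page() upgrades to a full page when memory is available. A minimal caller sketch, assuming a hypothetical example_unmap() and only the signatures shown above:

#include <asm/tlb.h>

static void example_unmap(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);	/* 0: not a full-mm teardown */
	/* unmap_page_range()-style work queues pages via
	 * tlb_remove_page(&tlb, page); when __tlb_remove_page()
	 * reports a full batch, tlb_flush_mmu() drains it */
	tlb_finish_mmu(&tlb, start, end);
}
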
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index 93773fd37be0..82b3939d2718 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -40,9 +40,6 @@ struct fnptr {
40 unsigned long gp; 40 unsigned long gp;
41}; 41};
42 42
43/* DMA addresses are 64-bits wide, in general. */
44typedef u64 dma_addr_t;
45
46# endif /* __KERNEL__ */ 43# endif /* __KERNEL__ */
47#endif /* !__ASSEMBLY__ */ 44#endif /* !__ASSEMBLY__ */
48 45
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 954d398a54b4..7c928da35b17 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -315,11 +315,17 @@
315#define __NR_fanotify_init 1323 315#define __NR_fanotify_init 1323
316#define __NR_fanotify_mark 1324 316#define __NR_fanotify_mark 1324
317#define __NR_prlimit64 1325 317#define __NR_prlimit64 1325
318#define __NR_name_to_handle_at 1326
319#define __NR_open_by_handle_at 1327
320#define __NR_clock_adjtime 1328
321#define __NR_syncfs 1329
322#define __NR_setns 1330
323#define __NR_sendmmsg 1331
318 324
319#ifdef __KERNEL__ 325#ifdef __KERNEL__
320 326
321 327
322#define NR_syscalls 302 /* length of syscall table */ 328#define NR_syscalls 308 /* length of syscall table */
323 329
324/* 330/*
325 * The following defines stop scripts/checksyscalls.sh from complaining about 331 * The following defines stop scripts/checksyscalls.sh from complaining about
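
The six new entries and the NR_syscalls bump are consistent: ia64 syscall numbers start at 1024, so a table ending at __NR_prlimit64 (1325) held 1325 - 1024 + 1 = 302 entries, and extending it to __NR_sendmmsg (1331) gives 308. A hypothetical compile-time check, not part of the patch (IA64_FIRST_SYSCALL is an assumed name):

#include <linux/kernel.h>

#define IA64_FIRST_SYSCALL	1024	/* assumption: first ia64 syscall number */

static inline void nr_syscalls_check(void)
{
	/* would break the build if the table length ever drifted */
	BUILD_BUG_ON(__NR_sendmmsg - IA64_FIRST_SYSCALL + 1 != NR_syscalls);
}
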
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
index 96fc62366aa4..ed28bcd5bb85 100644
--- a/arch/ia64/include/asm/xen/hypercall.h
+++ b/arch/ia64/include/asm/xen/hypercall.h
@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
107static inline int 107static inline int
108xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) 108xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
109{ 109{
110 return _hypercall2(int, sched_op_new, cmd, arg); 110 return _hypercall2(int, sched_op, cmd, arg);
111} 111}
112 112
113static inline long 113static inline long
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index db10b1e378b0..395c2f216dd8 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_AUDIT) += audit.o
34obj-$(CONFIG_PCI_MSI) += msi_ia64.o 34obj-$(CONFIG_PCI_MSI) += msi_ia64.o
35mca_recovery-y += mca_drv.o mca_drv_asm.o 35mca_recovery-y += mca_drv.o mca_drv_asm.o
36obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o 36obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
37obj-$(CONFIG_STACKTRACE) += stacktrace.o
37 38
38obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ 39obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \
39 paravirt_patch.o 40 paravirt_patch.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c6c90f39f4d9..3be485a300b1 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -477,6 +477,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
477 if (!(pa->flags & ACPI_SRAT_CPU_ENABLED)) 477 if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
478 return; 478 return;
479 479
480 if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
481 printk_once(KERN_WARNING
482 "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
483 ARRAY_SIZE(node_cpuid));
484 return;
485 }
480 pxm = get_processor_proximity_domain(pa); 486 pxm = get_processor_proximity_domain(pa);
481 487
482 /* record this node in proximity bitmap */ 488 /* record this node in proximity bitmap */
@@ -797,7 +803,7 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
797 * ACPI based hotplug CPU support 803 * ACPI based hotplug CPU support
798 */ 804 */
799#ifdef CONFIG_ACPI_HOTPLUG_CPU 805#ifdef CONFIG_ACPI_HOTPLUG_CPU
800static 806static __cpuinit
801int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 807int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
802{ 808{
803#ifdef CONFIG_ACPI_NUMA 809#ifdef CONFIG_ACPI_NUMA
@@ -872,7 +878,7 @@ __init void prefill_possible_map(void)
872 set_cpu_possible(i, true); 878 set_cpu_possible(i, true);
873} 879}
874 880
875int acpi_map_lsapic(acpi_handle handle, int *pcpu) 881static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
876{ 882{
877 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 883 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
878 union acpi_object *obj; 884 union acpi_object *obj;
@@ -923,6 +929,11 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
923 return (0); 929 return (0);
924} 930}
925 931
932/* wrapper to silence section mismatch warning */
933int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
934{
935 return _acpi_map_lsapic(handle, pcpu);
936}
926EXPORT_SYMBOL(acpi_map_lsapic); 937EXPORT_SYMBOL(acpi_map_lsapic);
927 938
928int acpi_unmap_lsapic(int cpu) 939int acpi_unmap_lsapic(int cpu)
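
The _acpi_map_lsapic()/acpi_map_lsapic() split above is the standard idiom for calling a __cpuinit function from an exported symbol: __cpuinit code can be discarded when CPU hotplug is configured out, so the exported caller is marked __ref to tell modpost the cross-section reference is deliberate. The same pattern in isolation, with hypothetical names:

#include <linux/init.h>

static int __cpuinit _frob_cpu(int cpu)
{
	return 0;			/* hotplug-only work lives here */
}

/* __ref: the reference into the (possibly discarded) cpuinit
 * section is intentional, so modpost stays quiet */
int __ref frob_cpu(int cpu)
{
	return _frob_cpu(cpu);
}
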
@@ -1028,18 +1039,8 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
1028EXPORT_SYMBOL(acpi_unregister_ioapic); 1039EXPORT_SYMBOL(acpi_unregister_ioapic);
1029 1040
1030/* 1041/*
1031 * acpi_save_state_mem() - save kernel state 1042 * acpi_suspend_lowlevel() - save kernel state and suspend.
1032 * 1043 *
1033 * TBD when IA64 starts to support suspend... 1044 * TBD when IA64 starts to support suspend...
1034 */ 1045 */
1035int acpi_save_state_mem(void) { return 0; } 1046int acpi_suspend_lowlevel(void) { return 0; }
1036
1037/*
1038 * acpi_restore_state()
1039 */
1040void acpi_restore_state_mem(void) {}
1041
1042/*
1043 * do_suspend_lowlevel()
1044 */
1045void do_suspend_lowlevel(void) {}
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 22f61526a8e1..f09b174244d5 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -23,8 +23,6 @@
23#include <linux/acpi.h> 23#include <linux/acpi.h>
24#include <acpi/processor.h> 24#include <acpi/processor.h>
25 25
26#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
27
28MODULE_AUTHOR("Venkatesh Pallipadi"); 26MODULE_AUTHOR("Venkatesh Pallipadi");
29MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 27MODULE_DESCRIPTION("ACPI Processor P-States Driver");
30MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
@@ -47,12 +45,12 @@ processor_set_pstate (
47{ 45{
48 s64 retval; 46 s64 retval;
49 47
50 dprintk("processor_set_pstate\n"); 48 pr_debug("processor_set_pstate\n");
51 49
52 retval = ia64_pal_set_pstate((u64)value); 50 retval = ia64_pal_set_pstate((u64)value);
53 51
54 if (retval) { 52 if (retval) {
55 dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", 53 pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
56 value, retval); 54 value, retval);
57 return -ENODEV; 55 return -ENODEV;
58 } 56 }
@@ -67,14 +65,14 @@ processor_get_pstate (
67 u64 pstate_index = 0; 65 u64 pstate_index = 0;
68 s64 retval; 66 s64 retval;
69 67
70 dprintk("processor_get_pstate\n"); 68 pr_debug("processor_get_pstate\n");
71 69
72 retval = ia64_pal_get_pstate(&pstate_index, 70 retval = ia64_pal_get_pstate(&pstate_index,
73 PAL_GET_PSTATE_TYPE_INSTANT); 71 PAL_GET_PSTATE_TYPE_INSTANT);
74 *value = (u32) pstate_index; 72 *value = (u32) pstate_index;
75 73
76 if (retval) 74 if (retval)
77 dprintk("Failed to get current freq with " 75 pr_debug("Failed to get current freq with "
78 "error 0x%lx, idx 0x%x\n", retval, *value); 76 "error 0x%lx, idx 0x%x\n", retval, *value);
79 77
80 return (int)retval; 78 return (int)retval;
@@ -90,7 +88,7 @@ extract_clock (
90{ 88{
91 unsigned long i; 89 unsigned long i;
92 90
93 dprintk("extract_clock\n"); 91 pr_debug("extract_clock\n");
94 92
95 for (i = 0; i < data->acpi_data.state_count; i++) { 93 for (i = 0; i < data->acpi_data.state_count; i++) {
96 if (value == data->acpi_data.states[i].status) 94 if (value == data->acpi_data.states[i].status)
@@ -110,7 +108,7 @@ processor_get_freq (
110 cpumask_t saved_mask; 108 cpumask_t saved_mask;
111 unsigned long clock_freq; 109 unsigned long clock_freq;
112 110
113 dprintk("processor_get_freq\n"); 111 pr_debug("processor_get_freq\n");
114 112
115 saved_mask = current->cpus_allowed; 113 saved_mask = current->cpus_allowed;
116 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 114 set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -148,7 +146,7 @@ processor_set_freq (
148 cpumask_t saved_mask; 146 cpumask_t saved_mask;
149 int retval; 147 int retval;
150 148
151 dprintk("processor_set_freq\n"); 149 pr_debug("processor_set_freq\n");
152 150
153 saved_mask = current->cpus_allowed; 151 saved_mask = current->cpus_allowed;
154 set_cpus_allowed_ptr(current, cpumask_of(cpu)); 152 set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -159,16 +157,16 @@ processor_set_freq (
159 157
160 if (state == data->acpi_data.state) { 158 if (state == data->acpi_data.state) {
161 if (unlikely(data->resume)) { 159 if (unlikely(data->resume)) {
162 dprintk("Called after resume, resetting to P%d\n", state); 160 pr_debug("Called after resume, resetting to P%d\n", state);
163 data->resume = 0; 161 data->resume = 0;
164 } else { 162 } else {
165 dprintk("Already at target state (P%d)\n", state); 163 pr_debug("Already at target state (P%d)\n", state);
166 retval = 0; 164 retval = 0;
167 goto migrate_end; 165 goto migrate_end;
168 } 166 }
169 } 167 }
170 168
171 dprintk("Transitioning from P%d to P%d\n", 169 pr_debug("Transitioning from P%d to P%d\n",
172 data->acpi_data.state, state); 170 data->acpi_data.state, state);
173 171
174 /* cpufreq frequency struct */ 172 /* cpufreq frequency struct */
@@ -186,7 +184,7 @@ processor_set_freq (
186 184
187 value = (u32) data->acpi_data.states[state].control; 185 value = (u32) data->acpi_data.states[state].control;
188 186
189 dprintk("Transitioning to state: 0x%08x\n", value); 187 pr_debug("Transitioning to state: 0x%08x\n", value);
190 188
191 ret = processor_set_pstate(value); 189 ret = processor_set_pstate(value);
192 if (ret) { 190 if (ret) {
@@ -219,7 +217,7 @@ acpi_cpufreq_get (
219{ 217{
220 struct cpufreq_acpi_io *data = acpi_io_data[cpu]; 218 struct cpufreq_acpi_io *data = acpi_io_data[cpu];
221 219
222 dprintk("acpi_cpufreq_get\n"); 220 pr_debug("acpi_cpufreq_get\n");
223 221
224 return processor_get_freq(data, cpu); 222 return processor_get_freq(data, cpu);
225} 223}
@@ -235,7 +233,7 @@ acpi_cpufreq_target (
235 unsigned int next_state = 0; 233 unsigned int next_state = 0;
236 unsigned int result = 0; 234 unsigned int result = 0;
237 235
238 dprintk("acpi_cpufreq_setpolicy\n"); 236 pr_debug("acpi_cpufreq_setpolicy\n");
239 237
240 result = cpufreq_frequency_table_target(policy, 238 result = cpufreq_frequency_table_target(policy,
241 data->freq_table, target_freq, relation, &next_state); 239 data->freq_table, target_freq, relation, &next_state);
@@ -255,7 +253,7 @@ acpi_cpufreq_verify (
255 unsigned int result = 0; 253 unsigned int result = 0;
256 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 254 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
257 255
258 dprintk("acpi_cpufreq_verify\n"); 256 pr_debug("acpi_cpufreq_verify\n");
259 257
260 result = cpufreq_frequency_table_verify(policy, 258 result = cpufreq_frequency_table_verify(policy,
261 data->freq_table); 259 data->freq_table);
@@ -273,7 +271,7 @@ acpi_cpufreq_cpu_init (
273 struct cpufreq_acpi_io *data; 271 struct cpufreq_acpi_io *data;
274 unsigned int result = 0; 272 unsigned int result = 0;
275 273
276 dprintk("acpi_cpufreq_cpu_init\n"); 274 pr_debug("acpi_cpufreq_cpu_init\n");
277 275
278 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); 276 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
279 if (!data) 277 if (!data)
@@ -288,7 +286,7 @@ acpi_cpufreq_cpu_init (
288 286
289 /* capability check */ 287 /* capability check */
290 if (data->acpi_data.state_count <= 1) { 288 if (data->acpi_data.state_count <= 1) {
291 dprintk("No P-States\n"); 289 pr_debug("No P-States\n");
292 result = -ENODEV; 290 result = -ENODEV;
293 goto err_unreg; 291 goto err_unreg;
294 } 292 }
@@ -297,7 +295,7 @@ acpi_cpufreq_cpu_init (
297 ACPI_ADR_SPACE_FIXED_HARDWARE) || 295 ACPI_ADR_SPACE_FIXED_HARDWARE) ||
298 (data->acpi_data.status_register.space_id != 296 (data->acpi_data.status_register.space_id !=
299 ACPI_ADR_SPACE_FIXED_HARDWARE)) { 297 ACPI_ADR_SPACE_FIXED_HARDWARE)) {
300 dprintk("Unsupported address space [%d, %d]\n", 298 pr_debug("Unsupported address space [%d, %d]\n",
301 (u32) (data->acpi_data.control_register.space_id), 299 (u32) (data->acpi_data.control_register.space_id),
302 (u32) (data->acpi_data.status_register.space_id)); 300 (u32) (data->acpi_data.status_register.space_id));
303 result = -ENODEV; 301 result = -ENODEV;
@@ -348,7 +346,7 @@ acpi_cpufreq_cpu_init (
348 "activated.\n", cpu); 346 "activated.\n", cpu);
349 347
350 for (i = 0; i < data->acpi_data.state_count; i++) 348 for (i = 0; i < data->acpi_data.state_count; i++)
351 dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", 349 pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
352 (i == data->acpi_data.state?'*':' '), i, 350 (i == data->acpi_data.state?'*':' '), i,
353 (u32) data->acpi_data.states[i].core_frequency, 351 (u32) data->acpi_data.states[i].core_frequency,
354 (u32) data->acpi_data.states[i].power, 352 (u32) data->acpi_data.states[i].power,
@@ -383,7 +381,7 @@ acpi_cpufreq_cpu_exit (
383{ 381{
384 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; 382 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
385 383
386 dprintk("acpi_cpufreq_cpu_exit\n"); 384 pr_debug("acpi_cpufreq_cpu_exit\n");
387 385
388 if (data) { 386 if (data) {
389 cpufreq_frequency_table_put_attr(policy->cpu); 387 cpufreq_frequency_table_put_attr(policy->cpu);
@@ -418,7 +416,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
418static int __init 416static int __init
419acpi_cpufreq_init (void) 417acpi_cpufreq_init (void)
420{ 418{
421 dprintk("acpi_cpufreq_init\n"); 419 pr_debug("acpi_cpufreq_init\n");
422 420
423 return cpufreq_register_driver(&acpi_cpufreq_driver); 421 return cpufreq_register_driver(&acpi_cpufreq_driver);
424} 422}
@@ -427,7 +425,7 @@ acpi_cpufreq_init (void)
427static void __exit 425static void __exit
428acpi_cpufreq_exit (void) 426acpi_cpufreq_exit (void)
429{ 427{
430 dprintk("acpi_cpufreq_exit\n"); 428 pr_debug("acpi_cpufreq_exit\n");
431 429
432 cpufreq_unregister_driver(&acpi_cpufreq_driver); 430 cpufreq_unregister_driver(&acpi_cpufreq_driver);
433 return; 431 return;
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 23e91290e41f..c8c9298666fb 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -13,9 +13,6 @@
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15 15
16/* Stores the physical address of elf header of crash image. */
17unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
18
19/** 16/**
20 * copy_oldmem_page - copy one page from "oldmem" 17 * copy_oldmem_page - copy one page from "oldmem"
21 * @pfn: page frame number to be copied 18 * @pfn: page frame number to be copied
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 71e35864d2e2..f64097b5118a 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -31,8 +31,6 @@ static struct clocksource clocksource_cyclone = {
31 .rating = 300, 31 .rating = 300,
32 .read = read_cyclone, 32 .read = read_cyclone,
33 .mask = (1LL << 40) - 1, 33 .mask = (1LL << 40) - 1,
34 .mult = 0, /*to be caluclated*/
35 .shift = 16,
36 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 34 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
37}; 35};
38 36
@@ -59,13 +57,13 @@ int __init init_cyclone_clock(void)
59 return -ENODEV; 57 return -ENODEV;
60 } 58 }
61 base = readq(reg); 59 base = readq(reg);
60 iounmap(reg);
62 if(!base){ 61 if(!base){
63 printk(KERN_ERR "Summit chipset: Could not find valid CBAR" 62 printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
64 " value.\n"); 63 " value.\n");
65 use_cyclone = 0; 64 use_cyclone = 0;
66 return -ENODEV; 65 return -ENODEV;
67 } 66 }
68 iounmap(reg);
69 67
70 /* setup PMCC */ 68 /* setup PMCC */
71 offset = (base + CYCLONE_PMCC_OFFSET); 69 offset = (base + CYCLONE_PMCC_OFFSET);
@@ -118,9 +116,7 @@ int __init init_cyclone_clock(void)
118 /* initialize last tick */ 116 /* initialize last tick */
119 cyclone_mc = cyclone_timer; 117 cyclone_mc = cyclone_timer;
120 clocksource_cyclone.fsys_mmio = cyclone_timer; 118 clocksource_cyclone.fsys_mmio = cyclone_timer;
121 clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, 119 clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ);
122 clocksource_cyclone.shift);
123 clocksource_register(&clocksource_cyclone);
124 120
125 return 0; 121 return 0;
126} 122}
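
Two fixes land in this file: iounmap(reg) moves ahead of the !base bail-out so the error path no longer leaks the mapping, and the hand-rolled mult/shift setup gives way to clocksource_register_hz(), which derives both from the timer frequency. What the old code computed by hand is essentially this, a sketch of clocksource_hz2mult() at the driver's fixed shift of 16:

#include <linux/math64.h>
#include <linux/time.h>

static u32 hz2mult_sketch(u32 hz, u32 shift)
{
	/* mult is nanoseconds per cycle in fixed point:
	 * (NSEC_PER_SEC << shift) / hz, rounded to nearest */
	u64 tmp = ((u64)NSEC_PER_SEC << shift) + hz / 2;

	return (u32)div_u64(tmp, hz);
}
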
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a0f001928502..6fc03aff046c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -23,6 +23,7 @@
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/bootmem.h> 25#include <linux/bootmem.h>
26#include <linux/crash_dump.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/init.h> 28#include <linux/init.h>
28#include <linux/types.h> 29#include <linux/types.h>
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 244704a174de..97dd2abdeb1a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1771,6 +1771,12 @@ sys_call_table:
1771 data8 sys_fanotify_init 1771 data8 sys_fanotify_init
1772 data8 sys_fanotify_mark 1772 data8 sys_fanotify_mark
1773 data8 sys_prlimit64 // 1325 1773 data8 sys_prlimit64 // 1325
1774 data8 sys_name_to_handle_at
1775 data8 sys_open_by_handle_at
1776 data8 sys_clock_adjtime
1777 data8 sys_syncfs
1778 data8 sys_setns // 1330
1779 data8 sys_sendmmsg
1774 1780
1775 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1781 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1776#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1782#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7ded76658d2d..b0f9afebb146 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -108,10 +108,6 @@
108#define DBG(fmt...) 108#define DBG(fmt...)
109#endif 109#endif
110 110
111#define NR_PREALLOCATE_RTE_ENTRIES \
112 (PAGE_SIZE / sizeof(struct iosapic_rte_info))
113#define RTE_PREALLOCATED (1)
114
115static DEFINE_SPINLOCK(iosapic_lock); 111static DEFINE_SPINLOCK(iosapic_lock);
116 112
117/* 113/*
@@ -136,7 +132,6 @@ struct iosapic_rte_info {
136 struct list_head rte_list; /* RTEs sharing the same vector */ 132 struct list_head rte_list; /* RTEs sharing the same vector */
137 char rte_index; /* IOSAPIC RTE index */ 133 char rte_index; /* IOSAPIC RTE index */
138 int refcnt; /* reference counter */ 134 int refcnt; /* reference counter */
139 unsigned int flags; /* flags */
140 struct iosapic *iosapic; 135 struct iosapic *iosapic;
141} ____cacheline_aligned; 136} ____cacheline_aligned;
142 137
@@ -155,9 +150,6 @@ static struct iosapic_intr_info {
155 150
156static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */ 151static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */
157 152
158static int iosapic_kmalloc_ok;
159static LIST_HEAD(free_rte_list);
160
161static inline void 153static inline void
162iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val) 154iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
163{ 155{
@@ -265,7 +257,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
265} 257}
266 258
267static void 259static void
268nop (unsigned int irq) 260nop (struct irq_data *data)
269{ 261{
270 /* do nothing... */ 262 /* do nothing... */
271} 263}
@@ -295,8 +287,9 @@ kexec_disable_iosapic(void)
295#endif 287#endif
296 288
297static void 289static void
298mask_irq (unsigned int irq) 290mask_irq (struct irq_data *data)
299{ 291{
292 unsigned int irq = data->irq;
300 u32 low32; 293 u32 low32;
301 int rte_index; 294 int rte_index;
302 struct iosapic_rte_info *rte; 295 struct iosapic_rte_info *rte;
@@ -313,8 +306,9 @@ mask_irq (unsigned int irq)
313} 306}
314 307
315static void 308static void
316unmask_irq (unsigned int irq) 309unmask_irq (struct irq_data *data)
317{ 310{
311 unsigned int irq = data->irq;
318 u32 low32; 312 u32 low32;
319 int rte_index; 313 int rte_index;
320 struct iosapic_rte_info *rte; 314 struct iosapic_rte_info *rte;
@@ -331,9 +325,11 @@ unmask_irq (unsigned int irq)
331 325
332 326
333static int 327static int
334iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) 328iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
329 bool force)
335{ 330{
336#ifdef CONFIG_SMP 331#ifdef CONFIG_SMP
332 unsigned int irq = data->irq;
337 u32 high32, low32; 333 u32 high32, low32;
338 int cpu, dest, rte_index; 334 int cpu, dest, rte_index;
339 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; 335 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
@@ -387,31 +383,33 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
387 */ 383 */
388 384
389static unsigned int 385static unsigned int
390iosapic_startup_level_irq (unsigned int irq) 386iosapic_startup_level_irq (struct irq_data *data)
391{ 387{
392 unmask_irq(irq); 388 unmask_irq(data);
393 return 0; 389 return 0;
394} 390}
395 391
396static void 392static void
397iosapic_end_level_irq (unsigned int irq) 393iosapic_unmask_level_irq (struct irq_data *data)
398{ 394{
395 unsigned int irq = data->irq;
399 ia64_vector vec = irq_to_vector(irq); 396 ia64_vector vec = irq_to_vector(irq);
400 struct iosapic_rte_info *rte; 397 struct iosapic_rte_info *rte;
401 int do_unmask_irq = 0; 398 int do_unmask_irq = 0;
402 399
403 irq_complete_move(irq); 400 irq_complete_move(irq);
404 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { 401 if (unlikely(irqd_is_setaffinity_pending(data))) {
405 do_unmask_irq = 1; 402 do_unmask_irq = 1;
406 mask_irq(irq); 403 mask_irq(data);
407 } 404 } else
405 unmask_irq(data);
408 406
409 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) 407 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
410 iosapic_eoi(rte->iosapic->addr, vec); 408 iosapic_eoi(rte->iosapic->addr, vec);
411 409
412 if (unlikely(do_unmask_irq)) { 410 if (unlikely(do_unmask_irq)) {
413 move_masked_irq(irq); 411 irq_move_masked_irq(data);
414 unmask_irq(irq); 412 unmask_irq(data);
415 } 413 }
416} 414}
417 415
@@ -421,16 +419,15 @@ iosapic_end_level_irq (unsigned int irq)
421#define iosapic_ack_level_irq nop 419#define iosapic_ack_level_irq nop
422 420
423static struct irq_chip irq_type_iosapic_level = { 421static struct irq_chip irq_type_iosapic_level = {
424 .name = "IO-SAPIC-level", 422 .name = "IO-SAPIC-level",
425 .startup = iosapic_startup_level_irq, 423 .irq_startup = iosapic_startup_level_irq,
426 .shutdown = iosapic_shutdown_level_irq, 424 .irq_shutdown = iosapic_shutdown_level_irq,
427 .enable = iosapic_enable_level_irq, 425 .irq_enable = iosapic_enable_level_irq,
428 .disable = iosapic_disable_level_irq, 426 .irq_disable = iosapic_disable_level_irq,
429 .ack = iosapic_ack_level_irq, 427 .irq_ack = iosapic_ack_level_irq,
430 .end = iosapic_end_level_irq, 428 .irq_mask = mask_irq,
431 .mask = mask_irq, 429 .irq_unmask = iosapic_unmask_level_irq,
432 .unmask = unmask_irq, 430 .irq_set_affinity = iosapic_set_affinity
433 .set_affinity = iosapic_set_affinity
434}; 431};
435 432
436/* 433/*
@@ -438,9 +435,9 @@ static struct irq_chip irq_type_iosapic_level = {
438 */ 435 */
439 436
440static unsigned int 437static unsigned int
441iosapic_startup_edge_irq (unsigned int irq) 438iosapic_startup_edge_irq (struct irq_data *data)
442{ 439{
443 unmask_irq(irq); 440 unmask_irq(data);
444 /* 441 /*
445 * IOSAPIC simply drops interrupts pended while the 442 * IOSAPIC simply drops interrupts pended while the
446 * corresponding pin was masked, so we can't know if an 443 * corresponding pin was masked, so we can't know if an
@@ -450,37 +447,25 @@ iosapic_startup_edge_irq (unsigned int irq)
450} 447}
451 448
452static void 449static void
453iosapic_ack_edge_irq (unsigned int irq) 450iosapic_ack_edge_irq (struct irq_data *data)
454{ 451{
455 struct irq_desc *idesc = irq_desc + irq; 452 irq_complete_move(data->irq);
456 453 irq_move_irq(data);
457 irq_complete_move(irq);
458 move_native_irq(irq);
459 /*
460 * Once we have recorded IRQ_PENDING already, we can mask the
461 * interrupt for real. This prevents IRQ storms from unhandled
462 * devices.
463 */
464 if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
465 (IRQ_PENDING|IRQ_DISABLED))
466 mask_irq(irq);
467} 454}
468 455
469#define iosapic_enable_edge_irq unmask_irq 456#define iosapic_enable_edge_irq unmask_irq
470#define iosapic_disable_edge_irq nop 457#define iosapic_disable_edge_irq nop
471#define iosapic_end_edge_irq nop
472 458
473static struct irq_chip irq_type_iosapic_edge = { 459static struct irq_chip irq_type_iosapic_edge = {
474 .name = "IO-SAPIC-edge", 460 .name = "IO-SAPIC-edge",
475 .startup = iosapic_startup_edge_irq, 461 .irq_startup = iosapic_startup_edge_irq,
476 .shutdown = iosapic_disable_edge_irq, 462 .irq_shutdown = iosapic_disable_edge_irq,
477 .enable = iosapic_enable_edge_irq, 463 .irq_enable = iosapic_enable_edge_irq,
478 .disable = iosapic_disable_edge_irq, 464 .irq_disable = iosapic_disable_edge_irq,
479 .ack = iosapic_ack_edge_irq, 465 .irq_ack = iosapic_ack_edge_irq,
480 .end = iosapic_end_edge_irq, 466 .irq_mask = mask_irq,
481 .mask = mask_irq, 467 .irq_unmask = unmask_irq,
482 .unmask = unmask_irq, 468 .irq_set_affinity = iosapic_set_affinity
483 .set_affinity = iosapic_set_affinity
484}; 469};
485 470
486static unsigned int 471static unsigned int
@@ -552,37 +537,6 @@ iosapic_reassign_vector (int irq)
552 } 537 }
553} 538}
554 539
555static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
556{
557 int i;
558 struct iosapic_rte_info *rte;
559 int preallocated = 0;
560
561 if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
562 rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
563 NR_PREALLOCATE_RTE_ENTRIES);
564 for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
565 list_add(&rte->rte_list, &free_rte_list);
566 }
567
568 if (!list_empty(&free_rte_list)) {
569 rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
570 rte_list);
571 list_del(&rte->rte_list);
572 preallocated++;
573 } else {
574 rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
575 if (!rte)
576 return NULL;
577 }
578
579 memset(rte, 0, sizeof(struct iosapic_rte_info));
580 if (preallocated)
581 rte->flags |= RTE_PREALLOCATED;
582
583 return rte;
584}
585
586static inline int irq_is_shared (int irq) 540static inline int irq_is_shared (int irq)
587{ 541{
588 return (iosapic_intr_info[irq].count > 1); 542 return (iosapic_intr_info[irq].count > 1);
@@ -601,8 +555,7 @@ static int
601register_intr (unsigned int gsi, int irq, unsigned char delivery, 555register_intr (unsigned int gsi, int irq, unsigned char delivery,
602 unsigned long polarity, unsigned long trigger) 556 unsigned long polarity, unsigned long trigger)
603{ 557{
604 struct irq_desc *idesc; 558 struct irq_chip *chip, *irq_type;
605 struct irq_chip *irq_type;
606 int index; 559 int index;
607 struct iosapic_rte_info *rte; 560 struct iosapic_rte_info *rte;
608 561
@@ -615,7 +568,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
615 568
616 rte = find_rte(irq, gsi); 569 rte = find_rte(irq, gsi);
617 if (!rte) { 570 if (!rte) {
618 rte = iosapic_alloc_rte(); 571 rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
619 if (!rte) { 572 if (!rte) {
620 printk(KERN_WARNING "%s: cannot allocate memory\n", 573 printk(KERN_WARNING "%s: cannot allocate memory\n",
621 __func__); 574 __func__);
@@ -649,15 +602,18 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
649 602
650 irq_type = iosapic_get_irq_chip(trigger); 603 irq_type = iosapic_get_irq_chip(trigger);
651 604
652 idesc = irq_desc + irq; 605 chip = irq_get_chip(irq);
653 if (irq_type != NULL && idesc->chip != irq_type) { 606 if (irq_type != NULL && chip != irq_type) {
654 if (idesc->chip != &no_irq_chip) 607 if (chip != &no_irq_chip)
655 printk(KERN_WARNING 608 printk(KERN_WARNING
656 "%s: changing vector %d from %s to %s\n", 609 "%s: changing vector %d from %s to %s\n",
657 __func__, irq_to_vector(irq), 610 __func__, irq_to_vector(irq),
658 idesc->chip->name, irq_type->name); 611 chip->name, irq_type->name);
659 idesc->chip = irq_type; 612 chip = irq_type;
660 } 613 }
614 __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
615 handle_edge_irq : handle_level_irq,
616 NULL);
661 return 0; 617 return 0;
662} 618}
663 619
@@ -767,6 +723,7 @@ iosapic_register_intr (unsigned int gsi,
767 struct iosapic_rte_info *rte; 723 struct iosapic_rte_info *rte;
768 u32 low32; 724 u32 low32;
769 unsigned char dmode; 725 unsigned char dmode;
726 struct irq_desc *desc;
770 727
771 /* 728 /*
772 * If this GSI has already been registered (i.e., it's a 729 * If this GSI has already been registered (i.e., it's a
@@ -794,12 +751,13 @@ iosapic_register_intr (unsigned int gsi,
794 goto unlock_iosapic_lock; 751 goto unlock_iosapic_lock;
795 } 752 }
796 753
797 raw_spin_lock(&irq_desc[irq].lock); 754 desc = irq_to_desc(irq);
755 raw_spin_lock(&desc->lock);
798 dest = get_target_cpu(gsi, irq); 756 dest = get_target_cpu(gsi, irq);
799 dmode = choose_dmode(); 757 dmode = choose_dmode();
800 err = register_intr(gsi, irq, dmode, polarity, trigger); 758 err = register_intr(gsi, irq, dmode, polarity, trigger);
801 if (err < 0) { 759 if (err < 0) {
802 raw_spin_unlock(&irq_desc[irq].lock); 760 raw_spin_unlock(&desc->lock);
803 irq = err; 761 irq = err;
804 goto unlock_iosapic_lock; 762 goto unlock_iosapic_lock;
805 } 763 }
@@ -818,7 +776,7 @@ iosapic_register_intr (unsigned int gsi,
818 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 776 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
819 cpu_logical_id(dest), dest, irq_to_vector(irq)); 777 cpu_logical_id(dest), dest, irq_to_vector(irq));
820 778
821 raw_spin_unlock(&irq_desc[irq].lock); 779 raw_spin_unlock(&desc->lock);
822 unlock_iosapic_lock: 780 unlock_iosapic_lock:
823 spin_unlock_irqrestore(&iosapic_lock, flags); 781 spin_unlock_irqrestore(&iosapic_lock, flags);
824 return irq; 782 return irq;
@@ -829,7 +787,6 @@ iosapic_unregister_intr (unsigned int gsi)
829{ 787{
830 unsigned long flags; 788 unsigned long flags;
831 int irq, index; 789 int irq, index;
832 struct irq_desc *idesc;
833 u32 low32; 790 u32 low32;
834 unsigned long trigger, polarity; 791 unsigned long trigger, polarity;
835 unsigned int dest; 792 unsigned int dest;
@@ -859,7 +816,6 @@ iosapic_unregister_intr (unsigned int gsi)
859 if (--rte->refcnt > 0) 816 if (--rte->refcnt > 0)
860 goto out; 817 goto out;
861 818
862 idesc = irq_desc + irq;
863 rte->refcnt = NO_REF_RTE; 819 rte->refcnt = NO_REF_RTE;
864 820
865 /* Mask the interrupt */ 821 /* Mask the interrupt */
@@ -883,7 +839,7 @@ iosapic_unregister_intr (unsigned int gsi)
883 if (iosapic_intr_info[irq].count == 0) { 839 if (iosapic_intr_info[irq].count == 0) {
884#ifdef CONFIG_SMP 840#ifdef CONFIG_SMP
885 /* Clear affinity */ 841 /* Clear affinity */
886 cpumask_setall(idesc->affinity); 842 cpumask_setall(irq_get_irq_data(irq)->affinity);
887#endif 843#endif
888 /* Clear the interrupt information */ 844 /* Clear the interrupt information */
889 iosapic_intr_info[irq].dest = 0; 845 iosapic_intr_info[irq].dest = 0;
@@ -1161,10 +1117,3 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
1161 return; 1117 return;
1162} 1118}
1163#endif 1119#endif
1164
1165static int __init iosapic_enable_kmalloc (void)
1166{
1167 iosapic_kmalloc_ok = 1;
1168 return 0;
1169}
1170core_initcall (iosapic_enable_kmalloc);
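
The iosapic changes above follow the tree-wide genirq conversion: chip methods take a struct irq_data * instead of a bare irq number, the fields gain an irq_ prefix, and status bits are queried through irqd_*() helpers (irqd_is_setaffinity_pending() replacing the IRQ_MOVE_PENDING test, for instance). The shape of the conversion, on a hypothetical chip:

#include <linux/irq.h>

/* old: static void example_mask(unsigned int irq); */
static void example_mask(struct irq_data *data)
{
	unsigned int irq = data->irq;	/* same datum, new carrier */

	(void)irq;	/* a real chip would program the hardware here */
}

static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.irq_mask	= example_mask,	/* was .mask */
	.irq_unmask	= example_mask,	/* placeholder; was .unmask */
};
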
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 94ee9d067cbd..ad69606613eb 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -53,47 +53,9 @@ atomic_t irq_err_count;
53/* 53/*
54 * /proc/interrupts printing: 54 * /proc/interrupts printing:
55 */ 55 */
56 56int arch_show_interrupts(struct seq_file *p, int prec)
57int show_interrupts(struct seq_file *p, void *v)
58{ 57{
59 int i = *(loff_t *) v, j; 58 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
60 struct irqaction * action;
61 unsigned long flags;
62
63 if (i == 0) {
64 char cpuname[16];
65 seq_printf(p, " ");
66 for_each_online_cpu(j) {
67 snprintf(cpuname, 10, "CPU%d", j);
68 seq_printf(p, "%10s ", cpuname);
69 }
70 seq_putc(p, '\n');
71 }
72
73 if (i < NR_IRQS) {
74 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
75 action = irq_desc[i].action;
76 if (!action)
77 goto skip;
78 seq_printf(p, "%3d: ",i);
79#ifndef CONFIG_SMP
80 seq_printf(p, "%10u ", kstat_irqs(i));
81#else
82 for_each_online_cpu(j) {
83 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
84 }
85#endif
86 seq_printf(p, " %14s", irq_desc[i].chip->name);
87 seq_printf(p, " %s", action->name);
88
89 for (action=action->next; action; action = action->next)
90 seq_printf(p, ", %s", action->name);
91
92 seq_putc(p, '\n');
93skip:
94 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
95 } else if (i == NR_IRQS)
96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
97 return 0; 59 return 0;
98} 60}
99 61
@@ -103,7 +65,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
103void set_irq_affinity_info (unsigned int irq, int hwid, int redir) 65void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
104{ 66{
105 if (irq < NR_IRQS) { 67 if (irq < NR_IRQS) {
106 cpumask_copy(irq_desc[irq].affinity, 68 cpumask_copy(irq_get_irq_data(irq)->affinity,
107 cpumask_of(cpu_logical_id(hwid))); 69 cpumask_of(cpu_logical_id(hwid)));
108 irq_redir[irq] = (char) (redir & 0xff); 70 irq_redir[irq] = (char) (redir & 0xff);
109 } 71 }
@@ -130,13 +92,14 @@ unsigned int vectors_in_migration[NR_IRQS];
130 */ 92 */
131static void migrate_irqs(void) 93static void migrate_irqs(void)
132{ 94{
133 struct irq_desc *desc;
134 int irq, new_cpu; 95 int irq, new_cpu;
135 96
136 for (irq=0; irq < NR_IRQS; irq++) { 97 for (irq=0; irq < NR_IRQS; irq++) {
137 desc = irq_desc + irq; 98 struct irq_desc *desc = irq_to_desc(irq);
99 struct irq_data *data = irq_desc_get_irq_data(desc);
100 struct irq_chip *chip = irq_data_get_irq_chip(data);
138 101
139 if (desc->status == IRQ_DISABLED) 102 if (irqd_irq_disabled(data))
140 continue; 103 continue;
141 104
142 /* 105 /*
@@ -145,10 +108,10 @@ static void migrate_irqs(void)
145 * tell CPU not to respond to these local intr sources. 108 * tell CPU not to respond to these local intr sources.
146 * such as ITV,CPEI,MCA etc. 109 * such as ITV,CPEI,MCA etc.
147 */ 110 */
148 if (desc->status == IRQ_PER_CPU) 111 if (irqd_is_per_cpu(data))
149 continue; 112 continue;
150 113
151 if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask) 114 if (cpumask_any_and(data->affinity, cpu_online_mask)
152 >= nr_cpu_ids) { 115 >= nr_cpu_ids) {
153 /* 116 /*
154 * Save it for phase 2 processing 117 * Save it for phase 2 processing
@@ -160,16 +123,16 @@ static void migrate_irqs(void)
160 /* 123 /*
161 * All three are essential, currently WARN_ON.. maybe panic? 124
162 */ 125 */
163 if (desc->chip && desc->chip->disable && 126 if (chip && chip->irq_disable &&
164 desc->chip->enable && desc->chip->set_affinity) { 127 chip->irq_enable && chip->irq_set_affinity) {
165 desc->chip->disable(irq); 128 chip->irq_disable(data);
166 desc->chip->set_affinity(irq, 129 chip->irq_set_affinity(data,
167 cpumask_of(new_cpu)); 130 cpumask_of(new_cpu), false);
168 desc->chip->enable(irq); 131 chip->irq_enable(data);
169 } else { 132 } else {
170 WARN_ON((!(desc->chip) || !(desc->chip->disable) || 133 WARN_ON((!chip || !chip->irq_disable ||
171 !(desc->chip->enable) || 134 !chip->irq_enable ||
172 !(desc->chip->set_affinity))); 135 !chip->irq_set_affinity));
173 } 136 }
174 } 137 }
175 } 138 }
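
migrate_irqs() likewise stops indexing irq_desc[] and reading desc->status directly; state is now reached through the accessor chain irq_to_desc() -> irq_desc_get_irq_data() -> irq_data_get_irq_chip(), with irqd_*() predicates for the status bits. A condensed sketch of the same lookup, using a hypothetical helper:

#include <linux/irq.h>

static bool example_irq_is_movable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_data *data;
	struct irq_chip *chip;

	if (!desc)
		return false;
	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (irqd_irq_disabled(data) || irqd_is_per_cpu(data))
		return false;
	/* movable only if the chip implements the affinity hook */
	return chip && chip->irq_set_affinity;
}
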
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index f14c35f9b03a..782c3a357f24 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -30,6 +30,8 @@
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/irq.h> 31#include <linux/irq.h>
32#include <linux/ratelimit.h> 32#include <linux/ratelimit.h>
33#include <linux/acpi.h>
34#include <linux/sched.h>
33 35
34#include <asm/delay.h> 36#include <asm/delay.h>
35#include <asm/intrinsics.h> 37#include <asm/intrinsics.h>
@@ -342,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
342 if (irq < 0) 344 if (irq < 0)
343 continue; 345 continue;
344 346
345 desc = irq_desc + irq; 347 desc = irq_to_desc(irq);
346 cfg = irq_cfg + irq; 348 cfg = irq_cfg + irq;
347 raw_spin_lock(&desc->lock); 349 raw_spin_lock(&desc->lock);
348 if (!cfg->move_cleanup_count) 350 if (!cfg->move_cleanup_count)
@@ -495,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
495 smp_local_flush_tlb(); 497 smp_local_flush_tlb();
496 kstat_incr_irqs_this_cpu(irq, desc); 498 kstat_incr_irqs_this_cpu(irq, desc);
497 } else if (unlikely(IS_RESCHEDULE(vector))) { 499 } else if (unlikely(IS_RESCHEDULE(vector))) {
500 scheduler_ipi();
498 kstat_incr_irqs_this_cpu(irq, desc); 501 kstat_incr_irqs_this_cpu(irq, desc);
499 } else { 502 } else {
500 ia64_setreg(_IA64_REG_CR_TPR, vector); 503 ia64_setreg(_IA64_REG_CR_TPR, vector);
@@ -625,16 +628,15 @@ static struct irqaction tlb_irqaction = {
625void 628void
626ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) 629ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
627{ 630{
628 struct irq_desc *desc;
629 unsigned int irq; 631 unsigned int irq;
630 632
631 irq = vec; 633 irq = vec;
632 BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); 634 BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
633 desc = irq_desc + irq; 635 irq_set_status_flags(irq, IRQ_PER_CPU);
634 desc->status |= IRQ_PER_CPU; 636 irq_set_chip(irq, &irq_type_ia64_lsapic);
635 desc->chip = &irq_type_ia64_lsapic;
636 if (action) 637 if (action)
637 setup_irq(irq, action); 638 setup_irq(irq, action);
639 irq_set_handler(irq, handle_percpu_irq);
638} 640}
639 641
640void __init 642void __init
@@ -650,6 +652,9 @@ ia64_native_register_ipi(void)
650void __init 652void __init
651init_IRQ (void) 653init_IRQ (void)
652{ 654{
655#ifdef CONFIG_ACPI
656 acpi_boot_init();
657#endif
653 ia64_register_ipi(); 658 ia64_register_ipi();
654 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 659 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
655#ifdef CONFIG_SMP 660#ifdef CONFIG_SMP
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index fc1549d4564d..1b3a776e5161 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -15,31 +15,30 @@
15#include <linux/irq.h> 15#include <linux/irq.h>
16 16
17static unsigned int 17static unsigned int
18lsapic_noop_startup (unsigned int irq) 18lsapic_noop_startup (struct irq_data *data)
19{ 19{
20 return 0; 20 return 0;
21} 21}
22 22
23static void 23static void
24lsapic_noop (unsigned int irq) 24lsapic_noop (struct irq_data *data)
25{ 25{
26 /* nothing to do... */ 26 /* nothing to do... */
27} 27}
28 28
29static int lsapic_retrigger(unsigned int irq) 29static int lsapic_retrigger(struct irq_data *data)
30{ 30{
31 ia64_resend_irq(irq); 31 ia64_resend_irq(data->irq);
32 32
33 return 1; 33 return 1;
34} 34}
35 35
36struct irq_chip irq_type_ia64_lsapic = { 36struct irq_chip irq_type_ia64_lsapic = {
37 .name = "LSAPIC", 37 .name = "LSAPIC",
38 .startup = lsapic_noop_startup, 38 .irq_startup = lsapic_noop_startup,
39 .shutdown = lsapic_noop, 39 .irq_shutdown = lsapic_noop,
40 .enable = lsapic_noop, 40 .irq_enable = lsapic_noop,
41 .disable = lsapic_noop, 41 .irq_disable = lsapic_noop,
42 .ack = lsapic_noop, 42 .irq_ack = lsapic_noop,
43 .end = lsapic_noop, 43 .irq_retrigger = lsapic_retrigger,
44 .retrigger = lsapic_retrigger,
45}; 44};
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index a0220dc5ff42..84fb405eee87 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -582,6 +582,8 @@ out:
582 /* Get the CPE error record and log it */ 582 /* Get the CPE error record and log it */
583 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); 583 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
584 584
585 local_irq_disable();
586
585 return IRQ_HANDLED; 587 return IRQ_HANDLED;
586} 588}
587 589
@@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data)
1859 data = mca_bootmem(); 1861 data = mca_bootmem();
1860 first_time = 0; 1862 first_time = 0;
1861 } else 1863 } else
1862 data = __get_free_pages(GFP_KERNEL, get_order(sz)); 1864 data = (void *)__get_free_pages(GFP_KERNEL,
1865 get_order(sz));
1863 if (!data) 1866 if (!data)
1864 panic("Could not allocate MCA memory for cpu %d\n", 1867 panic("Could not allocate MCA memory for cpu %d\n",
1865 cpu); 1868 cpu);
@@ -2055,25 +2058,6 @@ ia64_mca_init(void)
2055 2058
2056 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); 2059 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
2057 2060
2058 /*
2059 * Configure the CMCI/P vector and handler. Interrupts for CMC are
2060 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
2061 */
2062 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
2063 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
2064 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
2065
2066 /* Setup the MCA rendezvous interrupt vector */
2067 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
2068
2069 /* Setup the MCA wakeup interrupt vector */
2070 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
2071
2072#ifdef CONFIG_ACPI
2073 /* Setup the CPEI/P handler */
2074 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2075#endif
2076
2077 /* Initialize the areas set aside by the OS to buffer the 2061 /* Initialize the areas set aside by the OS to buffer the
2078 * platform/processor error states for MCA/INIT/CMC 2062 * platform/processor error states for MCA/INIT/CMC
2079 * handling. 2063 * handling.
@@ -2103,6 +2087,25 @@ ia64_mca_late_init(void)
2103 if (!mca_init) 2087 if (!mca_init)
2104 return 0; 2088 return 0;
2105 2089
2090 /*
2091 * Configure the CMCI/P vector and handler. Interrupts for CMC are
2092 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
2093 */
2094 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
2095 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
2096 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
2097
2098 /* Setup the MCA rendezvous interrupt vector */
2099 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
2100
2101 /* Setup the MCA wakeup interrupt vector */
2102 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
2103
2104#ifdef CONFIG_ACPI
2105 /* Setup the CPEI/P handler */
2106 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2107#endif
2108
2106 register_hotcpu_notifier(&mca_cpu_notifier); 2109 register_hotcpu_notifier(&mca_cpu_notifier);
2107 2110
2108 /* Setup the CMCI/P vector and handler */ 2111 /* Setup the CMCI/P vector and handler */
@@ -2122,7 +2125,6 @@ ia64_mca_late_init(void)
2122 cpe_poll_timer.function = ia64_mca_cpe_poll; 2125 cpe_poll_timer.function = ia64_mca_cpe_poll;
2123 2126
2124 { 2127 {
2125 struct irq_desc *desc;
2126 unsigned int irq; 2128 unsigned int irq;
2127 2129
2128 if (cpe_vector >= 0) { 2130 if (cpe_vector >= 0) {
@@ -2130,8 +2132,7 @@ ia64_mca_late_init(void)
2130 irq = local_vector_to_irq(cpe_vector); 2132 irq = local_vector_to_irq(cpe_vector);
2131 if (irq > 0) { 2133 if (irq > 0) {
2132 cpe_poll_enabled = 0; 2134 cpe_poll_enabled = 0;
2133 desc = irq_desc + irq; 2135 irq_set_status_flags(irq, IRQ_PER_CPU);
2134 desc->status |= IRQ_PER_CPU;
2135 setup_irq(irq, &mca_cpe_irqaction); 2136 setup_irq(irq, &mca_cpe_irqaction);
2136 ia64_cpe_irq = irq; 2137 ia64_cpe_irq = irq;
2137 ia64_mca_register_cpev(cpe_vector); 2138 ia64_mca_register_cpev(cpe_vector);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 4a746ea838ff..009df5434a7a 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -12,12 +12,13 @@
12static struct irq_chip ia64_msi_chip; 12static struct irq_chip ia64_msi_chip;
13 13
14#ifdef CONFIG_SMP 14#ifdef CONFIG_SMP
15static int ia64_set_msi_irq_affinity(unsigned int irq, 15static int ia64_set_msi_irq_affinity(struct irq_data *idata,
16 const cpumask_t *cpu_mask) 16 const cpumask_t *cpu_mask, bool force)
17{ 17{
18 struct msi_msg msg; 18 struct msi_msg msg;
19 u32 addr, data; 19 u32 addr, data;
20 int cpu = first_cpu(*cpu_mask); 20 int cpu = first_cpu(*cpu_mask);
21 unsigned int irq = idata->irq;
21 22
22 if (!cpu_online(cpu)) 23 if (!cpu_online(cpu))
23 return -1; 24 return -1;
@@ -38,7 +39,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
38 msg.data = data; 39 msg.data = data;
39 40
40 write_msi_msg(irq, &msg); 41 write_msi_msg(irq, &msg);
41 cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); 42 cpumask_copy(idata->affinity, cpumask_of(cpu));
42 43
43 return 0; 44 return 0;
44} 45}
@@ -55,7 +56,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
55 if (irq < 0) 56 if (irq < 0)
56 return irq; 57 return irq;
57 58
58 set_irq_msi(irq, desc); 59 irq_set_msi_desc(irq, desc);
59 cpus_and(mask, irq_to_domain(irq), cpu_online_map); 60 cpus_and(mask, irq_to_domain(irq), cpu_online_map);
60 dest_phys_id = cpu_physical_id(first_cpu(mask)); 61 dest_phys_id = cpu_physical_id(first_cpu(mask));
61 vector = irq_to_vector(irq); 62 vector = irq_to_vector(irq);
@@ -74,7 +75,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
74 MSI_DATA_VECTOR(vector); 75 MSI_DATA_VECTOR(vector);
75 76
76 write_msi_msg(irq, &msg); 77 write_msi_msg(irq, &msg);
77 set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); 78 irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
78 79
79 return 0; 80 return 0;
80} 81}
@@ -84,16 +85,16 @@ void ia64_teardown_msi_irq(unsigned int irq)
84 destroy_irq(irq); 85 destroy_irq(irq);
85} 86}
86 87
87static void ia64_ack_msi_irq(unsigned int irq) 88static void ia64_ack_msi_irq(struct irq_data *data)
88{ 89{
89 irq_complete_move(irq); 90 irq_complete_move(data->irq);
90 move_native_irq(irq); 91 irq_move_irq(data);
91 ia64_eoi(); 92 ia64_eoi();
92} 93}
93 94
94static int ia64_msi_retrigger_irq(unsigned int irq) 95static int ia64_msi_retrigger_irq(struct irq_data *data)
95{ 96{
96 unsigned int vector = irq_to_vector(irq); 97 unsigned int vector = irq_to_vector(data->irq);
97 ia64_resend_irq(vector); 98 ia64_resend_irq(vector);
98 99
99 return 1; 100 return 1;
@@ -103,14 +104,14 @@ static int ia64_msi_retrigger_irq(unsigned int irq)
103 * Generic ops used on most IA64 platforms. 104 * Generic ops used on most IA64 platforms.
104 */ 105 */
105static struct irq_chip ia64_msi_chip = { 106static struct irq_chip ia64_msi_chip = {
106 .name = "PCI-MSI", 107 .name = "PCI-MSI",
107 .mask = mask_msi_irq, 108 .irq_mask = mask_msi_irq,
108 .unmask = unmask_msi_irq, 109 .irq_unmask = unmask_msi_irq,
109 .ack = ia64_ack_msi_irq, 110 .irq_ack = ia64_ack_msi_irq,
110#ifdef CONFIG_SMP 111#ifdef CONFIG_SMP
111 .set_affinity = ia64_set_msi_irq_affinity, 112 .irq_set_affinity = ia64_set_msi_irq_affinity,
112#endif 113#endif
113 .retrigger = ia64_msi_retrigger_irq, 114 .irq_retrigger = ia64_msi_retrigger_irq,
114}; 115};
115 116
116 117
@@ -132,8 +133,10 @@ void arch_teardown_msi_irq(unsigned int irq)
132 133
133#ifdef CONFIG_DMAR 134#ifdef CONFIG_DMAR
134#ifdef CONFIG_SMP 135#ifdef CONFIG_SMP
135static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 136static int dmar_msi_set_affinity(struct irq_data *data,
137 const struct cpumask *mask, bool force)
136{ 138{
139 unsigned int irq = data->irq;
137 struct irq_cfg *cfg = irq_cfg + irq; 140 struct irq_cfg *cfg = irq_cfg + irq;
138 struct msi_msg msg; 141 struct msi_msg msg;
139 int cpu = cpumask_first(mask); 142 int cpu = cpumask_first(mask);
@@ -152,7 +155,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
152 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); 155 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
153 156
154 dmar_msi_write(irq, &msg); 157 dmar_msi_write(irq, &msg);
155 cpumask_copy(irq_desc[irq].affinity, mask); 158 cpumask_copy(data->affinity, mask);
156 159
157 return 0; 160 return 0;
158} 161}
@@ -160,13 +163,13 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
160 163
161static struct irq_chip dmar_msi_type = { 164static struct irq_chip dmar_msi_type = {
162 .name = "DMAR_MSI", 165 .name = "DMAR_MSI",
163 .unmask = dmar_msi_unmask, 166 .irq_unmask = dmar_msi_unmask,
164 .mask = dmar_msi_mask, 167 .irq_mask = dmar_msi_mask,
165 .ack = ia64_ack_msi_irq, 168 .irq_ack = ia64_ack_msi_irq,
166#ifdef CONFIG_SMP 169#ifdef CONFIG_SMP
167 .set_affinity = dmar_msi_set_affinity, 170 .irq_set_affinity = dmar_msi_set_affinity,
168#endif 171#endif
169 .retrigger = ia64_msi_retrigger_irq, 172 .irq_retrigger = ia64_msi_retrigger_irq,
170}; 173};
171 174
172static int 175static int
@@ -203,8 +206,8 @@ int arch_setup_dmar_msi(unsigned int irq)
203 if (ret < 0) 206 if (ret < 0)
204 return ret; 207 return ret;
205 dmar_msi_write(irq, &msg); 208 dmar_msi_write(irq, &msg);
206 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 209 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
207 "edge"); 210 "edge");
208 return 0; 211 return 0;
209} 212}
210#endif /* CONFIG_DMAR */ 213#endif /* CONFIG_DMAR */
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index fdf6f9d013e5..77597e5ea60a 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -434,7 +434,7 @@ register_info(char *page)
434 unsigned long phys_stacked; 434 unsigned long phys_stacked;
435 pal_hints_u_t hints; 435 pal_hints_u_t hints;
436 unsigned long iregs, dregs; 436 unsigned long iregs, dregs;
437 char *info_type[]={ 437 static const char * const info_type[] = {
438 "Implemented AR(s)", 438 "Implemented AR(s)",
439 "AR(s) with read side-effects", 439 "AR(s) with read side-effects",
440 "Implemented CR(s)", 440 "Implemented CR(s)",
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index cce050e85c73..89accc626b86 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -617,17 +617,19 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
617 return get_unmapped_area(file, addr, len, pgoff, flags); 617 return get_unmapped_area(file, addr, len, pgoff, flags);
618} 618}
619 619
620/* forward declaration */
621static const struct dentry_operations pfmfs_dentry_operations;
620 622
621static int 623static struct dentry *
622pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, 624pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
623 struct vfsmount *mnt)
624{ 625{
625 return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt); 626 return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
627 PFMFS_MAGIC);
626} 628}
627 629
628static struct file_system_type pfm_fs_type = { 630static struct file_system_type pfm_fs_type = {
629 .name = "pfmfs", 631 .name = "pfmfs",
630 .get_sb = pfmfs_get_sb, 632 .mount = pfmfs_mount,
631 .kill_sb = kill_anon_super, 633 .kill_sb = kill_anon_super,
632}; 634};
633 635
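
pfmfs is updated here for the VFS mount API change: .get_sb, which returned an int and filled in a vfsmount out-parameter, is replaced by .mount, which returns the root dentry; mount_pseudo() also takes the dentry_operations up front, which is why the per-dentry d_op assignment disappears from pfm_alloc_file() further down. The same conversion on a hypothetical pseudo filesystem (EXAMPLE_MAGIC and the example_* names are assumed):

#include <linux/fs.h>

#define EXAMPLE_MAGIC	0x4578616d	/* assumed value for the sketch */

static const struct dentry_operations example_dops;

static struct dentry *example_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name, void *data)
{
	/* dentry_operations are wired in once here, not per dentry */
	return mount_pseudo(fs_type, "example:", NULL, &example_dops,
			    EXAMPLE_MAGIC);
}

static struct file_system_type example_fs_type = {
	.name		= "examplefs",
	.mount		= example_mount,	/* was .get_sb */
	.kill_sb	= kill_anon_super,
};
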
@@ -830,10 +832,9 @@ pfm_rvmalloc(unsigned long size)
830 unsigned long addr; 832 unsigned long addr;
831 833
832 size = PAGE_ALIGN(size); 834 size = PAGE_ALIGN(size);
833 mem = vmalloc(size); 835 mem = vzalloc(size);
834 if (mem) { 836 if (mem) {
835 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem); 837 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
836 memset(mem, 0, size);
837 addr = (unsigned long)mem; 838 addr = (unsigned long)mem;
838 while (size > 0) { 839 while (size > 0) {
839 pfm_reserve_page(addr); 840 pfm_reserve_page(addr);
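
vzalloc() replaces the open-coded vmalloc()+memset() pair removed above; behaviour is identical, the allocation just comes back pre-zeroed. The equivalence, spelled out:

#include <linux/vmalloc.h>
#include <linux/string.h>

static void *alloc_zeroed_old_style(unsigned long size)
{
	void *mem = vmalloc(size);

	if (mem)
		memset(mem, 0, size);
	return mem;		/* same result as vzalloc(size) */
}
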
@@ -1543,7 +1544,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1543 * any operations on the root directory. However, we need a non-trivial 1544 * any operations on the root directory. However, we need a non-trivial
1544 * d_name - pfm: will go nicely and kill the special-casing in procfs. 1545 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1545 */ 1546 */
1546static struct vfsmount *pfmfs_mnt; 1547static struct vfsmount *pfmfs_mnt __read_mostly;
1547 1548
1548static int __init 1549static int __init
1549init_pfm_fs(void) 1550init_pfm_fs(void)
@@ -1573,7 +1574,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1573 return -EINVAL; 1574 return -EINVAL;
1574 } 1575 }
1575 1576
1576 ctx = (pfm_context_t *)filp->private_data; 1577 ctx = filp->private_data;
1577 if (ctx == NULL) { 1578 if (ctx == NULL) {
1578 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); 1579 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1579 return -EINVAL; 1580 return -EINVAL;
@@ -1673,7 +1674,7 @@ pfm_poll(struct file *filp, poll_table * wait)
1673 return 0; 1674 return 0;
1674 } 1675 }
1675 1676
1676 ctx = (pfm_context_t *)filp->private_data; 1677 ctx = filp->private_data;
1677 if (ctx == NULL) { 1678 if (ctx == NULL) {
1678 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); 1679 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1679 return 0; 1680 return 0;
@@ -1733,7 +1734,7 @@ pfm_fasync(int fd, struct file *filp, int on)
1733 return -EBADF; 1734 return -EBADF;
1734 } 1735 }
1735 1736
1736 ctx = (pfm_context_t *)filp->private_data; 1737 ctx = filp->private_data;
1737 if (ctx == NULL) { 1738 if (ctx == NULL) {
1738 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); 1739 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1739 return -EBADF; 1740 return -EBADF;
@@ -1841,7 +1842,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
1841 return -EBADF; 1842 return -EBADF;
1842 } 1843 }
1843 1844
1844 ctx = (pfm_context_t *)filp->private_data; 1845 ctx = filp->private_data;
1845 if (ctx == NULL) { 1846 if (ctx == NULL) {
1846 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); 1847 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1847 return -EBADF; 1848 return -EBADF;
@@ -1984,7 +1985,7 @@ pfm_close(struct inode *inode, struct file *filp)
1984 return -EBADF; 1985 return -EBADF;
1985 } 1986 }
1986 1987
1987 ctx = (pfm_context_t *)filp->private_data; 1988 ctx = filp->private_data;
1988 if (ctx == NULL) { 1989 if (ctx == NULL) {
1989 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); 1990 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1990 return -EBADF; 1991 return -EBADF;
@@ -2186,7 +2187,7 @@ static const struct file_operations pfm_file_ops = {
2186}; 2187};
2187 2188
2188static int 2189static int
2189pfmfs_delete_dentry(struct dentry *dentry) 2190pfmfs_delete_dentry(const struct dentry *dentry)
2190{ 2191{
2191 return 1; 2192 return 1;
2192} 2193}
@@ -2234,7 +2235,6 @@ pfm_alloc_file(pfm_context_t *ctx)
2234 } 2235 }
2235 path.mnt = mntget(pfmfs_mnt); 2236 path.mnt = mntget(pfmfs_mnt);
2236 2237
2237 path.dentry->d_op = &pfmfs_dentry_operations;
2238 d_add(path.dentry, inode); 2238 d_add(path.dentry, inode);
2239 2239
2240 file = alloc_file(&path, FMODE_READ, &pfm_file_ops); 2240 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
@@ -4907,7 +4907,7 @@ restart_args:
 		goto error_args;
 	}
 
-	ctx = (pfm_context_t *)file->private_data;
+	ctx = file->private_data;
 	if (unlikely(ctx == NULL)) {
 		DPRINT(("no context for fd %d\n", fd));
 		goto error_args;
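
An aside on the change repeated through these perfmon hunks: private_data is a void *, and C converts void * to any object pointer type implicitly, so the deleted casts were pure noise. A minimal user-space sketch of the idiom (the struct names here are invented for illustration):

#include <stdio.h>

struct ctx { int id; };                    /* stand-in for pfm_context_t */
struct file_like { void *private_data; }; /* stand-in for struct file */

int main(void)
{
	struct ctx c = { .id = 42 };
	struct file_like f = { .private_data = &c };
	struct ctx *p = f.private_data;    /* no cast needed from void * */

	printf("ctx id = %d\n", p->id);
	return 0;
}
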
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 5f637bbfcccd..30c644ea44c9 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -150,7 +150,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
  *      current = task running at the time of the overflow.
  *
  *      per-task mode:
- *      	- this is ususally the task being monitored.
+ *      	- this is usually the task being monitored.
  *      	  Under certain conditions, it might be a different task
  *
  *      system-wide:
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 16f1c7b04c69..6d33c5cc94f0 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -53,12 +53,8 @@
 
 void (*ia64_mark_idle)(int);
 
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
 void (*pm_idle) (void);
 EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7c7909f9bc93..8848f43d819e 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1177,7 +1177,8 @@ ptrace_disable (struct task_struct *child)
 }
 
 long
-arch_ptrace (struct task_struct *child, long request, long addr, long data)
+arch_ptrace (struct task_struct *child, long request,
+	     unsigned long addr, unsigned long data)
 {
 	switch (request) {
 	case PTRACE_PEEKTEXT:
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index aa8b5fa1a8de..79802e540e53 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -354,6 +354,7 @@ retry:
 static const struct file_operations salinfo_event_fops = {
 	.open	= salinfo_event_open,
 	.read	= salinfo_event_read,
+	.llseek	= noop_llseek,
 };
 
 static int
@@ -571,6 +572,7 @@ static const struct file_operations salinfo_data_fops = {
 	.release = salinfo_log_release,
 	.read	 = salinfo_log_read,
 	.write	 = salinfo_log_write,
+	.llseek	 = default_llseek,
 };
 
 static int __cpuinit
@@ -642,7 +644,7 @@ salinfo_init(void)
 	for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
 		data = salinfo_data + i;
 		data->type = i;
-		init_MUTEX(&data->mutex);
+		sema_init(&data->mutex, 1);
 		dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
 		if (!dir)
 			continue;
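
For context on the salinfo_init() hunk: init_MUTEX(&sem) was a legacy wrapper around initializing a counting semaphore to one and was being removed kernel-wide, hence the explicit sema_init(&sem, 1). A short sketch of the resulting pattern, assuming a kernel build environment (the demo_* names are invented):

#include <linux/semaphore.h>
#include <linux/errno.h>

static struct semaphore demo_sem;

static void demo_setup(void)
{
	sema_init(&demo_sem, 1);	/* count of 1 == the old init_MUTEX() */
}

static int demo_enter(void)
{
	if (down_interruptible(&demo_sem))
		return -EINTR;		/* interrupted while sleeping */
	/* ... serialized work ... */
	up(&demo_sem);
	return 0;
}
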
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 8fb958abf8d0..5e2c72498c51 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -479,25 +479,7 @@ static __init int setup_nomca(char *s)
 }
 early_param("nomca", setup_nomca);
 
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
 #ifdef CONFIG_CRASH_DUMP
-/* elfcorehdr= specifies the location of elf core header
- * stored by the crashed kernel.
- */
-static int __init parse_elfcorehdr(char *arg)
-{
-	if (!arg)
-		return -EINVAL;
-
-	elfcorehdr_addr = memparse(arg, &arg);
-	return 0;
-}
-early_param("elfcorehdr", parse_elfcorehdr);
-
 int __init reserve_elfcorehdr(u64 *start, u64 *end)
 {
 	u64 length;
@@ -594,10 +576,6 @@ setup_arch (char **cmdline_p)
 	cpu_init();	/* initialize the bootstrap CPU */
 	mmu_context_init();	/* initialize context_id bitmap */
 
-#ifdef CONFIG_ACPI
-	acpi_boot_init();
-#endif
-
 	paravirt_banner();
 	paravirt_arch_setup_console(cmdline_p);
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index dabeefe21134..be450a3e9871 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -293,6 +293,7 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	cpumask_var_t cpus;
 	preempt_disable();
 	/* this happens for the common case of a single-threaded fork(): */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -301,9 +302,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		preempt_enable();
 		return;
 	}
-
-	smp_call_function_many(mm_cpumask(mm),
-		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
+			mm, 1);
+	} else {
+		cpumask_copy(cpus, mm_cpumask(mm));
+		smp_call_function_many(cpus,
+			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+		free_cpumask_var(cpus);
+	}
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);
 	local_irq_enable();
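
The smp_flush_tlb_mm() change above copies mm_cpumask(mm) into a private cpumask before calling smp_call_function_many(), since the source mask can change underneath the IPI, and falls back to a global smp_call_function() if the GFP_ATOMIC allocation fails. The same shape in isolation, assuming a kernel context (the demo_* names are invented):

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/gfp.h>

static void demo_ipi_func(void *info)
{
	/* per-cpu work goes here */
}

static void demo_run_on_mask(const struct cpumask *src, void *info)
{
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
		/* out of memory: over-notify every other cpu instead */
		smp_call_function(demo_ipi_func, info, 1);
	} else {
		cpumask_copy(cpus, src);	/* stable private copy */
		smp_call_function_many(cpus, demo_ipi_func, info, 1);
		free_cpumask_var(cpus);
	}
}
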
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d003b502a432..14ec641003da 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -677,7 +677,7 @@ extern void fixup_irqs(void);
 int migrate_platform_irqs(unsigned int cpu)
 {
 	int new_cpei_cpu;
-	struct irq_desc *desc = NULL;
+	struct irq_data *data = NULL;
 	const struct cpumask *mask;
 	int retval = 0;
 
@@ -693,20 +693,20 @@ int migrate_platform_irqs(unsigned int cpu)
 		new_cpei_cpu = any_online_cpu(cpu_online_map);
 		mask = cpumask_of(new_cpei_cpu);
 		set_cpei_target_cpu(new_cpei_cpu);
-		desc = irq_desc + ia64_cpe_irq;
+		data = irq_get_irq_data(ia64_cpe_irq);
 		/*
 		 * Switch for now, immediately, we need to do fake intr
 		 * as other interrupts, but need to study CPEI behaviour with
 		 * polling before making changes.
 		 */
-		if (desc) {
-			desc->chip->disable(ia64_cpe_irq);
-			desc->chip->set_affinity(ia64_cpe_irq, mask);
-			desc->chip->enable(ia64_cpe_irq);
-			printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
+		if (data && data->chip) {
+			data->chip->irq_disable(data);
+			data->chip->irq_set_affinity(data, mask, false);
+			data->chip->irq_enable(data);
+			printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
 		}
 	}
-	if (!desc) {
+	if (!data) {
 		printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
 		retval = -EBUSY;
 	}
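
This hunk is part of the genirq conversion: chip callbacks now receive a struct irq_data * rather than a bare irq number, and are spelled irq_disable/irq_set_affinity/irq_enable. The calling pattern in isolation, assuming a kernel context (demo_retarget is an invented name):

#include <linux/irq.h>

static void demo_retarget(unsigned int irq, const struct cpumask *mask)
{
	struct irq_data *data = irq_get_irq_data(irq);

	if (!data || !data->chip)
		return;
	data->chip->irq_disable(data);
	data->chip->irq_set_affinity(data, mask, false);
	data->chip->irq_enable(data);
}
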
diff --git a/arch/ia64/kernel/stacktrace.c b/arch/ia64/kernel/stacktrace.c
new file mode 100644
index 000000000000..5af2783a87f4
--- /dev/null
+++ b/arch/ia64/kernel/stacktrace.c
@@ -0,0 +1,39 @@
+/*
+ * arch/ia64/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/module.h>
+
+static void
+ia64_do_save_stack(struct unw_frame_info *info, void *arg)
+{
+	struct stack_trace *trace = arg;
+	unsigned long ip;
+	int skip = trace->skip;
+
+	trace->nr_entries = 0;
+	do {
+		unw_get_ip(info, &ip);
+		if (ip == 0)
+			break;
+		if (skip == 0) {
+			trace->entries[trace->nr_entries++] = ip;
+			if (trace->nr_entries == trace->max_entries)
+				break;
+		} else
+			skip--;
+	} while (unw_unwind(info) >= 0);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+	unw_init_running(ia64_do_save_stack, trace);
+}
+EXPORT_SYMBOL(save_stack_trace);
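
The new file above implements the arch half of the generic stacktrace API; a caller drives it roughly like this (a sketch, kernel context assumed, buffer size and skip depth arbitrary):

#include <linux/stacktrace.h>

static void demo_dump_here(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= 16,
		.skip		= 2,	/* drop the innermost frames */
	};

	save_stack_trace(&trace);	/* fills entries[0..nr_entries-1] */
	print_stack_trace(&trace, 0);
}
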
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ed6f22eb5b12..85118dfe9bb5 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -36,7 +36,7 @@
 static cycle_t itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data = {
-	.lock = SEQLOCK_UNLOCKED,
+	.lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
 };
 
 struct itc_jitter_data_t itc_jitter_data;
@@ -73,8 +73,6 @@ static struct clocksource clocksource_itc = {
 	.rating		= 350,
 	.read		= itc_get_cycles,
 	.mask		= CLOCKSOURCE_MASK(64),
-	.mult		= 0, /*to be calculated*/
-	.shift		= 16,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 #ifdef	CONFIG_PARAVIRT
 	.resume		= paravirt_clocksource_resume,
@@ -168,7 +166,7 @@ timer_interrupt (int irq, void *dev_id)
 {
 	unsigned long new_itm;
 
-	if (unlikely(cpu_is_offline(smp_processor_id()))) {
+	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}
 
@@ -190,19 +188,10 @@ timer_interrupt (int irq, void *dev_id)
 
 		new_itm += local_cpu_data->itm_delta;
 
-		if (smp_processor_id() == time_keeper_id) {
-			/*
-			 * Here we are in the timer irq handler. We have irqs locally
-			 * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid to SMP race by acquiring the
-			 * xtime_lock.
-			 */
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			local_cpu_data->itm_next = new_itm;
-			write_sequnlock(&xtime_lock);
-		} else
-			local_cpu_data->itm_next = new_itm;
+		if (smp_processor_id() == time_keeper_id)
+			xtime_update(1);
+
+		local_cpu_data->itm_next = new_itm;
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
@@ -222,7 +211,7 @@ skip_process_time_accounting:
 			 * comfort, we increase the safety margin by
 			 * intentionally dropping the next tick(s).  We do NOT
 			 * update itm.next because that would force us to call
-			 * do_timer() which in turn would let our clock run
+			 * xtime_update() which in turn would let our clock run
 			 * too fast (with the potentially devastating effect
 			 * of losing monotony of time).
 			 */
@@ -374,11 +363,8 @@ ia64_init_itm (void)
 	ia64_cpu_local_tick();
 
 	if (!itc_clocksource) {
-		/* Sort out mult/shift values: */
-		clocksource_itc.mult =
-			clocksource_hz2mult(local_cpu_data->itc_freq,
-						clocksource_itc.shift);
-		clocksource_register(&clocksource_itc);
+		clocksource_register_hz(&clocksource_itc,
+						local_cpu_data->itc_freq);
 		itc_clocksource = &clocksource_itc;
 	}
 }
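
This hunk (and the matching sn2 timer change later in the series) drops the hand-maintained mult/shift fields in favor of clocksource_register_hz(), which derives them from the counter frequency at registration time. Reduced to its skeleton, the new registration looks like this (a sketch, kernel context assumed; the demo_* names and frequency parameter are invented):

#include <linux/clocksource.h>

static cycle_t demo_read(struct clocksource *cs)
{
	return 0;	/* a real driver reads its hardware counter here */
}

static struct clocksource demo_cs = {
	.name	= "demo",
	.rating	= 100,
	.read	= demo_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int demo_cs_init(unsigned long freq_hz)
{
	/* mult and shift are derived internally from freq_hz */
	return clocksource_register_hz(&demo_cs, freq_hz);
}
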
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 0baa1bbb65fe..0e0e0cc9e392 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -43,7 +43,7 @@ int __ref arch_register_cpu(int num)
 {
 #ifdef CONFIG_ACPI
 	/*
-	 * If CPEI can be re-targetted or if this is not
+	 * If CPEI can be re-targeted or if this is not
 	 * CPEI target, then it is hotpluggable
 	 */
 	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index b6c0e63a0bf6..fed6afa2e8a9 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1204,10 +1204,10 @@ desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word
 static inline unw_hash_index_t
 hash (unsigned long ip)
 {
-#	define hashmagic	0x9e3779b97f4a7c16UL	/* based on (sqrt(5)/2-1)*2^64 */
+	/* magic number = ((sqrt(5)-1)/2)*2^64 */
+	static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
 
-	return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
-#undef hashmagic
+	return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
 }
 
 static inline long
1212 1212
1213static inline long 1213static inline long
@@ -1531,7 +1531,7 @@ build_script (struct unw_frame_info *info)
 	struct unw_labeled_state *ls, *next;
 	unsigned long ip = info->ip;
 	struct unw_state_record sr;
-	struct unw_table *table;
+	struct unw_table *table, *prev;
 	struct unw_reg_info *r;
 	struct unw_insn insn;
 	u8 *dp, *desc_end;
@@ -1560,11 +1560,26 @@ build_script (struct unw_frame_info *info)
 
 	STAT(parse_start = ia64_get_itc());
 
+	prev = NULL;
 	for (table = unw.tables; table; table = table->next) {
 		if (ip >= table->start && ip < table->end) {
+			/*
+			 * Leave the kernel unwind table at the very front,
+			 * lest moving it breaks some assumption elsewhere.
+			 * Otherwise, move the matching table to the second
+			 * position in the list so that traversals can benefit
+			 * from commonality in backtrace paths.
+			 */
+			if (prev && prev != unw.tables) {
+				/* unw is safe - we're already spinlocked */
+				prev->next = table->next;
+				table->next = unw.tables->next;
+				unw.tables->next = table;
+			}
 			e = lookup(table, ip - table->segment_base);
 			break;
 		}
+		prev = table;
 	}
 	if (!e) {
 		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
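
The build_script() hunk adds a guarded move-to-front heuristic: the matching unwind table is promoted to the second list position, leaving the kernel table pinned at the head. As a plain list operation it is just three pointer swaps (a self-contained user-space sketch; the node layout is invented):

#include <assert.h>

struct node { struct node *next; };

/* Move *table* to the slot right after *head*.  *prev* is table's
 * current predecessor; the caller guarantees prev != NULL, prev != head. */
static void promote_to_second(struct node *head, struct node *prev,
			      struct node *table)
{
	prev->next = table->next;	/* unlink from current position */
	table->next = head->next;	/* splice in behind the head */
	head->next = table;
}

int main(void)
{
	struct node d = { 0 }, c = { &d }, b = { &c }, a = { &b };

	promote_to_second(&a, &b, &c);	/* a,b,c,d becomes a,c,b,d */
	assert(a.next == &c && c.next == &b && b.next == &d);
	return 0;
}
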
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044dcb1c..53c0ba004e9e 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
 
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
-	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
 	__phys_per_cpu_start = __per_cpu_load;
 	/*
 	 * ensure percpu data fits
@@ -209,6 +209,7 @@ SECTIONS {
 	data : {
 	} :data
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
+		_sdata = .;
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 1089b3e918ac..db3d7c5d1071 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -45,8 +45,8 @@ FORCE : $(obj)/$(offsets-file)
 # Makefile for Kernel-based Virtual Machine module
 #
 
-EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
-EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
+ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
+asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
 		coalesced_mmio.o irq_comm.o assigned-dev.o)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f56a6316e134..8213efe1998c 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -662,6 +662,7 @@ again:
 		goto vcpu_run_fail;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	vcpu->mode = IN_GUEST_MODE;
 	kvm_guest_enter();
 
 	/*
@@ -683,6 +684,7 @@ again:
 	 */
 	barrier();
 	kvm_guest_exit();
+	vcpu->mode = OUTSIDE_GUEST_MODE;
 	preempt_enable();
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -749,7 +751,7 @@ out:
 	return r;
 }
 
-static struct kvm *kvm_alloc_kvm(void)
+struct kvm *kvm_arch_alloc_vm(void)
 {
 
 	struct kvm *kvm;
@@ -760,7 +762,7 @@ static struct kvm *kvm_alloc_kvm(void)
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
 	kvm = (struct kvm *)(vm_base +
@@ -806,10 +808,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 #define GUEST_PHYSICAL_RR4	0x2739
 #define VMM_INIT_RR		0x1660
 
-static void kvm_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
 	BUG_ON(!kvm);
 
+	kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
@@ -823,21 +827,8 @@ static void kvm_init_vm(struct kvm *kvm)
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-}
-
-struct kvm *kvm_arch_create_vm(void)
-{
-	struct kvm *kvm = kvm_alloc_kvm();
-
-	if (IS_ERR(kvm))
-		return ERR_PTR(-ENOMEM);
-
-	kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
-	kvm_init_vm(kvm);
-
-	return kvm;
 
+	return 0;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
@@ -962,7 +953,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			kvm_ioapic_destroy(kvm);
+			mutex_unlock(&kvm->slots_lock);
 			goto out;
 		}
 		break;
@@ -1357,7 +1350,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 }
 
-static void free_kvm(struct kvm *kvm)
+void kvm_arch_free_vm(struct kvm *kvm)
 {
 	unsigned long vm_base = kvm->arch.vm_base;
 
@@ -1399,9 +1392,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 #endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	free_kvm(kvm);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
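
The kvm-ia64.c rename set tracks a refactor in common KVM code: kvm_arch_create_vm() is gone, and the generic kvm_create_vm() instead calls per-arch hooks for allocation, init, and teardown. The rough shape of the common-code caller, paraphrased for orientation only (not the exact upstream source):

#include <linux/err.h>
#include <linux/kvm_host.h>

static struct kvm *demo_create_vm(void)
{
	struct kvm *kvm = kvm_arch_alloc_vm();	/* arch: allocate, NULL on failure */
	int r;

	if (!kvm)
		return ERR_PTR(-ENOMEM);
	r = kvm_arch_init_vm(kvm);		/* arch: fill in arch state */
	if (r) {
		kvm_arch_free_vm(kvm);		/* arch: release the allocation */
		return ERR_PTR(r);
	}
	return kvm;
}
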
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index ee541cebcd78..c5f92a926a9a 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -25,5 +25,6 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
 #define kvm_apic_present(x) (true)
+#define kvm_lapic_enabled(x) (true)
 
 #endif
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index fb8f9f59a1ed..f1e17d3d6cd9 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -130,7 +130,7 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 
 	local_irq_save(psr);
 
-	/*Intercept the acces for PIB range*/
+	/*Intercept the access for PIB range*/
 	if (iot == GPFN_PIB) {
 		if (!dir)
 			lsapic_write(vcpu, src_pa, s, *dest);
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index bb862fb224f2..b0398740b48d 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -987,7 +987,7 @@ static void vmm_sanity_check(struct kvm_vcpu *vcpu)
 
 static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
 {
-	vmm_sanity_check(vcpu); /*Guarantee vcpu runing on healthy vmm!*/
+	vmm_sanity_check(vcpu); /*Guarantee vcpu running on healthy vmm!*/
 
 	if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
 		vcpu_do_resume(vcpu);
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
index f6c5617e16af..b214b5b0432d 100644
--- a/arch/ia64/kvm/vti.h
+++ b/arch/ia64/kvm/vti.h
@@ -83,13 +83,13 @@
 union vac {
 	unsigned long value;
 	struct {
-		int a_int:1;
-		int a_from_int_cr:1;
-		int a_to_int_cr:1;
-		int a_from_psr:1;
-		int a_from_cpuid:1;
-		int a_cover:1;
-		int a_bsw:1;
+		unsigned int a_int:1;
+		unsigned int a_from_int_cr:1;
+		unsigned int a_to_int_cr:1;
+		unsigned int a_from_psr:1;
+		unsigned int a_from_cpuid:1;
+		unsigned int a_cover:1;
+		unsigned int a_bsw:1;
 		long reserved:57;
 	};
 };
@@ -97,12 +97,12 @@ union vac {
 union vdc {
 	unsigned long value;
 	struct {
-		int d_vmsw:1;
-		int d_extint:1;
-		int d_ibr_dbr:1;
-		int d_pmc:1;
-		int d_to_pmd:1;
-		int d_itm:1;
+		unsigned int d_vmsw:1;
+		unsigned int d_extint:1;
+		unsigned int d_ibr_dbr:1;
+		unsigned int d_pmc:1;
+		unsigned int d_to_pmd:1;
+		unsigned int d_itm:1;
 		long reserved:58;
 	};
 };
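
The vti.h hunks are a correctness fix, not churn: the signedness of a plain int bitfield is implementation-defined, and a signed 1-bit field can only hold 0 and -1, so tests like vac.a_int == 1 can silently fail. A user-space demonstration of the trap (the behavior of the int field depends on the compiler):

#include <stdio.h>

struct flags {
	int          s:1;	/* commonly signed: stores 0 or -1 */
	unsigned int u:1;	/* always stores 0 or 1 */
};

int main(void)
{
	struct flags f = { .s = 1, .u = 1 };

	printf("s == 1? %s\n", f.s == 1 ? "yes" : "no");	/* typically no */
	printf("u == 1? %s\n", f.u == 1 ? "yes" : "no");	/* always yes */
	return 0;
}
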
diff --git a/arch/ia64/lib/do_csum.S b/arch/ia64/lib/do_csum.S
index 6bec2fc9f5b2..1a431a5cf86f 100644
--- a/arch/ia64/lib/do_csum.S
+++ b/arch/ia64/lib/do_csum.S
@@ -201,7 +201,7 @@ GLOBAL_ENTRY(do_csum)
 	;;
 (p6)	adds result1[0]=1,result1[0]
 (p9)	br.cond.sptk .do_csum_exit	// if (count == 1) exit
-	// Fall through to caluculate the checksum, feeding result1[0] as
+	// Fall through to calculate the checksum, feeding result1[0] as
 	// the initial value in result1[0].
 	//
 	// Calculate the checksum loading two 8-byte words per loop.
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 54bf54059811..f114a3b14c6a 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -36,7 +36,7 @@ static unsigned long max_gap;
  * Shows a simple page count of reserved and used pages in the system.
  * For discontig machines, it does this on a per-pgdat basis.
  */
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	int i, total_reserved = 0;
 	int total_shared = 0, total_cached = 0;
@@ -44,13 +44,16 @@ void show_mem(void)
 	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
-	show_free_areas();
+	show_free_areas(filter);
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
 		unsigned long flags;
 		int shared = 0, cached = 0, reserved = 0;
+		int nid = pgdat->node_id;
 
+		if (skip_free_areas_node(filter, nid))
+			continue;
 		pgdat_resize_lock(pgdat, &flags);
 		present = pgdat->node_present_pages;
 		for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -64,8 +67,7 @@ void show_mem(void)
 			if (max_gap < LARGE_GAP)
 				continue;
 #endif
-			i = vmemmap_find_next_valid_pfn(pgdat->node_id,
-				i) - 1;
+			i = vmemmap_find_next_valid_pfn(nid, i) - 1;
 			continue;
 		}
 		if (PageReserved(page))
@@ -81,7 +83,7 @@ void show_mem(void)
 		total_cached += cached;
 		total_shared += shared;
 		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
-		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+		       "shrd: %10d, swpd: %10d\n", nid,
 		       present, reserved, shared, cached);
 	}
 	printk(KERN_INFO "%ld pages of RAM\n", total_present);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 61620323bb60..c641333cd997 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -614,7 +614,7 @@ void __cpuinit *per_cpu_init(void)
  * Shows a simple page count of reserved and used pages in the system.
  * For discontig machines, it does this on a per-pgdat basis.
  */
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	int i, total_reserved = 0;
 	int total_shared = 0, total_cached = 0;
@@ -622,13 +622,16 @@ void show_mem(void)
 	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
-	show_free_areas();
+	show_free_areas(filter);
 	printk(KERN_INFO "Node memory in pages:\n");
 	for_each_online_pgdat(pgdat) {
 		unsigned long present;
 		unsigned long flags;
 		int shared = 0, cached = 0, reserved = 0;
+		int nid = pgdat->node_id;
 
+		if (skip_free_areas_node(filter, nid))
+			continue;
 		pgdat_resize_lock(pgdat, &flags);
 		present = pgdat->node_present_pages;
 		for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -638,8 +641,7 @@ void show_mem(void)
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
 			else {
-				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
-					i) - 1;
+				i = vmemmap_find_next_valid_pfn(nid, i) - 1;
 				continue;
 			}
 			if (PageReserved(page))
@@ -655,7 +657,7 @@ void show_mem(void)
 		total_cached += cached;
 		total_shared += shared;
 		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
-		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+		       "shrd: %10d, swpd: %10d\n", nid,
 		       present, reserved, shared, cached);
 	}
 	printk(KERN_INFO "%ld pages of RAM\n", total_present);
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 0799fea4c588..20b359376128 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/prefetch.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 1841ee7e65f9..5ca674b74737 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -38,7 +38,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, taddr);
+			pte = pte_alloc_map(mm, NULL, pmd, taddr);
 	}
 	return pte;
 }
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ed41759efcac..00cb0e26c64e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -36,8 +36,6 @@
 #include <asm/mca.h>
 #include <asm/paravirt.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
index 5cdd7e4a597c..f7b798993cea 100644
--- a/arch/ia64/oprofile/backtrace.c
+++ b/arch/ia64/oprofile/backtrace.c
@@ -29,7 +29,7 @@ typedef struct
 	unsigned int depth;
 	struct pt_regs *regs;
 	struct unw_frame_info frame;
-	u64 *prev_pfs_loc;	/* state for WAR for old spinlock ool code */
+	unsigned long *prev_pfs_loc; /* state for WAR for old spinlock ool code */
 } ia64_backtrace_t;
 
 /* Returns non-zero if the PC is in the Interrupt Vector Table */
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 0591038735af..d27df1d45da7 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -7,7 +7,7 @@
 # Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
 				   huberror.o io_acpi_init.o io_common.o \
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 13c15d968098..81a1f4e6bcd8 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -23,11 +23,9 @@
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/sn_feature_sets.h>
 
-static void force_interrupt(int irq);
 static void register_intr_pda(struct sn_irq_info *sn_irq_info);
 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 
-int sn_force_interrupt_flag = 1;
 extern int sn_ioif_inited;
 struct list_head **sn_irq_lh;
 static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
@@ -78,62 +76,40 @@ u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
 	return ret_stuff.status;
 }
 
-static unsigned int sn_startup_irq(unsigned int irq)
+static unsigned int sn_startup_irq(struct irq_data *data)
 {
 	return 0;
 }
 
-static void sn_shutdown_irq(unsigned int irq)
+static void sn_shutdown_irq(struct irq_data *data)
 {
 }
 
 extern void ia64_mca_register_cpev(int);
 
-static void sn_disable_irq(unsigned int irq)
+static void sn_disable_irq(struct irq_data *data)
 {
-	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
+	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
 		ia64_mca_register_cpev(0);
 }
 
-static void sn_enable_irq(unsigned int irq)
+static void sn_enable_irq(struct irq_data *data)
 {
-	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
-		ia64_mca_register_cpev(irq);
+	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+		ia64_mca_register_cpev(data->irq);
 }
 
-static void sn_ack_irq(unsigned int irq)
+static void sn_ack_irq(struct irq_data *data)
 {
 	u64 event_occurred, mask;
+	unsigned int irq = data->irq & 0xff;
 
-	irq = irq & 0xff;
 	event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
 	mask = event_occurred & SH_ALL_INT_MASK;
 	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
 	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
 
-	move_native_irq(irq);
-}
-
-static void sn_end_irq(unsigned int irq)
-{
-	int ivec;
-	u64 event_occurred;
-
-	ivec = irq & 0xff;
-	if (ivec == SGI_UART_VECTOR) {
-		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
-		/* If the UART bit is set here, we may have received an
-		 * interrupt from the UART that the driver missed.  To
-		 * make sure, we IPI ourselves to force us to look again.
-		 */
-		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
-			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
-					  IA64_IPI_DM_INT, 0);
-		}
-	}
-	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
-	if (sn_force_interrupt_flag)
-		force_interrupt(irq);
+	irq_move_irq(data);
 }
 
 static void sn_irq_info_free(struct rcu_head *head);
@@ -228,9 +204,11 @@ finish_up:
 	return new_irq_info;
 }
 
-static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(struct irq_data *data,
+			       const struct cpumask *mask, bool force)
 {
 	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+	unsigned int irq = data->irq;
 	nasid_t nasid;
 	int slice;
 
@@ -249,7 +227,7 @@ void sn_set_err_irq_affinity(unsigned int irq)
 {
 	/*
 	 * On systems which support CPU disabling (SHub2), all error interrupts
-	 * are targetted at the boot CPU.
+	 * are targeted at the boot CPU.
 	 */
 	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
 		set_irq_affinity_info(irq, cpu_physical_id(0), 0);
@@ -259,26 +237,25 @@ void sn_set_err_irq_affinity(unsigned int irq) { }
 #endif
 
 static void
-sn_mask_irq(unsigned int irq)
+sn_mask_irq(struct irq_data *data)
 {
 }
 
 static void
-sn_unmask_irq(unsigned int irq)
+sn_unmask_irq(struct irq_data *data)
 {
 }
 
 struct irq_chip irq_type_sn = {
 	.name			= "SN hub",
-	.startup		= sn_startup_irq,
-	.shutdown		= sn_shutdown_irq,
-	.enable			= sn_enable_irq,
-	.disable		= sn_disable_irq,
-	.ack			= sn_ack_irq,
-	.end			= sn_end_irq,
-	.mask			= sn_mask_irq,
-	.unmask			= sn_unmask_irq,
-	.set_affinity		= sn_set_affinity_irq
+	.irq_startup		= sn_startup_irq,
+	.irq_shutdown		= sn_shutdown_irq,
+	.irq_enable		= sn_enable_irq,
+	.irq_disable		= sn_disable_irq,
+	.irq_ack		= sn_ack_irq,
+	.irq_mask		= sn_mask_irq,
+	.irq_unmask		= sn_unmask_irq,
+	.irq_set_affinity	= sn_set_affinity_irq
 };
 
 ia64_vector sn_irq_to_vector(int irq)
@@ -296,15 +273,13 @@ unsigned int sn_local_vector_to_irq(u8 vector)
 void sn_irq_init(void)
 {
 	int i;
-	struct irq_desc *base_desc = irq_desc;
 
 	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
 	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
 
 	for (i = 0; i < NR_IRQS; i++) {
-		if (base_desc[i].chip == &no_irq_chip) {
-			base_desc[i].chip = &irq_type_sn;
-		}
+		if (irq_get_chip(i) == &no_irq_chip)
+			irq_set_chip(i, &irq_type_sn);
 	}
 }
 
@@ -378,7 +353,6 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 	int cpu = nasid_slice_to_cpuid(nasid, slice);
 #ifdef CONFIG_SMP
 	int cpuphys;
-	struct irq_desc *desc;
 #endif
 
 	pci_dev_get(pci_dev);
@@ -395,12 +369,11 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 #ifdef CONFIG_SMP
 	cpuphys = cpu_physical_id(cpu);
 	set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
-	desc = irq_to_desc(sn_irq_info->irq_irq);
 	/*
 	 * Affinity was set by the PROM, prevent it from
 	 * being reset by the request_irq() path.
 	 */
-	desc->status |= IRQ_AFFINITY_SET;
+	irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
 #endif
 }
 
@@ -439,25 +412,11 @@ sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
 	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
 
 	/* Don't force an interrupt if the irq has been disabled */
-	if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
+	if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
 	    pci_provider && pci_provider->force_interrupt)
 		(*pci_provider->force_interrupt)(sn_irq_info);
 }
 
-static void force_interrupt(int irq)
-{
-	struct sn_irq_info *sn_irq_info;
-
-	if (!sn_ioif_inited)
-		return;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
-		sn_call_force_intr_provider(sn_irq_info);
-
-	rcu_read_unlock();
-}
-
 /*
  * Check for lost interrupts.  If the PIC int_status reg. says that
  * an interrupt has been sent, but not handled, and the interrupt
@@ -476,7 +435,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
 	/*
 	 * Bridge types attached to TIO (anything but PIC) do not need this WAR
 	 * since they do not target Shub II interrupt registers.  If that
-	 * ever changes, this check needs to accomodate.
+	 * ever changes, this check needs to accommodate.
 	 */
 	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
 		return;
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 0c72dd463831..2b98b9e088de 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -144,16 +144,16 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
 	 */
 	msg.data = 0x100 + irq;
 
-	set_irq_msi(irq, entry);
+	irq_set_msi_desc(irq, entry);
 	write_msi_msg(irq, &msg);
-	set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
+	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
 
 	return 0;
 }
 
 #ifdef CONFIG_SMP
-static int sn_set_msi_irq_affinity(unsigned int irq,
-				   const struct cpumask *cpu_mask)
+static int sn_set_msi_irq_affinity(struct irq_data *data,
+				   const struct cpumask *cpu_mask, bool force)
 {
 	struct msi_msg msg;
 	int slice;
@@ -164,7 +164,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
 	struct sn_irq_info *sn_irq_info;
 	struct sn_irq_info *new_irq_info;
 	struct sn_pcibus_provider *provider;
-	unsigned int cpu;
+	unsigned int cpu, irq = data->irq;
 
 	cpu = cpumask_first(cpu_mask);
 	sn_irq_info = sn_msi_info[irq].sn_irq_info;
@@ -206,33 +206,33 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+	cpumask_copy(data->affinity, cpu_mask);
 
 	return 0;
 }
 #endif /* CONFIG_SMP */
 
-static void sn_ack_msi_irq(unsigned int irq)
+static void sn_ack_msi_irq(struct irq_data *data)
 {
-	move_native_irq(irq);
+	irq_move_irq(data);
 	ia64_eoi();
 }
 
-static int sn_msi_retrigger_irq(unsigned int irq)
+static int sn_msi_retrigger_irq(struct irq_data *data)
 {
-	unsigned int vector = irq;
+	unsigned int vector = data->irq;
 	ia64_resend_irq(vector);
 
 	return 1;
 }
 
 static struct irq_chip sn_msi_chip = {
 	.name			= "PCI-MSI",
-	.mask			= mask_msi_irq,
-	.unmask			= unmask_msi_irq,
-	.ack			= sn_ack_msi_irq,
+	.irq_mask		= mask_msi_irq,
+	.irq_unmask		= unmask_msi_irq,
+	.irq_ack		= sn_ack_msi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity		= sn_set_msi_irq_affinity,
+	.irq_set_affinity	= sn_set_msi_irq_affinity,
 #endif
-	.retrigger		= sn_msi_retrigger_irq,
+	.irq_retrigger		= sn_msi_retrigger_irq,
 };
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index dbc4cbecb5ed..77db0b514fa4 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -592,7 +592,7 @@ void __cpuinit sn_cpu_init(void)
 	/*
 	 * Don't check status. The SAL call is not supported on all PROMs
 	 * but a failure is harmless.
-	 * Architechtuallly, cpu_init is always called twice on cpu 0. We
+	 * Architecturally, cpu_init is always called twice on cpu 0. We
 	 * should set cpu_number on cpu 0 once.
 	 */
 	if (cpuid == 0) {
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
index 08e6565dc908..3d09108d4277 100644
--- a/arch/ia64/sn/kernel/sn2/Makefile
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -9,7 +9,7 @@
 # sn2 specific kernel files
 #
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
 	 prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index fa1eceed0d23..30862c0358cd 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -860,6 +860,7 @@ error:
 
 static const struct file_operations sn_hwperf_fops = {
 	.unlocked_ioctl = sn_hwperf_ioctl,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice sn_hwperf_dev = {
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index c76d8dc3aea3..7aab87f48060 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -45,38 +45,6 @@ static int licenseID_open(struct inode *inode, struct file *file)
 	return single_open(file, licenseID_show, NULL);
 }
 
-/*
- * Enable forced interrupt by default.
- * When set, the sn interrupt handler writes the force interrupt register on
- * the bridge chip.  The hardware will then send an interrupt message if the
- * interrupt line is active.  This mimics a level sensitive interrupt.
- */
-extern int sn_force_interrupt_flag;
-
-static int sn_force_interrupt_show(struct seq_file *s, void *p)
-{
-	seq_printf(s, "Force interrupt is %s\n",
-		sn_force_interrupt_flag ? "enabled" : "disabled");
-	return 0;
-}
-
-static ssize_t sn_force_interrupt_write_proc(struct file *file,
-		const char __user *buffer, size_t count, loff_t *data)
-{
-	char val;
-
-	if (copy_from_user(&val, buffer, 1))
-		return -EFAULT;
-
-	sn_force_interrupt_flag = (val == '0') ? 0 : 1;
-	return count;
-}
-
-static int sn_force_interrupt_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sn_force_interrupt_show, NULL);
-}
-
 static int coherence_id_show(struct seq_file *s, void *p)
 {
 	seq_printf(s, "%d\n", partition_coherence_id());
@@ -114,14 +82,6 @@ static const struct file_operations proc_license_id_fops = {
 	.release = single_release,
 };
 
-static const struct file_operations proc_sn_force_intr_fops = {
-	.open = sn_force_interrupt_open,
-	.read = seq_read,
-	.write = sn_force_interrupt_write_proc,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
 static const struct file_operations proc_coherence_id_fops = {
 	.open = coherence_id_open,
 	.read = seq_read,
@@ -149,8 +109,6 @@ void register_sn_procfs(void)
 	proc_create("system_serial_number", 0444, sgi_proc_dir,
 		    &proc_system_sn_fops);
 	proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops);
-	proc_create("sn_force_interrupt", 0644, sgi_proc_dir,
-		    &proc_sn_force_intr_fops);
 	proc_create("coherence_id", 0444, sgi_proc_dir,
 		    &proc_coherence_id_fops);
 	proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index 21d6f09e3447..c34efda122e1 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -33,8 +33,6 @@ static struct clocksource clocksource_sn2 = {
 	.rating		= 450,
 	.read		= read_sn2,
 	.mask		= (1LL << 55) - 1,
-	.mult		= 0,
-	.shift		= 10,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -57,9 +55,7 @@ ia64_sn_udelay (unsigned long usecs)
 void __init sn_timer_init(void)
 {
 	clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
-	clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
-						clocksource_sn2.shift);
-	clocksource_register(&clocksource_sn2);
+	clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
 
 	ia64_udelay = &ia64_sn_udelay;
 }
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index ad4ef34dfe26..df2a90145426 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,6 +7,6 @@
 #
 # Makefile for the sn pci general routines.
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
index 01192d3247dd..396bcae36309 100644
--- a/arch/ia64/sn/pci/pcibr/Makefile
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -7,7 +7,7 @@
 #
 # Makefile for the sn2 io routines.
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y += pcibr_dma.o pcibr_reg.o \
 	 pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index c659ad5613a0..33def666a664 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -227,7 +227,7 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
  * after doing the read.  For PIC this routine then forces a fake interrupt
  * on another line, which is logically associated with the slot that the PIO
  * is addressed to.  It then spins while watching the memory location that
- * the interrupt is targetted to.  When the interrupt response arrives, we
+ * the interrupt is targeted to.  When the interrupt response arrives, we
  * are sure that the DMA has landed in memory and it is safe for the driver
  * to proceed.  For TIOCP use the Device(x) Write Request Buffer Flush
  * Bridge register since it ensures the data has entered the coherence domain,
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 4d4536e3b6f3..9c271be9919a 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
  * use the GART mapped mode.
  */
 static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
 {
 	u64 mapaddr;
 
diff --git a/arch/ia64/uv/kernel/Makefile b/arch/ia64/uv/kernel/Makefile
index 8d92b4684d8e..124e441d383d 100644
--- a/arch/ia64/uv/kernel/Makefile
+++ b/arch/ia64/uv/kernel/Makefile
@@ -7,7 +7,7 @@
 # Copyright (C) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
-EXTRA_CFLAGS += -Iarch/ia64/sn/include
+ccflags-y := -Iarch/ia64/sn/include
 
 obj-y += setup.o
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index a3fb7cf9ae1d..b279e142c633 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -92,6 +92,8 @@ static unsigned short saved_irq_cnt;
 static int xen_slab_ready;
 
 #ifdef CONFIG_SMP
+#include <linux/sched.h>
+
 /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
  * it ends up to issue several memory accesses upon percpu data and
  * thus adds unnecessary traffic to other paths.
@@ -99,7 +101,13 @@ static int xen_slab_ready;
 static irqreturn_t
 xen_dummy_handler(int irq, void *dev_id)
 {
+	return IRQ_HANDLED;
+}
 
+static irqreturn_t
+xen_resched_handler(int irq, void *dev_id)
+{
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
@@ -110,7 +118,7 @@ static struct irqaction xen_ipi_irqaction = {
 };
 
 static struct irqaction xen_resched_irqaction = {
-	.handler =	xen_dummy_handler,
+	.handler =	xen_resched_handler,
 	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
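With this change the reschedule vector stops sharing the dummy stub and calls scheduler_ipi(), giving the scheduler core an explicit entry point whenever a reschedule IPI arrives (hence the new <linux/sched.h> include above). The post-conversion handler shape, as a sketch rather than the Xen binding code:

    #include <linux/interrupt.h>
    #include <linux/sched.h>	/* scheduler_ipi() */

    static irqreturn_t resched_ipi(int irq, void *dev_id)
    {
    	scheduler_ipi();	/* notify the scheduler of the wakeup IPI */
    	return IRQ_HANDLED;
    }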
@@ -138,7 +146,6 @@ static void
 __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 			  struct irqaction *action, int save)
 {
-	struct irq_desc *desc;
 	int irq = 0;
 
 	if (xen_slab_ready) {
@@ -223,8 +230,7 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 		 * mark the interrupt for migrations and trigger it
 		 * on cpu hotplug.
 		 */
-		desc = irq_desc + irq;
-		desc->status |= IRQ_PER_CPU;
+		irq_set_status_flags(irq, IRQ_PER_CPU);
 	}
 }
 
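The open-coded irq_desc poke becomes irq_set_status_flags(), part of the genirq cleanup that made irq_desc internals private to the core. A sketch of the accessor pair (the wrapper is hypothetical):

    #include <linux/irq.h>

    static void set_irq_percpu(unsigned int irq, bool on)
    {
    	if (on)
    		irq_set_status_flags(irq, IRQ_PER_CPU);	/* was: desc->status |= ... */
    	else
    		irq_clear_status_flags(irq, IRQ_PER_CPU);
    }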
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
index fd66b048c6fa..419c8620945a 100644
--- a/arch/ia64/xen/suspend.c
+++ b/arch/ia64/xen/suspend.c
@@ -37,19 +37,14 @@ xen_mm_unpin_all(void)
 	/* nothing */
 }
 
-void xen_pre_device_suspend(void)
-{
-	/* nothing */
-}
-
 void
-xen_pre_suspend()
+xen_arch_pre_suspend()
 {
 	/* nothing */
 }
 
 void
-xen_post_suspend(int suspend_cancelled)
+xen_arch_post_suspend(int suspend_cancelled)
 {
 	if (suspend_cancelled)
 		return;
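The empty xen_pre_device_suspend() hook is dropped, and the two remaining hooks take the xen_arch_* names that the common Xen suspend path calls on every architecture. A sketch of the shared declarations (believed to live in include/xen/xen-ops.h at this point):

    /* Arch hooks invoked by the generic Xen suspend/resume code. */
    void xen_arch_pre_suspend(void);
    void xen_arch_post_suspend(int suspend_cancelled);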
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index c1c544513e8d..1f8244a78bee 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
 		run_posix_cpu_timers(p);
 		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
 
-		if (cpu == time_keeper_id) {
-			write_seqlock(&xtime_lock);
-			do_timer(stolen + blocked);
-			local_cpu_data->itm_next = delta_itm + new_itm;
-			write_sequnlock(&xtime_lock);
-		} else {
-			local_cpu_data->itm_next = delta_itm + new_itm;
-		}
+		if (cpu == time_keeper_id)
+			xtime_update(stolen + blocked);
+
+		local_cpu_data->itm_next = delta_itm + new_itm;
+
 		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
 		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}
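xtime_update() folds the write_seqlock(&xtime_lock)/do_timer()/write_sequnlock() sequence into the timekeeping core, so callers no longer take xtime_lock themselves; the itm_next update then becomes common to both branches and moves out of the conditional. A hedged usage sketch (the helper name is invented):

    #include <linux/time.h>	/* declares xtime_update() in kernels of this vintage */

    static void account_missed_ticks(unsigned long ticks)
    {
    	if (ticks)
    		xtime_update(ticks);	/* locking handled inside the core */
    }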
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index 8adc6a14272a..3e8d350fdf39 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -1136,7 +1136,6 @@ __initconst = {
 static void __init
 xen_patch_branch(unsigned long tag, unsigned long type)
 {
-	const unsigned long nelem =
-		sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
-	__paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
+	__paravirt_patch_apply_branch(tag, type, xen_branch_target,
+				      ARRAY_SIZE(xen_branch_target));
 }
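ARRAY_SIZE() is the kernel's idiom for the element count of a static array; besides being shorter than the open-coded sizeof division, it fails to compile if its argument has decayed to a pointer. Equivalent shape, as a small sketch (names invented):

    #include <linux/kernel.h>	/* ARRAY_SIZE() */

    static unsigned long branch_targets[4];

    static void patch_all(void)
    {
    	/* == sizeof(branch_targets) / sizeof(branch_targets[0]) == 4,
    	 * plus a compile-time check that the argument is a real array. */
    	unsigned long n = ARRAY_SIZE(branch_targets);
    	(void)n;
    }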