Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Makefile                        |    7
-rw-r--r--  arch/ia64/configs/gensparse_defconfig     |    3
-rw-r--r--  arch/ia64/configs/sn2_defconfig           |    2
-rw-r--r--  arch/ia64/configs/tiger_defconfig         |    1
-rw-r--r--  arch/ia64/configs/zx1_defconfig           |    1
-rw-r--r--  arch/ia64/defconfig                       |    1
-rw-r--r--  arch/ia64/hp/sim/simserial.c              |   12
-rw-r--r--  arch/ia64/ia32/Makefile                   |    4
-rw-r--r--  arch/ia64/ia32/elfcore32.h                |    3
-rw-r--r--  arch/ia64/ia32/ia32_entry.S               |    2
-rw-r--r--  arch/ia64/ia32/ia32_ioctl.c               |   45
-rw-r--r--  arch/ia64/ia32/ia32_signal.c              |    4
-rw-r--r--  arch/ia64/ia32/ia32_support.c             |    4
-rw-r--r--  arch/ia64/ia32/sys_ia32.c                 |   85
-rw-r--r--  arch/ia64/kernel/Makefile                 |    5
-rw-r--r--  arch/ia64/kernel/acpi-ext.c               |   22
-rw-r--r--  arch/ia64/kernel/acpi-processor.c         |   67
-rw-r--r--  arch/ia64/kernel/acpi.c                   |    6
-rw-r--r--  arch/ia64/kernel/cpufreq/Makefile         |    1
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c   |   51
-rw-r--r--  arch/ia64/kernel/efi.c                    |  160
-rw-r--r--  arch/ia64/kernel/entry.S                  |    1
-rw-r--r--  arch/ia64/kernel/fsys.S                   |    1
-rw-r--r--  arch/ia64/kernel/head.S                   |    2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c             |    2
-rw-r--r--  arch/ia64/kernel/jprobes.S                |   27
-rw-r--r--  arch/ia64/kernel/kprobes.c                |   68
-rw-r--r--  arch/ia64/kernel/mca.c                    |    4
-rw-r--r--  arch/ia64/kernel/mca_asm.S                |    2
-rw-r--r--  arch/ia64/kernel/perfmon.c                |   46
-rw-r--r--  arch/ia64/kernel/perfmon_montecito.h      |  269
-rw-r--r--  arch/ia64/kernel/process.c                |   12
-rw-r--r--  arch/ia64/kernel/ptrace.c                 |   33
-rw-r--r--  arch/ia64/kernel/salinfo.c                |  171
-rw-r--r--  arch/ia64/kernel/setup.c                  |   26
-rw-r--r--  arch/ia64/kernel/signal.c                 |   10
-rw-r--r--  arch/ia64/kernel/sys_ia64.c               |    2
-rw-r--r--  arch/ia64/kernel/traps.c                  |   26
-rw-r--r--  arch/ia64/kernel/uncached.c               |    1
-rw-r--r--  arch/ia64/mm/init.c                       |   36
-rw-r--r--  arch/ia64/mm/tlb.c                        |    2
-rw-r--r--  arch/ia64/oprofile/backtrace.c            |    2
-rw-r--r--  arch/ia64/pci/pci.c                       |   31
-rw-r--r--  arch/ia64/sn/include/xtalk/hubdev.h       |   35
-rw-r--r--  arch/ia64/sn/include/xtalk/xbow.h         |  206
-rw-r--r--  arch/ia64/sn/include/xtalk/xwidgetdev.h   |   46
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c           |   58
-rw-r--r--  arch/ia64/sn/kernel/huberror.c            |    9
-rw-r--r--  arch/ia64/sn/kernel/io_init.c             |  154
-rw-r--r--  arch/ia64/sn/kernel/irq.c                 |   10
-rw-r--r--  arch/ia64/sn/kernel/mca.c                 |    7
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c               |   37
-rw-r--r--  arch/ia64/sn/kernel/xp_main.c             |   17
-rw-r--r--  arch/ia64/sn/kernel/xpc.h                 | 1273
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c         |   58
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c            |  206
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c       |   10
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c        |   16
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c        |   76
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c   |   44
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c        |   28
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c         |   36
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c         |   68
63 files changed, 1544 insertions, 2110 deletions
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 67932ad53082..f722e1a25948 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -25,7 +25,6 @@ cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
 	-falign-functions=32 -frename-registers -fno-optimize-sibling-calls
 CFLAGS_KERNEL	:= -mconstant-gp
 
-GCC_VERSION	:= $(call cc-version)
 GAS_STATUS	= $(shell $(srctree)/arch/ia64/scripts/check-gas "$(CC)" "$(OBJDUMP)")
 CPPFLAGS += $(shell $(srctree)/arch/ia64/scripts/toolchain-flags "$(CC)" "$(OBJDUMP)" "$(READELF)")
 
@@ -37,11 +36,7 @@ $(error Sorry, you need a newer version of the assembler, one that is built from
 	ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
 endif
 
-ifneq ($(shell if [ $(GCC_VERSION) -lt 0300 ] ; then echo "bad"; fi ;),)
-$(error Sorry, your compiler is too old.  GCC v2.96 is known to generate bad code.)
-endif
-
-ifeq ($(GCC_VERSION),0304)
+ifeq ($(call cc-version),0304)
 	cflags-$(CONFIG_ITANIUM)	+= -mtune=merced
 	cflags-$(CONFIG_MCKINLEY)	+= -mtune=mckinley
 endif
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 80f8663bc6d9..991c07b57c24 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -557,6 +557,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
@@ -701,6 +702,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_SGI_L1_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_SERIAL_SGI_IOC4=y
+CONFIG_SERIAL_SGI_IOC3=y
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
@@ -1046,6 +1048,7 @@ CONFIG_INFINIBAND_IPOIB=m
 # SN Devices
 #
 CONFIG_SGI_IOC4=y
+CONFIG_SGI_IOC3=y
 
 #
 # File systems
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index ff8bb3770c9d..3cb503b659e6 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -659,6 +659,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_SGI_L1_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_SERIAL_SGI_IOC4=y
+CONFIG_SERIAL_SGI_IOC3=y
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
@@ -899,6 +900,7 @@ CONFIG_INFINIBAND_SRP=m
 # SN Devices
 #
 CONFIG_SGI_IOC4=y
+CONFIG_SGI_IOC3=y
 
 #
 # File systems
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index b1e8f09e9fd5..6859119bc9dd 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -565,6 +565,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 0856ca67dd50..53899dc8eb53 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -548,6 +548,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 275a26c6e5aa..dcbc78a4cfa4 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -565,6 +565,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 19ee635eeb70..626cdc83668b 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -26,6 +26,7 @@
 #include <linux/fcntl.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/capability.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/serial.h>
@@ -107,7 +108,6 @@ static struct async_struct *IRQ_ports[NR_IRQS];
 static struct console *console;
 
 static unsigned char *tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 extern struct console *console_drivers; /* from kernel/printk.c */
 
@@ -166,15 +166,9 @@ static void receive_chars(struct tty_struct *tty, struct pt_regs *regs)
 			}
 		}
 		seen_esc = 0;
-		if (tty->flip.count >= TTY_FLIPBUF_SIZE) break;
 
-		*tty->flip.char_buf_ptr = ch;
-
-		*tty->flip.flag_buf_ptr = 0;
-
-		tty->flip.flag_buf_ptr++;
-		tty->flip.char_buf_ptr++;
-		tty->flip.count++;
+		if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+			break;
 	}
 	tty_flip_buffer_push(tty);
 }
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
index 2ed90da81166..61cb60affd95 100644
--- a/arch/ia64/ia32/Makefile
+++ b/arch/ia64/ia32/Makefile
@@ -2,11 +2,9 @@
 # Makefile for the ia32 kernel emulation subsystem.
 #
 
-obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
+obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \
 	ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
 
-CFLAGS_ia32_ioctl.o += -Ifs/
-
 # Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and
 # restore_ia32_fpstate_live() can be sure the live register contain user-level state.
 CFLAGS_ia32_signal.o += -mfixed-range=f16-f31
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index b73b8b6b10c1..a47f63b204fb 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -95,8 +95,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
 static inline int elf_core_copy_task_regs(struct task_struct *t,
 					  elf_gregset_t* elfregs)
 {
-	struct pt_regs *pp = ia64_task_regs(t);
-	ELF_CORE_COPY_REGS((*elfregs), pp);
+	ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
 	return 1;
 }
 
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 494fad6bf376..95fe04400f6b 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -469,7 +469,7 @@ ia32_syscall_table:
 	data8 sys32_epoll_wait
 	data8 sys_remap_file_pages
 	data8 sys_set_tid_address
-	data8 sys32_timer_create
+	data8 compat_sys_timer_create
 	data8 compat_sys_timer_settime	/* 260 */
 	data8 compat_sys_timer_gettime
 	data8 sys_timer_getoverrun
diff --git a/arch/ia64/ia32/ia32_ioctl.c b/arch/ia64/ia32/ia32_ioctl.c
deleted file mode 100644
index 88739394f6df..000000000000
--- a/arch/ia64/ia32/ia32_ioctl.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * IA32 Architecture-specific ioctl shim code
- *
- * Copyright (C) 2000 VA Linux Co
- * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/signal.h>	/* argh, msdos_fs.h isn't self-contained... */
-#include <linux/syscalls.h>
-#include "ia32priv.h"
-
-#define INCLUDES
-#include "compat_ioctl.c"
-
-#define IOCTL_NR(a)	((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
-
-#define DO_IOCTL(fd, cmd, arg) ({			\
-	int _ret;					\
-	mm_segment_t _old_fs = get_fs();		\
-							\
-	set_fs(KERNEL_DS);				\
-	_ret = sys_ioctl(fd, cmd, (unsigned long)arg);	\
-	set_fs(_old_fs);				\
-	_ret;						\
-})
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),sys_ioctl)
-#define HANDLE_IOCTL(cmd,handler)	{ (cmd), (ioctl_trans_handler_t)(handler), NULL },
-#define IOCTL_TABLE_START \
-	struct ioctl_trans ioctl_start[] = {
-#define IOCTL_TABLE_END \
-	};
-
-IOCTL_TABLE_START
-#define DECLARES
-#include "compat_ioctl.c"
-#include <linux/compat_ioctl.h>
-IOCTL_TABLE_END
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index aa891c9bc9b6..5856510210fa 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -255,7 +255,7 @@ save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
 	 */
 	fp_tos = (fsr>>11)&0x7;
 	fr8_st_map = (8-fp_tos)&0x7;
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 	ia64f2ia32f(fpregp, &ptp->f8);
 	copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
@@ -389,7 +389,7 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
 	fr8_st_map = (8-fp_tos)&0x7;
 	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
 	ia32f2ia64f(&ptp->f8, fpregp);
 	copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index 4f630043b3ae..c187743965a0 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -58,7 +58,7 @@ load_desc (u16 selector)
 void
 ia32_load_segment_descriptors (struct task_struct *task)
 {
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 
 	/* Setup the segment descriptors */
 	regs->r24 = load_desc(regs->r16 >> 16);	/* ESD */
@@ -113,7 +113,7 @@ void
 ia32_load_state (struct task_struct *t)
 {
 	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
-	struct pt_regs *regs = ia64_task_regs(t);
+	struct pt_regs *regs = task_pt_regs(t);
 
 	eflag = t->thread.eflag;
 	fsr = t->thread.fsr;
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index dc282710421a..70dba1f0e2ee 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -48,12 +48,13 @@
 #include <linux/ptrace.h>
 #include <linux/stat.h>
 #include <linux/ipc.h>
+#include <linux/capability.h>
 #include <linux/compat.h>
 #include <linux/vfs.h>
 #include <linux/mman.h>
+#include <linux/mutex.h>
 
 #include <asm/intrinsics.h>
-#include <asm/semaphore.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -85,7 +86,7 @@
  * while doing so.
  */
 /* XXX make per-mm: */
-static DECLARE_MUTEX(ia32_mmap_sem);
+static DEFINE_MUTEX(ia32_mmap_mutex);
 
 asmlinkage long
 sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
@@ -894,11 +895,11 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
 	prot = get_prot32(prot);
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-	down(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
 	{
 		addr = emulate_mmap(file, addr, len, prot, flags, offset);
 	}
-	up(&ia32_mmap_sem);
+	mutex_unlock(&ia32_mmap_mutex);
 #else
 	down_write(&current->mm->mmap_sem);
 	{
@@ -999,11 +1000,9 @@ sys32_munmap (unsigned int start, unsigned int len)
 	if (start >= end)
 		return 0;
 
-	down(&ia32_mmap_sem);
-	{
-		ret = sys_munmap(start, end - start);
-	}
-	up(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
+	ret = sys_munmap(start, end - start);
+	mutex_unlock(&ia32_mmap_mutex);
 #endif
 	return ret;
 }
@@ -1055,7 +1054,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
 	if (retval < 0)
 		return retval;
 
-	down(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
 	{
 		if (offset_in_page(start)) {
 			/* start address is 4KB aligned but not page aligned. */
@@ -1079,7 +1078,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
 		retval = sys_mprotect(start, end - start, prot);
 	}
   out:
-	up(&ia32_mmap_sem);
+	mutex_unlock(&ia32_mmap_mutex);
 	return retval;
 #endif
 }
@@ -1123,11 +1122,9 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
 	old_len = PAGE_ALIGN(old_end) - addr;
 	new_len = PAGE_ALIGN(new_end) - addr;
 
-	down(&ia32_mmap_sem);
-	{
-		ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
-	}
-	up(&ia32_mmap_sem);
+	mutex_lock(&ia32_mmap_mutex);
+	ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
+	mutex_unlock(&ia32_mmap_mutex);
 
 	if ((ret >= 0) && (old_len < new_len)) {
 		/* mremap expanded successfully */
@@ -1481,7 +1478,7 @@ getreg (struct task_struct *child, int regno)
 {
 	struct pt_regs *child_regs;
 
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	switch (regno / sizeof(int)) {
 	      case PT_EBX: return child_regs->r11;
 	      case PT_ECX: return child_regs->r9;
@@ -1509,7 +1506,7 @@ putreg (struct task_struct *child, int regno, unsigned int value)
 {
 	struct pt_regs *child_regs;
 
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	switch (regno / sizeof(int)) {
 	      case PT_EBX: child_regs->r11 = value; break;
 	      case PT_ECX: child_regs->r9 = value; break;
@@ -1625,7 +1622,7 @@ save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		put_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1658,7 +1655,7 @@ restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __us
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		get_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1689,7 +1686,7 @@ save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1733,7 +1730,7 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1761,21 +1758,15 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
 
 	lock_kernel();
 	if (request == PTRACE_TRACEME) {
-		ret = sys_ptrace(request, pid, addr, data);
+		ret = ptrace_traceme();
 		goto out;
 	}
 
-	ret = -ESRCH;
-	read_lock(&tasklist_lock);
-	child = find_task_by_pid(pid);
-	if (child)
-		get_task_struct(child);
-	read_unlock(&tasklist_lock);
-	if (!child)
+	child = ptrace_get_task_struct(pid);
+	if (IS_ERR(child)) {
+		ret = PTR_ERR(child);
 		goto out;
-	ret = -EPERM;
-	if (pid == 1)		/* no messing around with init! */
-		goto out_tsk;
+	}
 
 	if (request == PTRACE_ATTACH) {
 		ret = sys_ptrace(request, pid, addr, data);
@@ -2559,34 +2550,6 @@ sys32_get_thread_area (struct ia32_user_desc __user *u_info)
 	return 0;
 }
 
-asmlinkage long
-sys32_timer_create(u32 clock, struct compat_sigevent __user *se32, timer_t __user *timer_id)
-{
-	struct sigevent se;
-	mm_segment_t oldfs;
-	timer_t t;
-	long err;
-
-	if (se32 == NULL)
-		return sys_timer_create(clock, NULL, timer_id);
-
-	if (get_compat_sigevent(&se, se32))
-		return -EFAULT;
-
-	if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
-		return -EFAULT;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = sys_timer_create(clock, (struct sigevent __user *) &se, (timer_t __user *) &t);
-	set_fs(oldfs);
-
-	if (!err)
-		err = __put_user (t, timer_id);
-
-	return err;
-}
-
 long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 			__u32 len_low, __u32 len_high, int advice)
 {
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 307514f7a282..09a0dbc17fb6 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -13,6 +13,11 @@ obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
 obj-$(CONFIG_IA64_GENERIC)	+= acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1)	+= acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y				+= acpi-processor.o
+endif
+
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
 obj-$(CONFIG_IOSAPIC)		+= iosapic.o
 obj-$(CONFIG_MODULES)		+= module.o
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 13a5b3b49bf8..4a5574ff007b 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -33,33 +33,33 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
 	struct acpi_resource_vendor *vendor;
 	struct acpi_vendor_descriptor *descriptor;
-	u32 length;
+	u32 byte_length;
 
-	if (resource->id != ACPI_RSTYPE_VENDOR)
+	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
 		return AE_OK;
 
 	vendor = (struct acpi_resource_vendor *)&resource->data;
-	descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
-	if (vendor->length <= sizeof(*info->descriptor) ||
+	descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
+	if (vendor->byte_length <= sizeof(*info->descriptor) ||
 	    descriptor->guid_id != info->descriptor->guid_id ||
 	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
 		return AE_OK;
 
-	length = vendor->length - sizeof(struct acpi_vendor_descriptor);
-	info->data = acpi_os_allocate(length);
+	byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
+	info->data = acpi_os_allocate(byte_length);
 	if (!info->data)
 		return AE_NO_MEMORY;
 
 	memcpy(info->data,
-	       vendor->reserved + sizeof(struct acpi_vendor_descriptor),
-	       length);
-	info->length = length;
+	       vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
+	       byte_length);
+	info->length = byte_length;
 	return AE_CTRL_TERMINATE;
 }
 
 acpi_status
 acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
-			  u8 ** data, u32 * length)
+			  u8 ** data, u32 * byte_length)
 {
 	struct acpi_vendor_info info;
 
@@ -72,7 +72,7 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
 		return AE_NOT_FOUND;
 
 	*data = info.data;
-	*length = info.length;
+	*byte_length = info.length;
 	return AE_OK;
 }
 
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
new file mode 100644
index 000000000000..e683630c8ce2
--- /dev/null
+++ b/arch/ia64/kernel/acpi-processor.c
@@ -0,0 +1,67 @@
+/*
+ * arch/ia64/kernel/cpufreq/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ *	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *	- Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *) buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	pr->pdc = NULL;
+	init_intel_pdc(pr);
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 9ad94ddf6687..d2702c419cf8 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -567,16 +567,16 @@ void __init acpi_numa_arch_fixup(void)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	if (has_8259 && gsi < 16)
 		return isa_irq_to_vector(gsi);
 
 	return iosapic_register_intr(gsi,
-				     (active_high_low ==
+				     (polarity ==
 				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
 				     IOSAPIC_POL_LOW,
-				     (edge_level ==
+				     (triggering ==
 				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
 				     IOSAPIC_LEVEL);
 }
diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile
index f748d34c02f0..4838f2a57c7a 100644
--- a/arch/ia64/kernel/cpufreq/Makefile
+++ b/arch/ia64/kernel/cpufreq/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
+
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index da4d5cf80a48..5a1bf815282d 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -269,48 +269,6 @@ acpi_cpufreq_verify (
 }
 
 
-/*
- * processor_init_pdc - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly. Actual call to _PDC is done in driver/acpi/processor.c
- */
-static void
-processor_init_pdc (
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	union acpi_object *obj;
-	u32 *buf;
-
-	dprintk("processor_init_pdc\n");
-
-	perf->pdc = NULL;
-	/* Initialize pdc. It will be used later. */
-	if (!obj_list)
-		return;
-
-	if (!(obj_list->count && obj_list->pointer))
-		return;
-
-	obj = obj_list->pointer;
-	if ((obj->buffer.length == 12) && obj->buffer.pointer) {
-		buf = (u32 *)obj->buffer.pointer;
-		buf[0] = ACPI_PDC_REVISION_ID;
-		buf[1] = 1;
-		buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-		perf->pdc = obj_list;
-	}
-	return;
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy *policy)
@@ -320,14 +278,7 @@ acpi_cpufreq_cpu_init (
 	struct cpufreq_acpi_io	*data;
 	unsigned int		result = 0;
 
-	union acpi_object	arg0 = {ACPI_TYPE_BUFFER};
-	u32			arg0_buf[3];
-	struct acpi_object_list	arg_list = {1, &arg0};
-
 	dprintk("acpi_cpufreq_cpu_init\n");
-	/* setup arg_list for _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
 
 	data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
@@ -337,9 +288,7 @@ acpi_cpufreq_cpu_init (
 
 	acpi_io_data[cpu] = data;
 
-	processor_init_pdc(&data->acpi_data, cpu, &arg_list);
 	result = acpi_processor_register_performance(&data->acpi_data, cpu);
-	data->acpi_data.pdc = NULL;
 
 	if (result)
 		goto err_free;
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a3aa45cbcfa0..c485a3b32ba8 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -247,6 +247,32 @@ typedef struct kern_memdesc {
 
 static kern_memdesc_t *kern_memmap;
 
+#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
+
+static inline u64
+kmd_end(kern_memdesc_t *kmd)
+{
+	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
+}
+
+static inline u64
+efi_md_end(efi_memory_desc_t *md)
+{
+	return (md->phys_addr + efi_md_size(md));
+}
+
+static inline int
+efi_wb(efi_memory_desc_t *md)
+{
+	return (md->attribute & EFI_MEMORY_WB);
+}
+
+static inline int
+efi_uc(efi_memory_desc_t *md)
+{
+	return (md->attribute & EFI_MEMORY_UC);
+}
+
 static void
 walk (efi_freemem_callback_t callback, void *arg, u64 attr)
 {
@@ -595,8 +621,8 @@ efi_get_iobase (void)
 	return 0;
 }
 
-u32
-efi_mem_type (unsigned long phys_addr)
+static efi_memory_desc_t *
+efi_memory_descriptor (unsigned long phys_addr)
 {
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
@@ -610,13 +636,13 @@ efi_mem_type (unsigned long phys_addr)
 		md = p;
 
 		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
-			return md->type;
+			return md;
 	}
 	return 0;
 }
 
-u64
-efi_mem_attributes (unsigned long phys_addr)
+static int
+efi_memmap_has_mmio (void)
 {
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
@@ -629,36 +655,98 @@ efi_mem_attributes (unsigned long phys_addr)
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
 
-		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
-			return md->attribute;
+		if (md->type == EFI_MEMORY_MAPPED_IO)
+			return 1;
 	}
 	return 0;
 }
+
+u32
+efi_mem_type (unsigned long phys_addr)
+{
+	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+
+	if (md)
+		return md->type;
+	return 0;
+}
+
+u64
+efi_mem_attributes (unsigned long phys_addr)
+{
+	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+
+	if (md)
+		return md->attribute;
+	return 0;
+}
 EXPORT_SYMBOL(efi_mem_attributes);
 
+/*
+ * Determines whether the memory at phys_addr supports the desired
+ * attribute (WB, UC, etc).  If this returns 1, the caller can safely
+ * access *size bytes at phys_addr with the specified attribute.
+ */
+static int
+efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr)
+{
+	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+	unsigned long md_end;
+
+	if (!md || (md->attribute & attr) != attr)
+		return 0;
+
+	do {
+		md_end = efi_md_end(md);
+		if (phys_addr + *size <= md_end)
+			return 1;
+
+		md = efi_memory_descriptor(md_end);
+		if (!md || (md->attribute & attr) != attr) {
+			*size = md_end - phys_addr;
+			return 1;
+		}
+	} while (md);
+	return 0;
+}
+
+/*
+ * For /dev/mem, we only allow read & write system calls to access
+ * write-back memory, because read & write don't allow the user to
+ * control access size.
+ */
 int
 valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
 {
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size;
+	return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
+}
 
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+/*
+ * We allow mmap of anything in the EFI memory map that supports
+ * either write-back or uncacheable access.  For uncacheable regions,
+ * the supported access sizes are system-dependent, and the user is
+ * responsible for using the correct size.
+ *
+ * Note that this doesn't currently allow access to hot-added memory,
+ * because that doesn't appear in the boot-time EFI memory map.
+ */
+int
+valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+{
+	if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
+		return 1;
 
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
+	if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
+		return 1;
 
-		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
-			if (!(md->attribute & EFI_MEMORY_WB))
-				return 0;
+	/*
+	 * Some firmware doesn't report MMIO regions in the EFI memory map.
+	 * The Intel BigSur (a.k.a. HP i2000) has this problem.  In this
+	 * case, we can't use the EFI memory map to validate mmap requests.
+	 */
+	if (!efi_memmap_has_mmio())
+		return 1;
 
-			if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
-				*size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
-			return 1;
-		}
-	}
 	return 0;
 }
 
@@ -707,32 +795,6 @@ efi_uart_console_only(void)
 	return 0;
 }
 
-#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
-
-static inline u64
-kmd_end(kern_memdesc_t *kmd)
-{
-	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
-}
-
-static inline u64
-efi_md_end(efi_memory_desc_t *md)
-{
-	return (md->phys_addr + efi_md_size(md));
-}
-
-static inline int
-efi_wb(efi_memory_desc_t *md)
-{
-	return (md->attribute & EFI_MEMORY_WB);
-}
-
-static inline int
-efi_uc(efi_memory_desc_t *md)
-{
-	return (md->attribute & EFI_MEMORY_UC);
-}
-
 /*
  * Look for the first granule aligned memory descriptor memory
  * that is big enough to hold EFI memory map. Make sure this
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0741b066b98f..7a6ffd613789 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1600,5 +1600,6 @@ sys_call_table:
 	data8 sys_inotify_init
 	data8 sys_inotify_add_watch
 	data8 sys_inotify_rm_watch
+	data8 sys_migrate_pages			// 1280
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 2ddbac6f4999..ce423910ca97 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -903,5 +903,6 @@ fsyscall_table:
 	data8 0
 	data8 0
 	data8 0
+	data8 0				// 1280
 
 	.org fsyscall_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index bfe65b2e8621..fbc7ea35dd57 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1060,7 +1060,7 @@ SET_REG(b5);
  * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
  */
 
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
 
 GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
 	.prologue
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 5db9d3bcbbcb..e72de580ebbf 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL(unw_init_running);
 
 #ifdef ASM_SUPPORTED
 # ifdef CONFIG_SMP
-#  if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#  if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
 /*
  * This is not a normal routine and we don't want a function descriptor for it, so we use
  * a fake declaration here.
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index 2323377e3695..5cd6226f44f2 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -60,3 +60,30 @@ END(jprobe_break)
 GLOBAL_ENTRY(jprobe_inst_return)
 	br.call.sptk.many b0=jprobe_break
 END(jprobe_inst_return)
+
+GLOBAL_ENTRY(invalidate_stacked_regs)
+	movl r16=invalidate_restore_cfm
+	;;
+	mov b6=r16
+	;;
+	br.ret.sptk.many b6
+	;;
+invalidate_restore_cfm:
+	mov r16=ar.rsc
+	;;
+	mov ar.rsc=r0
+	;;
+	loadrs
+	;;
+	mov ar.rsc=r16
+	;;
+	br.cond.sptk.many rp
+END(invalidate_stacked_regs)
+
+GLOBAL_ENTRY(flush_register_stack)
+	// flush dirty regs to backing store (must be first in insn group)
+	flushrs
+	;;
+	br.ret.sptk.many rp
+END(flush_register_stack)
+
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 89a70400c4f6..50ae8c7d453d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -467,10 +467,6 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
 
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
-}
-
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
@@ -642,6 +638,13 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		if (p->break_handler && p->break_handler(p, regs)) {
 			goto ss_probe;
 		}
+	} else if (!is_ia64_break_inst(regs)) {
+		/* The breakpoint instruction was removed by
+		 * another cpu right after we hit, no further
+		 * handling of this interrupt is appropriate
+		 */
+		ret = 1;
+		goto no_kprobe;
 	} else {
 		/* Not our break */
 		goto no_kprobe;
@@ -763,11 +766,56 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	return ret;
 }
 
+struct param_bsp_cfm {
+	unsigned long ip;
+	unsigned long *bsp;
+	unsigned long cfm;
+};
+
+static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
+{
+	unsigned long ip;
+	struct param_bsp_cfm *lp = arg;
+
+	do {
+		unw_get_ip(info, &ip);
+		if (ip == 0)
+			break;
+		if (ip == lp->ip) {
+			unw_get_bsp(info, (unsigned long*)&lp->bsp);
+			unw_get_cfm(info, (unsigned long*)&lp->cfm);
+			return;
+		}
+	} while (unw_unwind(info) >= 0);
+	lp->bsp = 0;
+	lp->cfm = 0;
+	return;
+}
+
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct param_bsp_cfm pa;
+	int bytes;
+
+	/*
+	 * Callee owns the argument space and could overwrite it, eg
+	 * tail call optimization. So to be absolutely safe
+	 * we save the argument space before transfering the control
+	 * to instrumented jprobe function which runs in
+	 * the process context
+	 */
+	pa.ip = regs->cr_iip;
+	unw_init_running(ia64_get_bsp_cfm, &pa);
+	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
+				- (char *)pa.bsp;
+	memcpy( kcb->jprobes_saved_stacked_regs,
+		pa.bsp,
+		bytes );
+	kcb->bsp = pa.bsp;
+	kcb->cfm = pa.cfm;
 
 	/* save architectural state */
 	kcb->jprobe_saved_regs = *regs;
@@ -789,8 +837,20 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	int bytes;
 
+	/* restoring architectural state */
 	*regs = kcb->jprobe_saved_regs;
+
+	/* restoring the original argument space */
+	flush_register_stack();
+	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
+				- (char *)kcb->bsp;
+	memcpy( kcb->bsp,
+		kcb->jprobes_saved_stacked_regs,
+		bytes );
+	invalidate_stacked_regs();
+
 	preempt_enable_no_resched();
 	return 1;
 }
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 355af15287c7..ee7eec9ee576 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -766,7 +766,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 		l = strlen(previous_current->comm);
 		snprintf(comm, sizeof(comm), "%s %*s %d",
 			current->comm, l, previous_current->comm,
-			previous_current->thread_info->cpu);
+			task_thread_info(previous_current)->cpu);
 	}
 	memcpy(current->comm, comm, sizeof(current->comm));
 
@@ -1423,7 +1423,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
 	struct thread_info *ti;
 	memset(p, 0, KERNEL_STACK_SIZE);
-	ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
+	ti = task_thread_info(p);
 	ti->flags = _TIF_MCA_INIT;
 	ti->preempt_count = 1;
 	ti->task = p;
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index db32fc1d3935..403a80a58c13 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -847,7 +847,7 @@ ia64_state_restore:
 	;;
 	mov cr.iim=temp3
 	mov cr.iha=temp4
-	dep r22=0,r22,62,2	// pal_min_state, physical, uncached
+	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
 	mov IA64_KR(CURRENT)=r21
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 410d4804fa6e..9c5194b385da 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -38,7 +38,9 @@
 #include <linux/pagemap.h>
 #include <linux/mount.h>
 #include <linux/bitops.h>
+#include <linux/capability.h>
 #include <linux/rcupdate.h>
+#include <linux/completion.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -285,7 +287,7 @@ typedef struct pfm_context {
 
 	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */
 
-	struct semaphore	ctx_restart_sem;	/* use for blocking notification mode */
+	struct completion	ctx_restart_done;	/* use for blocking notification mode */
 
 	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used            */
 	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
@@ -627,9 +629,11 @@ static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count,
 
 #include "perfmon_itanium.h"
 #include "perfmon_mckinley.h"
+#include "perfmon_montecito.h"
 #include "perfmon_generic.h"
 
 static pmu_config_t *pmu_confs[]={
+	&pmu_conf_mont,
 	&pmu_conf_mck,
 	&pmu_conf_ita,
 	&pmu_conf_gen, /* must be last */
@@ -1709,7 +1713,7 @@ static void
 pfm_syswide_force_stop(void *info)
 {
 	pfm_context_t	*ctx = (pfm_context_t *)info;
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	struct task_struct *owner;
 	unsigned long flags;
 	int ret;
@@ -1814,7 +1818,7 @@ pfm_flush(struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1944,7 +1948,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1988,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp)
 		/*
 		 * force task to wake up from MASKED state
 		 */
-		up(&ctx->ctx_restart_sem);
+		complete(&ctx->ctx_restart_done);
 
 		DPRINT(("waking up ctx_state=%d\n", state));
 
@@ -2703,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * init restart semaphore to locked
 	 */
-	sema_init(&ctx->ctx_restart_sem, 0);
+	init_completion(&ctx->ctx_restart_done);
 
 	/*
 	 * activation is used in SMP only
@@ -3684,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
 		DPRINT(("unblocking [%d] \n", task->pid));
-		up(&ctx->ctx_restart_sem);
+		complete(&ctx->ctx_restart_done);
 	} else {
 		DPRINT(("[%d] armed exit trap\n", task->pid));
 
@@ -4051,7 +4055,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		ia64_psr(regs)->up = 0;
 	} else {
-		tregs = ia64_task_regs(task);
+		tregs = task_pt_regs(task);
 
 		/*
 		 * stop monitoring at the user level
@@ -4133,7 +4137,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		ia64_psr(regs)->up = 1;
 
 	} else {
-		tregs = ia64_task_regs(ctx->ctx_task);
+		tregs = task_pt_regs(ctx->ctx_task);
 
 		/*
 		 * start monitoring at the kernel level the next
@@ -4403,7 +4407,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * when not current, task MUST be stopped, so this is safe
 		 */
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 
 		/* force a full reload */
 		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4529,7 +4533,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
4529 /* 4533 /*
4530 * per-task mode 4534 * per-task mode
4531 */ 4535 */
4532 tregs = task == current ? regs : ia64_task_regs(task); 4536 tregs = task == current ? regs : task_pt_regs(task);
4533 4537
4534 if (task == current) { 4538 if (task == current) {
4535 /* 4539 /*
@@ -4592,7 +4596,7 @@ pfm_exit_thread(struct task_struct *task)
4592{ 4596{
4593 pfm_context_t *ctx; 4597 pfm_context_t *ctx;
4594 unsigned long flags; 4598 unsigned long flags;
4595 struct pt_regs *regs = ia64_task_regs(task); 4599 struct pt_regs *regs = task_pt_regs(task);
4596 int ret, state; 4600 int ret, state;
4597 int free_ok = 0; 4601 int free_ok = 0;
4598 4602
@@ -4925,7 +4929,7 @@ restart_args:
4925 if (unlikely(ret)) goto abort_locked; 4929 if (unlikely(ret)) goto abort_locked;
4926 4930
4927skip_fd: 4931skip_fd:
4928 ret = (*func)(ctx, args_k, count, ia64_task_regs(current)); 4932 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4929 4933
4930 call_made = 1; 4934 call_made = 1;
4931 4935
@@ -5049,7 +5053,7 @@ pfm_handle_work(void)
5049 5053
5050 pfm_clear_task_notify(); 5054 pfm_clear_task_notify();
5051 5055
5052 regs = ia64_task_regs(current); 5056 regs = task_pt_regs(current);
5053 5057
5054 /* 5058 /*
5055 * extract reason for being here and clear 5059 * extract reason for being here and clear
@@ -5086,7 +5090,7 @@ pfm_handle_work(void)
5086 * may go through without blocking on SMP systems 5090 * may go through without blocking on SMP systems
5087 * if restart has been received already by the time we call down() 5091 * if restart has been received already by the time we call down()
5088 */ 5092 */
5089 ret = down_interruptible(&ctx->ctx_restart_sem); 5093 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5090 5094
5091 DPRINT(("after block sleeping ret=%d\n", ret)); 5095 DPRINT(("after block sleeping ret=%d\n", ret));
5092 5096
@@ -5793,7 +5797,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
5793 * on every CPU, so we can rely on the pid to identify the idle task. 5797 * on every CPU, so we can rely on the pid to identify the idle task.
5794 */ 5798 */
5795 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) { 5799 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5796 regs = ia64_task_regs(task); 5800 regs = task_pt_regs(task);
5797 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0; 5801 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5798 return; 5802 return;
5799 } 5803 }
@@ -5876,7 +5880,7 @@ pfm_save_regs(struct task_struct *task)
5876 flags = pfm_protect_ctx_ctxsw(ctx); 5880 flags = pfm_protect_ctx_ctxsw(ctx);
5877 5881
5878 if (ctx->ctx_state == PFM_CTX_ZOMBIE) { 5882 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5879 struct pt_regs *regs = ia64_task_regs(task); 5883 struct pt_regs *regs = task_pt_regs(task);
5880 5884
5881 pfm_clear_psr_up(); 5885 pfm_clear_psr_up();
5882 5886
@@ -6076,7 +6080,7 @@ pfm_load_regs (struct task_struct *task)
6076 BUG_ON(psr & IA64_PSR_I); 6080 BUG_ON(psr & IA64_PSR_I);
6077 6081
6078 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) { 6082 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6079 struct pt_regs *regs = ia64_task_regs(task); 6083 struct pt_regs *regs = task_pt_regs(task);
6080 6084
6081 BUG_ON(ctx->ctx_smpl_hdr); 6085 BUG_ON(ctx->ctx_smpl_hdr);
6082 6086
@@ -6445,7 +6449,7 @@ pfm_alt_save_pmu_state(void *data)
6445{ 6449{
6446 struct pt_regs *regs; 6450 struct pt_regs *regs;
6447 6451
6448 regs = ia64_task_regs(current); 6452 regs = task_pt_regs(current);
6449 6453
6450 DPRINT(("called\n")); 6454 DPRINT(("called\n"));
6451 6455
@@ -6471,7 +6475,7 @@ pfm_alt_restore_pmu_state(void *data)
6471{ 6475{
6472 struct pt_regs *regs; 6476 struct pt_regs *regs;
6473 6477
6474 regs = ia64_task_regs(current); 6478 regs = task_pt_regs(current);
6475 6479
6476 DPRINT(("called\n")); 6480 DPRINT(("called\n"));
6477 6481
@@ -6753,7 +6757,7 @@ dump_pmu_state(const char *from)
6753 local_irq_save(flags); 6757 local_irq_save(flags);
6754 6758
6755 this_cpu = smp_processor_id(); 6759 this_cpu = smp_processor_id();
6756 regs = ia64_task_regs(current); 6760 regs = task_pt_regs(current);
6757 info = PFM_CPUINFO_GET(); 6761 info = PFM_CPUINFO_GET();
6758 dcr = ia64_getreg(_IA64_REG_CR_DCR); 6762 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6759 6763
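
The recurring perfmon.c change above replaces the ctx_restart semaphore with a completion. A semaphore initialized to zero, with one side calling down() and the other up(), is exactly the one-shot wakeup that completions implement directly, and the completion API cannot be confused by a drifting count. A minimal sketch of the converted pattern, with illustrative names rather than perfmon's own:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(restart_done);    /* starts "not done", like sema_init(&sem, 0) */

    /* blocking side -- replaces down_interruptible(&sem) */
    static int wait_for_restart(void)
    {
            /* 0 on completion, -ERESTARTSYS if a signal arrives first */
            return wait_for_completion_interruptible(&restart_done);
    }

    /* waking side -- replaces up(&sem) */
    static void signal_restart(void)
    {
            complete(&restart_done);
    }
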
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
new file mode 100644
index 000000000000..cd06ac6a686c
--- /dev/null
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -0,0 +1,269 @@
1/*
2 * This file contains the Montecito PMU register description tables
3 * and pmc checker used by perfmon.c.
4 *
5 * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
6 * Contributed by Stephane Eranian <eranian@hpl.hp.com>
7 */
8static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
9
10#define RDEP_MONT_ETB (RDEP(38)|RDEP(39)|RDEP(48)|RDEP(49)|RDEP(50)|RDEP(51)|RDEP(52)|RDEP(53)|RDEP(54)|\
11 RDEP(55)|RDEP(56)|RDEP(57)|RDEP(58)|RDEP(59)|RDEP(60)|RDEP(61)|RDEP(62)|RDEP(63))
12#define RDEP_MONT_DEAR (RDEP(32)|RDEP(33)|RDEP(36))
13#define RDEP_MONT_IEAR (RDEP(34)|RDEP(35))
14
15static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
16/* pmc0 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
17/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
18/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
19/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
20/* pmc4 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(4),0, 0, 0}, {0,0, 0, 0}},
21/* pmc5 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(5),0, 0, 0}, {0,0, 0, 0}},
22/* pmc6 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(6),0, 0, 0}, {0,0, 0, 0}},
23/* pmc7 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(7),0, 0, 0}, {0,0, 0, 0}},
24/* pmc8 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(8),0, 0, 0}, {0,0, 0, 0}},
25/* pmc9 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(9),0, 0, 0}, {0,0, 0, 0}},
26/* pmc10 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(10),0, 0, 0}, {0,0, 0, 0}},
27/* pmc11 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(11),0, 0, 0}, {0,0, 0, 0}},
28/* pmc12 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(12),0, 0, 0}, {0,0, 0, 0}},
29/* pmc13 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(13),0, 0, 0}, {0,0, 0, 0}},
30/* pmc14 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(14),0, 0, 0}, {0,0, 0, 0}},
31/* pmc15 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(15),0, 0, 0}, {0,0, 0, 0}},
32/* pmc16 */ { PFM_REG_NOTIMPL, },
33/* pmc17 */ { PFM_REG_NOTIMPL, },
34/* pmc18 */ { PFM_REG_NOTIMPL, },
35/* pmc19 */ { PFM_REG_NOTIMPL, },
36/* pmc20 */ { PFM_REG_NOTIMPL, },
37/* pmc21 */ { PFM_REG_NOTIMPL, },
38/* pmc22 */ { PFM_REG_NOTIMPL, },
39/* pmc23 */ { PFM_REG_NOTIMPL, },
40/* pmc24 */ { PFM_REG_NOTIMPL, },
41/* pmc25 */ { PFM_REG_NOTIMPL, },
42/* pmc26 */ { PFM_REG_NOTIMPL, },
43/* pmc27 */ { PFM_REG_NOTIMPL, },
44/* pmc28 */ { PFM_REG_NOTIMPL, },
45/* pmc29 */ { PFM_REG_NOTIMPL, },
46/* pmc30 */ { PFM_REG_NOTIMPL, },
47/* pmc31 */ { PFM_REG_NOTIMPL, },
48/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
49/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
50/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
51/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
52/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
53/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
54/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
55/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
56/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
57/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
58/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
59 { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
60};
61
62static pfm_reg_desc_t pfm_mont_pmd_desc[PMU_MAX_PMDS]={
63/* pmd0 */ { PFM_REG_NOTIMPL, },
64/* pmd1 */ { PFM_REG_NOTIMPL, },
65/* pmd2 */ { PFM_REG_NOTIMPL, },
66/* pmd3 */ { PFM_REG_NOTIMPL, },
67/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(4),0, 0, 0}},
68/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(5),0, 0, 0}},
69/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(6),0, 0, 0}},
70/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(7),0, 0, 0}},
71/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(8),0, 0, 0}},
72/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(9),0, 0, 0}},
73/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(10),0, 0, 0}},
74/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(11),0, 0, 0}},
75/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(12),0, 0, 0}},
76/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(13),0, 0, 0}},
77/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(14),0, 0, 0}},
78/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(15),0, 0, 0}},
79/* pmd16 */ { PFM_REG_NOTIMPL, },
80/* pmd17 */ { PFM_REG_NOTIMPL, },
81/* pmd18 */ { PFM_REG_NOTIMPL, },
82/* pmd19 */ { PFM_REG_NOTIMPL, },
83/* pmd20 */ { PFM_REG_NOTIMPL, },
84/* pmd21 */ { PFM_REG_NOTIMPL, },
85/* pmd22 */ { PFM_REG_NOTIMPL, },
86/* pmd23 */ { PFM_REG_NOTIMPL, },
87/* pmd24 */ { PFM_REG_NOTIMPL, },
88/* pmd25 */ { PFM_REG_NOTIMPL, },
89/* pmd26 */ { PFM_REG_NOTIMPL, },
90/* pmd27 */ { PFM_REG_NOTIMPL, },
91/* pmd28 */ { PFM_REG_NOTIMPL, },
92/* pmd29 */ { PFM_REG_NOTIMPL, },
93/* pmd30 */ { PFM_REG_NOTIMPL, },
94/* pmd31 */ { PFM_REG_NOTIMPL, },
95/* pmd32 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(33)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
96/* pmd33 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
97/* pmd34 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(35),0, 0, 0}, {RDEP(37),0, 0, 0}},
98/* pmd35 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(34),0, 0, 0}, {RDEP(37),0, 0, 0}},
99/* pmd36 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(33),0, 0, 0}, {RDEP(40),0, 0, 0}},
100/* pmd37 */ { PFM_REG_NOTIMPL, },
101/* pmd38 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
102/* pmd39 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
103/* pmd40 */ { PFM_REG_NOTIMPL, },
104/* pmd41 */ { PFM_REG_NOTIMPL, },
105/* pmd42 */ { PFM_REG_NOTIMPL, },
106/* pmd43 */ { PFM_REG_NOTIMPL, },
107/* pmd44 */ { PFM_REG_NOTIMPL, },
108/* pmd45 */ { PFM_REG_NOTIMPL, },
109/* pmd46 */ { PFM_REG_NOTIMPL, },
110/* pmd47 */ { PFM_REG_NOTIMPL, },
111/* pmd48 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
112/* pmd49 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
113/* pmd50 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
114/* pmd51 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
115/* pmd52 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
116/* pmd53 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
117/* pmd54 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
118/* pmd55 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
119/* pmd56 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
120/* pmd57 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
121/* pmd58 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
122/* pmd59 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
123/* pmd60 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
124/* pmd61 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
125/* pmd62 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
126/* pmd63 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
127 { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
128};
129
130/*
131 * PMC reserved fields must have their power-up values preserved
132 */
133static int
134pfm_mont_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
135{
136 unsigned long tmp1, tmp2, ival = *val;
137
138 /* remove reserved areas from user value */
139 tmp1 = ival & PMC_RSVD_MASK(cnum);
140
141 /* get reserved fields values */
142 tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
143
144 *val = tmp1 | tmp2;
145
146 DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
147 cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
148 return 0;
149}
150
151/*
152 * task can be NULL if the context is unloaded
153 */
154static int
155pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
156{
157 int ret = 0;
158 unsigned long val32 = 0, val38 = 0, val41 = 0;
159 unsigned long tmpval;
160 int check_case1 = 0;
161 int is_loaded;
162
163 /* first preserve the reserved fields */
164 pfm_mont_reserved(cnum, val, regs);
165
166 tmpval = *val;
167
168 /* sanity check */
169 if (ctx == NULL) return -EINVAL;
170
171 is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
172
173 /*
174 * we must clear the debug registers if pmc41 has a value which enables
175 * memory pipeline event constraints. In this case we need to clear
176 * the debug registers if they have not yet been accessed. This is
177 * required to avoid picking up stale state.
178 * PMC41 is "active" if:
179 * one of the pmc41.cfg_dtagXX fields is different from 0x3
180 * AND
181 * the corresponding pmc41.en_dbrpXX bit is set.
182 * AND
183 * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
184 */
185 DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
186
187 if (cnum == 41 && is_loaded
188 && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
189
190 DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
191
192 /* don't mix debug with perfmon */
193 if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
194
195 /*
196 * a count of 0 will mark the debug registers as in use and also
197 * ensure that they are properly cleared.
198 */
199 ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
200 if (ret) return ret;
201 }
202 /*
203 * we must clear the (instruction) debug registers if:
204 * pmc38.ig_ibrpX is 0 (enabled)
205 * AND
206 * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
207 */
208 if (cnum == 38 && is_loaded && ((tmpval & 0x492UL) != 0x492UL) && ctx->ctx_fl_using_dbreg == 0) {
209
210 DPRINT(("pmc38=0x%lx has active pmc38 settings, clearing ibr\n", tmpval));
211
212 /* don't mix debug with perfmon */
213 if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
214
215 /*
216 * a count of 0 will mark the debug registers as in use and also
217 * ensure that they are properly cleared.
218 */
219 ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
220 if (ret) return ret;
221
222 }
223 switch(cnum) {
224 case 32: val32 = *val;
225 val38 = ctx->ctx_pmcs[38];
226 val41 = ctx->ctx_pmcs[41];
227 check_case1 = 1;
228 break;
229 case 38: val38 = *val;
230 val32 = ctx->ctx_pmcs[32];
231 val41 = ctx->ctx_pmcs[41];
232 check_case1 = 1;
233 break;
234 case 41: val41 = *val;
235 val32 = ctx->ctx_pmcs[32];
236 val38 = ctx->ctx_pmcs[38];
237 check_case1 = 1;
238 break;
239 }
240 /* check for an illegal configuration which can produce inconsistencies
241 * in tagging i-side events in the L1D and L2 caches
242 */
243 if (check_case1) {
244 ret = (((val41 >> 45) & 0xf) == 0 && ((val32>>57) & 0x1) == 0)
245 && ((((val38>>1) & 0x3) == 0x2 || ((val38>>1) & 0x3) == 0)
246 || (((val38>>4) & 0x3) == 0x2 || ((val38>>4) & 0x3) == 0));
247 if (ret) {
248 DPRINT(("invalid config pmc38=0x%lx pmc41=0x%lx pmc32=0x%lx\n", val38, val41, val32));
249 return -EINVAL;
250 }
251 }
252 *val = tmpval;
253 return 0;
254}
255
256/*
257 * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
258 */
259static pmu_config_t pmu_conf_mont={
260 .pmu_name = "Montecito",
261 .pmu_family = 0x20,
262 .flags = PFM_PMU_IRQ_RESEND,
263 .ovfl_val = (1UL << 47) - 1,
264 .pmd_desc = pfm_mont_pmd_desc,
265 .pmc_desc = pfm_mont_pmc_desc,
266 .num_ibrs = 8,
267 .num_dbrs = 8,
268 .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
269};
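
The RDEP_MONT_* masks at the top of this header encode, as PMD bitmasks, which data registers each monitoring PMC feeds; the per-row dependency arrays in both tables are built from them. Assuming perfmon's usual one-bit definition of RDEP() (it lives in perfmon.c, not in this header), the expansion is plain bit arithmetic:

    /* assumed definition, as in perfmon.c */
    #define RDEP(x) (1UL << (x))

    /*
     * RDEP_MONT_DEAR == RDEP(32) | RDEP(33) | RDEP(36)
     *               == 0x0000001300000000UL
     * i.e. pmc40, the data-EAR monitor, depends on pmd32, pmd33 and pmd36,
     * matching the {RDEP_MONT_DEAR, 0, 0, 0} entry in its table row.
     */
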
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e9904c74d2ba..309d59658e5f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -328,7 +328,7 @@ ia64_save_extra (struct task_struct *task)
328#endif 328#endif
329 329
330#ifdef CONFIG_IA32_SUPPORT 330#ifdef CONFIG_IA32_SUPPORT
331 if (IS_IA32_PROCESS(ia64_task_regs(task))) 331 if (IS_IA32_PROCESS(task_pt_regs(task)))
332 ia32_save_state(task); 332 ia32_save_state(task);
333#endif 333#endif
334} 334}
@@ -353,7 +353,7 @@ ia64_load_extra (struct task_struct *task)
353#endif 353#endif
354 354
355#ifdef CONFIG_IA32_SUPPORT 355#ifdef CONFIG_IA32_SUPPORT
356 if (IS_IA32_PROCESS(ia64_task_regs(task))) 356 if (IS_IA32_PROCESS(task_pt_regs(task)))
357 ia32_load_state(task); 357 ia32_load_state(task);
358#endif 358#endif
359} 359}
@@ -488,7 +488,7 @@ copy_thread (int nr, unsigned long clone_flags,
488 * If we're cloning an IA32 task then save the IA32 extra 488 * If we're cloning an IA32 task then save the IA32 extra
489 * state from the current task to the new task 489 * state from the current task to the new task
490 */ 490 */
491 if (IS_IA32_PROCESS(ia64_task_regs(current))) { 491 if (IS_IA32_PROCESS(task_pt_regs(current))) {
492 ia32_save_state(p); 492 ia32_save_state(p);
493 if (clone_flags & CLONE_SETTLS) 493 if (clone_flags & CLONE_SETTLS)
494 retval = ia32_clone_tls(p, child_ptregs); 494 retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@ int
701kernel_thread_helper (int (*fn)(void *), void *arg) 701kernel_thread_helper (int (*fn)(void *), void *arg)
702{ 702{
703#ifdef CONFIG_IA32_SUPPORT 703#ifdef CONFIG_IA32_SUPPORT
704 if (IS_IA32_PROCESS(ia64_task_regs(current))) { 704 if (IS_IA32_PROCESS(task_pt_regs(current))) {
705 /* A kernel thread is always a 64-bit process. */ 705 /* A kernel thread is always a 64-bit process. */
706 current->thread.map_base = DEFAULT_MAP_BASE; 706 current->thread.map_base = DEFAULT_MAP_BASE;
707 current->thread.task_size = DEFAULT_TASK_SIZE; 707 current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@ flush_thread (void)
722 current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); 722 current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
723 ia64_drop_fpu(current); 723 ia64_drop_fpu(current);
724#ifdef CONFIG_IA32_SUPPORT 724#ifdef CONFIG_IA32_SUPPORT
725 if (IS_IA32_PROCESS(ia64_task_regs(current))) { 725 if (IS_IA32_PROCESS(task_pt_regs(current))) {
726 ia32_drop_partial_page_list(current); 726 ia32_drop_partial_page_list(current);
727 current->thread.task_size = IA32_PAGE_OFFSET; 727 current->thread.task_size = IA32_PAGE_OFFSET;
728 set_fs(USER_DS); 728 set_fs(USER_DS);
@@ -755,7 +755,7 @@ exit_thread (void)
755 if (current->thread.flags & IA64_THREAD_DBG_VALID) 755 if (current->thread.flags & IA64_THREAD_DBG_VALID)
756 pfm_release_debug_registers(current); 756 pfm_release_debug_registers(current);
757#endif 757#endif
758 if (IS_IA32_PROCESS(ia64_task_regs(current))) 758 if (IS_IA32_PROCESS(task_pt_regs(current)))
759 ia32_drop_partial_page_list(current); 759 ia32_drop_partial_page_list(current);
760} 760}
761 761
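
The ia64_task_regs() to task_pt_regs() rename running through this file (and through perfmon.c above and ptrace.c, setup.c and sys_ia64.c below) tracks the tree-wide introduction of a common accessor name for a task's saved user registers. On ia64 the pt_regs structure sits at the very top of the task's kernel stack, so the accessor is pure pointer arithmetic; a sketch of the shape, using an illustrative stack size in place of the real IA64_STK_OFFSET constant:

    /* sketch only: pt_regs live just below the top of the kernel stack */
    #define KSTACK_SIZE (32 * 1024)     /* stands in for IA64_STK_OFFSET */

    static inline struct pt_regs *task_pt_regs_sketch(struct task_struct *t)
    {
            return ((struct pt_regs *)((char *)t + KSTACK_SIZE)) - 1;
    }
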
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 4b19d0410632..eaed14aac6aa 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -254,7 +254,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
254 long num_regs, nbits; 254 long num_regs, nbits;
255 struct pt_regs *pt; 255 struct pt_regs *pt;
256 256
257 pt = ia64_task_regs(task); 257 pt = task_pt_regs(task);
258 kbsp = (unsigned long *) sw->ar_bspstore; 258 kbsp = (unsigned long *) sw->ar_bspstore;
259 ubspstore = (unsigned long *) pt->ar_bspstore; 259 ubspstore = (unsigned long *) pt->ar_bspstore;
260 260
@@ -314,7 +314,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
314 struct pt_regs *pt; 314 struct pt_regs *pt;
315 unsigned long cfm, *urbs_kargs; 315 unsigned long cfm, *urbs_kargs;
316 316
317 pt = ia64_task_regs(task); 317 pt = task_pt_regs(task);
318 kbsp = (unsigned long *) sw->ar_bspstore; 318 kbsp = (unsigned long *) sw->ar_bspstore;
319 ubspstore = (unsigned long *) pt->ar_bspstore; 319 ubspstore = (unsigned long *) pt->ar_bspstore;
320 320
@@ -407,7 +407,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
407 407
408 urbs_end = (long *) user_rbs_end; 408 urbs_end = (long *) user_rbs_end;
409 laddr = (unsigned long *) addr; 409 laddr = (unsigned long *) addr;
410 child_regs = ia64_task_regs(child); 410 child_regs = task_pt_regs(child);
411 bspstore = (unsigned long *) child_regs->ar_bspstore; 411 bspstore = (unsigned long *) child_regs->ar_bspstore;
412 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; 412 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
413 if (on_kernel_rbs(addr, (unsigned long) bspstore, 413 if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
467 struct pt_regs *child_regs; 467 struct pt_regs *child_regs;
468 468
469 laddr = (unsigned long *) addr; 469 laddr = (unsigned long *) addr;
470 child_regs = ia64_task_regs(child); 470 child_regs = task_pt_regs(child);
471 bspstore = (unsigned long *) child_regs->ar_bspstore; 471 bspstore = (unsigned long *) child_regs->ar_bspstore;
472 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; 472 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
473 if (on_kernel_rbs(addr, (unsigned long) bspstore, 473 if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@ thread_matches (struct task_struct *thread, unsigned long addr)
567 */ 567 */
568 return 0; 568 return 0;
569 569
570 thread_regs = ia64_task_regs(thread); 570 thread_regs = task_pt_regs(thread);
571 thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); 571 thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
572 if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) 572 if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
573 return 0; 573 return 0;
@@ -627,7 +627,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
627inline void 627inline void
628ia64_flush_fph (struct task_struct *task) 628ia64_flush_fph (struct task_struct *task)
629{ 629{
630 struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); 630 struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
631 631
632 /* 632 /*
633 * Prevent migrating this task while 633 * Prevent migrating this task while
@@ -653,7 +653,7 @@ ia64_flush_fph (struct task_struct *task)
653void 653void
654ia64_sync_fph (struct task_struct *task) 654ia64_sync_fph (struct task_struct *task)
655{ 655{
656 struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); 656 struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
657 657
658 ia64_flush_fph(task); 658 ia64_flush_fph(task);
659 if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) { 659 if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@ access_uarea (struct task_struct *child, unsigned long addr,
794 + offsetof(struct pt_regs, reg))) 794 + offsetof(struct pt_regs, reg)))
795 795
796 796
797 pt = ia64_task_regs(child); 797 pt = task_pt_regs(child);
798 sw = (struct switch_stack *) (child->thread.ksp + 16); 798 sw = (struct switch_stack *) (child->thread.ksp + 16);
799 799
800 if ((addr & 0x7) != 0) { 800 if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1120 if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs))) 1120 if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
1121 return -EIO; 1121 return -EIO;
1122 1122
1123 pt = ia64_task_regs(child); 1123 pt = task_pt_regs(child);
1124 sw = (struct switch_stack *) (child->thread.ksp + 16); 1124 sw = (struct switch_stack *) (child->thread.ksp + 16);
1125 unw_init_from_blocked_task(&info, child); 1125 unw_init_from_blocked_task(&info, child);
1126 if (unw_unwind_to_user(&info) < 0) { 1126 if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1265 if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs))) 1265 if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1266 return -EIO; 1266 return -EIO;
1267 1267
1268 pt = ia64_task_regs(child); 1268 pt = task_pt_regs(child);
1269 sw = (struct switch_stack *) (child->thread.ksp + 16); 1269 sw = (struct switch_stack *) (child->thread.ksp + 16);
1270 unw_init_from_blocked_task(&info, child); 1270 unw_init_from_blocked_task(&info, child);
1271 if (unw_unwind_to_user(&info) < 0) { 1271 if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1403void 1403void
1404ptrace_disable (struct task_struct *child) 1404ptrace_disable (struct task_struct *child)
1405{ 1405{
1406 struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child)); 1406 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1407 1407
1408 /* make sure the single step/taken-branch trap bits are not set: */ 1408 /* make sure the single step/taken-branch trap bits are not set: */
1409 child_psr->ss = 0; 1409 child_psr->ss = 0;
@@ -1422,14 +1422,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1422 lock_kernel(); 1422 lock_kernel();
1423 ret = -EPERM; 1423 ret = -EPERM;
1424 if (request == PTRACE_TRACEME) { 1424 if (request == PTRACE_TRACEME) {
1425 /* are we already being traced? */ 1425 ret = ptrace_traceme();
1426 if (current->ptrace & PT_PTRACED)
1427 goto out;
1428 ret = security_ptrace(current->parent, current);
1429 if (ret)
1430 goto out;
1431 current->ptrace |= PT_PTRACED;
1432 ret = 0;
1433 goto out; 1426 goto out;
1434 } 1427 }
1435 1428
@@ -1463,7 +1456,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1463 if (ret < 0) 1456 if (ret < 0)
1464 goto out_tsk; 1457 goto out_tsk;
1465 1458
1466 pt = ia64_task_regs(child); 1459 pt = task_pt_regs(child);
1467 sw = (struct switch_stack *) (child->thread.ksp + 16); 1460 sw = (struct switch_stack *) (child->thread.ksp + 16);
1468 1461
1469 switch (request) { 1462 switch (request) {
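
The deleted PTRACE_TRACEME block is not lost: it moved into the generic ptrace_traceme() helper, which performs the same steps for every architecture. Condensed from the lines removed above (the real helper in kernel/ptrace.c additionally serializes against the task lock):

    int ptrace_traceme(void)                    /* sketch of the generic helper */
    {
            int ret;

            if (current->ptrace & PT_PTRACED)   /* already being traced? */
                    return -EPERM;

            ret = security_ptrace(current->parent, current);
            if (ret)
                    return ret;

            current->ptrace |= PT_PTRACED;
            return 0;
    }
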
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 1461dc660b43..9d5a823479a3 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Creates entries in /proc/sal for various system features. 4 * Creates entries in /proc/sal for various system features.
5 * 5 *
6 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved.
7 * Copyright (c) 2003 Hewlett-Packard Co 7 * Copyright (c) 2003 Hewlett-Packard Co
8 * Bjorn Helgaas <bjorn.helgaas@hp.com> 8 * Bjorn Helgaas <bjorn.helgaas@hp.com>
9 * 9 *
@@ -27,8 +27,17 @@
27 * mca.c may not pass a buffer, a NULL buffer just indicates that a new 27 * mca.c may not pass a buffer, a NULL buffer just indicates that a new
28 * record is available in SAL. 28 * record is available in SAL.
29 * Replace some NR_CPUS by cpus_online, for hotplug cpu. 29 * Replace some NR_CPUS by cpus_online, for hotplug cpu.
30 *
31 * Jan 5 2006 kaos@sgi.com
32 * Handle hotplug cpus coming online.
33 * Handle hotplug cpus going offline while they still have outstanding records.
34 * Use the cpu_* macros consistently.
35 * Replace the counting semaphore with a mutex and a test if the cpumask is non-empty.
36 * Modify the locking to make the test for "work to do" an atomic operation.
30 */ 37 */
31 38
39#include <linux/capability.h>
40#include <linux/cpu.h>
32#include <linux/types.h> 41#include <linux/types.h>
33#include <linux/proc_fs.h> 42#include <linux/proc_fs.h>
34#include <linux/module.h> 43#include <linux/module.h>
@@ -131,8 +140,8 @@ enum salinfo_state {
131}; 140};
132 141
133struct salinfo_data { 142struct salinfo_data {
134 volatile cpumask_t cpu_event; /* which cpus have outstanding events */ 143 cpumask_t cpu_event; /* which cpus have outstanding events */
135 struct semaphore sem; /* count of cpus with outstanding events (bits set in cpu_event) */ 144 struct semaphore mutex;
136 u8 *log_buffer; 145 u8 *log_buffer;
137 u64 log_size; 146 u64 log_size;
138 u8 *oemdata; /* decoded oem data */ 147 u8 *oemdata; /* decoded oem data */
@@ -173,6 +182,21 @@ struct salinfo_platform_oemdata_parms {
173 int ret; 182 int ret;
174}; 183};
175 184
185/* Kick the mutex that tells user space that there is work to do. Instead of
186 * trying to track the state of the mutex across multiple cpus, in user
187 * context, interrupt context, non-maskable interrupt context and hotplug cpu,
188 * it is far easier just to grab the mutex if it is free and then release it.
189 *
190 * This routine must be called with data_saved_lock held, to make the down/up
191 * operation atomic.
192 */
193static void
194salinfo_work_to_do(struct salinfo_data *data)
195{
196 down_trylock(&data->mutex);
197 up(&data->mutex);
198}
199
176static void 200static void
177salinfo_platform_oemdata_cpu(void *context) 201salinfo_platform_oemdata_cpu(void *context)
178{ 202{
@@ -211,9 +235,9 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
211 235
212 BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); 236 BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
213 237
238 if (irqsafe)
239 spin_lock_irqsave(&data_saved_lock, flags);
214 if (buffer) { 240 if (buffer) {
215 if (irqsafe)
216 spin_lock_irqsave(&data_saved_lock, flags);
217 for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { 241 for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
218 if (!data_saved->buffer) 242 if (!data_saved->buffer)
219 break; 243 break;
@@ -231,13 +255,11 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
231 data_saved->size = size; 255 data_saved->size = size;
232 data_saved->buffer = buffer; 256 data_saved->buffer = buffer;
233 } 257 }
234 if (irqsafe)
235 spin_unlock_irqrestore(&data_saved_lock, flags);
236 } 258 }
237 259 cpu_set(smp_processor_id(), data->cpu_event);
238 if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) { 260 if (irqsafe) {
239 if (irqsafe) 261 salinfo_work_to_do(data);
240 up(&data->sem); 262 spin_unlock_irqrestore(&data_saved_lock, flags);
241 } 263 }
242} 264}
243 265
@@ -248,20 +270,17 @@ static struct timer_list salinfo_timer;
248static void 270static void
249salinfo_timeout_check(struct salinfo_data *data) 271salinfo_timeout_check(struct salinfo_data *data)
250{ 272{
251 int i; 273 unsigned long flags;
252 if (!data->open) 274 if (!data->open)
253 return; 275 return;
254 for_each_online_cpu(i) { 276 if (!cpus_empty(data->cpu_event)) {
255 if (test_bit(i, &data->cpu_event)) { 277 spin_lock_irqsave(&data_saved_lock, flags);
256 /* double up() is not a problem, user space will see no 278 salinfo_work_to_do(data);
257 * records for the additional "events". 279 spin_unlock_irqrestore(&data_saved_lock, flags);
258 */
259 up(&data->sem);
260 }
261 } 280 }
262} 281}
263 282
264static void 283static void
265salinfo_timeout (unsigned long arg) 284salinfo_timeout (unsigned long arg)
266{ 285{
267 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA); 286 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
@@ -289,16 +308,20 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
289 int i, n, cpu = -1; 308 int i, n, cpu = -1;
290 309
291retry: 310retry:
292 if (down_trylock(&data->sem)) { 311 if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
293 if (file->f_flags & O_NONBLOCK) 312 if (file->f_flags & O_NONBLOCK)
294 return -EAGAIN; 313 return -EAGAIN;
295 if (down_interruptible(&data->sem)) 314 if (down_interruptible(&data->mutex))
296 return -EINTR; 315 return -EINTR;
297 } 316 }
298 317
299 n = data->cpu_check; 318 n = data->cpu_check;
300 for (i = 0; i < NR_CPUS; i++) { 319 for (i = 0; i < NR_CPUS; i++) {
301 if (test_bit(n, &data->cpu_event) && cpu_online(n)) { 320 if (cpu_isset(n, data->cpu_event)) {
321 if (!cpu_online(n)) {
322 cpu_clear(n, data->cpu_event);
323 continue;
324 }
302 cpu = n; 325 cpu = n;
303 break; 326 break;
304 } 327 }
@@ -309,9 +332,6 @@ retry:
309 if (cpu == -1) 332 if (cpu == -1)
310 goto retry; 333 goto retry;
311 334
312 /* events are sticky until the user says "clear" */
313 up(&data->sem);
314
315 /* for next read, start checking at next CPU */ 335 /* for next read, start checking at next CPU */
316 data->cpu_check = cpu; 336 data->cpu_check = cpu;
317 if (++data->cpu_check == NR_CPUS) 337 if (++data->cpu_check == NR_CPUS)
@@ -380,10 +400,8 @@ salinfo_log_release(struct inode *inode, struct file *file)
380static void 400static void
381call_on_cpu(int cpu, void (*fn)(void *), void *arg) 401call_on_cpu(int cpu, void (*fn)(void *), void *arg)
382{ 402{
383 cpumask_t save_cpus_allowed, new_cpus_allowed; 403 cpumask_t save_cpus_allowed = current->cpus_allowed;
384 memcpy(&save_cpus_allowed, &current->cpus_allowed, sizeof(save_cpus_allowed)); 404 cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
385 memset(&new_cpus_allowed, 0, sizeof(new_cpus_allowed));
386 set_bit(cpu, &new_cpus_allowed);
387 set_cpus_allowed(current, new_cpus_allowed); 405 set_cpus_allowed(current, new_cpus_allowed);
388 (*fn)(arg); 406 (*fn)(arg);
389 set_cpus_allowed(current, save_cpus_allowed); 407 set_cpus_allowed(current, save_cpus_allowed);
@@ -432,10 +450,10 @@ retry:
432 if (!data->saved_num) 450 if (!data->saved_num)
433 call_on_cpu(cpu, salinfo_log_read_cpu, data); 451 call_on_cpu(cpu, salinfo_log_read_cpu, data);
434 if (!data->log_size) { 452 if (!data->log_size) {
435 data->state = STATE_NO_DATA; 453 data->state = STATE_NO_DATA;
436 clear_bit(cpu, &data->cpu_event); 454 cpu_clear(cpu, data->cpu_event);
437 } else { 455 } else {
438 data->state = STATE_LOG_RECORD; 456 data->state = STATE_LOG_RECORD;
439 } 457 }
440} 458}
441 459
@@ -472,27 +490,31 @@ static int
472salinfo_log_clear(struct salinfo_data *data, int cpu) 490salinfo_log_clear(struct salinfo_data *data, int cpu)
473{ 491{
474 sal_log_record_header_t *rh; 492 sal_log_record_header_t *rh;
493 unsigned long flags;
494 spin_lock_irqsave(&data_saved_lock, flags);
475 data->state = STATE_NO_DATA; 495 data->state = STATE_NO_DATA;
476 if (!test_bit(cpu, &data->cpu_event)) 496 if (!cpu_isset(cpu, data->cpu_event)) {
497 spin_unlock_irqrestore(&data_saved_lock, flags);
477 return 0; 498 return 0;
478 down(&data->sem); 499 }
479 clear_bit(cpu, &data->cpu_event); 500 cpu_clear(cpu, data->cpu_event);
480 if (data->saved_num) { 501 if (data->saved_num) {
481 unsigned long flags; 502 shift1_data_saved(data, data->saved_num - 1);
482 spin_lock_irqsave(&data_saved_lock, flags);
483 shift1_data_saved(data, data->saved_num - 1 );
484 data->saved_num = 0; 503 data->saved_num = 0;
485 spin_unlock_irqrestore(&data_saved_lock, flags);
486 } 504 }
505 spin_unlock_irqrestore(&data_saved_lock, flags);
487 rh = (sal_log_record_header_t *)(data->log_buffer); 506 rh = (sal_log_record_header_t *)(data->log_buffer);
488 /* Corrected errors have already been cleared from SAL */ 507 /* Corrected errors have already been cleared from SAL */
489 if (rh->severity != sal_log_severity_corrected) 508 if (rh->severity != sal_log_severity_corrected)
490 call_on_cpu(cpu, salinfo_log_clear_cpu, data); 509 call_on_cpu(cpu, salinfo_log_clear_cpu, data);
491 /* clearing a record may make a new record visible */ 510 /* clearing a record may make a new record visible */
492 salinfo_log_new_read(cpu, data); 511 salinfo_log_new_read(cpu, data);
493 if (data->state == STATE_LOG_RECORD && 512 if (data->state == STATE_LOG_RECORD) {
494 !test_and_set_bit(cpu, &data->cpu_event)) 513 spin_lock_irqsave(&data_saved_lock, flags);
495 up(&data->sem); 514 cpu_set(cpu, data->cpu_event);
515 salinfo_work_to_do(data);
516 spin_unlock_irqrestore(&data_saved_lock, flags);
517 }
496 return 0; 518 return 0;
497} 519}
498 520
@@ -549,6 +571,53 @@ static struct file_operations salinfo_data_fops = {
549 .write = salinfo_log_write, 571 .write = salinfo_log_write,
550}; 572};
551 573
574#ifdef CONFIG_HOTPLUG_CPU
575static int __devinit
576salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
577{
578 unsigned int i, cpu = (unsigned long)hcpu;
579 unsigned long flags;
580 struct salinfo_data *data;
581 switch (action) {
582 case CPU_ONLINE:
583 spin_lock_irqsave(&data_saved_lock, flags);
584 for (i = 0, data = salinfo_data;
585 i < ARRAY_SIZE(salinfo_data);
586 ++i, ++data) {
587 cpu_set(cpu, data->cpu_event);
588 salinfo_work_to_do(data);
589 }
590 spin_unlock_irqrestore(&data_saved_lock, flags);
591 break;
592 case CPU_DEAD:
593 spin_lock_irqsave(&data_saved_lock, flags);
594 for (i = 0, data = salinfo_data;
595 i < ARRAY_SIZE(salinfo_data);
596 ++i, ++data) {
597 struct salinfo_data_saved *data_saved;
598 int j;
599 for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
600 j >= 0;
601 --j, --data_saved) {
602 if (data_saved->buffer && data_saved->cpu == cpu) {
603 shift1_data_saved(data, j);
604 }
605 }
606 cpu_clear(cpu, data->cpu_event);
607 }
608 spin_unlock_irqrestore(&data_saved_lock, flags);
609 break;
610 }
611 return NOTIFY_OK;
612}
613
614static struct notifier_block salinfo_cpu_notifier =
615{
616 .notifier_call = salinfo_cpu_callback,
617 .priority = 0,
618};
619#endif /* CONFIG_HOTPLUG_CPU */
620
552static int __init 621static int __init
553salinfo_init(void) 622salinfo_init(void)
554{ 623{
@@ -556,7 +625,7 @@ salinfo_init(void)
556 struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */ 625 struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
557 struct proc_dir_entry *dir, *entry; 626 struct proc_dir_entry *dir, *entry;
558 struct salinfo_data *data; 627 struct salinfo_data *data;
559 int i, j, online; 628 int i, j;
560 629
561 salinfo_dir = proc_mkdir("sal", NULL); 630 salinfo_dir = proc_mkdir("sal", NULL);
562 if (!salinfo_dir) 631 if (!salinfo_dir)
@@ -571,7 +640,7 @@ salinfo_init(void)
571 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { 640 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
572 data = salinfo_data + i; 641 data = salinfo_data + i;
573 data->type = i; 642 data->type = i;
574 sema_init(&data->sem, 0); 643 init_MUTEX(&data->mutex);
575 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); 644 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
576 if (!dir) 645 if (!dir)
577 continue; 646 continue;
@@ -591,12 +660,8 @@ salinfo_init(void)
591 *sdir++ = entry; 660 *sdir++ = entry;
592 661
593 /* we missed any events before now */ 662 /* we missed any events before now */
594 online = 0; 663 for_each_online_cpu(j)
595 for_each_online_cpu(j) { 664 cpu_set(j, data->cpu_event);
596 set_bit(j, &data->cpu_event);
597 ++online;
598 }
599 sema_init(&data->sem, online);
600 665
601 *sdir++ = dir; 666 *sdir++ = dir;
602 } 667 }
@@ -608,6 +673,10 @@ salinfo_init(void)
608 salinfo_timer.function = &salinfo_timeout; 673 salinfo_timer.function = &salinfo_timeout;
609 add_timer(&salinfo_timer); 674 add_timer(&salinfo_timer);
610 675
676#ifdef CONFIG_HOTPLUG_CPU
677 register_cpu_notifier(&salinfo_cpu_notifier);
678#endif
679
611 return 0; 680 return 0;
612} 681}
613 682
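
The pattern worth noting in this file is salinfo_work_to_do(): a down_trylock()/up() pair that uses the semaphore as an edge-triggered doorbell rather than a lock. After the kick the semaphore is guaranteed to be up, so a reader either sails through or is woken, and the read side only sleeps when the cpu_event mask is empty. The two halves, condensed from the hunks above:

    /* producer side, called under data_saved_lock */
    down_trylock(&data->mutex);     /* take it if free; failure is fine */
    up(&data->mutex);               /* leave it up, waking any sleeper */

    /* consumer side (salinfo_event_read) */
    if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
            if (file->f_flags & O_NONBLOCK)
                    return -EAGAIN;
            if (down_interruptible(&data->mutex))
                    return -EINTR;
    }
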
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5add0bcf87a7..c0766575a3a2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -43,6 +43,7 @@
43#include <linux/initrd.h> 43#include <linux/initrd.h>
44#include <linux/platform.h> 44#include <linux/platform.h>
45#include <linux/pm.h> 45#include <linux/pm.h>
46#include <linux/cpufreq.h>
46 47
47#include <asm/ia32.h> 48#include <asm/ia32.h>
48#include <asm/machvec.h> 49#include <asm/machvec.h>
@@ -59,6 +60,7 @@
59#include <asm/smp.h> 60#include <asm/smp.h>
60#include <asm/system.h> 61#include <asm/system.h>
61#include <asm/unistd.h> 62#include <asm/unistd.h>
63#include <asm/system.h>
62 64
63#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) 65#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
64# error "struct cpuinfo_ia64 too big!" 66# error "struct cpuinfo_ia64 too big!"
@@ -517,6 +519,7 @@ show_cpuinfo (struct seq_file *m, void *v)
517 char family[32], features[128], *cp, sep; 519 char family[32], features[128], *cp, sep;
518 struct cpuinfo_ia64 *c = v; 520 struct cpuinfo_ia64 *c = v;
519 unsigned long mask; 521 unsigned long mask;
522 unsigned long proc_freq;
520 int i; 523 int i;
521 524
522 mask = c->features; 525 mask = c->features;
@@ -549,6 +552,10 @@ show_cpuinfo (struct seq_file *m, void *v)
549 sprintf(cp, " 0x%lx", mask); 552 sprintf(cp, " 0x%lx", mask);
550 } 553 }
551 554
555 proc_freq = cpufreq_quick_get(cpunum);
556 if (!proc_freq)
557 proc_freq = c->proc_freq / 1000;
558
552 seq_printf(m, 559 seq_printf(m,
553 "processor : %d\n" 560 "processor : %d\n"
554 "vendor : %s\n" 561 "vendor : %s\n"
@@ -565,7 +572,7 @@ show_cpuinfo (struct seq_file *m, void *v)
565 "BogoMIPS : %lu.%02lu\n", 572 "BogoMIPS : %lu.%02lu\n",
566 cpunum, c->vendor, family, c->model, c->revision, c->archrev, 573 cpunum, c->vendor, family, c->model, c->revision, c->archrev,
567 features, c->ppn, c->number, 574 features, c->ppn, c->number,
568 c->proc_freq / 1000000, c->proc_freq % 1000000, 575 proc_freq / 1000, proc_freq % 1000,
569 c->itc_freq / 1000000, c->itc_freq % 1000000, 576 c->itc_freq / 1000000, c->itc_freq % 1000000,
570 lpj*HZ/500000, (lpj*HZ/5000) % 100); 577 lpj*HZ/500000, (lpj*HZ/5000) % 100);
571#ifdef CONFIG_SMP 578#ifdef CONFIG_SMP
@@ -689,6 +696,7 @@ static void
689get_max_cacheline_size (void) 696get_max_cacheline_size (void)
690{ 697{
691 unsigned long line_size, max = 1; 698 unsigned long line_size, max = 1;
699 unsigned int cache_size = 0;
692 u64 l, levels, unique_caches; 700 u64 l, levels, unique_caches;
693 pal_cache_config_info_t cci; 701 pal_cache_config_info_t cci;
694 s64 status; 702 s64 status;
@@ -718,6 +726,8 @@ get_max_cacheline_size (void)
718 line_size = 1 << cci.pcci_line_size; 726 line_size = 1 << cci.pcci_line_size;
719 if (line_size > max) 727 if (line_size > max)
720 max = line_size; 728 max = line_size;
729 if (cache_size < cci.pcci_cache_size)
730 cache_size = cci.pcci_cache_size;
721 if (!cci.pcci_unified) { 731 if (!cci.pcci_unified) {
722 status = ia64_pal_cache_config_info(l, 732 status = ia64_pal_cache_config_info(l,
723 /* cache_type (instruction)= */ 1, 733 /* cache_type (instruction)= */ 1,
@@ -734,6 +744,9 @@ get_max_cacheline_size (void)
734 ia64_i_cache_stride_shift = cci.pcci_stride; 744 ia64_i_cache_stride_shift = cci.pcci_stride;
735 } 745 }
736 out: 746 out:
747#ifdef CONFIG_SMP
748 max_cache_size = max(max_cache_size, cache_size);
749#endif
737 if (max > ia64_max_cacheline_size) 750 if (max > ia64_max_cacheline_size)
738 ia64_max_cacheline_size = max; 751 ia64_max_cacheline_size = max;
739} 752}
@@ -788,7 +801,7 @@ cpu_init (void)
788#endif 801#endif
789 802
790 /* Clear the stack memory reserved for pt_regs: */ 803 /* Clear the stack memory reserved for pt_regs: */
791 memset(ia64_task_regs(current), 0, sizeof(struct pt_regs)); 804 memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
792 805
793 ia64_set_kr(IA64_KR_FPU_OWNER, 0); 806 ia64_set_kr(IA64_KR_FPU_OWNER, 0);
794 807
@@ -864,6 +877,15 @@ cpu_init (void)
864 pm_idle = default_idle; 877 pm_idle = default_idle;
865} 878}
866 879
880/*
881 * On SMP systems, when the scheduler does migration-cost autodetection,
882 * it needs a way to flush as much of the CPU's caches as possible.
883 */
884void sched_cacheflush(void)
885{
886 ia64_sal_cache_flush(3);
887}
888
867void 889void
868check_bugs (void) 890check_bugs (void)
869{ 891{
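
The /proc/cpuinfo hunk is a units change as much as a feature: cpufreq_quick_get() reports kHz, while c->proc_freq is in Hz, so the fallback divides by 1000 and the seq_printf divisors shrink from 1000000 to 1000. Both paths now feed kHz into the same MHz formatting. Worked through with illustrative numbers:

    /*
     * e.g. a 1.4 GHz part:
     *   cpufreq path:  cpufreq_quick_get(cpunum)          == 1400000 (kHz)
     *   fallback path: c->proc_freq == 1400000000 Hz; / 1000 == 1400000 (kHz)
     * either way:
     *   proc_freq / 1000 == 1400 (whole MHz)
     *   proc_freq % 1000 == 0    (fractional part, in kHz)
     */
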
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 58ce07efc56e..463f6bb44d07 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -655,11 +655,11 @@ set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
655 655
656 if (!t) 656 if (!t)
657 return; 657 return;
658 t->thread_info->sigdelayed.signo = signo; 658 task_thread_info(t)->sigdelayed.signo = signo;
659 t->thread_info->sigdelayed.code = code; 659 task_thread_info(t)->sigdelayed.code = code;
660 t->thread_info->sigdelayed.addr = addr; 660 task_thread_info(t)->sigdelayed.addr = addr;
661 t->thread_info->sigdelayed.start_time = start_time; 661 task_thread_info(t)->sigdelayed.start_time = start_time;
662 t->thread_info->sigdelayed.pid = pid; 662 task_thread_info(t)->sigdelayed.pid = pid;
663 wmb(); 663 wmb();
664 set_tsk_thread_flag(t, TIF_SIGDELAYED); 664 set_tsk_thread_flag(t, TIF_SIGDELAYED);
665 } 665 }
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f2dbcd1db0d4..c7b943f10199 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -151,7 +151,7 @@ out:
151asmlinkage long 151asmlinkage long
152sys_pipe (void) 152sys_pipe (void)
153{ 153{
154 struct pt_regs *regs = ia64_task_regs(current); 154 struct pt_regs *regs = task_pt_regs(current);
155 int fd[2]; 155 int fd[2];
156 int retval; 156 int retval;
157 157
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index d3e0ecb56d62..55391901b013 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -530,12 +530,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
530 if (fsys_mode(current, &regs)) { 530 if (fsys_mode(current, &regs)) {
531 extern char __kernel_syscall_via_break[]; 531 extern char __kernel_syscall_via_break[];
532 /* 532 /*
533 * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap 533 * Got a trap in fsys-mode: Taken Branch Trap
534 * need special handling; Debug trap is not supposed to happen. 534 * and Single Step trap need special handling;
535 * Debug trap is ignored (we disable it here
536 * and re-enable it in the lower-privilege trap).
535 */ 537 */
536 if (unlikely(vector == 29)) { 538 if (unlikely(vector == 29)) {
537 die("Got debug trap in fsys-mode---not supposed to happen!", 539 set_thread_flag(TIF_DB_DISABLED);
538 &regs, 0); 540 ia64_psr(&regs)->db = 0;
541 ia64_psr(&regs)->lp = 1;
539 return; 542 return;
540 } 543 }
541 /* re-do the system call via break 0x100000: */ 544 /* re-do the system call via break 0x100000: */
@@ -589,10 +592,19 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
589 case 34: 592 case 34:
590 if (isr & 0x2) { 593 if (isr & 0x2) {
591 /* Lower-Privilege Transfer Trap */ 594 /* Lower-Privilege Transfer Trap */
595
596 /* If we disabled debug traps during an fsyscall,
597 * re-enable them here.
598 */
599 if (test_thread_flag(TIF_DB_DISABLED)) {
600 clear_thread_flag(TIF_DB_DISABLED);
601 ia64_psr(&regs)->db = 1;
602 }
603
592 /* 604 /*
593 * Just clear PSR.lp and then return immediately: all the 605 * Just clear PSR.lp and then return immediately:
594 * interesting work (e.g., signal delivery is done in the kernel 606 * all the interesting work (e.g., signal delivery)
595 * exit path). 607 * is done in the kernel exit path.
596 */ 608 */
597 ia64_psr(&regs)->lp = 0; 609 ia64_psr(&regs)->lp = 0;
598 return; 610 return;
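
The two traps.c hunks are halves of one workaround and read best together: a debug trap (vector 29) taken in fsys-mode is no longer fatal; the handler parks a flag, disables further debug traps, and arms the lower-privilege transfer trap, which undoes the change on the way back to user level. The protocol, condensed from the hunks:

    /* vector 29 (Debug) taken in fsys-mode */
    set_thread_flag(TIF_DB_DISABLED);
    ia64_psr(&regs)->db = 0;        /* suppress further debug traps */
    ia64_psr(&regs)->lp = 1;        /* arm the lower-privilege transfer trap */

    /* vector 34 (Lower-Privilege Transfer), on the way back down */
    if (test_thread_flag(TIF_DB_DISABLED)) {
            clear_thread_flag(TIF_DB_DISABLED);
            ia64_psr(&regs)->db = 1;        /* restore debug traps */
    }
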
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index b631cf86ed44..fcd2bad0286f 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
210 210
211 dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end); 211 dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
212 212
213 touch_softlockup_watchdog();
213 memset((char *)start, 0, length); 214 memset((char *)start, 0, length);
214 215
215 node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET); 216 node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index e3215ba64ffd..b38b6d213c15 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -635,3 +635,39 @@ mem_init (void)
635 ia32_mem_init(); 635 ia32_mem_init();
636#endif 636#endif
637} 637}
638
639#ifdef CONFIG_MEMORY_HOTPLUG
640void online_page(struct page *page)
641{
642 ClearPageReserved(page);
643 set_page_count(page, 1);
644 __free_page(page);
645 totalram_pages++;
646 num_physpages++;
647}
648
649int add_memory(u64 start, u64 size)
650{
651 pg_data_t *pgdat;
652 struct zone *zone;
653 unsigned long start_pfn = start >> PAGE_SHIFT;
654 unsigned long nr_pages = size >> PAGE_SHIFT;
655 int ret;
656
657 pgdat = NODE_DATA(0);
658
659 zone = pgdat->node_zones + ZONE_NORMAL;
660 ret = __add_pages(zone, start_pfn, nr_pages);
661
662 if (ret)
663 printk("%s: Problem encountered in __add_pages() as ret=%d\n",
664 __FUNCTION__, ret);
665
666 return ret;
667}
668
669int remove_memory(u64 start, u64 size)
670{
671 return -EINVAL;
672}
673#endif
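
add_memory() takes byte units and derives page-frame numbers itself before handing the range, pinned here to node 0's ZONE_NORMAL, to the generic __add_pages(). A worked example of the conversion, with a hypothetical range and page size:

    /*
     * hot-adding 256 MB at physical 4 GB with PAGE_SHIFT == 16 (64 KB pages):
     *   start_pfn = 0x100000000 >> 16 == 0x10000
     *   nr_pages  = 0x10000000  >> 16 == 0x1000
     * remove_memory() deliberately returns -EINVAL: ranges can be added
     * but not yet taken away.
     */
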
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 41105d454423..6a4eec9113e8 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -90,7 +90,7 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
90{ 90{
91 static DEFINE_SPINLOCK(ptcg_lock); 91 static DEFINE_SPINLOCK(ptcg_lock);
92 92
93 if (mm != current->active_mm) { 93 if (mm != current->active_mm || !current->mm) {
94 flush_tlb_all(); 94 flush_tlb_all();
95 return; 95 return;
96 } 96 }
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
index b7dabbfb0d61..adb01566bd57 100644
--- a/arch/ia64/oprofile/backtrace.c
+++ b/arch/ia64/oprofile/backtrace.c
@@ -32,7 +32,7 @@ typedef struct
32 u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */ 32 u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */
33} ia64_backtrace_t; 33} ia64_backtrace_t;
34 34
35#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) 35#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
36/* 36/*
37 * Returns non-zero if the PC is in the spinlock contention out-of-line code 37 * Returns non-zero if the PC is in the spinlock contention out-of-line code
38 * with non-standard calling sequence (on older compilers). 38 * with non-standard calling sequence (on older compilers).
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 20d76fae24e8..0b30ca006286 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -193,12 +193,12 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
193 goto free_resource; 193 goto free_resource;
194 } 194 }
195 195
196 min = addr->min_address_range; 196 min = addr->minimum;
197 max = min + addr->address_length - 1; 197 max = min + addr->address_length - 1;
198 if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION) 198 if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
199 sparse = 1; 199 sparse = 1;
200 200
201 space_nr = new_space(addr->address_translation_offset, sparse); 201 space_nr = new_space(addr->translation_offset, sparse);
202 if (space_nr == ~0) 202 if (space_nr == ~0)
203 goto free_name; 203 goto free_name;
204 204
@@ -285,7 +285,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
285 if (addr.resource_type == ACPI_MEMORY_RANGE) { 285 if (addr.resource_type == ACPI_MEMORY_RANGE) {
286 flags = IORESOURCE_MEM; 286 flags = IORESOURCE_MEM;
287 root = &iomem_resource; 287 root = &iomem_resource;
288 offset = addr.address_translation_offset; 288 offset = addr.translation_offset;
289 } else if (addr.resource_type == ACPI_IO_RANGE) { 289 } else if (addr.resource_type == ACPI_IO_RANGE) {
290 flags = IORESOURCE_IO; 290 flags = IORESOURCE_IO;
291 root = &ioport_resource; 291 root = &ioport_resource;
@@ -298,7 +298,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
298 window = &info->controller->window[info->controller->windows++]; 298 window = &info->controller->window[info->controller->windows++];
299 window->resource.name = info->name; 299 window->resource.name = info->name;
300 window->resource.flags = flags; 300 window->resource.flags = flags;
301 window->resource.start = addr.min_address_range + offset; 301 window->resource.start = addr.minimum + offset;
302 window->resource.end = window->resource.start + addr.address_length - 1; 302 window->resource.end = window->resource.start + addr.address_length - 1;
303 window->resource.child = NULL; 303 window->resource.child = NULL;
304 window->offset = offset; 304 window->offset = offset;
@@ -454,14 +454,13 @@ static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
454 return 0; 454 return 0;
455} 455}
456 456
457static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) 457static void __devinit
458pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
458{ 459{
459 struct pci_bus_region region; 460 struct pci_bus_region region;
460 int i; 461 int i;
461 int limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ? \
462 PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;
463 462
464 for (i = 0; i < limit; i++) { 463 for (i = start; i < limit; i++) {
465 if (!dev->resource[i].flags) 464 if (!dev->resource[i].flags)
466 continue; 465 continue;
467 region.start = dev->resource[i].start; 466 region.start = dev->resource[i].start;
@@ -472,6 +471,16 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
472 } 471 }
473} 472}
474 473
474static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
475{
476 pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
477}
478
479static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
480{
481 pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
482}
483
475/* 484/*
476 * Called after each bus is probed, but before its children are examined. 485 * Called after each bus is probed, but before its children are examined.
477 */ 486 */
@@ -482,7 +491,7 @@ pcibios_fixup_bus (struct pci_bus *b)
482 491
483 if (b->self) { 492 if (b->self) {
484 pci_read_bridge_bases(b); 493 pci_read_bridge_bases(b);
485 pcibios_fixup_device_resources(b->self); 494 pcibios_fixup_bridge_resources(b->self);
486 } 495 }
487 list_for_each_entry(dev, &b->devices, bus_list) 496 list_for_each_entry(dev, &b->devices, bus_list)
488 pcibios_fixup_device_resources(dev); 497 pcibios_fixup_device_resources(dev);
@@ -700,7 +709,7 @@ int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
700 */ 709 */
701int ia64_pci_legacy_write(struct pci_dev *bus, u16 port, u32 val, u8 size) 710int ia64_pci_legacy_write(struct pci_dev *bus, u16 port, u32 val, u8 size)
702{ 711{
703 int ret = 0; 712 int ret = size;
704 713
705 switch (size) { 714 switch (size) {
706 case 1: 715 case 1:
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
index 71c2b271b4c6..8182583c762c 100644
--- a/arch/ia64/sn/include/xtalk/hubdev.h
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -26,29 +26,46 @@
26#define IIO_NUM_ITTES 7 26#define IIO_NUM_ITTES 7
27#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) 27#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
28 28
29struct sn_flush_device_list { 29/* This struct is shared between the PROM and the kernel.
30 * Changes to this struct will require corresponding changes to the PROM.
31 */
32struct sn_flush_device_common {
30 int sfdl_bus; 33 int sfdl_bus;
31 int sfdl_slot; 34 int sfdl_slot;
32 int sfdl_pin; 35 int sfdl_pin;
33 struct bar_list { 36 struct common_bar_list {
34 unsigned long start; 37 unsigned long start;
35 unsigned long end; 38 unsigned long end;
36 } sfdl_bar_list[6]; 39 } sfdl_bar_list[6];
37 unsigned long sfdl_force_int_addr; 40 unsigned long sfdl_force_int_addr;
38 unsigned long sfdl_flush_value; 41 unsigned long sfdl_flush_value;
39 volatile unsigned long *sfdl_flush_addr; 42 volatile unsigned long *sfdl_flush_addr;
40 uint32_t sfdl_persistent_busnum; 43 u32 sfdl_persistent_busnum;
41 uint32_t sfdl_persistent_segment; 44 u32 sfdl_persistent_segment;
42 struct pcibus_info *sfdl_pcibus_info; 45 struct pcibus_info *sfdl_pcibus_info;
46};
47
48/* This struct is kernel only and is not used by the PROM */
49struct sn_flush_device_kernel {
43 spinlock_t sfdl_flush_lock; 50 spinlock_t sfdl_flush_lock;
51 struct sn_flush_device_common *common;
52};
53
54/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
55 * for older official PROMs to function on the new kernel base. This struct
56 * will be removed when the next official PROM release occurs. */
57
58struct sn_flush_device_war {
59 struct sn_flush_device_common common;
60 u32 filler; /* older PROMs expect the default size of a spinlock_t */
44}; 61};
45 62
46/* 63/*
47 * **widget_p - Used as an array[wid_num][device] of sn_flush_device_list. 64 * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
48 */ 65 */
49struct sn_flush_nasid_entry { 66struct sn_flush_nasid_entry {
50 struct sn_flush_device_list **widget_p; /* Used as a array of wid_num */ 67 struct sn_flush_device_kernel **widget_p; /* Used as an array of wid_num */
51 uint64_t iio_itte[8]; 68 u64 iio_itte[8];
52}; 69};
53 70
54struct hubdev_info { 71struct hubdev_info {
@@ -62,8 +79,8 @@ struct hubdev_info {
62 79
63 void *hdi_nodepda; 80 void *hdi_nodepda;
64 void *hdi_node_vertex; 81 void *hdi_node_vertex;
65 uint32_t max_segment_number; 82 u32 max_segment_number;
66 uint32_t max_pcibus_number; 83 u32 max_pcibus_number;
67}; 84};
68 85
69extern void hubdev_init_node(nodepda_t *, cnodeid_t); 86extern void hubdev_init_node(nodepda_t *, cnodeid_t);
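
The split above keeps one PROM-owned layout (sn_flush_device_common, filled in by SAL) and one kernel-private wrapper holding the lock. A minimal init sketch under that assumption (sn_flush_entry_init() is hypothetical):

static int sn_flush_entry_init(struct sn_flush_device_kernel *entry)
{
        /* the common part is what SAL/PROM will later fill in */
        entry->common = kzalloc(sizeof(*entry->common), GFP_KERNEL);
        if (!entry->common)
                return -ENOMEM;
        spin_lock_init(&entry->sfdl_flush_lock);
        return 0;
}
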
diff --git a/arch/ia64/sn/include/xtalk/xbow.h b/arch/ia64/sn/include/xtalk/xbow.h
index ec56b3432f17..90f37a4133d0 100644
--- a/arch/ia64/sn/include/xtalk/xbow.h
+++ b/arch/ia64/sn/include/xtalk/xbow.h
@@ -3,7 +3,8 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All Rights
7 * Reserved.
7 */ 8 */
8#ifndef _ASM_IA64_SN_XTALK_XBOW_H 9#ifndef _ASM_IA64_SN_XTALK_XBOW_H
9#define _ASM_IA64_SN_XTALK_XBOW_H 10#define _ASM_IA64_SN_XTALK_XBOW_H
@@ -21,94 +22,94 @@
21 22
22/* Register set for each xbow link */ 23/* Register set for each xbow link */
23typedef volatile struct xb_linkregs_s { 24typedef volatile struct xb_linkregs_s {
24/* 25/*
25 * we access these through synergy unswizzled space, so the address 26 * we access these through synergy unswizzled space, so the address
26 * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) 27 * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
27 * That's why we put the register first and filler second. 28 * That's why we put the register first and filler second.
28 */ 29 */
29 uint32_t link_ibf; 30 u32 link_ibf;
30 uint32_t filler0; /* filler for proper alignment */ 31 u32 filler0; /* filler for proper alignment */
31 uint32_t link_control; 32 u32 link_control;
32 uint32_t filler1; 33 u32 filler1;
33 uint32_t link_status; 34 u32 link_status;
34 uint32_t filler2; 35 u32 filler2;
35 uint32_t link_arb_upper; 36 u32 link_arb_upper;
36 uint32_t filler3; 37 u32 filler3;
37 uint32_t link_arb_lower; 38 u32 link_arb_lower;
38 uint32_t filler4; 39 u32 filler4;
39 uint32_t link_status_clr; 40 u32 link_status_clr;
40 uint32_t filler5; 41 u32 filler5;
41 uint32_t link_reset; 42 u32 link_reset;
42 uint32_t filler6; 43 u32 filler6;
43 uint32_t link_aux_status; 44 u32 link_aux_status;
44 uint32_t filler7; 45 u32 filler7;
45} xb_linkregs_t; 46} xb_linkregs_t;
46 47
47typedef volatile struct xbow_s { 48typedef volatile struct xbow_s {
48 /* standard widget configuration 0x000000-0x000057 */ 49 /* standard widget configuration 0x000000-0x000057 */
49 struct widget_cfg xb_widget; /* 0x000000 */ 50 struct widget_cfg xb_widget; /* 0x000000 */
50 51
51 /* helper fieldnames for accessing bridge widget */ 52 /* helper fieldnames for accessing bridge widget */
52 53
53#define xb_wid_id xb_widget.w_id 54#define xb_wid_id xb_widget.w_id
54#define xb_wid_stat xb_widget.w_status 55#define xb_wid_stat xb_widget.w_status
55#define xb_wid_err_upper xb_widget.w_err_upper_addr 56#define xb_wid_err_upper xb_widget.w_err_upper_addr
56#define xb_wid_err_lower xb_widget.w_err_lower_addr 57#define xb_wid_err_lower xb_widget.w_err_lower_addr
57#define xb_wid_control xb_widget.w_control 58#define xb_wid_control xb_widget.w_control
58#define xb_wid_req_timeout xb_widget.w_req_timeout 59#define xb_wid_req_timeout xb_widget.w_req_timeout
59#define xb_wid_int_upper xb_widget.w_intdest_upper_addr 60#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
60#define xb_wid_int_lower xb_widget.w_intdest_lower_addr 61#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
61#define xb_wid_err_cmdword xb_widget.w_err_cmd_word 62#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
62#define xb_wid_llp xb_widget.w_llp_cfg 63#define xb_wid_llp xb_widget.w_llp_cfg
63#define xb_wid_stat_clr xb_widget.w_tflush 64#define xb_wid_stat_clr xb_widget.w_tflush
64 65
65/* 66/*
66 * we access these through synergy unswizzled space, so the address 67 * we access these through synergy unswizzled space, so the address
67 * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) 68 * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
68 * That's why we put the register first and filler second. 69 * That's why we put the register first and filler second.
69 */ 70 */
70 /* xbow-specific widget configuration 0x000058-0x0000FF */ 71 /* xbow-specific widget configuration 0x000058-0x0000FF */
71 uint32_t xb_wid_arb_reload; /* 0x00005C */ 72 u32 xb_wid_arb_reload; /* 0x00005C */
72 uint32_t _pad_000058; 73 u32 _pad_000058;
73 uint32_t xb_perf_ctr_a; /* 0x000064 */ 74 u32 xb_perf_ctr_a; /* 0x000064 */
74 uint32_t _pad_000060; 75 u32 _pad_000060;
75 uint32_t xb_perf_ctr_b; /* 0x00006c */ 76 u32 xb_perf_ctr_b; /* 0x00006c */
76 uint32_t _pad_000068; 77 u32 _pad_000068;
77 uint32_t xb_nic; /* 0x000074 */ 78 u32 xb_nic; /* 0x000074 */
78 uint32_t _pad_000070; 79 u32 _pad_000070;
79 80
80 /* Xbridge only */ 81 /* Xbridge only */
81 uint32_t xb_w0_rst_fnc; /* 0x00007C */ 82 u32 xb_w0_rst_fnc; /* 0x00007C */
82 uint32_t _pad_000078; 83 u32 _pad_000078;
83 uint32_t xb_l8_rst_fnc; /* 0x000084 */ 84 u32 xb_l8_rst_fnc; /* 0x000084 */
84 uint32_t _pad_000080; 85 u32 _pad_000080;
85 uint32_t xb_l9_rst_fnc; /* 0x00008c */ 86 u32 xb_l9_rst_fnc; /* 0x00008c */
86 uint32_t _pad_000088; 87 u32 _pad_000088;
87 uint32_t xb_la_rst_fnc; /* 0x000094 */ 88 u32 xb_la_rst_fnc; /* 0x000094 */
88 uint32_t _pad_000090; 89 u32 _pad_000090;
89 uint32_t xb_lb_rst_fnc; /* 0x00009c */ 90 u32 xb_lb_rst_fnc; /* 0x00009c */
90 uint32_t _pad_000098; 91 u32 _pad_000098;
91 uint32_t xb_lc_rst_fnc; /* 0x0000a4 */ 92 u32 xb_lc_rst_fnc; /* 0x0000a4 */
92 uint32_t _pad_0000a0; 93 u32 _pad_0000a0;
93 uint32_t xb_ld_rst_fnc; /* 0x0000ac */ 94 u32 xb_ld_rst_fnc; /* 0x0000ac */
94 uint32_t _pad_0000a8; 95 u32 _pad_0000a8;
95 uint32_t xb_le_rst_fnc; /* 0x0000b4 */ 96 u32 xb_le_rst_fnc; /* 0x0000b4 */
96 uint32_t _pad_0000b0; 97 u32 _pad_0000b0;
97 uint32_t xb_lf_rst_fnc; /* 0x0000bc */ 98 u32 xb_lf_rst_fnc; /* 0x0000bc */
98 uint32_t _pad_0000b8; 99 u32 _pad_0000b8;
99 uint32_t xb_lock; /* 0x0000c4 */ 100 u32 xb_lock; /* 0x0000c4 */
100 uint32_t _pad_0000c0; 101 u32 _pad_0000c0;
101 uint32_t xb_lock_clr; /* 0x0000cc */ 102 u32 xb_lock_clr; /* 0x0000cc */
102 uint32_t _pad_0000c8; 103 u32 _pad_0000c8;
103 /* end of Xbridge only */ 104 /* end of Xbridge only */
104 uint32_t _pad_0000d0[12]; 105 u32 _pad_0000d0[12];
105 106
106 /* Link Specific Registers, port 8..15 0x000100-0x000300 */ 107 /* Link Specific Registers, port 8..15 0x000100-0x000300 */
107 xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS]; 108 xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
108#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
109
110} xbow_t; 109} xbow_t;
111 110
111#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
112
112#define XB_FLAGS_EXISTS 0x1 /* device exists */ 113#define XB_FLAGS_EXISTS 0x1 /* device exists */
113#define XB_FLAGS_MASTER 0x2 114#define XB_FLAGS_MASTER 0x2
114#define XB_FLAGS_SLAVE 0x0 115#define XB_FLAGS_SLAVE 0x0
@@ -160,7 +161,7 @@ typedef volatile struct xbow_s {
160/* End of Xbridge only */ 161/* End of Xbridge only */
161 162
162/* used only in ide, but defined here within the reserved portion */ 163/* used only in ide, but defined here within the reserved portion */
163/* of the widget0 address space (before 0xf4) */ 164/* of the widget0 address space (before 0xf4) */
164#define XBOW_WID_UNDEF 0xe4 165#define XBOW_WID_UNDEF 0xe4
165 166
166/* xbow link register set base, legal value for x is 0x8..0xf */ 167/* xbow link register set base, legal value for x is 0x8..0xf */
@@ -179,29 +180,37 @@ typedef volatile struct xbow_s {
179 180
180/* link_control(x) */ 181/* link_control(x) */
181#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */ 182#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
182 /* reserved: 0x40000000 */ 183/* reserved: 0x40000000 */
183#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */ 184#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
184#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer level */ 185#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer
185#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 bit mode */ 186 level */
186#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP packet */ 187#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8
187#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit mask */ 188 bit mode */
188#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit shift */ 189#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP
189#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination */ 190 packet */
190#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input buffer */ 191#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit
191 /* reserved: 0x0000fe00 */ 192 mask */
193#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit
194 shift */
195#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination
196 */
197#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input
198 buffer */
199/* reserved: 0x0000fe00 */
192#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */ 200#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
193#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */ 201#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
194#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */ 202#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
195#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */ 203#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
196#define XB_CTRL_RCV_IE 0x00000010 /* receive */ 204#define XB_CTRL_RCV_IE 0x00000010 /* receive */
197#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */ 205#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
198 /* reserved: 0x00000004 */ 206/* reserved: 0x00000004 */
199#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request timeout */ 207#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request
208 timeout */
200#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */ 209#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
201 210
202/* link_status(x) */ 211/* link_status(x) */
203#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE 212#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
204 /* reserved: 0x7ff80000 */ 213/* reserved: 0x7ff80000 */
205#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */ 214#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
206#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE 215#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
207#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE 216#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
@@ -211,7 +220,7 @@ typedef volatile struct xbow_s {
211#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE 220#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
212#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE 221#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
213#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE 222#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
214 /* reserved: 0x00000004 */ 223/* reserved: 0x00000004 */
215#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE 224#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
216#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE 225#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
217 226
@@ -222,7 +231,7 @@ typedef volatile struct xbow_s {
222#define XB_AUX_LINKFAIL_RST_BAD 0x00000040 231#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
223#define XB_AUX_STAT_PRESENT 0x00000020 232#define XB_AUX_STAT_PRESENT 0x00000020
224#define XB_AUX_STAT_PORT_WIDTH 0x00000010 233#define XB_AUX_STAT_PORT_WIDTH 0x00000010
225 /* reserved: 0x0000000f */ 234/* reserved: 0x0000000f */
226 235
227/* 236/*
228 * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper 237 * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
@@ -238,7 +247,8 @@ typedef volatile struct xbow_s {
238/* XBOW_WID_STAT */ 247/* XBOW_WID_STAT */
239#define XB_WID_STAT_LINK_INTR_SHFT (24) 248#define XB_WID_STAT_LINK_INTR_SHFT (24)
240#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT) 249#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
241#define XB_WID_STAT_LINK_INTR(x) (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT)) 250#define XB_WID_STAT_LINK_INTR(x) \
251 (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
242#define XB_WID_STAT_WIDGET0_INTR 0x00800000 252#define XB_WID_STAT_WIDGET0_INTR 0x00800000
243#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */ 253#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
244#define XB_WID_STAT_REG_ACC_ERR 0x00000020 254#define XB_WID_STAT_REG_ACC_ERR 0x00000020
@@ -264,7 +274,7 @@ typedef volatile struct xbow_s {
264#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */ 274#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
265#define XBOW_WIDGET_MFGR_NUM 0x0 275#define XBOW_WIDGET_MFGR_NUM 0x0
266#define XXBOW_WIDGET_MFGR_NUM 0x0 276#define XXBOW_WIDGET_MFGR_NUM 0x0
267#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */ 277#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
268 278
269#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */ 279#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
270#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */ 280#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
@@ -279,13 +289,13 @@ typedef volatile struct xbow_s {
279#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */ 289#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
280 290
281#define IS_XBRIDGE_XBOW(wid) \ 291#define IS_XBRIDGE_XBOW(wid) \
282 (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \ 292 (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
283 XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) 293 XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
284 294
285#define IS_PIC_XBOW(wid) \ 295#define IS_PIC_XBOW(wid) \
286 (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \ 296 (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
287 XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) 297 XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
288 298
289#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv) 299#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
290 300
291#endif /* _ASM_IA64_SN_XTALK_XBOW_H */ 301#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
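
Since xb_link() is now defined after the struct, it can be applied to any xbow_t pointer; it simply masks the port into the xb_link_raw[] index range. Usage sketch (read_xbow_link_status() is hypothetical):

static u32 read_xbow_link_status(xbow_t *xbow, int port)
{
        /* legal ports are 0x8..0xf; the macro masks with MAX_XBOW_PORTS - 1 */
        return xbow->xb_link(port).link_status;
}
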
diff --git a/arch/ia64/sn/include/xtalk/xwidgetdev.h b/arch/ia64/sn/include/xtalk/xwidgetdev.h
index c5f4bc5cc033..2800eda0fd68 100644
--- a/arch/ia64/sn/include/xtalk/xwidgetdev.h
+++ b/arch/ia64/sn/include/xtalk/xwidgetdev.h
@@ -25,28 +25,28 @@
25 25
26/* widget configuration registers */ 26/* widget configuration registers */
27struct widget_cfg{ 27struct widget_cfg{
28 uint32_t w_id; /* 0x04 */ 28 u32 w_id; /* 0x04 */
29 uint32_t w_pad_0; /* 0x00 */ 29 u32 w_pad_0; /* 0x00 */
30 uint32_t w_status; /* 0x0c */ 30 u32 w_status; /* 0x0c */
31 uint32_t w_pad_1; /* 0x08 */ 31 u32 w_pad_1; /* 0x08 */
32 uint32_t w_err_upper_addr; /* 0x14 */ 32 u32 w_err_upper_addr; /* 0x14 */
33 uint32_t w_pad_2; /* 0x10 */ 33 u32 w_pad_2; /* 0x10 */
34 uint32_t w_err_lower_addr; /* 0x1c */ 34 u32 w_err_lower_addr; /* 0x1c */
35 uint32_t w_pad_3; /* 0x18 */ 35 u32 w_pad_3; /* 0x18 */
36 uint32_t w_control; /* 0x24 */ 36 u32 w_control; /* 0x24 */
37 uint32_t w_pad_4; /* 0x20 */ 37 u32 w_pad_4; /* 0x20 */
38 uint32_t w_req_timeout; /* 0x2c */ 38 u32 w_req_timeout; /* 0x2c */
39 uint32_t w_pad_5; /* 0x28 */ 39 u32 w_pad_5; /* 0x28 */
40 uint32_t w_intdest_upper_addr; /* 0x34 */ 40 u32 w_intdest_upper_addr; /* 0x34 */
41 uint32_t w_pad_6; /* 0x30 */ 41 u32 w_pad_6; /* 0x30 */
42 uint32_t w_intdest_lower_addr; /* 0x3c */ 42 u32 w_intdest_lower_addr; /* 0x3c */
43 uint32_t w_pad_7; /* 0x38 */ 43 u32 w_pad_7; /* 0x38 */
44 uint32_t w_err_cmd_word; /* 0x44 */ 44 u32 w_err_cmd_word; /* 0x44 */
45 uint32_t w_pad_8; /* 0x40 */ 45 u32 w_pad_8; /* 0x40 */
46 uint32_t w_llp_cfg; /* 0x4c */ 46 u32 w_llp_cfg; /* 0x4c */
47 uint32_t w_pad_9; /* 0x48 */ 47 u32 w_pad_9; /* 0x48 */
48 uint32_t w_tflush; /* 0x54 */ 48 u32 w_tflush; /* 0x54 */
49 uint32_t w_pad_10; /* 0x50 */ 49 u32 w_pad_10; /* 0x50 */
50}; 50};
51 51
52/* 52/*
@@ -63,7 +63,7 @@ struct xwidget_info{
63 struct xwidget_hwid xwi_hwid; /* Widget Identification */ 63 struct xwidget_hwid xwi_hwid; /* Widget Identification */
64 char xwi_masterxid; /* Hub's Widget Port Number */ 64 char xwi_masterxid; /* Hub's Widget Port Number */
65 void *xwi_hubinfo; /* Hub's provider private info */ 65 void *xwi_hubinfo; /* Hub's provider private info */
66 uint64_t *xwi_hub_provider; /* prom provider functions */ 66 u64 *xwi_hub_provider; /* prom provider functions */
67 void *xwi_vertex; 67 void *xwi_vertex;
68}; 68};
69 69
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index fcbc748ae433..f1ec1370b3e3 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -33,7 +33,7 @@ void bte_error_handler(unsigned long);
33 * Wait until all BTE related CRBs are completed 33 * Wait until all BTE related CRBs are completed
34 * and then reset the interfaces. 34 * and then reset the interfaces.
35 */ 35 */
36void shub1_bte_error_handler(unsigned long _nodepda) 36int shub1_bte_error_handler(unsigned long _nodepda)
37{ 37{
38 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; 38 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
39 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; 39 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
@@ -53,7 +53,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
53 (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) { 53 (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
54 BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda, 54 BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
55 smp_processor_id())); 55 smp_processor_id()));
56 return; 56 return 1;
57 } 57 }
58 58
59 /* Determine information about our hub */ 59 /* Determine information about our hub */
@@ -81,7 +81,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
81 mod_timer(recovery_timer, HZ * 5); 81 mod_timer(recovery_timer, HZ * 5);
82 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, 82 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
83 smp_processor_id())); 83 smp_processor_id()));
84 return; 84 return 1;
85 } 85 }
86 if (icmr.ii_icmr_fld_s.i_crb_vld != 0) { 86 if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
87 87
@@ -99,7 +99,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
99 BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n", 99 BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
100 err_nodepda, smp_processor_id(), 100 err_nodepda, smp_processor_id(),
101 i)); 101 i));
102 return; 102 return 1;
103 } 103 }
104 } 104 }
105 } 105 }
@@ -124,6 +124,42 @@ void shub1_bte_error_handler(unsigned long _nodepda)
124 REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval); 124 REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
125 125
126 del_timer(recovery_timer); 126 del_timer(recovery_timer);
127 return 0;
128}
129
130/*
131 * Wait until all BTE related CRBs are completed
132 * and then reset the interfaces.
133 */
134int shub2_bte_error_handler(unsigned long _nodepda)
135{
136 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
137 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
138 struct bteinfo_s *bte;
139 nasid_t nasid;
140 u64 status;
141 int i;
142
143 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
144
145 /*
146 * Verify that all the BTEs are complete
147 */
148 for (i = 0; i < BTES_PER_NODE; i++) {
149 bte = &err_nodepda->bte_if[i];
150 status = BTE_LNSTAT_LOAD(bte);
151 if ((status & IBLS_ERROR) || !(status & IBLS_BUSY))
152 continue;
153 mod_timer(recovery_timer, HZ * 5);
154 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
155 smp_processor_id()));
156 return 1;
157 }
158 if (ia64_sn_bte_recovery(nasid))
159 panic("bte_error_handler(): Fatal BTE Error");
160
161 del_timer(recovery_timer);
162 return 0;
127} 163}
128 164
129/* 165/*
@@ -135,7 +171,6 @@ void bte_error_handler(unsigned long _nodepda)
135 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; 171 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
136 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock; 172 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
137 int i; 173 int i;
138 nasid_t nasid;
139 unsigned long irq_flags; 174 unsigned long irq_flags;
140 volatile u64 *notify; 175 volatile u64 *notify;
141 bte_result_t bh_error; 176 bte_result_t bh_error;
@@ -160,12 +195,15 @@ void bte_error_handler(unsigned long _nodepda)
160 } 195 }
161 196
162 if (is_shub1()) { 197 if (is_shub1()) {
163 shub1_bte_error_handler(_nodepda); 198 if (shub1_bte_error_handler(_nodepda)) {
199 spin_unlock_irqrestore(recovery_lock, irq_flags);
200 return;
201 }
164 } else { 202 } else {
165 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); 203 if (shub2_bte_error_handler(_nodepda)) {
166 204 spin_unlock_irqrestore(recovery_lock, irq_flags);
167 if (ia64_sn_bte_recovery(nasid)) 205 return;
168 panic("bte_error_handler(): Fatal BTE Error"); 206 }
169 } 207 }
170 208
171 for (i = 0; i < BTES_PER_NODE; i++) { 209 for (i = 0; i < BTES_PER_NODE; i++) {
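
Both handlers now share one contract: nonzero means recovery is still pending (the recovery timer was re-armed, so the caller drops the lock and returns), zero means the interfaces were reset and cleanup can continue. The dispatch reduces to a sketch like this (bte_recovery_pending() is a hypothetical wrapper):

static int bte_recovery_pending(struct nodepda_s *err_nodepda)
{
        if (is_shub1())
                return shub1_bte_error_handler((unsigned long)err_nodepda);
        return shub2_bte_error_handler((unsigned long)err_nodepda);
}
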
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 5c5eb01c50f0..56ab6bae00ee 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -32,13 +32,14 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
32 ret_stuff.v0 = 0; 32 ret_stuff.v0 = 0;
33 hubdev_info = (struct hubdev_info *)arg; 33 hubdev_info = (struct hubdev_info *)arg;
34 nasid = hubdev_info->hdi_nasid; 34 nasid = hubdev_info->hdi_nasid;
35 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT, 35
36 if (is_shub1()) {
37 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
36 (u64) nasid, 0, 0, 0, 0, 0, 0); 38 (u64) nasid, 0, 0, 0, 0, 0, 0);
37 39
38 if ((int)ret_stuff.v0) 40 if ((int)ret_stuff.v0)
39 panic("hubii_eint_handler(): Fatal TIO Error"); 41 panic("hubii_eint_handler(): Fatal TIO Error");
40 42
41 if (is_shub1()) {
42 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
43 (void)hubiio_crb_error_handler(hubdev_info); 44 (void)hubiio_crb_error_handler(hubdev_info);
44 } else 45 } else
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 318087e35b66..00700f7e6837 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -76,11 +76,12 @@ static struct sn_pcibus_provider sn_pci_default_provider = {
76}; 76};
77 77
78/* 78/*
79 * Retrieve the DMA Flush List given nasid. This list is needed 79 * Retrieve the DMA Flush List given nasid, widget, and device.
80 * to implement the WAR - Flush DMA data on PIO Reads. 80 * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
81 */ 81 */
82static inline uint64_t 82static inline u64
83sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address) 83sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
84 u64 address)
84{ 85{
85 86
86 struct ia64_sal_retval ret_stuff; 87 struct ia64_sal_retval ret_stuff;
@@ -88,17 +89,17 @@ sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
88 ret_stuff.v0 = 0; 89 ret_stuff.v0 = 0;
89 90
90 SAL_CALL_NOLOCK(ret_stuff, 91 SAL_CALL_NOLOCK(ret_stuff,
91 (u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, 92 (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
92 (u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0, 93 (u64) nasid, (u64) widget_num,
93 0); 94 (u64) device_num, (u64) address, 0, 0, 0);
94 return ret_stuff.v0; 95 return ret_stuff.status;
95 96
96} 97}
97 98
98/* 99/*
99 * Retrieve the hub device info structure for the given nasid. 100 * Retrieve the hub device info structure for the given nasid.
100 */ 101 */
101static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address) 102static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
102{ 103{
103 104
104 struct ia64_sal_retval ret_stuff; 105 struct ia64_sal_retval ret_stuff;
@@ -114,7 +115,7 @@ static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
114/* 115/*
115 * Retrieve the pci bus information given the bus number. 116 * Retrieve the pci bus information given the bus number.
116 */ 117 */
117static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) 118static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
118{ 119{
119 120
120 struct ia64_sal_retval ret_stuff; 121 struct ia64_sal_retval ret_stuff;
@@ -130,9 +131,9 @@ static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
130/* 131/*
131 * Retrieve the pci device information given the bus and device|function number. 132 * Retrieve the pci device information given the bus and device|function number.
132 */ 133 */
133static inline uint64_t 134static inline u64
134sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, 135sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
135 u64 sn_irq_info) 136 u64 sn_irq_info)
136{ 137{
137 struct ia64_sal_retval ret_stuff; 138 struct ia64_sal_retval ret_stuff;
138 ret_stuff.status = 0; 139 ret_stuff.status = 0;
@@ -140,7 +141,7 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
140 141
141 SAL_CALL_NOLOCK(ret_stuff, 142 SAL_CALL_NOLOCK(ret_stuff,
142 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO, 143 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
143 (u64) segment, (u64) bus_number, (u64) devfn, 144 (u64) segment, (u64) bus_number, (u64) devfn,
144 (u64) pci_dev, 145 (u64) pci_dev,
145 sn_irq_info, 0, 0); 146 sn_irq_info, 0, 0);
146 return ret_stuff.v0; 147 return ret_stuff.v0;
@@ -164,18 +165,55 @@ sn_pcidev_info_get(struct pci_dev *dev)
164 return NULL; 165 return NULL;
165} 166}
166 167
168/* Older PROM flush WAR
169 *
170 * 01/16/06 -- This WAR will be in place until a new official PROM is released.
171 * Additionally note that the struct sn_flush_device_war also has to be
172 * removed from arch/ia64/sn/include/xtalk/hubdev.h
173 */
174static u8 war_implemented = 0;
175
176static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
177 struct sn_flush_device_common *common)
178{
179 struct sn_flush_device_war *war_list;
180 struct sn_flush_device_war *dev_entry;
181 struct ia64_sal_retval isrv = {0,0,0,0};
182
183 if (!war_implemented) {
184 printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
185 "PROM flush WAR\n");
186 war_implemented = 1;
187 }
188
189 war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
190 if (!war_list)
191 BUG();
192
193 SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
194 nasid, widget, __pa(war_list), 0, 0, 0 ,0);
195 if (isrv.status)
196 panic("sn_device_fixup_war failed: %s\n",
197 ia64_sal_strerror(isrv.status));
198
199 dev_entry = war_list + device;
200 memcpy(common,dev_entry, sizeof(*common));
201
202 kfree(war_list);
203}
204
167/* 205/*
168 * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for 206 * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
169 * each node in the system. 207 * each node in the system.
170 */ 208 */
171static void sn_fixup_ionodes(void) 209static void sn_fixup_ionodes(void)
172{ 210{
173 211 struct sn_flush_device_kernel *sn_flush_device_kernel;
174 struct sn_flush_device_list *sn_flush_device_list; 212 struct sn_flush_device_kernel *dev_entry;
175 struct hubdev_info *hubdev; 213 struct hubdev_info *hubdev;
176 uint64_t status; 214 u64 status;
177 uint64_t nasid; 215 u64 nasid;
178 int i, widget; 216 int i, widget, device;
179 217
180 /* 218 /*
181 * Get SGI Specific HUB chipset information. 219 * Get SGI Specific HUB chipset information.
@@ -186,7 +224,7 @@ static void sn_fixup_ionodes(void)
186 nasid = cnodeid_to_nasid(i); 224 nasid = cnodeid_to_nasid(i);
187 hubdev->max_segment_number = 0xffffffff; 225 hubdev->max_segment_number = 0xffffffff;
188 hubdev->max_pcibus_number = 0xff; 226 hubdev->max_pcibus_number = 0xff;
189 status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev)); 227 status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
190 if (status) 228 if (status)
191 continue; 229 continue;
192 230
@@ -213,38 +251,60 @@ static void sn_fixup_ionodes(void)
213 251
214 hubdev->hdi_flush_nasid_list.widget_p = 252 hubdev->hdi_flush_nasid_list.widget_p =
215 kmalloc((HUB_WIDGET_ID_MAX + 1) * 253 kmalloc((HUB_WIDGET_ID_MAX + 1) *
216 sizeof(struct sn_flush_device_list *), GFP_KERNEL); 254 sizeof(struct sn_flush_device_kernel *),
217 255 GFP_KERNEL);
218 memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0, 256 memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
219 (HUB_WIDGET_ID_MAX + 1) * 257 (HUB_WIDGET_ID_MAX + 1) *
220 sizeof(struct sn_flush_device_list *)); 258 sizeof(struct sn_flush_device_kernel *));
221 259
222 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { 260 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
223 sn_flush_device_list = kmalloc(DEV_PER_WIDGET * 261 sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET *
224 sizeof(struct 262 sizeof(struct
225 sn_flush_device_list), 263 sn_flush_device_kernel),
226 GFP_KERNEL); 264 GFP_KERNEL);
227 memset(sn_flush_device_list, 0x0, 265 if (!sn_flush_device_kernel)
266 BUG();
267 memset(sn_flush_device_kernel, 0x0,
228 DEV_PER_WIDGET * 268 DEV_PER_WIDGET *
229 sizeof(struct sn_flush_device_list)); 269 sizeof(struct sn_flush_device_kernel));
230 270
231 status = 271 dev_entry = sn_flush_device_kernel;
232 sal_get_widget_dmaflush_list(nasid, widget, 272 for (device = 0; device < DEV_PER_WIDGET;
233 (uint64_t) 273 device++,dev_entry++) {
234 __pa 274 dev_entry->common = kmalloc(sizeof(struct
235 (sn_flush_device_list)); 275 sn_flush_device_common),
236 if (status) { 276 GFP_KERNEL);
237 kfree(sn_flush_device_list); 277 if (!dev_entry->common)
238 continue; 278 BUG();
279 memset(dev_entry->common, 0x0, sizeof(struct
280 sn_flush_device_common));
281
282 status = sal_get_device_dmaflush_list(nasid,
283 widget,
284 device,
285 (u64)(dev_entry->common));
286 if (status) {
287 if (sn_sal_rev() < 0x0450) {
288 /* shortlived WAR for older
289 * PROM images
290 */
291 sn_device_fixup_war(nasid,
292 widget,
293 device,
294 dev_entry->common);
295 }
296 else
297 BUG();
298 }
299
300 spin_lock_init(&dev_entry->sfdl_flush_lock);
239 } 301 }
240 302
241 spin_lock_init(&sn_flush_device_list->sfdl_flush_lock); 303 if (sn_flush_device_kernel)
242 hubdev->hdi_flush_nasid_list.widget_p[widget] = 304 hubdev->hdi_flush_nasid_list.widget_p[widget] =
243 sn_flush_device_list; 305 sn_flush_device_kernel;
244 } 306 }
245
246 } 307 }
247
248} 308}
249 309
250/* 310/*
@@ -256,7 +316,7 @@ static void sn_fixup_ionodes(void)
256 */ 316 */
257static void 317static void
258sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, 318sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
259 int64_t * pci_addrs) 319 s64 * pci_addrs)
260{ 320{
261 struct pci_controller *controller = PCI_CONTROLLER(dev->bus); 321 struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
262 unsigned int i; 322 unsigned int i;
@@ -316,7 +376,7 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
316 struct pci_bus *host_pci_bus; 376 struct pci_bus *host_pci_bus;
317 struct pci_dev *host_pci_dev; 377 struct pci_dev *host_pci_dev;
318 struct pcidev_info *pcidev_info; 378 struct pcidev_info *pcidev_info;
319 int64_t pci_addrs[PCI_ROM_RESOURCE + 1]; 379 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
320 struct sn_irq_info *sn_irq_info; 380 struct sn_irq_info *sn_irq_info;
321 unsigned long size; 381 unsigned long size;
322 unsigned int bus_no, devfn; 382 unsigned int bus_no, devfn;
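
The new per-device SAL call writes the flush data straight into kernel memory through a physical address, and failure is now judged by ret_stuff.status rather than v0. The core of the exchange, as a sketch (fetch_flush_info() is hypothetical):

static int fetch_flush_info(u64 nasid, u64 widget, u64 device,
                            struct sn_flush_device_common *common)
{
        /* SAL/PROM fills *common in place; nonzero status means failure */
        return (int)sal_get_device_dmaflush_list(nasid, widget, device,
                                                 (u64)__pa(common));
}
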
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 01d18b7b5bb3..ec37084bdc17 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -28,7 +28,7 @@ extern int sn_ioif_inited;
28static struct list_head **sn_irq_lh; 28static struct list_head **sn_irq_lh;
29static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ 29static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
30 30
31static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget, 31static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
32 u64 sn_irq_info, 32 u64 sn_irq_info,
33 int req_irq, nasid_t req_nasid, 33 int req_irq, nasid_t req_nasid,
34 int req_slice) 34 int req_slice)
@@ -123,7 +123,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
123 123
124 list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, 124 list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
125 sn_irq_lh[irq], list) { 125 sn_irq_lh[irq], list) {
126 uint64_t bridge; 126 u64 bridge;
127 int local_widget, status; 127 int local_widget, status;
128 nasid_t local_nasid; 128 nasid_t local_nasid;
129 struct sn_irq_info *new_irq_info; 129 struct sn_irq_info *new_irq_info;
@@ -134,7 +134,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
134 break; 134 break;
135 memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); 135 memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
136 136
137 bridge = (uint64_t) new_irq_info->irq_bridge; 137 bridge = (u64) new_irq_info->irq_bridge;
138 if (!bridge) { 138 if (!bridge) {
139 kfree(new_irq_info); 139 kfree(new_irq_info);
140 break; /* irq is not a device interrupt */ 140 break; /* irq is not a device interrupt */
@@ -349,10 +349,10 @@ static void force_interrupt(int irq)
349 */ 349 */
350static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) 350static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
351{ 351{
352 uint64_t regval; 352 u64 regval;
353 int irr_reg_num; 353 int irr_reg_num;
354 int irr_bit; 354 int irr_bit;
355 uint64_t irr_reg; 355 u64 irr_reg;
356 struct pcidev_info *pcidev_info; 356 struct pcidev_info *pcidev_info;
357 struct pcibus_info *pcibus_info; 357 struct pcibus_info *pcibus_info;
358 358
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 6546db6abdba..9ab684d1bb55 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/timer.h> 11#include <linux/timer.h>
12#include <linux/vmalloc.h> 12#include <linux/vmalloc.h>
13#include <linux/mutex.h>
13#include <asm/mca.h> 14#include <asm/mca.h>
14#include <asm/sal.h> 15#include <asm/sal.h>
15#include <asm/sn/sn_sal.h> 16#include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
27/* Printing oemdata from mca uses data that is not passed through SAL, it is 28/* Printing oemdata from mca uses data that is not passed through SAL, it is
28 * global. Only one user at a time. 29 * global. Only one user at a time.
29 */ 30 */
30static DECLARE_MUTEX(sn_oemdata_mutex); 31static DEFINE_MUTEX(sn_oemdata_mutex);
31static u8 **sn_oemdata; 32static u8 **sn_oemdata;
32static u64 *sn_oemdata_size, sn_oemdata_bufsize; 33static u64 *sn_oemdata_size, sn_oemdata_bufsize;
33 34
@@ -89,7 +90,7 @@ static int
89sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, 90sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
90 u64 * oemdata_size) 91 u64 * oemdata_size)
91{ 92{
92 down(&sn_oemdata_mutex); 93 mutex_lock(&sn_oemdata_mutex);
93 sn_oemdata = oemdata; 94 sn_oemdata = oemdata;
94 sn_oemdata_size = oemdata_size; 95 sn_oemdata_size = oemdata_size;
95 sn_oemdata_bufsize = 0; 96 sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
107 *sn_oemdata_size = 0; 108 *sn_oemdata_size = 0;
108 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); 109 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
109 } 110 }
110 up(&sn_oemdata_mutex); 111 mutex_unlock(&sn_oemdata_mutex);
111 return 0; 112 return 0;
112} 113}
113 114
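
This is the standard semaphore-to-mutex conversion: DECLARE_MUTEX (a semaphore initialized to 1) becomes DEFINE_MUTEX, and down()/up() become mutex_lock()/mutex_unlock(). The same shape in isolation (example_lock and serialized_work() are hypothetical):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static void serialized_work(void)
{
        mutex_lock(&example_lock);
        /* exactly one caller at a time executes here */
        mutex_unlock(&example_lock);
}
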
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 0d8592a745a7..d263d3e8fbb9 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -11,6 +11,7 @@
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
14#include <linux/capability.h>
14#include <linux/device.h> 15#include <linux/device.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <asm/system.h> 17#include <asm/system.h>
@@ -65,7 +66,7 @@ static int tiocx_match(struct device *dev, struct device_driver *drv)
65 66
66} 67}
67 68
68static int tiocx_hotplug(struct device *dev, char **envp, int num_envp, 69static int tiocx_uevent(struct device *dev, char **envp, int num_envp,
69 char *buffer, int buffer_size) 70 char *buffer, int buffer_size)
70{ 71{
71 return -ENODEV; 72 return -ENODEV;
@@ -76,12 +77,6 @@ static void tiocx_bus_release(struct device *dev)
76 kfree(to_cx_dev(dev)); 77 kfree(to_cx_dev(dev));
77} 78}
78 79
79struct bus_type tiocx_bus_type = {
80 .name = "tiocx",
81 .match = tiocx_match,
82 .hotplug = tiocx_hotplug,
83};
84
85/** 80/**
86 * cx_device_match - Find cx_device in the id table. 81 * cx_device_match - Find cx_device in the id table.
87 * @ids: id table from driver 82 * @ids: id table from driver
@@ -148,6 +143,14 @@ static int cx_driver_remove(struct device *dev)
148 return 0; 143 return 0;
149} 144}
150 145
146struct bus_type tiocx_bus_type = {
147 .name = "tiocx",
148 .match = tiocx_match,
149 .uevent = tiocx_uevent,
150 .probe = cx_device_probe,
151 .remove = cx_driver_remove,
152};
153
151/** 154/**
152 * cx_driver_register - Register the driver. 155 * cx_driver_register - Register the driver.
153 * @cx_driver: driver table (cx_drv struct) from driver 156 * @cx_driver: driver table (cx_drv struct) from driver
@@ -161,8 +164,6 @@ int cx_driver_register(struct cx_drv *cx_driver)
161{ 164{
162 cx_driver->driver.name = cx_driver->name; 165 cx_driver->driver.name = cx_driver->name;
163 cx_driver->driver.bus = &tiocx_bus_type; 166 cx_driver->driver.bus = &tiocx_bus_type;
164 cx_driver->driver.probe = cx_device_probe;
165 cx_driver->driver.remove = cx_driver_remove;
166 167
167 return driver_register(&cx_driver->driver); 168 return driver_register(&cx_driver->driver);
168} 169}
@@ -244,7 +245,7 @@ static int cx_device_reload(struct cx_dev *cx_dev)
244 cx_dev->bt); 245 cx_dev->bt);
245} 246}
246 247
247static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget, 248static inline u64 tiocx_intr_alloc(nasid_t nasid, int widget,
248 u64 sn_irq_info, 249 u64 sn_irq_info,
249 int req_irq, nasid_t req_nasid, 250 int req_irq, nasid_t req_nasid,
250 int req_slice) 251 int req_slice)
@@ -301,7 +302,7 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
301 302
302void tiocx_irq_free(struct sn_irq_info *sn_irq_info) 303void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
303{ 304{
304 uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; 305 u64 bridge = (u64) sn_irq_info->irq_bridge;
305 nasid_t nasid = NASID_GET(bridge); 306 nasid_t nasid = NASID_GET(bridge);
306 int widget; 307 int widget;
307 308
@@ -312,12 +313,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
312 } 313 }
313} 314}
314 315
315uint64_t tiocx_dma_addr(uint64_t addr) 316u64 tiocx_dma_addr(u64 addr)
316{ 317{
317 return PHYS_TO_TIODMA(addr); 318 return PHYS_TO_TIODMA(addr);
318} 319}
319 320
320uint64_t tiocx_swin_base(int nasid) 321u64 tiocx_swin_base(int nasid)
321{ 322{
322 return TIO_SWIN_BASE(nasid, TIOCX_CORELET); 323 return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
323} 324}
@@ -334,8 +335,8 @@ EXPORT_SYMBOL(tiocx_swin_base);
334 335
335static void tio_conveyor_set(nasid_t nasid, int enable_flag) 336static void tio_conveyor_set(nasid_t nasid, int enable_flag)
336{ 337{
337 uint64_t ice_frz; 338 u64 ice_frz;
338 uint64_t disable_cb = (1ull << 61); 339 u64 disable_cb = (1ull << 61);
339 340
340 if (!(nasid & 1)) 341 if (!(nasid & 1))
341 return; 342 return;
@@ -387,7 +388,7 @@ static int is_fpga_tio(int nasid, int *bt)
387 388
388static int bitstream_loaded(nasid_t nasid) 389static int bitstream_loaded(nasid_t nasid)
389{ 390{
390 uint64_t cx_credits; 391 u64 cx_credits;
391 392
392 cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3); 393 cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3);
393 cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK; 394 cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK;
@@ -403,14 +404,14 @@ static int tiocx_reload(struct cx_dev *cx_dev)
403 nasid_t nasid = cx_dev->cx_id.nasid; 404 nasid_t nasid = cx_dev->cx_id.nasid;
404 405
405 if (bitstream_loaded(nasid)) { 406 if (bitstream_loaded(nasid)) {
406 uint64_t cx_id; 407 u64 cx_id;
407 int rv; 408 int rv;
408 409
409 rv = ia64_sn_sysctl_tio_clock_reset(nasid); 410 rv = ia64_sn_sysctl_tio_clock_reset(nasid);
410 if (rv) { 411 if (rv) {
411 printk(KERN_ALERT "CX port JTAG reset failed.\n"); 412 printk(KERN_ALERT "CX port JTAG reset failed.\n");
412 } else { 413 } else {
413 cx_id = *(volatile uint64_t *) 414 cx_id = *(volatile u64 *)
414 (TIO_SWIN_BASE(nasid, TIOCX_CORELET) + 415 (TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
415 WIDGET_ID); 416 WIDGET_ID);
416 part_num = XWIDGET_PART_NUM(cx_id); 417 part_num = XWIDGET_PART_NUM(cx_id);
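
With probe and remove hung off the bus_type, cx_driver_register() only wires up the driver's name and bus; the driver core reaches uevent/probe/remove through tiocx_bus_type. What a registration reduces to (example_driver is hypothetical):

static struct device_driver example_driver = {
        .name = "example-cx",
        .bus  = &tiocx_bus_type,
};

/* later: driver_register(&example_driver); */
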
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
index 3be52a34c80f..b7ea46645e12 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/mutex.h>
22#include <asm/sn/intr.h> 23#include <asm/sn/intr.h>
23#include <asm/sn/sn_sal.h> 24#include <asm/sn/sn_sal.h>
24#include <asm/sn/xp.h> 25#include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
136 137
137 registration = &xpc_registrations[ch_number]; 138 registration = &xpc_registrations[ch_number];
138 139
139 if (down_interruptible(&registration->sema) != 0) { 140 if (mutex_lock_interruptible(&registration->mutex) != 0) {
140 return xpcInterrupted; 141 return xpcInterrupted;
141 } 142 }
142 143
143 /* if XPC_CHANNEL_REGISTERED(ch_number) */ 144 /* if XPC_CHANNEL_REGISTERED(ch_number) */
144 if (registration->func != NULL) { 145 if (registration->func != NULL) {
145 up(&registration->sema); 146 mutex_unlock(&registration->mutex);
146 return xpcAlreadyRegistered; 147 return xpcAlreadyRegistered;
147 } 148 }
148 149
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
154 registration->key = key; 155 registration->key = key;
155 registration->func = func; 156 registration->func = func;
156 157
157 up(&registration->sema); 158 mutex_unlock(&registration->mutex);
158 159
159 xpc_interface.connect(ch_number); 160 xpc_interface.connect(ch_number);
160 161
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
190 * figured XPC's users will just turn around and call xpc_disconnect() 191 * figured XPC's users will just turn around and call xpc_disconnect()
191 * again anyways, so we might as well wait, if need be. 192 * again anyways, so we might as well wait, if need be.
192 */ 193 */
193 down(&registration->sema); 194 mutex_lock(&registration->mutex);
194 195
195 /* if !XPC_CHANNEL_REGISTERED(ch_number) */ 196 /* if !XPC_CHANNEL_REGISTERED(ch_number) */
196 if (registration->func == NULL) { 197 if (registration->func == NULL) {
197 up(&registration->sema); 198 mutex_unlock(&registration->mutex);
198 return; 199 return;
199 } 200 }
200 201
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
208 209
209 xpc_interface.disconnect(ch_number); 210 xpc_interface.disconnect(ch_number);
210 211
211 up(&registration->sema); 212 mutex_unlock(&registration->mutex);
212 213
213 return; 214 return;
214} 215}
@@ -250,9 +251,9 @@ xp_init(void)
250 xp_nofault_PIOR_target = SH1_IPI_ACCESS; 251 xp_nofault_PIOR_target = SH1_IPI_ACCESS;
251 } 252 }
252 253
253 /* initialize the connection registration semaphores */ 254 /* initialize the connection registration mutex */
254 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { 255 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
255 sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */ 256 mutex_init(&xpc_registrations[ch_number].mutex);
256 } 257 }
257 258
258 return 0; 259 return 0;
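
mutex_lock_interruptible() fails with a nonzero result if a signal arrives while waiting, which xpc_connect() maps to xpcInterrupted; xpc_disconnect() keeps the uninterruptible mutex_lock() since its callers would only retry anyway. The pattern in isolation (locked_registration_work() is hypothetical):

static int locked_registration_work(struct mutex *mtx)
{
        if (mutex_lock_interruptible(mtx) != 0)
                return -EINTR;          /* caller maps this to xpcInterrupted */
        /* ... touch the registration under the lock ... */
        mutex_unlock(mtx);
        return 0;
}
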
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
deleted file mode 100644
index 5483a9f227d4..000000000000
--- a/arch/ia64/sn/kernel/xpc.h
+++ /dev/null
@@ -1,1273 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition Communication (XPC) structures and macros.
12 */
13
14#ifndef _IA64_SN_KERNEL_XPC_H
15#define _IA64_SN_KERNEL_XPC_H
16
17
18#include <linux/config.h>
19#include <linux/interrupt.h>
20#include <linux/sysctl.h>
21#include <linux/device.h>
22#include <asm/pgtable.h>
23#include <asm/processor.h>
24#include <asm/sn/bte.h>
25#include <asm/sn/clksupport.h>
26#include <asm/sn/addrs.h>
27#include <asm/sn/mspec.h>
28#include <asm/sn/shub_mmr.h>
29#include <asm/sn/xp.h>
30
31
32/*
33 * XPC Version numbers consist of a major and minor number. XPC can always
34 * talk to versions with same major #, and never talk to versions with a
35 * different major #.
36 */
37#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
38#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
39#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
40
41
42/*
43 * The next macros define word or bit representations for given
44 * C-brick nasid in either the SAL provided bit array representing
45 * nasids in the partition/machine or the AMO_t array used for
46 * inter-partition initiation communications.
47 *
48 * For SN2 machines, C-Bricks are always even numbered NASIDs. As
49 * such, some space will be saved by insisting that nasid information
50 * passed from SAL always be packed for C-Bricks and the
51 * cross-partition interrupts use the same packing scheme.
52 */
53#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
54#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
55#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
56 (1UL << XPC_NASID_B_INDEX(_n)))
57#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
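
A worked example for an even C-brick nasid, say 70, shows the word/bit mapping round-trips:

/*
 * XPC_NASID_W_INDEX(70)     = (70 / 64) / 2      = 0
 * XPC_NASID_B_INDEX(70)     = (70 / 2) & 63      = 35
 * XPC_NASID_FROM_W_B(0, 35) = (0 * 64 + 35) * 2  = 70
 */
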
58
59#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
60#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
61
62/* define the process name of HB checker and the CPU it is pinned to */
63#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
64#define XPC_HB_CHECK_CPU 0
65
66/* define the process name of the discovery thread */
67#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
68
69
70/*
71 * the reserved page
72 *
73 * SAL reserves one page of memory per partition for XPC. Though a full page
74 * in length (16384 bytes), its starting address is not page aligned, but it
75 * is cacheline aligned. The reserved page consists of the following:
76 *
77 * reserved page header
78 *
79 * The first cacheline of the reserved page contains the header
80 * (struct xpc_rsvd_page). Before SAL initialization has completed,
81 * SAL has set up the following fields of the reserved page header:
82 * SAL_signature, SAL_version, partid, and nasids_size. The other
83 * fields are set up by XPC. (xpc_rsvd_page points to the local
84 * partition's reserved page.)
85 *
86 * part_nasids mask
87 * mach_nasids mask
88 *
89 * SAL also sets up two bitmaps (or masks), one that reflects the actual
90 * nasids in this partition (part_nasids), and the other that reflects
91 * the actual nasids in the entire machine (mach_nasids). We're only
92 * interested in the even numbered nasids (which contain the processors
93 * and/or memory), so we only need half as many bits to represent the
94 * nasids. The part_nasids mask is located starting at the first cacheline
95 * following the reserved page header. The mach_nasids mask follows right
96 * after the part_nasids mask. The size in bytes of each mask is reflected
97 * by the reserved page header field 'nasids_size'. (Local partition's
98 * mask pointers are xpc_part_nasids and xpc_mach_nasids.)
99 *
100 * vars
101 * vars part
102 *
103 * Immediately following the mach_nasids mask are the XPC variables
104 * required by other partitions. First are those that are generic to all
105 * partitions (vars), followed on the next available cacheline by those
106 * which are partition specific (vars part). These are set up by XPC.
107 * (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
108 *
109 * Note: Until vars_pa is set, the partition XPC code has not been initialized.
110 */
111struct xpc_rsvd_page {
112 u64 SAL_signature; /* SAL: unique signature */
113 u64 SAL_version; /* SAL: version */
114 u8 partid; /* SAL: partition ID */
115 u8 version;
116 u8 pad1[6]; /* align to next u64 in cacheline */
117 volatile u64 vars_pa;
118 struct timespec stamp; /* time when reserved page was setup by XPC */
119 u64 pad2[9]; /* align to last u64 in cacheline */
120 u64 nasids_size; /* SAL: size of each nasid mask in bytes */
121};
122
123#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
124
125#define XPC_SUPPORTS_RP_STAMP(_version) \
126 (_version >= _XPC_VERSION(1,1))
127
128/*
129 * compare stamps - the return value is:
130 *
131 * < 0, if stamp1 < stamp2
132 * = 0, if stamp1 == stamp2
133 * > 0, if stamp1 > stamp2
134 */
135static inline int
136xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
137{
138 int ret;
139
140
141 if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
142 ret = stamp1->tv_nsec - stamp2->tv_nsec;
143 }
144 return ret;
145}
146
147
148/*
149 * Define the structures by which XPC variables can be exported to other
150 * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
151 */
152
153/*
154 * The following structure describes the partition generic variables
155 * needed by other partitions in order to properly initialize.
156 *
157 * struct xpc_vars version number also applies to struct xpc_vars_part.
158 * Changes to either structure and/or related functionality should be
159 * reflected by incrementing either the major or minor version numbers
160 * of struct xpc_vars.
161 */
162struct xpc_vars {
163 u8 version;
164 u64 heartbeat;
165 u64 heartbeating_to_mask;
166 u64 heartbeat_offline; /* if 0, heartbeat should be changing */
167 int act_nasid;
168 int act_phys_cpuid;
169 u64 vars_part_pa;
170 u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */
171 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
172};
173
174#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
175
176#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
177 (_version >= _XPC_VERSION(3,1))
178
179
180static inline int
181xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
182{
183 return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
184}
185
186static inline void
187xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
188{
189 u64 old_mask, new_mask;
190
191 do {
192 old_mask = vars->heartbeating_to_mask;
193 new_mask = (old_mask | (1UL << partid));
194 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
195 old_mask);
196}
197
198static inline void
199xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
200{
201 u64 old_mask, new_mask;
202
203 do {
204 old_mask = vars->heartbeating_to_mask;
205 new_mask = (old_mask & ~(1UL << partid));
206 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
207 old_mask);
208}
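
Both helpers use the lock-free read-modify-write idiom: snapshot the mask, derive the new value, and retry whenever cmpxchg() shows another CPU changed the word in between. The idiom reduced to its core (set_bit64_atomic() is a hypothetical name):

static inline void set_bit64_atomic(volatile u64 *word, int bit)
{
        u64 old;

        do {
                old = *word;
        } while (cmpxchg(word, old, old | (1UL << bit)) != old);
}
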
209
210
211/*
212 * The AMOs page consists of a number of AMO variables which are divided into
213 * four groups, The first two groups are used to identify an IRQ's sender.
214 * These two groups consist of 64 and 128 AMO variables respectively. The last
215 * two groups, consisting of just one AMO variable each, are used to identify
216 * the remote partitions that are currently engaged (from the viewpoint of
217 * the XPC running on the remote partition).
218 */
219#define XPC_NOTIFY_IRQ_AMOS 0
220#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
221#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
222#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
223
224
225/*
226 * The following structure describes the per partition specific variables.
227 *
228 * An array of these structures, one per partition, will be defined. As a
229 * partition becomes active XPC will copy the array entry corresponding to
230 * itself from that partition. It is desirable that the size of this
231 * structure evenly divide into a cacheline, such that none of the entries
232 * in this array crosses a cacheline boundary. As it is now, each entry
233 * occupies half a cacheline.
234 */
235struct xpc_vars_part {
236 volatile u64 magic;
237
238 u64 openclose_args_pa; /* physical address of open and close args */
239 u64 GPs_pa; /* physical address of Get/Put values */
240
241 u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */
242 int IPI_nasid; /* nasid of where to send IPIs */
243 int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
244
245 u8 nchannels; /* #of defined channels supported */
246
247 u8 reserved[23]; /* pad to a full 64 bytes */
248};
249
250/*
251 * The vars_part MAGIC numbers play a part in the first contact protocol.
252 *
253 * MAGIC1 indicates that the per partition specific variables for a remote
254 * partition have been initialized by this partition.
255 *
256 * MAGIC2 indicates that this partition has pulled the remote partition's
257 * per partition variables that pertain to this partition.
258 */
259#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
260#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
261
262
263/* the reserved page sizes and offsets */
264
265#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
266#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars))
267
268#define XPC_RP_PART_NASIDS(_rp) ((u64 *) ((u8 *) (_rp) + XPC_RP_HEADER_SIZE))
269#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
270#define XPC_RP_VARS(_rp)	((struct xpc_vars *) (XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words))
271#define XPC_RP_VARS_PART(_rp)	((struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE))
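/*
 * Resulting reserved page layout (derived from the macros above):
 *
 *	+--------------------------+ _rp
 *	| struct xpc_rsvd_page     |  XPC_RP_HEADER_SIZE bytes
 *	+--------------------------+ <-- XPC_RP_PART_NASIDS(_rp)
 *	| partition nasid mask     |  xp_nasid_mask_words u64s
 *	+--------------------------+ <-- XPC_RP_MACH_NASIDS(_rp)
 *	| machine nasid mask       |  xp_nasid_mask_words u64s
 *	+--------------------------+ <-- XPC_RP_VARS(_rp)
 *	| struct xpc_vars          |  XPC_RP_VARS_SIZE bytes
 *	+--------------------------+ <-- XPC_RP_VARS_PART(_rp)
 *	| struct xpc_vars_part[]   |
 *	+--------------------------+
 */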
272
273
274/*
275 * Functions registered by add_timer() or called by kernel_thread() only
276 * allow for a single 64-bit argument. The following macros can be used to
277 * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
278 * the passed argument.
279 */
280#define XPC_PACK_ARGS(_arg1, _arg2) \
281 ((((u64) _arg1) & 0xffffffff) | \
282 ((((u64) _arg2) & 0xffffffff) << 32))
283
284#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
285#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
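/*
 * Illustrative sketch (names assumed, not part of this header): packing
 * a partid and channel number into a timer's single data argument and
 * recovering them in the callback:
 *
 *	timer->data = XPC_PACK_ARGS(partid, ch_number);
 *
 *	static void example_timeout(unsigned long data)
 *	{
 *		partid_t partid = (partid_t) XPC_UNPACK_ARG1(data);
 *		int ch_number = (int) XPC_UNPACK_ARG2(data);
 *		...
 *	}
 */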
286
287
288
289/*
290 * Define a Get/Put value pair (pointers) used with a message queue.
291 */
292struct xpc_gp {
293 volatile s64 get; /* Get value */
294 volatile s64 put; /* Put value */
295};
296
297#define XPC_GP_SIZE \
298 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
299
300
301
302/*
303 * Define a structure that contains arguments associated with opening and
304 * closing a channel.
305 */
306struct xpc_openclose_args {
307 u16 reason; /* reason why channel is closing */
308 u16 msg_size; /* sizeof each message entry */
309 u16 remote_nentries; /* #of message entries in remote msg queue */
310 u16 local_nentries; /* #of message entries in local msg queue */
311 u64 local_msgqueue_pa; /* physical address of local message queue */
312};
313
314#define XPC_OPENCLOSE_ARGS_SIZE \
315 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
316
317
318
319/* struct xpc_msg flags */
320
321#define XPC_M_DONE 0x01 /* msg has been received/consumed */
322#define XPC_M_READY 0x02 /* msg is ready to be sent */
323#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */
324
325
326#define XPC_MSG_ADDRESS(_payload) \
327 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
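/*
 * Example: given the payload pointer previously handed to a user (e.g.
 * by xpc_initiate_allocate()), the enclosing message header can be
 * recovered with:
 *
 *	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
 */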
328
329
330
331/*
332 * Defines notify entry.
333 *
334 * This is used to notify a message's sender that their message was received
335 * and consumed by the intended recipient.
336 */
337struct xpc_notify {
338 struct semaphore sema; /* notify semaphore */
339 volatile u8 type; /* type of notification */
340
341 /* the following two fields are only used if type == XPC_N_CALL */
342 xpc_notify_func func; /* user's notify function */
343 void *key; /* pointer to user's key */
344};
345
346/* struct xpc_notify type of notification */
347
348#define XPC_N_CALL 0x01 /* notify function provided by user */
349
350
351
352/*
353 * Define the structure that manages all the state required by a channel. In
354 * particular, it is used to manage the messages sent across the channel.
355 *
356 * This structure is private to a partition, and is NOT shared across the
357 * partition boundary.
358 *
359 * There is an array of these structures for each remote partition. It is
360 * allocated at the time a partition becomes active. The array contains one
361 * of these structures for each potential channel connection to that partition.
362 *
363 * Each of these structures manages two message queues (circular buffers).
364 * They are allocated at the time a channel connection is made. One of
365 * these message queues (local_msgqueue) holds the locally created messages
366 * that are destined for the remote partition. The other of these message
367 * queues (remote_msgqueue) is a locally cached copy of the remote partition's
368 * own local_msgqueue.
369 *
370 * The following is a description of the Get/Put pointers used to manage these
371 * two message queues. Consider the local_msgqueue to be on one partition
372 * and the remote_msgqueue to be its cached copy on another partition. A
373 * description of what each of the lettered areas contains is included.
374 *
375 *
376 * local_msgqueue remote_msgqueue
377 *
378 * |/////////| |/////////|
379 * w_remote_GP.get --> +---------+ |/////////|
380 * | F | |/////////|
381 * remote_GP.get --> +---------+ +---------+ <-- local_GP->get
382 * | | | |
383 * | | | E |
384 * | | | |
385 * | | +---------+ <-- w_local_GP.get
386 * | B | |/////////|
387 * | | |////D////|
388 * | | |/////////|
389 * | | +---------+ <-- w_remote_GP.put
390 * | | |////C////|
391 * local_GP->put --> +---------+ +---------+ <-- remote_GP.put
392 * | | |/////////|
393 * | A | |/////////|
394 * | | |/////////|
395 * w_local_GP.put --> +---------+ |/////////|
396 * |/////////| |/////////|
397 *
398 *
399 * ( remote_GP.[get|put] are cached copies of the remote
400 * partition's local_GP->[get|put], and thus their values can
401 * lag behind their counterparts on the remote partition. )
402 *
403 *
404 * A - Messages that have been allocated, but have not yet been sent to the
405 * remote partition.
406 *
407 * B - Messages that have been sent, but have not yet been acknowledged by the
408 * remote partition as having been received.
409 *
410 * C - Area that needs to be prepared for the copying of sent messages, by
411 * the clearing of the message flags of any previously received messages.
412 *
413 * D - Area into which sent messages are to be copied from the remote
414 * partition's local_msgqueue and then delivered to their intended
415 * recipients. [ To allow for a multi-message copy, another pointer
416 * (next_msg_to_pull) has been added to keep track of the next message
417 * number needing to be copied (pulled). It chases after w_remote_GP.put.
418 * Any messages lying between w_local_GP.get and next_msg_to_pull have
419 * been copied and are ready to be delivered. ]
420 *
421 * E - Messages that have been copied and delivered, but have not yet been
422 * acknowledged by the recipient as having been received.
423 *
424 * F - Messages that have been acknowledged, but XPC has not yet notified the
425 * sender that the message was received by its intended recipient.
426 * This is also an area that needs to be prepared for the allocating of
427 * new messages, by the clearing of the message flags of the acknowledged
428 * messages.
429 */
430struct xpc_channel {
431 partid_t partid; /* ID of remote partition connected */
432 spinlock_t lock; /* lock for updating this structure */
433 u32 flags; /* general flags */
434
435 enum xpc_retval reason; /* reason why channel is disconnect'g */
436 int reason_line; /* line# disconnect initiated from */
437
438 u16 number; /* channel # */
439
440 u16 msg_size; /* sizeof each msg entry */
441 u16 local_nentries; /* #of msg entries in local msg queue */
442 u16 remote_nentries; /* #of msg entries in remote msg queue*/
443
444 void *local_msgqueue_base; /* base address of kmalloc'd space */
445 struct xpc_msg *local_msgqueue; /* local message queue */
446 void *remote_msgqueue_base; /* base address of kmalloc'd space */
447 struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
448 /* local message queue */
449 u64 remote_msgqueue_pa; /* phys addr of remote partition's */
450 /* local message queue */
451
452 atomic_t references; /* #of external references to queues */
453
454 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
455 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
456
457 u8 delayed_IPI_flags; /* IPI flags received, but delayed */
458 /* action until channel disconnected */
459
460 /* queue of msg senders who want to be notified when msg received */
461
462 atomic_t n_to_notify; /* #of msg senders to notify */
463 struct xpc_notify *notify_queue;/* notify queue for messages sent */
464
465 xpc_channel_func func; /* user's channel function */
466 void *key; /* pointer to user's key */
467
468 struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
469 struct semaphore wdisconnect_sema; /* wait for channel disconnect */
470
471 struct xpc_openclose_args *local_openclose_args; /* args passed on */
472 /* opening or closing of channel */
473
474 /* various flavors of local and remote Get/Put values */
475
476 struct xpc_gp *local_GP; /* local Get/Put values */
477 struct xpc_gp remote_GP; /* remote Get/Put values */
478 struct xpc_gp w_local_GP; /* working local Get/Put values */
479 struct xpc_gp w_remote_GP; /* working remote Get/Put values */
480 s64 next_msg_to_pull; /* Put value of next msg to pull */
481
482 /* kthread management related fields */
483
484// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
485// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
486// >>> dependent on activity over the last interval of time
487 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
488 u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
489 atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
490 u32 kthreads_idle_limit; /* limit on #of kthreads idle */
491 atomic_t kthreads_active; /* #of kthreads actively working */
492 // >>> following field is temporary
493 u32 kthreads_created; /* total #of kthreads created */
494
495 wait_queue_head_t idle_wq; /* idle kthread wait queue */
496
497} ____cacheline_aligned;
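/*
 * Sketch of the queue-full test implied by the diagram above (the
 * actual allocation code lives in xpc_channel.c): a new message can be
 * allocated out of the local message queue only while
 *
 *	ch->w_local_GP.put - ch->w_remote_GP.get < ch->local_nentries
 *
 * since every entry from w_remote_GP.get up to w_local_GP.put is still
 * in use (areas F, B and A).
 */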
498
499
500/* struct xpc_channel flags */
501
502#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
503
504#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
505#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
506#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
507#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
508
509#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
510#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */
511#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */
512#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */
513
514#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */
515#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */
516#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */
517#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */
518
519#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */
520#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */
521#define XPC_C_DISCONNECTCALLOUT 0x00008000 /* chan disconnected callout made */
522#define XPC_C_WDISCONNECT 0x00010000 /* waiting for channel disconnect */
523
524
525
526/*
527 * Manages channels on a partition basis. There is one of these structures
528 * for each partition (a partition will never utilize the structure that
529 * represents itself).
530 */
531struct xpc_partition {
532
533 /* XPC HB infrastructure */
534
535 u8 remote_rp_version; /* version# of partition's rsvd pg */
536 struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
537 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
538 u64 remote_vars_pa; /* phys addr of partition's vars */
539 u64 remote_vars_part_pa; /* phys addr of partition's vars part */
540 u64 last_heartbeat; /* HB at last read */
541 u64 remote_amos_page_pa; /* phys addr of partition's amos page */
542 int remote_act_nasid; /* active part's act/deact nasid */
543 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
544 u32 act_IRQ_rcvd; /* IRQs since activation */
545 spinlock_t act_lock; /* protect updating of act_state */
546 u8 act_state; /* from XPC HB viewpoint */
547 u8 remote_vars_version; /* version# of partition's vars */
548 enum xpc_retval reason; /* reason partition is deactivating */
549 int reason_line; /* line# deactivation initiated from */
550 int reactivate_nasid; /* nasid in partition to reactivate */
551
552 unsigned long disengage_request_timeout; /* timeout in jiffies */
553 struct timer_list disengage_request_timer;
554
555
556 /* XPC infrastructure referencing and teardown control */
557
558 volatile u8 setup_state; /* infrastructure setup state */
559 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
560 atomic_t references; /* #of references to infrastructure */
561
562
563 /*
564 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
565 * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
566 * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
567 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
568 */
569
570
571 u8 nchannels; /* #of defined channels supported */
572 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
573 atomic_t nchannels_engaged;/* #of channels engaged with remote part */
574 struct xpc_channel *channels;/* array of channel structures */
575
576 void *local_GPs_base; /* base address of kmalloc'd space */
577 struct xpc_gp *local_GPs; /* local Get/Put values */
578 void *remote_GPs_base; /* base address of kmalloc'd space */
579 struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
580 /* values */
581 u64 remote_GPs_pa; /* phys address of remote partition's local */
582 /* Get/Put values */
583
584
585 /* fields used to pass args when opening or closing a channel */
586
587 void *local_openclose_args_base; /* base address of kmalloc'd space */
588 struct xpc_openclose_args *local_openclose_args; /* local's args */
589 void *remote_openclose_args_base; /* base address of kmalloc'd space */
590 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
591 /* args */
592 u64 remote_openclose_args_pa; /* phys addr of remote's args */
593
594
595 /* IPI sending, receiving and handling related fields */
596
597 int remote_IPI_nasid; /* nasid of where to send IPIs */
598 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
599 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
600
601 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
602 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
603 char IPI_owner[8]; /* IPI owner's name */
604 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
605
606 spinlock_t IPI_lock; /* IPI handler lock */
607
608
609 /* channel manager related fields */
610
611 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
612 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
613
614} ____cacheline_aligned;
615
616
617/* struct xpc_partition act_state values (for XPC HB) */
618
619#define XPC_P_INACTIVE 0x00 /* partition is not active */
620#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */
621#define XPC_P_ACTIVATING 0x02 /* activation thread started */
622#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
623#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
624
625
626#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
627 xpc_deactivate_partition(__LINE__, (_p), (_reason))
628
629
630/* struct xpc_partition setup_state values */
631
632#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
633#define XPC_P_SETUP 0x01 /* infrastructure is setup */
634#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
635#define XPC_P_TORNDOWN		0x03	/* infrastructure is torn down */
636
637
638
639/*
640 * Interval (in jiffies) for struct xpc_partition's IPI_timer to wait before
641 * checking for dropped IPIs. These occur whenever an IPI AMO write doesn't
642 * complete until after the IPI was received.
643 */
644#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
645
646
647/* number of seconds to wait for other partitions to disengage */
648#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90
649
650/* interval in seconds to print 'waiting disengagement' messages */
651#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
652
653
654#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
655
656
657
658/* found in xp_main.c */
659extern struct xpc_registration xpc_registrations[];
660
661
662/* found in xpc_main.c */
663extern struct device *xpc_part;
664extern struct device *xpc_chan;
665extern int xpc_disengage_request_timelimit;
666extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
667extern void xpc_dropped_IPI_check(struct xpc_partition *);
668extern void xpc_activate_partition(struct xpc_partition *);
669extern void xpc_activate_kthreads(struct xpc_channel *, int);
670extern void xpc_create_kthreads(struct xpc_channel *, int);
671extern void xpc_disconnect_wait(int);
672
673
674/* found in xpc_partition.c */
675extern int xpc_exiting;
676extern struct xpc_vars *xpc_vars;
677extern struct xpc_rsvd_page *xpc_rsvd_page;
678extern struct xpc_vars_part *xpc_vars_part;
679extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
680extern char xpc_remote_copy_buffer[];
681extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
682extern void xpc_allow_IPI_ops(void);
683extern void xpc_restrict_IPI_ops(void);
684extern int xpc_identify_act_IRQ_sender(void);
685extern int xpc_partition_disengaged(struct xpc_partition *);
686extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
687extern void xpc_mark_partition_inactive(struct xpc_partition *);
688extern void xpc_discovery(void);
689extern void xpc_check_remote_hb(void);
690extern void xpc_deactivate_partition(const int, struct xpc_partition *,
691 enum xpc_retval);
692extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
693
694
695/* found in xpc_channel.c */
696extern void xpc_initiate_connect(int);
697extern void xpc_initiate_disconnect(int);
698extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
699extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
700extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
701 xpc_notify_func, void *);
702extern void xpc_initiate_received(partid_t, int, void *);
703extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
704extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
705extern void xpc_process_channel_activity(struct xpc_partition *);
706extern void xpc_connected_callout(struct xpc_channel *);
707extern void xpc_deliver_msg(struct xpc_channel *);
708extern void xpc_disconnect_channel(const int, struct xpc_channel *,
709 enum xpc_retval, unsigned long *);
710extern void xpc_disconnecting_callout(struct xpc_channel *);
711extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
712extern void xpc_teardown_infrastructure(struct xpc_partition *);
713
714
715
716static inline void
717xpc_wakeup_channel_mgr(struct xpc_partition *part)
718{
719 if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
720 wake_up(&part->channel_mgr_wq);
721 }
722}
723
724
725
726/*
727 * These next two inlines are used to keep us from tearing down a channel's
728 * msg queues while a thread may be referencing them.
729 */
730static inline void
731xpc_msgqueue_ref(struct xpc_channel *ch)
732{
733 atomic_inc(&ch->references);
734}
735
736static inline void
737xpc_msgqueue_deref(struct xpc_channel *ch)
738{
739 s32 refs = atomic_dec_return(&ch->references);
740
741 DBUG_ON(refs < 0);
742 if (refs == 0) {
743 xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
744 }
745}
746
747
748
749#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
750 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
751
752
753/*
754 * These two inlines are used to keep us from tearing down a partition's
755 * setup infrastructure while a thread may be referencing it.
756 */
757static inline void
758xpc_part_deref(struct xpc_partition *part)
759{
760 s32 refs = atomic_dec_return(&part->references);
761
762
763 DBUG_ON(refs < 0);
764 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
765 wake_up(&part->teardown_wq);
766 }
767}
768
769static inline int
770xpc_part_ref(struct xpc_partition *part)
771{
772 int setup;
773
774
775 atomic_inc(&part->references);
776 setup = (part->setup_state == XPC_P_SETUP);
777 if (!setup) {
778 xpc_part_deref(part);
779 }
780 return setup;
781}
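/*
 * Illustrative usage of this reference pair: take a reference only if
 * the partition's infrastructure is set up, and always drop it when
 * done:
 *
 *	if (xpc_part_ref(part)) {
 *		... use part's infrastructure ...
 *		xpc_part_deref(part);
 *	}
 */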
782
783
784
785/*
786 * The following macro is to be used for the setting of the reason and
787 * reason_line fields in both the struct xpc_channel and struct xpc_partition
788 * structures.
789 */
790#define XPC_SET_REASON(_p, _reason, _line) \
791 { \
792 (_p)->reason = _reason; \
793 (_p)->reason_line = _line; \
794 }
795
796
797
798/*
799 * This next set of inlines is used to keep track of when a partition is
800 * potentially engaged in accessing memory belonging to another partition.
801 */
802
803static inline void
804xpc_mark_partition_engaged(struct xpc_partition *part)
805{
806 unsigned long irq_flags;
807 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
808 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
809
810
811 local_irq_save(irq_flags);
812
813 /* set bit corresponding to our partid in remote partition's AMO */
814 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
815 (1UL << sn_partition_id));
816 /*
817 * We must always use the nofault function regardless of whether we
818 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
819 * didn't, we'd never know that the other partition is down and would
820 * keep sending IPIs and AMOs to it until the heartbeat times out.
821 */
822 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
823 variable), xp_nofault_PIOR_target));
824
825 local_irq_restore(irq_flags);
826}
827
828static inline void
829xpc_mark_partition_disengaged(struct xpc_partition *part)
830{
831 unsigned long irq_flags;
832 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
833 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
834
835
836 local_irq_save(irq_flags);
837
838 /* clear bit corresponding to our partid in remote partition's AMO */
839 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
840 ~(1UL << sn_partition_id));
841 /*
842 * We must always use the nofault function regardless of whether we
843 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
844 * didn't, we'd never know that the other partition is down and would
845 * keep sending IPIs and AMOs to it until the heartbeat times out.
846 */
847 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
848 variable), xp_nofault_PIOR_target));
849
850 local_irq_restore(irq_flags);
851}
852
853static inline void
854xpc_request_partition_disengage(struct xpc_partition *part)
855{
856 unsigned long irq_flags;
857 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
858 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
859
860
861 local_irq_save(irq_flags);
862
863 /* set bit corresponding to our partid in remote partition's AMO */
864 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
865 (1UL << sn_partition_id));
866 /*
867 * We must always use the nofault function regardless of whether we
868 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
869 * didn't, we'd never know that the other partition is down and would
870 * keep sending IPIs and AMOs to it until the heartbeat times out.
871 */
872 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
873 variable), xp_nofault_PIOR_target));
874
875 local_irq_restore(irq_flags);
876}
877
878static inline void
879xpc_cancel_partition_disengage_request(struct xpc_partition *part)
880{
881 unsigned long irq_flags;
882 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
883 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
884
885
886 local_irq_save(irq_flags);
887
888 /* clear bit corresponding to our partid in remote partition's AMO */
889 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
890 ~(1UL << sn_partition_id));
891 /*
892 * We must always use the nofault function regardless of whether we
893 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
894 * didn't, we'd never know that the other partition is down and would
895 * keep sending IPIs and AMOs to it until the heartbeat times out.
896 */
897 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
898 variable), xp_nofault_PIOR_target));
899
900 local_irq_restore(irq_flags);
901}
902
903static inline u64
904xpc_partition_engaged(u64 partid_mask)
905{
906 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
907
908
909 /* return our partition's AMO variable ANDed with partid_mask */
910 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
911 partid_mask);
912}
913
914static inline u64
915xpc_partition_disengage_requested(u64 partid_mask)
916{
917 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
918
919
920 /* return our partition's AMO variable ANDed with partid_mask */
921 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
922 partid_mask);
923}
924
925static inline void
926xpc_clear_partition_engaged(u64 partid_mask)
927{
928 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
929
930
931 /* clear bit(s) based on partid_mask in our partition's AMO */
932 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
933 ~partid_mask);
934}
935
936static inline void
937xpc_clear_partition_disengage_request(u64 partid_mask)
938{
939 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
940
941
942 /* clear bit(s) based on partid_mask in our partition's AMO */
943 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
944 ~partid_mask);
945}
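/*
 * Example: test whether remote partition 3 is still engaged with us,
 * or whether any partition at all is (the -1UL form is what
 * xpc_do_exit() uses while waiting for remote partitions to
 * disengage):
 *
 *	if (xpc_partition_engaged(1UL << 3)) { ... }
 *	if (xpc_partition_engaged(-1UL)) { ... }
 */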
946
947
948
949/*
950 * The following set of macros and inlines are used for the sending and
951 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
952 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
953 * the other that is associated with channel activity (SGI_XPC_NOTIFY).
954 */
955
956static inline u64
957xpc_IPI_receive(AMO_t *amo)
958{
959 return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
960}
961
962
963static inline enum xpc_retval
964xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
965{
966 int ret = 0;
967 unsigned long irq_flags;
968
969
970 local_irq_save(irq_flags);
971
972 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
973 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
974
975 /*
976 * We must always use the nofault function regardless of whether we
977 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
978 * didn't, we'd never know that the other partition is down and would
979 * keep sending IPIs and AMOs to it until the heartbeat times out.
980 */
981 ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
982 xp_nofault_PIOR_target));
983
984 local_irq_restore(irq_flags);
985
986 return ((ret == 0) ? xpcSuccess : xpcPioReadError);
987}
988
989
990/*
991 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
992 */
993
994/*
995 * Flag the appropriate AMO variable and send an IPI to the specified node.
996 */
997static inline void
998xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
999 int to_phys_cpuid)
1000{
1001 int w_index = XPC_NASID_W_INDEX(from_nasid);
1002 int b_index = XPC_NASID_B_INDEX(from_nasid);
1003 AMO_t *amos = (AMO_t *) __va(amos_page_pa +
1004 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
1005
1006
1007 (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
1008 to_phys_cpuid, SGI_XPC_ACTIVATE);
1009}
1010
1011static inline void
1012xpc_IPI_send_activate(struct xpc_vars *vars)
1013{
1014 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
1015 vars->act_nasid, vars->act_phys_cpuid);
1016}
1017
1018static inline void
1019xpc_IPI_send_activated(struct xpc_partition *part)
1020{
1021 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1022 part->remote_act_nasid, part->remote_act_phys_cpuid);
1023}
1024
1025static inline void
1026xpc_IPI_send_reactivate(struct xpc_partition *part)
1027{
1028 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
1029 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
1030}
1031
1032static inline void
1033xpc_IPI_send_disengage(struct xpc_partition *part)
1034{
1035 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1036 part->remote_act_nasid, part->remote_act_phys_cpuid);
1037}
1038
1039
1040/*
1041 * IPIs associated with SGI_XPC_NOTIFY IRQ.
1042 */
1043
1044/*
1045 * Send an IPI to the remote partition that is associated with the
1046 * specified channel.
1047 */
1048#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
1049 xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
1050
1051static inline void
1052xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
1053 unsigned long *irq_flags)
1054{
1055 struct xpc_partition *part = &xpc_partitions[ch->partid];
1056 enum xpc_retval ret;
1057
1058
1059 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
1060 ret = xpc_IPI_send(part->remote_IPI_amo_va,
1061 (u64) ipi_flag << (ch->number * 8),
1062 part->remote_IPI_nasid,
1063 part->remote_IPI_phys_cpuid,
1064 SGI_XPC_NOTIFY);
1065 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
1066 ipi_flag_string, ch->partid, ch->number, ret);
1067 if (unlikely(ret != xpcSuccess)) {
1068 if (irq_flags != NULL) {
1069 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1070 }
1071 XPC_DEACTIVATE_PARTITION(part, ret);
1072 if (irq_flags != NULL) {
1073 spin_lock_irqsave(&ch->lock, *irq_flags);
1074 }
1075 }
1076 }
1077}
1078
1079
1080/*
1081 * Make it look like the remote partition, which is associated with the
1082 * specified channel, sent us an IPI. This faked IPI will be handled
1083 * by xpc_dropped_IPI_check().
1084 */
1085#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
1086 xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
1087
1088static inline void
1089xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1090 char *ipi_flag_string)
1091{
1092 struct xpc_partition *part = &xpc_partitions[ch->partid];
1093
1094
1095 FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
1096 FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
1097 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
1098 ipi_flag_string, ch->partid, ch->number);
1099}
1100
1101
1102/*
1103 * The sending and receiving of IPIs includes the setting of an AMO variable
1104 * to indicate the reason the IPI was sent. The 64-bit variable is divided
1105 * up into eight bytes, ordered from right to left. Byte zero pertains to
1106 * channel 0, byte one to channel 1, and so on. Each byte is described by
1107 * the following IPI flags.
1108 */
1109
1110#define XPC_IPI_CLOSEREQUEST 0x01
1111#define XPC_IPI_CLOSEREPLY 0x02
1112#define XPC_IPI_OPENREQUEST 0x04
1113#define XPC_IPI_OPENREPLY 0x08
1114#define XPC_IPI_MSGREQUEST 0x10
1115
1116
1117/* given an AMO variable and a channel#, get its associated IPI flags */
1118#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
1119#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
1120
1121#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
1122#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010)
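/*
 * Worked example: if an AMO variable holds 0x0000000000050000, then
 * channel 2's byte is 0x05 = XPC_IPI_CLOSEREQUEST | XPC_IPI_OPENREQUEST,
 * so XPC_GET_IPI_FLAGS(_amo, 2) returns 0x05,
 * XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) is non-zero and
 * XPC_ANY_MSG_IPI_FLAGS_SET(_amo) is zero.
 */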
1123
1124
1125static inline void
1126xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
1127{
1128 struct xpc_openclose_args *args = ch->local_openclose_args;
1129
1130
1131 args->reason = ch->reason;
1132
1133 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
1134}
1135
1136static inline void
1137xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
1138{
1139 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
1140}
1141
1142static inline void
1143xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
1144{
1145 struct xpc_openclose_args *args = ch->local_openclose_args;
1146
1147
1148 args->msg_size = ch->msg_size;
1149 args->local_nentries = ch->local_nentries;
1150
1151 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
1152}
1153
1154static inline void
1155xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
1156{
1157 struct xpc_openclose_args *args = ch->local_openclose_args;
1158
1159
1160 args->remote_nentries = ch->remote_nentries;
1161 args->local_nentries = ch->local_nentries;
1162 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
1163
1164 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
1165}
1166
1167static inline void
1168xpc_IPI_send_msgrequest(struct xpc_channel *ch)
1169{
1170 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
1171}
1172
1173static inline void
1174xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
1175{
1176 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
1177}
1178
1179
1180/*
1181 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
1182 * pages are located in the lowest granule. The lowest granule uses 4k pages
1183 * for cached references and an alternate TLB handler that never provides a
1184 * cacheable mapping for the entire region. This prevents speculative reads
1185 * of cached copies of our lines from being issued, which would cause the
1186 * SHUB to generate a PI FSB Protocol error. For XPC, we need 64
1187 * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
1188 * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
1189 * activation and 2 AMO variables for partition deactivation.
1190 */
1191static inline AMO_t *
1192xpc_IPI_init(int index)
1193{
1194 AMO_t *amo = xpc_vars->amos_page + index;
1195
1196
1197 (void) xpc_IPI_receive(amo); /* clear AMO variable */
1198 return amo;
1199}
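/*
 * Assumed usage during partition setup (index names from above): the
 * notify IRQ AMO for partition 'partid' would be initialized with
 *
 *	part->local_IPI_amo_va = xpc_IPI_init(XPC_NOTIFY_IRQ_AMOS + partid);
 *
 * clearing any stale state before IPIs are accepted.
 */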
1200
1201
1202
1203static inline enum xpc_retval
1204xpc_map_bte_errors(bte_result_t error)
1205{
1206 switch (error) {
1207 case BTE_SUCCESS: return xpcSuccess;
1208 case BTEFAIL_DIR: return xpcBteDirectoryError;
1209 case BTEFAIL_POISON: return xpcBtePoisonError;
1210 case BTEFAIL_WERR: return xpcBteWriteError;
1211 case BTEFAIL_ACCESS: return xpcBteAccessError;
1212 case BTEFAIL_PWERR: return xpcBtePWriteError;
1213 case BTEFAIL_PRERR: return xpcBtePReadError;
1214 case BTEFAIL_TOUT: return xpcBteTimeOutError;
1215 case BTEFAIL_XTERR: return xpcBteXtalkError;
1216 case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable;
1217 default: return xpcBteUnmappedError;
1218 }
1219}
1220
1221
1222
1223static inline void *
1224xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
1225{
1226	/* see if kmalloc will give us cacheline aligned memory by default */
1227 *base = kmalloc(size, flags);
1228 if (*base == NULL) {
1229 return NULL;
1230 }
1231 if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
1232 return *base;
1233 }
1234 kfree(*base);
1235
1236 /* nope, we'll have to do it ourselves */
1237 *base = kmalloc(size + L1_CACHE_BYTES, flags);
1238 if (*base == NULL) {
1239 return NULL;
1240 }
1241 return (void *) L1_CACHE_ALIGN((u64) *base);
1242}
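/*
 * Typical call pattern (sketch; caller details assumed): keep 'base'
 * for the eventual kfree() and use the returned pointer for access:
 *
 *	void *base;
 *	struct xpc_gp *gps;
 *
 *	gps = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, &base);
 *	if (gps == NULL)
 *		return xpcNoMemory;
 *	...
 *	kfree(base);
 */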
1243
1244
1245/*
1246 * Check to see if there is any channel activity to/from the specified
1247 * partition.
1248 */
1249static inline void
1250xpc_check_for_channel_activity(struct xpc_partition *part)
1251{
1252 u64 IPI_amo;
1253 unsigned long irq_flags;
1254
1255
1256 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
1257 if (IPI_amo == 0) {
1258 return;
1259 }
1260
1261 spin_lock_irqsave(&part->IPI_lock, irq_flags);
1262 part->local_IPI_amo |= IPI_amo;
1263 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
1264
1265 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
1266 XPC_PARTID(part), IPI_amo);
1267
1268 xpc_wakeup_channel_mgr(part);
1269}
1270
1271
1272#endif /* _IA64_SN_KERNEL_XPC_H */
1273
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index abf4fc2a87bb..8d950c778bb6 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9 9
@@ -22,9 +22,11 @@
22#include <linux/cache.h> 22#include <linux/cache.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/mutex.h>
26#include <linux/completion.h>
25#include <asm/sn/bte.h> 27#include <asm/sn/bte.h>
26#include <asm/sn/sn_sal.h> 28#include <asm/sn/sn_sal.h>
27#include "xpc.h" 29#include <asm/sn/xpc.h>
28 30
29 31
30/* 32/*
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
56 atomic_set(&ch->n_to_notify, 0); 58 atomic_set(&ch->n_to_notify, 0);
57 59
58 spin_lock_init(&ch->lock); 60 spin_lock_init(&ch->lock);
59 sema_init(&ch->msg_to_pull_sema, 1); /* mutex */ 61 mutex_init(&ch->msg_to_pull_mutex);
60 sema_init(&ch->wdisconnect_sema, 0); /* event wait */ 62 init_completion(&ch->wdisconnect_wait);
61 63
62 atomic_set(&ch->n_on_msg_allocate_wq, 0); 64 atomic_set(&ch->n_on_msg_allocate_wq, 0);
63 init_waitqueue_head(&ch->msg_allocate_wq); 65 init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
534xpc_allocate_msgqueues(struct xpc_channel *ch) 536xpc_allocate_msgqueues(struct xpc_channel *ch)
535{ 537{
536 unsigned long irq_flags; 538 unsigned long irq_flags;
537 int i;
538 enum xpc_retval ret; 539 enum xpc_retval ret;
539 540
540 541
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
552 return ret; 553 return ret;
553 } 554 }
554 555
555 for (i = 0; i < ch->local_nentries; i++) {
556 /* use a semaphore as an event wait queue */
557 sema_init(&ch->notify_queue[i].sema, 0);
558 }
559
560 spin_lock_irqsave(&ch->lock, irq_flags); 556 spin_lock_irqsave(&ch->lock, irq_flags);
561 ch->flags |= XPC_C_SETUP; 557 ch->flags |= XPC_C_SETUP;
562 spin_unlock_irqrestore(&ch->lock, irq_flags); 558 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -779,6 +775,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
779 775
780 /* both sides are disconnected now */ 776 /* both sides are disconnected now */
781 777
778 if (ch->flags & XPC_C_CONNECTCALLOUT) {
779 spin_unlock_irqrestore(&ch->lock, *irq_flags);
780 xpc_disconnect_callout(ch, xpcDisconnected);
781 spin_lock_irqsave(&ch->lock, *irq_flags);
782 }
783
782 /* it's now safe to free the channel's message queues */ 784 /* it's now safe to free the channel's message queues */
783 xpc_free_msgqueues(ch); 785 xpc_free_msgqueues(ch);
784 786
@@ -793,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
793 } 795 }
794 796
795 if (ch->flags & XPC_C_WDISCONNECT) { 797 if (ch->flags & XPC_C_WDISCONNECT) {
796 spin_unlock_irqrestore(&ch->lock, *irq_flags); 798 /* we won't lose the CPU since we're holding ch->lock */
797 up(&ch->wdisconnect_sema); 799 complete(&ch->wdisconnect_wait);
798 spin_lock_irqsave(&ch->lock, *irq_flags);
799
800 } else if (ch->delayed_IPI_flags) { 800 } else if (ch->delayed_IPI_flags) {
801 if (part->act_state != XPC_P_DEACTIVATING) { 801 if (part->act_state != XPC_P_DEACTIVATING) {
802 /* time to take action on any delayed IPI flags */ 802 /* time to take action on any delayed IPI flags */
@@ -1086,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
1086 struct xpc_registration *registration = &xpc_registrations[ch->number]; 1086 struct xpc_registration *registration = &xpc_registrations[ch->number];
1087 1087
1088 1088
1089 if (down_trylock(&registration->sema) != 0) { 1089 if (mutex_trylock(&registration->mutex) == 0) {
1090 return xpcRetry; 1090 return xpcRetry;
1091 } 1091 }
1092 1092
1093 if (!XPC_CHANNEL_REGISTERED(ch->number)) { 1093 if (!XPC_CHANNEL_REGISTERED(ch->number)) {
1094 up(&registration->sema); 1094 mutex_unlock(&registration->mutex);
1095 return xpcUnregistered; 1095 return xpcUnregistered;
1096 } 1096 }
1097 1097
@@ -1102,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1102 1102
1103 if (ch->flags & XPC_C_DISCONNECTING) { 1103 if (ch->flags & XPC_C_DISCONNECTING) {
1104 spin_unlock_irqrestore(&ch->lock, irq_flags); 1104 spin_unlock_irqrestore(&ch->lock, irq_flags);
1105 up(&registration->sema); 1105 mutex_unlock(&registration->mutex);
1106 return ch->reason; 1106 return ch->reason;
1107 } 1107 }
1108 1108
@@ -1134,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1134 * channel lock be locked and will unlock and relock 1134 * channel lock be locked and will unlock and relock
1135 * the channel lock as needed. 1135 * the channel lock as needed.
1136 */ 1136 */
1137 up(&registration->sema); 1137 mutex_unlock(&registration->mutex);
1138 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, 1138 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1139 &irq_flags); 1139 &irq_flags);
1140 spin_unlock_irqrestore(&ch->lock, irq_flags); 1140 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1149,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
1149 atomic_inc(&xpc_partitions[ch->partid].nchannels_active); 1149 atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
1150 } 1150 }
1151 1151
1152 up(&registration->sema); 1152 mutex_unlock(&registration->mutex);
1153 1153
1154 1154
1155 /* initiate the connection */ 1155 /* initiate the connection */
@@ -1645,7 +1645,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1645 1645
1646 1646
1647void 1647void
1648xpc_disconnecting_callout(struct xpc_channel *ch) 1648xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
1649{ 1649{
1650 /* 1650 /*
1651 * Let the channel's registerer know that the channel is being 1651 * Let the channel's registerer know that the channel is being
@@ -1654,15 +1654,13 @@ xpc_disconnecting_callout(struct xpc_channel *ch)
1654 */ 1654 */
1655 1655
1656 if (ch->func != NULL) { 1656 if (ch->func != NULL) {
1657 dev_dbg(xpc_chan, "ch->func() called, reason=xpcDisconnecting," 1657 dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
1658 " partid=%d, channel=%d\n", ch->partid, ch->number); 1658 "channel=%d\n", reason, ch->partid, ch->number);
1659 1659
1660 ch->func(xpcDisconnecting, ch->partid, ch->number, NULL, 1660 ch->func(reason, ch->partid, ch->number, NULL, ch->key);
1661 ch->key);
1662 1661
1663 dev_dbg(xpc_chan, "ch->func() returned, reason=" 1662 dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
1664 "xpcDisconnecting, partid=%d, channel=%d\n", 1663 "channel=%d\n", reason, ch->partid, ch->number);
1665 ch->partid, ch->number);
1666 } 1664 }
1667} 1665}
1668 1666
@@ -2085,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2085 enum xpc_retval ret; 2083 enum xpc_retval ret;
2086 2084
2087 2085
2088 if (down_interruptible(&ch->msg_to_pull_sema) != 0) { 2086 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
2089 /* we were interrupted by a signal */ 2087 /* we were interrupted by a signal */
2090 return NULL; 2088 return NULL;
2091 } 2089 }
@@ -2121,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2121 2119
2122 XPC_DEACTIVATE_PARTITION(part, ret); 2120 XPC_DEACTIVATE_PARTITION(part, ret);
2123 2121
2124 up(&ch->msg_to_pull_sema); 2122 mutex_unlock(&ch->msg_to_pull_mutex);
2125 return NULL; 2123 return NULL;
2126 } 2124 }
2127 2125
@@ -2130,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2130 ch->next_msg_to_pull += nmsgs; 2128 ch->next_msg_to_pull += nmsgs;
2131 } 2129 }
2132 2130
2133 up(&ch->msg_to_pull_sema); 2131 mutex_unlock(&ch->msg_to_pull_mutex);
2134 2132
2135 /* return the message we were looking for */ 2133 /* return the message we were looking for */
2136 msg_offset = (get % ch->remote_nentries) * ch->msg_size; 2134 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index b617236524c6..c75f8aeefc2b 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9 9
@@ -55,11 +55,12 @@
55#include <linux/slab.h> 55#include <linux/slab.h>
56#include <linux/delay.h> 56#include <linux/delay.h>
57#include <linux/reboot.h> 57#include <linux/reboot.h>
58#include <linux/completion.h>
58#include <asm/sn/intr.h> 59#include <asm/sn/intr.h>
59#include <asm/sn/sn_sal.h> 60#include <asm/sn/sn_sal.h>
60#include <asm/kdebug.h> 61#include <asm/kdebug.h>
61#include <asm/uaccess.h> 62#include <asm/uaccess.h>
62#include "xpc.h" 63#include <asm/sn/xpc.h>
63 64
64 65
65/* define two XPC debug device structures to be used with dev_dbg() et al */ 66/* define two XPC debug device structures to be used with dev_dbg() et al */
@@ -82,6 +83,9 @@ struct device *xpc_part = &xpc_part_dbg_subname;
82struct device *xpc_chan = &xpc_chan_dbg_subname; 83struct device *xpc_chan = &xpc_chan_dbg_subname;
83 84
84 85
86static int xpc_kdebug_ignore;
87
88
85/* systune related variables for /proc/sys directories */ 89/* systune related variables for /proc/sys directories */
86 90
87static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; 91static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -162,6 +166,8 @@ static ctl_table xpc_sys_dir[] = {
162}; 166};
163static struct ctl_table_header *xpc_sysctl; 167static struct ctl_table_header *xpc_sysctl;
164 168
169/* non-zero if any remote partition disengage request was timed out */
170int xpc_disengage_request_timedout;
165 171
166/* #of IRQs received */ 172/* #of IRQs received */
167static atomic_t xpc_act_IRQ_rcvd; 173static atomic_t xpc_act_IRQ_rcvd;
@@ -172,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
172static unsigned long xpc_hb_check_timeout; 178static unsigned long xpc_hb_check_timeout;
173 179
174/* notification that the xpc_hb_checker thread has exited */ 180/* notification that the xpc_hb_checker thread has exited */
175static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited); 181static DECLARE_COMPLETION(xpc_hb_checker_exited);
176 182
177/* notification that the xpc_discovery thread has exited */ 183/* notification that the xpc_discovery thread has exited */
178static DECLARE_MUTEX_LOCKED(xpc_discovery_exited); 184static DECLARE_COMPLETION(xpc_discovery_exited);
179 185
180 186
181static struct timer_list xpc_hb_timer; 187static struct timer_list xpc_hb_timer;
@@ -316,7 +322,7 @@ xpc_hb_checker(void *ignore)
316 322
317 323
318 /* mark this thread as having exited */ 324 /* mark this thread as having exited */
319 up(&xpc_hb_checker_exited); 325 complete(&xpc_hb_checker_exited);
320 return 0; 326 return 0;
321} 327}
322 328
@@ -336,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
336 dev_dbg(xpc_part, "discovery thread is exiting\n"); 342 dev_dbg(xpc_part, "discovery thread is exiting\n");
337 343
338 /* mark this thread as having exited */ 344 /* mark this thread as having exited */
339 up(&xpc_discovery_exited); 345 complete(&xpc_discovery_exited);
340 return 0; 346 return 0;
341} 347}
342 348
@@ -773,7 +779,7 @@ xpc_daemonize_kthread(void *args)
773 ch->flags |= XPC_C_DISCONNECTCALLOUT; 779 ch->flags |= XPC_C_DISCONNECTCALLOUT;
774 spin_unlock_irqrestore(&ch->lock, irq_flags); 780 spin_unlock_irqrestore(&ch->lock, irq_flags);
775 781
776 xpc_disconnecting_callout(ch); 782 xpc_disconnect_callout(ch, xpcDisconnecting);
777 } else { 783 } else {
778 spin_unlock_irqrestore(&ch->lock, irq_flags); 784 spin_unlock_irqrestore(&ch->lock, irq_flags);
779 } 785 }
@@ -888,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
888 continue; 894 continue;
889 } 895 }
890 896
891 (void) down(&ch->wdisconnect_sema); 897 wait_for_completion(&ch->wdisconnect_wait);
892 898
893 spin_lock_irqsave(&ch->lock, irq_flags); 899 spin_lock_irqsave(&ch->lock, irq_flags);
894 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); 900 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -921,9 +927,9 @@ static void
921xpc_do_exit(enum xpc_retval reason) 927xpc_do_exit(enum xpc_retval reason)
922{ 928{
923 partid_t partid; 929 partid_t partid;
924 int active_part_count; 930 int active_part_count, printed_waiting_msg = 0;
925 struct xpc_partition *part; 931 struct xpc_partition *part;
926 unsigned long printmsg_time; 932 unsigned long printmsg_time, disengage_request_timeout = 0;
927 933
928 934
929 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ 935 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
@@ -941,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
941 free_irq(SGI_XPC_ACTIVATE, NULL); 947 free_irq(SGI_XPC_ACTIVATE, NULL);
942 948
943 /* wait for the discovery thread to exit */ 949 /* wait for the discovery thread to exit */
944 down(&xpc_discovery_exited); 950 wait_for_completion(&xpc_discovery_exited);
945 951
946 /* wait for the heartbeat checker thread to exit */ 952 /* wait for the heartbeat checker thread to exit */
947 down(&xpc_hb_checker_exited); 953 wait_for_completion(&xpc_hb_checker_exited);
948 954
949 955
950 /* sleep for a 1/3 of a second or so */ 956 /* sleep for a 1/3 of a second or so */
@@ -953,7 +959,8 @@ xpc_do_exit(enum xpc_retval reason)
953 959
954 /* wait for all partitions to become inactive */ 960 /* wait for all partitions to become inactive */
955 961
956 printmsg_time = jiffies; 962 printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
963 xpc_disengage_request_timedout = 0;
957 964
958 do { 965 do {
959 active_part_count = 0; 966 active_part_count = 0;
@@ -969,20 +976,39 @@ xpc_do_exit(enum xpc_retval reason)
969 active_part_count++; 976 active_part_count++;
970 977
971 XPC_DEACTIVATE_PARTITION(part, reason); 978 XPC_DEACTIVATE_PARTITION(part, reason);
972 }
973 979
974 if (active_part_count == 0) { 980 if (part->disengage_request_timeout >
975 break; 981 disengage_request_timeout) {
982 disengage_request_timeout =
983 part->disengage_request_timeout;
984 }
976 } 985 }
977 986
978 if (jiffies >= printmsg_time) { 987 if (xpc_partition_engaged(-1UL)) {
979 dev_info(xpc_part, "waiting for partitions to " 988 if (time_after(jiffies, printmsg_time)) {
980 "deactivate/disengage, active count=%d, remote " 989 dev_info(xpc_part, "waiting for remote "
981 "engaged=0x%lx\n", active_part_count, 990 "partitions to disengage, timeout in "
982 xpc_partition_engaged(1UL << partid)); 991 "%ld seconds\n",
983 992 (disengage_request_timeout - jiffies)
984 printmsg_time = jiffies + 993 / HZ);
994 printmsg_time = jiffies +
985 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 995 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
996 printed_waiting_msg = 1;
997 }
998
999 } else if (active_part_count > 0) {
1000 if (printed_waiting_msg) {
1001 dev_info(xpc_part, "waiting for local partition"
1002 " to disengage\n");
1003 printed_waiting_msg = 0;
1004 }
1005
1006 } else {
1007 if (!xpc_disengage_request_timedout) {
1008 dev_info(xpc_part, "all partitions have "
1009 "disengaged\n");
1010 }
1011 break;
986 } 1012 }
987 1013
988 /* sleep for a 1/3 of a second or so */ 1014 /* sleep for a 1/3 of a second or so */
@@ -1000,11 +1026,13 @@ xpc_do_exit(enum xpc_retval reason)
1000 del_timer_sync(&xpc_hb_timer); 1026 del_timer_sync(&xpc_hb_timer);
1001 DBUG_ON(xpc_vars->heartbeating_to_mask != 0); 1027 DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
1002 1028
1003 /* take ourselves off of the reboot_notifier_list */ 1029 if (reason == xpcUnloading) {
1004 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 1030 /* take ourselves off of the reboot_notifier_list */
1031 (void) unregister_reboot_notifier(&xpc_reboot_notifier);
1005 1032
1006 /* take ourselves off of the die_notifier list */ 1033 /* take ourselves off of the die_notifier list */
1007 (void) unregister_die_notifier(&xpc_die_notifier); 1034 (void) unregister_die_notifier(&xpc_die_notifier);
1035 }
1008 1036
1009 /* close down protections for IPI operations */ 1037 /* close down protections for IPI operations */
1010 xpc_restrict_IPI_ops(); 1038 xpc_restrict_IPI_ops();
@@ -1020,7 +1048,35 @@ xpc_do_exit(enum xpc_retval reason)
1020 1048
1021 1049
1022/* 1050/*
1023 * Called when the system is about to be either restarted or halted. 1051 * This function is called when the system is being rebooted.
1052 */
1053static int
1054xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1055{
1056 enum xpc_retval reason;
1057
1058
1059 switch (event) {
1060 case SYS_RESTART:
1061 reason = xpcSystemReboot;
1062 break;
1063 case SYS_HALT:
1064 reason = xpcSystemHalt;
1065 break;
1066 case SYS_POWER_OFF:
1067 reason = xpcSystemPoweroff;
1068 break;
1069 default:
1070 reason = xpcSystemGoingDown;
1071 }
1072
1073 xpc_do_exit(reason);
1074 return NOTIFY_DONE;
1075}
1076
1077
1078/*
1079 * Notify other partitions to disengage from all references to our memory.
1024 */ 1080 */
1025static void 1081static void
1026xpc_die_disengage(void) 1082xpc_die_disengage(void)
@@ -1028,7 +1084,7 @@ xpc_die_disengage(void)
1028 struct xpc_partition *part; 1084 struct xpc_partition *part;
1029 partid_t partid; 1085 partid_t partid;
1030 unsigned long engaged; 1086 unsigned long engaged;
1031 long time, print_time, disengage_request_timeout; 1087 long time, printmsg_time, disengage_request_timeout;
1032 1088
1033 1089
1034 /* keep xpc_hb_checker thread from doing anything (just in case) */ 1090 /* keep xpc_hb_checker thread from doing anything (just in case) */
@@ -1055,57 +1111,53 @@ xpc_die_disengage(void)
1055 } 1111 }
1056 } 1112 }
1057 1113
1058 print_time = rtc_time(); 1114 time = rtc_time();
1059 disengage_request_timeout = print_time + 1115 printmsg_time = time +
1116 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
1117 disengage_request_timeout = time +
1060 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); 1118 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
1061 1119
1062 /* wait for all other partitions to disengage from us */ 1120 /* wait for all other partitions to disengage from us */
1063 1121
1064 while ((engaged = xpc_partition_engaged(-1UL)) && 1122 while (1) {
1065 (time = rtc_time()) < disengage_request_timeout) { 1123 engaged = xpc_partition_engaged(-1UL);
1124 if (!engaged) {
1125 dev_info(xpc_part, "all partitions have disengaged\n");
1126 break;
1127 }
1066 1128
1067 if (time >= print_time) { 1129 time = rtc_time();
1130 if (time >= disengage_request_timeout) {
1131 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1132 if (engaged & (1UL << partid)) {
1133 dev_info(xpc_part, "disengage from "
1134 "remote partition %d timed "
1135 "out\n", partid);
1136 }
1137 }
1138 break;
1139 }
1140
1141 if (time >= printmsg_time) {
1068 dev_info(xpc_part, "waiting for remote partitions to " 1142 dev_info(xpc_part, "waiting for remote partitions to "
1069 "disengage, engaged=0x%lx\n", engaged); 1143 "disengage, timeout in %ld seconds\n",
1070 print_time = time + (XPC_DISENGAGE_PRINTMSG_INTERVAL * 1144 (disengage_request_timeout - time) /
1145 sn_rtc_cycles_per_second);
1146 printmsg_time = time +
1147 (XPC_DISENGAGE_PRINTMSG_INTERVAL *
1071 sn_rtc_cycles_per_second); 1148 sn_rtc_cycles_per_second);
1072 } 1149 }
1073 } 1150 }
1074 dev_info(xpc_part, "finished waiting for remote partitions to "
1075 "disengage, engaged=0x%lx\n", engaged);
1076}
1077
1078
1079/*
1080 * This function is called when the system is being rebooted.
1081 */
1082static int
1083xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1084{
1085 enum xpc_retval reason;
1086
1087
1088 switch (event) {
1089 case SYS_RESTART:
1090 reason = xpcSystemReboot;
1091 break;
1092 case SYS_HALT:
1093 reason = xpcSystemHalt;
1094 break;
1095 case SYS_POWER_OFF:
1096 reason = xpcSystemPoweroff;
1097 break;
1098 default:
1099 reason = xpcSystemGoingDown;
1100 }
1101
1102 xpc_do_exit(reason);
1103 return NOTIFY_DONE;
1104} 1151}
1105 1152
1106 1153
1107/* 1154/*
1108 * This function is called when the system is being rebooted. 1155 * This function is called when the system is being restarted or halted due
1156 * to some sort of system failure. If this is the case we need to notify the
1157 * other partitions to disengage from all references to our memory.
1158 * This function can also be called when our heartbeater could be offlined
1159 * for a time. In this case we need to notify other partitions to not worry
1160 * about the lack of a heartbeat.
1109 */ 1161 */
1110static int 1162static int
1111xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) 1163xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
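[Editor's note] The rewritten wait loop above trades the old compound while-condition for an explicit while (1) with three distinct exits: success (no partition engaged), overall timeout (log the stragglers and give up), and a periodic progress message that now reports time remaining rather than the raw engaged mask. A minimal sketch of the same control flow; engaged_mask(), rtc_now(), RTC_HZ, MSG_INTERVAL and TIMEOUT_SECS are illustrative stand-ins for the SN/XPC primitives, not the real names:

	/* Stand-in sketch of the restructured disengage wait loop. */
	static void wait_for_disengage(void)
	{
		unsigned long engaged;
		long time, printmsg_time, timeout;

		time = rtc_now();
		printmsg_time = time + MSG_INTERVAL * RTC_HZ;
		timeout = time + TIMEOUT_SECS * RTC_HZ;

		while (1) {
			engaged = engaged_mask();  /* one bit per remote partition */
			if (!engaged)
				break;             /* success: everyone let go */

			time = rtc_now();
			if (time >= timeout)
				break;             /* give up; log the stragglers */

			if (time >= printmsg_time) {
				printk(KERN_INFO "waiting, timeout in %ld s\n",
				       (timeout - time) / RTC_HZ);
				printmsg_time = time + MSG_INTERVAL * RTC_HZ;
			}
		}
	}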
@@ -1115,11 +1167,25 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1115 case DIE_MACHINE_HALT: 1167 case DIE_MACHINE_HALT:
1116 xpc_die_disengage(); 1168 xpc_die_disengage();
1117 break; 1169 break;
1170
1171 case DIE_KDEBUG_ENTER:
1172 /* Should lack of heartbeat be ignored by other partitions? */
1173 if (!xpc_kdebug_ignore) {
1174 break;
1175 }
1176 /* fall through */
1118 case DIE_MCA_MONARCH_ENTER: 1177 case DIE_MCA_MONARCH_ENTER:
1119 case DIE_INIT_MONARCH_ENTER: 1178 case DIE_INIT_MONARCH_ENTER:
1120 xpc_vars->heartbeat++; 1179 xpc_vars->heartbeat++;
1121 xpc_vars->heartbeat_offline = 1; 1180 xpc_vars->heartbeat_offline = 1;
1122 break; 1181 break;
1182
1183 case DIE_KDEBUG_LEAVE:
1184 /* Is lack of heartbeat being ignored by other partitions? */
1185 if (!xpc_kdebug_ignore) {
1186 break;
1187 }
1188 /* fall through */
1123 case DIE_MCA_MONARCH_LEAVE: 1189 case DIE_MCA_MONARCH_LEAVE:
1124 case DIE_INIT_MONARCH_LEAVE: 1190 case DIE_INIT_MONARCH_LEAVE:
1125 xpc_vars->heartbeat++; 1191 xpc_vars->heartbeat++;
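[Editor's note] xpc_system_die() sits on the ia64 die-notifier chain, and the two new DIE_KDEBUG_* cases deliberately fall through into the MCA/INIT monarch handling when xpc_kdebug_ignore says debugger entry should look like a tolerated heartbeat outage. A hedged sketch of the notifier pattern itself, with the handler body condensed (DIE_MACHINE_RESTART/DIE_MACHINE_HALT are the same event codes used above; the registration call is the standard one, shown here as an assumption about how this module wires up):

	#include <linux/notifier.h>

	static int my_die_handler(struct notifier_block *nb, unsigned long event,
				  void *unused)
	{
		switch (event) {
		case DIE_MACHINE_RESTART:
		case DIE_MACHINE_HALT:
			/* system is going away: tell peers to disengage */
			break;
		default:
			/* transient outage: bump heartbeat, mark it expected */
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_die_block = {
		.notifier_call = my_die_handler,
	};

	/* registration, typically from the module init path:
	 *	register_die_notifier(&my_die_block);
	 */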
@@ -1302,7 +1368,7 @@ xpc_init(void)
1302 dev_err(xpc_part, "failed while forking discovery thread\n"); 1368 dev_err(xpc_part, "failed while forking discovery thread\n");
1303 1369
1304 /* mark this new thread as a non-starter */ 1370 /* mark this new thread as a non-starter */
1305 up(&xpc_discovery_exited); 1371 complete(&xpc_discovery_exited);
1306 1372
1307 xpc_do_exit(xpcUnloading); 1373 xpc_do_exit(xpcUnloading);
1308 return -EBUSY; 1374 return -EBUSY;
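[Editor's note] The up(&xpc_discovery_exited) call becoming complete(&xpc_discovery_exited) is part of the tree-wide move from semaphores used as completion flags to the dedicated completion API, which pairs one completer with one or more waiters. A minimal sketch of the idiom, assuming a hypothetical worker thread named discovery():

	#include <linux/completion.h>

	static DECLARE_COMPLETION(discovery_exited);

	static int discovery(void *ignore)
	{
		/* ... discovery work ... */
		complete(&discovery_exited);	/* announce "I have exited" */
		return 0;
	}

	static void shutdown_discovery(void)
	{
		/* block until the thread (or the failed-fork path) signals */
		wait_for_completion(&discovery_exited);
	}

Marking a thread that never started as complete, as the failed-fork path above does, lets a later wait_for_completion() in the exit path return immediately.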
@@ -1344,3 +1410,7 @@ module_param(xpc_disengage_request_timelimit, int, 0);
1344MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " 1410MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
1345 "for disengage request to complete."); 1411 "for disengage request to complete.");
1346 1412
1413module_param(xpc_kdebug_ignore, int, 0);
1414MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
1415 "other partitions when dropping into kdebug.");
1416
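[Editor's note] The new xpc_kdebug_ignore parameter follows the standard three-piece module-parameter idiom: a file-scope variable, module_param() to expose it, and MODULE_PARM_DESC() for modinfo. The same idiom with an illustrative name and default:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int my_kdebug_ignore = 1;	/* illustrative default */

	module_param(my_kdebug_ignore, int, 0);	/* perm 0: no sysfs entry */
	MODULE_PARM_DESC(my_kdebug_ignore,
			 "Should lack of heartbeat be ignored in the debugger.");

A value is then supplied at load time, e.g. modprobe mymod my_kdebug_ignore=0 for this illustrative name.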
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index cdd6431853a1..88a730e6cfdb 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9 9
@@ -28,7 +28,7 @@
28#include <asm/sn/sn_sal.h> 28#include <asm/sn/sn_sal.h>
29#include <asm/sn/nodepda.h> 29#include <asm/sn/nodepda.h>
30#include <asm/sn/addrs.h> 30#include <asm/sn/addrs.h>
31#include "xpc.h" 31#include <asm/sn/xpc.h>
32 32
33 33
34/* XPC is exiting flag */ 34/* XPC is exiting flag */
@@ -771,7 +771,8 @@ xpc_identify_act_IRQ_req(int nasid)
771 } 771 }
772 } 772 }
773 773
774 if (!xpc_partition_disengaged(part)) { 774 if (part->disengage_request_timeout > 0 &&
775 !xpc_partition_disengaged(part)) {
775 /* still waiting on other side to disengage from us */ 776 /* still waiting on other side to disengage from us */
776 return; 777 return;
777 } 778 }
@@ -873,6 +874,9 @@ xpc_partition_disengaged(struct xpc_partition *part)
873 * request in a timely fashion, so assume it's dead. 874 * request in a timely fashion, so assume it's dead.
874 */ 875 */
875 876
877 dev_info(xpc_part, "disengage from remote partition %d "
878 "timed out\n", partid);
879 xpc_disengage_request_timedout = 1;
876 xpc_clear_partition_engaged(1UL << partid); 880 xpc_clear_partition_engaged(1UL << partid);
877 disengaged = 1; 881 disengaged = 1;
878 } 882 }
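[Editor's note] Two behaviors change in xpc_partition.c: xpc_identify_act_IRQ_req() only treats a partition as still engaged when a disengage request is actually outstanding (disengage_request_timeout > 0), and the timeout path now logs the failure and latches xpc_disengage_request_timedout before clearing the engaged bit. A condensed sketch of that decision; part_engaged(), clear_engaged() and rtc_now() are stand-ins for the real XPC primitives:

	struct part_state {
		short id;
		long disengage_timeout;		/* 0 == no request outstanding */
	};

	static int partition_disengaged(struct part_state *part)
	{
		int disengaged = !part_engaged(part->id);

		if (!disengaged && part->disengage_timeout > 0 &&
		    rtc_now() >= part->disengage_timeout) {
			/* peer never answered the request: assume it is dead */
			printk(KERN_INFO "disengage from partition %d timed out\n",
			       part->id);
			clear_engaged(1UL << part->id);
			disengaged = 1;
		}
		return disengaged;
	}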
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index d1647b863e61..aa3fa5152a32 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -18,10 +18,10 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
18 * mark_ate: Mark the ate as either free or inuse. 18 * mark_ate: Mark the ate as either free or inuse.
19 */ 19 */
20static void mark_ate(struct ate_resource *ate_resource, int start, int number, 20static void mark_ate(struct ate_resource *ate_resource, int start, int number,
21 uint64_t value) 21 u64 value)
22{ 22{
23 23
24 uint64_t *ate = ate_resource->ate; 24 u64 *ate = ate_resource->ate;
25 int index; 25 int index;
26 int length = 0; 26 int length = 0;
27 27
@@ -38,7 +38,7 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
38 int count) 38 int count)
39{ 39{
40 40
41 uint64_t *ate = ate_resource->ate; 41 u64 *ate = ate_resource->ate;
42 int index; 42 int index;
43 int start_free; 43 int start_free;
44 44
@@ -119,7 +119,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
119int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) 119int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
120{ 120{
121 int status = 0; 121 int status = 0;
122 uint64_t flag; 122 u64 flag;
123 123
124 flag = pcibr_lock(pcibus_info); 124 flag = pcibr_lock(pcibus_info);
125 status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); 125 status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
@@ -139,7 +139,7 @@ int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
139 * Setup an Address Translation Entry as specified. Use either the Bridge 139 * Setup an Address Translation Entry as specified. Use either the Bridge
140 * internal maps or the external map RAM, as appropriate. 140 * internal maps or the external map RAM, as appropriate.
141 */ 141 */
142static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info, 142static inline u64 *pcibr_ate_addr(struct pcibus_info *pcibus_info,
143 int ate_index) 143 int ate_index)
144{ 144{
145 if (ate_index < pcibus_info->pbi_int_ate_size) { 145 if (ate_index < pcibus_info->pbi_int_ate_size) {
@@ -153,7 +153,7 @@ static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
153 */ 153 */
154void inline 154void inline
155ate_write(struct pcibus_info *pcibus_info, int ate_index, int count, 155ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
156 volatile uint64_t ate) 156 volatile u64 ate)
157{ 157{
158 while (count-- > 0) { 158 while (count-- > 0) {
159 if (ate_index < pcibus_info->pbi_int_ate_size) { 159 if (ate_index < pcibus_info->pbi_int_ate_size) {
@@ -171,9 +171,9 @@ ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
171void pcibr_ate_free(struct pcibus_info *pcibus_info, int index) 171void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
172{ 172{
173 173
174 volatile uint64_t ate; 174 volatile u64 ate;
175 int count; 175 int count;
176 uint64_t flags; 176 u64 flags;
177 177
178 if (pcibr_invalidate_ate) { 178 if (pcibr_invalidate_ate) {
179 /* For debugging purposes, clear the valid bit in the ATE */ 179 /* For debugging purposes, clear the valid bit in the ATE */
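[Editor's note] Aside from the uint64_t to u64 conversion (the kernel-internal spelling of the same width), pcibr_ate.c is functionally unchanged; its allocator is a plain first-fit scan over an array of ATEs. A self-contained sketch of that scan, assuming a zero entry means free (consistent with the map being zeroed at setup):

	/* First-fit scan over an ATE array (0 == free). Returns the start
	 * index of a run of `count` free entries, or -1 if none exists.
	 * Simplified from the find_free_ate()/mark_ate() pair above. */
	static int find_free_run(const unsigned long long *ate, int num_ate,
				 int count)
	{
		int i, start = 0, len = 0;

		for (i = 0; i < num_ate; i++) {
			if (ate[i] == 0) {
				if (len++ == 0)
					start = i;	/* a new run begins */
				if (len == count)
					return start;	/* big enough: claim it */
			} else {
				len = 0;		/* run broken, rescan */
			}
		}
		return -1;
	}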
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 34093476e965..54ce5b7ceed2 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,21 +41,21 @@ extern int sn_ioif_inited;
41 41
42static dma_addr_t 42static dma_addr_t
43pcibr_dmamap_ate32(struct pcidev_info *info, 43pcibr_dmamap_ate32(struct pcidev_info *info,
44 uint64_t paddr, size_t req_size, uint64_t flags) 44 u64 paddr, size_t req_size, u64 flags)
45{ 45{
46 46
47 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; 47 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
48 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> 48 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
49 pdi_pcibus_info; 49 pdi_pcibus_info;
50 uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info-> 50 u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
51 pdi_linux_pcidev->devfn)) - 1; 51 pdi_linux_pcidev->devfn)) - 1;
52 int ate_count; 52 int ate_count;
53 int ate_index; 53 int ate_index;
54 uint64_t ate_flags = flags | PCI32_ATE_V; 54 u64 ate_flags = flags | PCI32_ATE_V;
55 uint64_t ate; 55 u64 ate;
56 uint64_t pci_addr; 56 u64 pci_addr;
57 uint64_t xio_addr; 57 u64 xio_addr;
58 uint64_t offset; 58 u64 offset;
59 59
 60 /* PIC in PCI-X mode does not support 32bit PageMap mode */ 60 /* PIC in PCI-X mode does not support 32bit PageMap mode */

61 if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) { 61 if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
@@ -109,12 +109,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
109} 109}
110 110
111static dma_addr_t 111static dma_addr_t
112pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, 112pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
113 uint64_t dma_attributes) 113 u64 dma_attributes)
114{ 114{
115 struct pcibus_info *pcibus_info = (struct pcibus_info *) 115 struct pcibus_info *pcibus_info = (struct pcibus_info *)
116 ((info->pdi_host_pcidev_info)->pdi_pcibus_info); 116 ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
117 uint64_t pci_addr; 117 u64 pci_addr;
118 118
119 /* Translate to Crosstalk View of Physical Address */ 119 /* Translate to Crosstalk View of Physical Address */
120 pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : 120 pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
@@ -127,7 +127,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
127 /* Handle Bridge Chipset differences */ 127 /* Handle Bridge Chipset differences */
128 if (IS_PIC_SOFT(pcibus_info)) { 128 if (IS_PIC_SOFT(pcibus_info)) {
129 pci_addr |= 129 pci_addr |=
130 ((uint64_t) pcibus_info-> 130 ((u64) pcibus_info->
131 pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); 131 pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
132 } else 132 } else
133 pci_addr |= TIOCP_PCI64_CMDTYPE_MEM; 133 pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
@@ -142,17 +142,17 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
142 142
143static dma_addr_t 143static dma_addr_t
144pcibr_dmatrans_direct32(struct pcidev_info * info, 144pcibr_dmatrans_direct32(struct pcidev_info * info,
145 uint64_t paddr, size_t req_size, uint64_t flags) 145 u64 paddr, size_t req_size, u64 flags)
146{ 146{
147 147
148 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; 148 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
149 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> 149 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
150 pdi_pcibus_info; 150 pdi_pcibus_info;
151 uint64_t xio_addr; 151 u64 xio_addr;
152 152
153 uint64_t xio_base; 153 u64 xio_base;
154 uint64_t offset; 154 u64 offset;
155 uint64_t endoff; 155 u64 endoff;
156 156
157 if (IS_PCIX(pcibus_info)) { 157 if (IS_PCIX(pcibus_info)) {
158 return 0; 158 return 0;
@@ -209,16 +209,18 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
209 * unlike the PIC Device(x) Write Request Buffer Flush register. 209 * unlike the PIC Device(x) Write Request Buffer Flush register.
210 */ 210 */
211 211
212void sn_dma_flush(uint64_t addr) 212void sn_dma_flush(u64 addr)
213{ 213{
214 nasid_t nasid; 214 nasid_t nasid;
215 int is_tio; 215 int is_tio;
216 int wid_num; 216 int wid_num;
217 int i, j; 217 int i, j;
218 uint64_t flags; 218 u64 flags;
219 uint64_t itte; 219 u64 itte;
220 struct hubdev_info *hubinfo; 220 struct hubdev_info *hubinfo;
221 volatile struct sn_flush_device_list *p; 221 volatile struct sn_flush_device_kernel *p;
222 volatile struct sn_flush_device_common *common;
223
222 struct sn_flush_nasid_entry *flush_nasid_list; 224 struct sn_flush_nasid_entry *flush_nasid_list;
223 225
224 if (!sn_ioif_inited) 226 if (!sn_ioif_inited)
@@ -268,17 +270,17 @@ void sn_dma_flush(uint64_t addr)
268 p = &flush_nasid_list->widget_p[wid_num][0]; 270 p = &flush_nasid_list->widget_p[wid_num][0];
269 271
270 /* find a matching BAR */ 272 /* find a matching BAR */
271 for (i = 0; i < DEV_PER_WIDGET; i++) { 273 for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
274 common = p->common;
272 for (j = 0; j < PCI_ROM_RESOURCE; j++) { 275 for (j = 0; j < PCI_ROM_RESOURCE; j++) {
273 if (p->sfdl_bar_list[j].start == 0) 276 if (common->sfdl_bar_list[j].start == 0)
274 break; 277 break;
275 if (addr >= p->sfdl_bar_list[j].start 278 if (addr >= common->sfdl_bar_list[j].start
276 && addr <= p->sfdl_bar_list[j].end) 279 && addr <= common->sfdl_bar_list[j].end)
277 break; 280 break;
278 } 281 }
279 if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0) 282 if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
280 break; 283 break;
281 p++;
282 } 284 }
283 285
284 /* if no matching BAR, return without doing anything. */ 286 /* if no matching BAR, return without doing anything. */
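[Editor's note] The flush bookkeeping has been split into a PROM-shared sn_flush_device_common and a kernel-private sn_flush_device_kernel wrapper, so the BAR search now walks the kernel array while comparing addresses against the common side (with the p++ moved into the for header, replacing the old trailing increment). A condensed sketch of the two-level search with simplified types:

	#define N_BARS 6	/* stands in for PCI_ROM_RESOURCE */

	struct flush_common { struct { unsigned long start, end; } bar[N_BARS]; };
	struct flush_kernel { struct flush_common *common; };

	/* Return the common struct whose BAR range covers addr, or NULL. */
	static struct flush_common *
	find_flush_bar(struct flush_kernel *p, int ndev, unsigned long addr)
	{
		int i, j;

		for (i = 0; i < ndev; i++, p++) {
			struct flush_common *c = p->common;

			for (j = 0; j < N_BARS; j++) {
				if (c->bar[j].start == 0)
					break;	/* end of populated BARs */
				if (addr >= c->bar[j].start &&
				    addr <= c->bar[j].end)
					return c;	/* address in this BAR */
			}
		}
		return NULL;	/* caller returns without doing anything */
	}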
@@ -297,31 +299,31 @@ void sn_dma_flush(uint64_t addr)
297 * If CE ever needs the sn_dma_flush mechanism, we will have 299 * If CE ever needs the sn_dma_flush mechanism, we will have
298 * to account for that here and in tioce_bus_fixup(). 300 * to account for that here and in tioce_bus_fixup().
299 */ 301 */
300 uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID)); 302 u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
301 uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id); 303 u32 revnum = XWIDGET_PART_REV_NUM(tio_id);
302 304
303 /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ 305 /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
304 if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) { 306 if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
305 return; 307 return;
306 } else { 308 } else {
307 pcireg_wrb_flush_get(p->sfdl_pcibus_info, 309 pcireg_wrb_flush_get(common->sfdl_pcibus_info,
308 (p->sfdl_slot - 1)); 310 (common->sfdl_slot - 1));
309 } 311 }
310 } else { 312 } else {
311 spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> 313 spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock,
312 sfdl_flush_lock, flags); 314 flags);
313 315 *common->sfdl_flush_addr = 0;
314 *p->sfdl_flush_addr = 0;
315 316
316 /* force an interrupt. */ 317 /* force an interrupt. */
317 *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; 318 *(volatile u32 *)(common->sfdl_force_int_addr) = 1;
318 319
319 /* wait for the interrupt to come back. */ 320 /* wait for the interrupt to come back. */
320 while (*(p->sfdl_flush_addr) != 0x10f) 321 while (*(common->sfdl_flush_addr) != 0x10f)
321 cpu_relax(); 322 cpu_relax();
322 323
323 /* okay, everything is synched up. */ 324 /* okay, everything is synched up. */
324 spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags); 325 spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
326 flags);
325 } 327 }
326 return; 328 return;
327} 329}
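[Editor's note] For bridges without the usable write-buffer-flush register, the flush is an interrupt handshake done under the per-device lock: zero the flush word, poke the force-interrupt address, and spin until the interrupt handler stores the completion value 0x10f. A hedged sketch of that handshake; the two pointers are illustrative stand-ins for sfdl_flush_addr and sfdl_force_int_addr:

	#include <linux/spinlock.h>

	static void flush_write_buffers(spinlock_t *lock,
					volatile unsigned long *flush_word,
					volatile unsigned int *force_int)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		*flush_word = 0;		/* arm: handler overwrites this */
		*force_int = 1;			/* trigger the flush interrupt */
		while (*flush_word != 0x10f)	/* 0x10f == handler's done value */
			cpu_relax();
		spin_unlock_irqrestore(lock, flags);
	}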
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 1f500c81002c..2fac27049bf6 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -23,14 +23,16 @@ int
23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) 23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
24{ 24{
25 struct ia64_sal_retval ret_stuff; 25 struct ia64_sal_retval ret_stuff;
26 uint64_t busnum; 26 u64 busnum;
27 u64 segment;
27 28
28 ret_stuff.status = 0; 29 ret_stuff.status = 0;
29 ret_stuff.v0 = 0; 30 ret_stuff.v0 = 0;
30 31
32 segment = soft->pbi_buscommon.bs_persist_segment;
31 busnum = soft->pbi_buscommon.bs_persist_busnum; 33 busnum = soft->pbi_buscommon.bs_persist_busnum;
32 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum, 34 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
33 (u64) device, (u64) resp, 0, 0, 0, 0); 35 busnum, (u64) device, (u64) resp, 0, 0, 0);
34 36
35 return (int)ret_stuff.v0; 37 return (int)ret_stuff.v0;
36} 38}
@@ -40,15 +42,17 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
40 void *resp) 42 void *resp)
41{ 43{
42 struct ia64_sal_retval ret_stuff; 44 struct ia64_sal_retval ret_stuff;
43 uint64_t busnum; 45 u64 busnum;
46 u64 segment;
44 47
45 ret_stuff.status = 0; 48 ret_stuff.status = 0;
46 ret_stuff.v0 = 0; 49 ret_stuff.v0 = 0;
47 50
51 segment = soft->pbi_buscommon.bs_persist_segment;
48 busnum = soft->pbi_buscommon.bs_persist_busnum; 52 busnum = soft->pbi_buscommon.bs_persist_busnum;
49 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE, 53 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
50 (u64) busnum, (u64) device, (u64) action, 54 segment, busnum, (u64) device, (u64) action,
51 (u64) resp, 0, 0, 0); 55 (u64) resp, 0, 0);
52 56
53 return (int)ret_stuff.v0; 57 return (int)ret_stuff.v0;
54} 58}
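[Editor's note] Both slot routines now pass the persistent segment ahead of the bus number, dropping one trailing zero so the SAL argument count stays fixed (in this usage the macro takes the return struct, a function code, and seven u64 arguments). The enable path, restated as a fragment with soft, device and resp as in the function above:

	struct ia64_sal_retval ret_stuff = { .status = 0, .v0 = 0 };
	u64 segment = soft->pbi_buscommon.bs_persist_segment;
	u64 busnum = soft->pbi_buscommon.bs_persist_busnum;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE,
			segment, busnum, (u64) device, (u64) resp, 0, 0, 0);
	return (int) ret_stuff.v0;	/* SAL's result is returned in v0 */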
@@ -56,7 +60,7 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
56static int sal_pcibr_error_interrupt(struct pcibus_info *soft) 60static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
57{ 61{
58 struct ia64_sal_retval ret_stuff; 62 struct ia64_sal_retval ret_stuff;
59 uint64_t busnum; 63 u64 busnum;
60 int segment; 64 int segment;
61 ret_stuff.status = 0; 65 ret_stuff.status = 0;
62 ret_stuff.v0 = 0; 66 ret_stuff.v0 = 0;
@@ -92,7 +96,8 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
92 cnodeid_t near_cnode; 96 cnodeid_t near_cnode;
93 struct hubdev_info *hubdev_info; 97 struct hubdev_info *hubdev_info;
94 struct pcibus_info *soft; 98 struct pcibus_info *soft;
95 struct sn_flush_device_list *sn_flush_device_list; 99 struct sn_flush_device_kernel *sn_flush_device_kernel;
100 struct sn_flush_device_common *common;
96 101
97 if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) { 102 if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
98 return NULL; 103 return NULL;
@@ -137,20 +142,19 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
137 hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); 142 hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
138 143
139 if (hubdev_info->hdi_flush_nasid_list.widget_p) { 144 if (hubdev_info->hdi_flush_nasid_list.widget_p) {
140 sn_flush_device_list = hubdev_info->hdi_flush_nasid_list. 145 sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
141 widget_p[(int)soft->pbi_buscommon.bs_xid]; 146 widget_p[(int)soft->pbi_buscommon.bs_xid];
142 if (sn_flush_device_list) { 147 if (sn_flush_device_kernel) {
143 for (j = 0; j < DEV_PER_WIDGET; 148 for (j = 0; j < DEV_PER_WIDGET;
144 j++, sn_flush_device_list++) { 149 j++, sn_flush_device_kernel++) {
145 if (sn_flush_device_list->sfdl_slot == -1) 150 common = sn_flush_device_kernel->common;
151 if (common->sfdl_slot == -1)
146 continue; 152 continue;
147 if ((sn_flush_device_list-> 153 if ((common->sfdl_persistent_segment ==
148 sfdl_persistent_segment ==
149 soft->pbi_buscommon.bs_persist_segment) && 154 soft->pbi_buscommon.bs_persist_segment) &&
150 (sn_flush_device_list-> 155 (common->sfdl_persistent_busnum ==
151 sfdl_persistent_busnum ==
152 soft->pbi_buscommon.bs_persist_busnum)) 156 soft->pbi_buscommon.bs_persist_busnum))
153 sn_flush_device_list->sfdl_pcibus_info = 157 common->sfdl_pcibus_info =
154 soft; 158 soft;
155 } 159 }
156 } 160 }
@@ -159,9 +163,9 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
159 /* Setup the PMU ATE map */ 163 /* Setup the PMU ATE map */
160 soft->pbi_int_ate_resource.lowest_free_index = 0; 164 soft->pbi_int_ate_resource.lowest_free_index = 0;
161 soft->pbi_int_ate_resource.ate = 165 soft->pbi_int_ate_resource.ate =
162 kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL); 166 kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
163 memset(soft->pbi_int_ate_resource.ate, 0, 167 memset(soft->pbi_int_ate_resource.ate, 0,
164 (soft->pbi_int_ate_size * sizeof(uint64_t))); 168 (soft->pbi_int_ate_size * sizeof(u64)));
165 169
166 if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { 170 if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
167 /* TIO PCI Bridge: find nearest node with CPUs */ 171 /* TIO PCI Bridge: find nearest node with CPUs */
@@ -203,7 +207,7 @@ void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
203 struct pcidev_info *pcidev_info; 207 struct pcidev_info *pcidev_info;
204 struct pcibus_info *pcibus_info; 208 struct pcibus_info *pcibus_info;
205 int bit = sn_irq_info->irq_int_bit; 209 int bit = sn_irq_info->irq_int_bit;
206 uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr; 210 u64 xtalk_addr = sn_irq_info->irq_xtalkaddr;
207 211
208 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; 212 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
209 if (pcidev_info) { 213 if (pcidev_info) {
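[Editor's note] One small aside on the ATE map setup left untouched above: the kmalloc()-then-memset() pair is exactly the pattern kzalloc() collapses, so on trees that provide it the allocation could read as follows (a sketch, not part of this change; the failure check is illustrative, the original does not make one):

	#include <linux/slab.h>

	soft->pbi_int_ate_resource.ate =
		kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
	if (soft->pbi_int_ate_resource.ate == NULL)
		return NULL;	/* illustrative failure handling */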
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 79fdb91d7259..8b8bbd51d433 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -23,7 +23,7 @@ union br_ptr {
23/* 23/*
24 * Control Register Access -- Read/Write 0000_0020 24 * Control Register Access -- Read/Write 0000_0020
25 */ 25 */
26void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits) 26void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
27{ 27{
28 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 28 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
29 29
@@ -43,7 +43,7 @@ void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
43 } 43 }
44} 44}
45 45
46void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits) 46void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
47{ 47{
48 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 48 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
49 49
@@ -66,10 +66,10 @@ void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
66/* 66/*
67 * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050 67 * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
68 */ 68 */
69uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info) 69u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
70{ 70{
71 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 71 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
72 uint64_t ret = 0; 72 u64 ret = 0;
73 73
74 if (pcibus_info) { 74 if (pcibus_info) {
75 switch (pcibus_info->pbi_bridge_type) { 75 switch (pcibus_info->pbi_bridge_type) {
@@ -96,10 +96,10 @@ uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
96/* 96/*
97 * Interrupt Status Register Access -- Read Only 0000_0100 97 * Interrupt Status Register Access -- Read Only 0000_0100
98 */ 98 */
99uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info) 99u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
100{ 100{
101 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 101 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
102 uint64_t ret = 0; 102 u64 ret = 0;
103 103
104 if (pcibus_info) { 104 if (pcibus_info) {
105 switch (pcibus_info->pbi_bridge_type) { 105 switch (pcibus_info->pbi_bridge_type) {
@@ -121,7 +121,7 @@ uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
121/* 121/*
122 * Interrupt Enable Register Access -- Read/Write 0000_0108 122 * Interrupt Enable Register Access -- Read/Write 0000_0108
123 */ 123 */
124void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits) 124void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
125{ 125{
126 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 126 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
127 127
@@ -141,7 +141,7 @@ void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
141 } 141 }
142} 142}
143 143
144void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits) 144void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
145{ 145{
146 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 146 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
147 147
@@ -165,7 +165,7 @@ void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
165 * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168 165 * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
166 */ 166 */
167void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n, 167void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
168 uint64_t addr) 168 u64 addr)
169{ 169{
170 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 170 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
171 171
@@ -217,10 +217,10 @@ void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
217/* 217/*
218 * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258 218 * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
219 */ 219 */
220uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device) 220u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
221{ 221{
222 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 222 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
223 uint64_t ret = 0; 223 u64 ret = 0;
224 224
225 if (pcibus_info) { 225 if (pcibus_info) {
226 switch (pcibus_info->pbi_bridge_type) { 226 switch (pcibus_info->pbi_bridge_type) {
@@ -242,7 +242,7 @@ uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
242} 242}
243 243
244void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index, 244void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
245 uint64_t val) 245 u64 val)
246{ 246{
247 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 247 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
248 248
@@ -262,10 +262,10 @@ void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
262 } 262 }
263} 263}
264 264
265uint64_t __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index) 265u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
266{ 266{
267 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; 267 union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
268 uint64_t __iomem *ret = NULL; 268 u64 __iomem *ret = NULL;
269 269
270 if (pcibus_info) { 270 if (pcibus_info) {
271 switch (pcibus_info->pbi_bridge_type) { 271 switch (pcibus_info->pbi_bridge_type) {
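[Editor's note] Every accessor in pcibr_reg.c shares one shape: cast the bus base to union br_ptr, then switch on pbi_bridge_type to touch the PIC or TIO-CP layout of the same register. A condensed sketch of one read accessor; the register layouts are elided, and readq(), the field names, and the bridge-type constants stand in for whatever the file actually uses:

	u64 pcireg_example_get(struct pcibus_info *pcibus_info)
	{
		union br_ptr __iomem *ptr;
		u64 ret = 0;

		if (!pcibus_info)
			return ret;

		ptr = (union br_ptr __iomem *)
			pcibus_info->pbi_buscommon.bs_base;
		switch (pcibus_info->pbi_bridge_type) {
		case PCIBR_BRIDGETYPE_TIOCP:
			ret = readq(&ptr->tiocp.cp_example);	/* illustrative */
			break;
		case PCIBR_BRIDGETYPE_PIC:
			ret = readq(&ptr->pic.p_example);	/* illustrative */
			break;
		default:
			break;
		}
		return ret;
	}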
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 27aa1842dacc..7571a4025529 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -16,7 +16,7 @@
16#include <asm/sn/pcibus_provider_defs.h> 16#include <asm/sn/pcibus_provider_defs.h>
17#include <asm/sn/tioca_provider.h> 17#include <asm/sn/tioca_provider.h>
18 18
19uint32_t tioca_gart_found; 19u32 tioca_gart_found;
20EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */ 20EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
21 21
22LIST_HEAD(tioca_list); 22LIST_HEAD(tioca_list);
@@ -34,8 +34,8 @@ static int tioca_gart_init(struct tioca_kernel *);
34static int 34static int
35tioca_gart_init(struct tioca_kernel *tioca_kern) 35tioca_gart_init(struct tioca_kernel *tioca_kern)
36{ 36{
37 uint64_t ap_reg; 37 u64 ap_reg;
38 uint64_t offset; 38 u64 offset;
39 struct page *tmp; 39 struct page *tmp;
40 struct tioca_common *tioca_common; 40 struct tioca_common *tioca_common;
41 struct tioca __iomem *ca_base; 41 struct tioca __iomem *ca_base;
@@ -214,7 +214,7 @@ void
214tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) 214tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
215{ 215{
216 int cap_ptr; 216 int cap_ptr;
217 uint32_t reg; 217 u32 reg;
218 struct tioca __iomem *tioca_base; 218 struct tioca __iomem *tioca_base;
219 struct pci_dev *pdev; 219 struct pci_dev *pdev;
220 struct tioca_common *common; 220 struct tioca_common *common;
@@ -276,7 +276,7 @@ EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
276 * We will always use 0x1 276 * We will always use 0x1
277 * 55:55 - Swap bytes Currently unused 277 * 55:55 - Swap bytes Currently unused
278 */ 278 */
279static uint64_t 279static u64
280tioca_dma_d64(unsigned long paddr) 280tioca_dma_d64(unsigned long paddr)
281{ 281{
282 dma_addr_t bus_addr; 282 dma_addr_t bus_addr;
@@ -318,15 +318,15 @@ tioca_dma_d64(unsigned long paddr)
318 * and so a given CA can only directly target nodes in the range 318 * and so a given CA can only directly target nodes in the range
319 * xxx - xxx+255. 319 * xxx - xxx+255.
320 */ 320 */
321static uint64_t 321static u64
322tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr) 322tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
323{ 323{
324 struct tioca_common *tioca_common; 324 struct tioca_common *tioca_common;
325 struct tioca __iomem *ca_base; 325 struct tioca __iomem *ca_base;
326 uint64_t ct_addr; 326 u64 ct_addr;
327 dma_addr_t bus_addr; 327 dma_addr_t bus_addr;
328 uint32_t node_upper; 328 u32 node_upper;
329 uint64_t agp_dma_extn; 329 u64 agp_dma_extn;
330 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); 330 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
331 331
332 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; 332 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
@@ -367,10 +367,10 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
367 * dma_addr_t is guaranteed to be contiguous in CA bus space. 367 * dma_addr_t is guaranteed to be contiguous in CA bus space.
368 */ 368 */
369static dma_addr_t 369static dma_addr_t
370tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) 370tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
371{ 371{
372 int i, ps, ps_shift, entry, entries, mapsize, last_entry; 372 int i, ps, ps_shift, entry, entries, mapsize, last_entry;
373 uint64_t xio_addr, end_xio_addr; 373 u64 xio_addr, end_xio_addr;
374 struct tioca_common *tioca_common; 374 struct tioca_common *tioca_common;
375 struct tioca_kernel *tioca_kern; 375 struct tioca_kernel *tioca_kern;
376 dma_addr_t bus_addr = 0; 376 dma_addr_t bus_addr = 0;
@@ -514,10 +514,10 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
514 * The mapping mode used is based on the device's dma_mask. As a last resort 514 * The mapping mode used is based on the device's dma_mask. As a last resort
515 * use the GART mapped mode. 515 * use the GART mapped mode.
516 */ 516 */
517static uint64_t 517static u64
518tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) 518tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
519{ 519{
520 uint64_t mapaddr; 520 u64 mapaddr;
521 521
522 /* 522 /*
523 * If card is 64 or 48 bit addressable, use a direct mapping. 32 523 * If card is 64 or 48 bit addressable, use a direct mapping. 32
@@ -554,8 +554,8 @@ tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
554{ 554{
555 struct tioca_common *soft = arg; 555 struct tioca_common *soft = arg;
556 struct ia64_sal_retval ret_stuff; 556 struct ia64_sal_retval ret_stuff;
557 uint64_t segment; 557 u64 segment;
558 uint64_t busnum; 558 u64 busnum;
559 ret_stuff.status = 0; 559 ret_stuff.status = 0;
560 ret_stuff.v0 = 0; 560 ret_stuff.v0 = 0;
561 561
@@ -620,7 +620,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
620 INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); 620 INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
621 tioca_kern->ca_closest_node = 621 tioca_kern->ca_closest_node =
622 nasid_to_cnodeid(tioca_common->ca_closest_nasid); 622 nasid_to_cnodeid(tioca_common->ca_closest_nasid);
623 tioca_common->ca_kernel_private = (uint64_t) tioca_kern; 623 tioca_common->ca_kernel_private = (u64) tioca_kern;
624 624
625 bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment, 625 bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
626 tioca_common->ca_common.bs_persist_busnum); 626 tioca_common->ca_common.bs_persist_busnum);
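[Editor's note] As the comment above says, tioca_dma_map() picks the cheapest mode the device's dma_mask allows, falling back to the GART only when direct addressing is impossible. The cascade, sketched with illustrative predicates for the mask checks:

	#include <linux/pci.h>

	/* dma64_ok()/dma48_ok() stand in for the dma_mask comparisons made
	 * in tioca_dma_map(); the three helpers mirror the ones above. */
	static u64 choose_dma_map(struct pci_dev *pdev, u64 paddr, size_t len)
	{
		if (dma64_ok(pdev))			/* full 64-bit addressing */
			return dma_d64(paddr);
		if (dma48_ok(pdev))			/* 48-bit direct window */
			return dma_d48(pdev, paddr);
		return dma_mapped(pdev, paddr, len);	/* last resort: GART */
	}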
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index dda196c9e324..e52831ed93eb 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -81,10 +81,10 @@
81 * 61 - 0 since this is not an MSI transaction 81 * 61 - 0 since this is not an MSI transaction
82 * 60:54 - reserved, MBZ 82 * 60:54 - reserved, MBZ
83 */ 83 */
84static uint64_t 84static u64
85tioce_dma_d64(unsigned long ct_addr) 85tioce_dma_d64(unsigned long ct_addr)
86{ 86{
87 uint64_t bus_addr; 87 u64 bus_addr;
88 88
89 bus_addr = ct_addr | (1UL << 63); 89 bus_addr = ct_addr | (1UL << 63);
90 90
@@ -141,9 +141,9 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
141 * length, and if enough resources exist, fill in the ATE's and construct a 141 * length, and if enough resources exist, fill in the ATE's and construct a
142 * tioce_dmamap struct to track the mapping. 142 * tioce_dmamap struct to track the mapping.
143 */ 143 */
144static uint64_t 144static u64
145tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, 145tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
146 uint64_t ct_addr, int len) 146 u64 ct_addr, int len)
147{ 147{
148 int i; 148 int i;
149 int j; 149 int j;
@@ -152,11 +152,11 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
152 int entries; 152 int entries;
153 int nates; 153 int nates;
154 int pagesize; 154 int pagesize;
155 uint64_t *ate_shadow; 155 u64 *ate_shadow;
156 uint64_t *ate_reg; 156 u64 *ate_reg;
157 uint64_t addr; 157 u64 addr;
158 struct tioce *ce_mmr; 158 struct tioce *ce_mmr;
159 uint64_t bus_base; 159 u64 bus_base;
160 struct tioce_dmamap *map; 160 struct tioce_dmamap *map;
161 161
162 ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base; 162 ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
@@ -224,7 +224,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
224 224
225 addr = ct_addr; 225 addr = ct_addr;
226 for (j = 0; j < nates; j++) { 226 for (j = 0; j < nates; j++) {
227 uint64_t ate; 227 u64 ate;
228 228
229 ate = ATE_MAKE(addr, pagesize); 229 ate = ATE_MAKE(addr, pagesize);
230 ate_shadow[i + j] = ate; 230 ate_shadow[i + j] = ate;
@@ -252,15 +252,15 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
252 * 252 *
253 * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. 253 * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
254 */ 254 */
255static uint64_t 255static u64
256tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) 256tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
257{ 257{
258 int dma_ok; 258 int dma_ok;
259 int port; 259 int port;
260 struct tioce *ce_mmr; 260 struct tioce *ce_mmr;
261 struct tioce_kernel *ce_kern; 261 struct tioce_kernel *ce_kern;
262 uint64_t ct_upper; 262 u64 ct_upper;
263 uint64_t ct_lower; 263 u64 ct_lower;
264 dma_addr_t bus_addr; 264 dma_addr_t bus_addr;
265 265
266 ct_upper = ct_addr & ~0x3fffffffUL; 266 ct_upper = ct_addr & ~0x3fffffffUL;
@@ -269,7 +269,7 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
269 pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); 269 pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
270 270
271 if (ce_kern->ce_port[port].dirmap_refcnt == 0) { 271 if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
272 uint64_t tmp; 272 u64 tmp;
273 273
274 ce_kern->ce_port[port].dirmap_shadow = ct_upper; 274 ce_kern->ce_port[port].dirmap_shadow = ct_upper;
275 writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]); 275 writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]);
@@ -295,10 +295,10 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
295 * Given a TIOCE bus address, set the appropriate bit to indicate barrier 295 * Given a TIOCE bus address, set the appropriate bit to indicate barrier
296 * attributes. 296 * attributes.
297 */ 297 */
298static uint64_t 298static u64
299tioce_dma_barrier(uint64_t bus_addr, int on) 299tioce_dma_barrier(u64 bus_addr, int on)
300{ 300{
301 uint64_t barrier_bit; 301 u64 barrier_bit;
302 302
303 /* barrier not supported in M40/M40S mode */ 303 /* barrier not supported in M40/M40S mode */
304 if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) 304 if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr))
@@ -351,7 +351,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
351 351
352 list_for_each_entry(map, &ce_kern->ce_dmamap_list, 352 list_for_each_entry(map, &ce_kern->ce_dmamap_list,
353 ce_dmamap_list) { 353 ce_dmamap_list) {
354 uint64_t last; 354 u64 last;
355 355
356 last = map->pci_start + map->nbytes - 1; 356 last = map->pci_start + map->nbytes - 1;
357 if (bus_addr >= map->pci_start && bus_addr <= last) 357 if (bus_addr >= map->pci_start && bus_addr <= last)
@@ -385,17 +385,17 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
385 * This is the main wrapper for mapping host physical pages to CE PCI space. 385 * This is the main wrapper for mapping host physical pages to CE PCI space.
386 * The mapping mode used is based on the device's dma_mask. 386 * The mapping mode used is based on the device's dma_mask.
387 */ 387 */
388static uint64_t 388static u64
389tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, 389tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
390 int barrier) 390 int barrier)
391{ 391{
392 unsigned long flags; 392 unsigned long flags;
393 uint64_t ct_addr; 393 u64 ct_addr;
394 uint64_t mapaddr = 0; 394 u64 mapaddr = 0;
395 struct tioce_kernel *ce_kern; 395 struct tioce_kernel *ce_kern;
396 struct tioce_dmamap *map; 396 struct tioce_dmamap *map;
397 int port; 397 int port;
398 uint64_t dma_mask; 398 u64 dma_mask;
399 399
400 dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; 400 dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask;
401 401
@@ -425,7 +425,7 @@ tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count,
425 * address bits than this device can support. 425 * address bits than this device can support.
426 */ 426 */
427 list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { 427 list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
428 uint64_t last; 428 u64 last;
429 429
430 last = map->ct_start + map->nbytes - 1; 430 last = map->ct_start + map->nbytes - 1;
431 if (ct_addr >= map->ct_start && 431 if (ct_addr >= map->ct_start &&
@@ -501,8 +501,8 @@ dma_map_done:
501 * Simply call tioce_do_dma_map() to create a map with the barrier bit clear 501 * Simply call tioce_do_dma_map() to create a map with the barrier bit clear
502 * in the address. 502 * in the address.
503 */ 503 */
504static uint64_t 504static u64
505tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) 505tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
506{ 506{
507 return tioce_do_dma_map(pdev, paddr, byte_count, 0); 507 return tioce_do_dma_map(pdev, paddr, byte_count, 0);
508} 508}
@@ -515,8 +515,8 @@ tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
515 * 515 *
516 * Simply call tioce_do_dma_map() to create a map with the barrier bit set 516 * Simply call tioce_do_dma_map() to create a map with the barrier bit set
517 * in the address. 517 * in the address.
518 */ static uint64_t 518 */ static u64
519tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) 519tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
520{ 520{
521 return tioce_do_dma_map(pdev, paddr, byte_count, 1); 521 return tioce_do_dma_map(pdev, paddr, byte_count, 1);
522} 522}
@@ -551,7 +551,7 @@ tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
551tioce_kern_init(struct tioce_common *tioce_common) 551tioce_kern_init(struct tioce_common *tioce_common)
552{ 552{
553 int i; 553 int i;
554 uint32_t tmp; 554 u32 tmp;
555 struct tioce *tioce_mmr; 555 struct tioce *tioce_mmr;
556 struct tioce_kernel *tioce_kern; 556 struct tioce_kernel *tioce_kern;
557 557
@@ -563,7 +563,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
563 tioce_kern->ce_common = tioce_common; 563 tioce_kern->ce_common = tioce_common;
564 spin_lock_init(&tioce_kern->ce_lock); 564 spin_lock_init(&tioce_kern->ce_lock);
565 INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); 565 INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list);
566 tioce_common->ce_kernel_private = (uint64_t) tioce_kern; 566 tioce_common->ce_kernel_private = (u64) tioce_kern;
567 567
568 /* 568 /*
569 * Determine the secondary bus number of the port2 logical PPB. 569 * Determine the secondary bus number of the port2 logical PPB.
@@ -575,7 +575,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
575 raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment, 575 raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment,
576 tioce_common->ce_pcibus.bs_persist_busnum, 576 tioce_common->ce_pcibus.bs_persist_busnum,
577 PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); 577 PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
578 tioce_kern->ce_port1_secondary = (uint8_t) tmp; 578 tioce_kern->ce_port1_secondary = (u8) tmp;
579 579
580 /* 580 /*
581 * Set PMU pagesize to the largest size available, and zero out 581 * Set PMU pagesize to the largest size available, and zero out
@@ -615,7 +615,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
615 struct pcidev_info *pcidev_info; 615 struct pcidev_info *pcidev_info;
616 struct tioce_common *ce_common; 616 struct tioce_common *ce_common;
617 struct tioce *ce_mmr; 617 struct tioce *ce_mmr;
618 uint64_t force_int_val; 618 u64 force_int_val;
619 619
620 if (!sn_irq_info->irq_bridge) 620 if (!sn_irq_info->irq_bridge)
621 return; 621 return;
@@ -687,7 +687,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
687 struct tioce_common *ce_common; 687 struct tioce_common *ce_common;
688 struct tioce *ce_mmr; 688 struct tioce *ce_mmr;
689 int bit; 689 int bit;
690 uint64_t vector; 690 u64 vector;
691 691
692 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; 692 pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
693 if (!pcidev_info) 693 if (!pcidev_info)
@@ -699,7 +699,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
699 bit = sn_irq_info->irq_int_bit; 699 bit = sn_irq_info->irq_int_bit;
700 700
701 __sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); 701 __sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
702 vector = (uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT; 702 vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
703 vector |= sn_irq_info->irq_xtalkaddr; 703 vector |= sn_irq_info->irq_xtalkaddr;
704 writeq(vector, &ce_mmr->ce_adm_int_dest[bit]); 704 writeq(vector, &ce_mmr->ce_adm_int_dest[bit]);
705 __sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); 705 __sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
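[Editor's note] tioce_dma() and tioce_dma_consistent() differ only in whether the barrier attribute is requested, and tioce_dma_barrier() itself just sets or clears one bit in an already-formed bus address, refusing M40/M40S addresses where the barrier is unsupported. A sketch of the bit toggle; BARRIER_BIT and addr_is_m40() are illustrative, since the real code derives the bit from the bus-address mode:

	#define BARRIER_BIT (1UL << 62)	/* illustrative position */

	static u64 dma_barrier(u64 bus_addr, int on)
	{
		if (addr_is_m40(bus_addr))	/* stand-in for M40/M40S check */
			return bus_addr;	/* barrier unsupported: unchanged */

		return on ? (bus_addr | BARRIER_BIT)
			  : (bus_addr & ~BARRIER_BIT);
	}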