aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-ia64
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ia64
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--include/asm-ia64/a.out.h35
-rw-r--r--include/asm-ia64/acpi-ext.h17
-rw-r--r--include/asm-ia64/acpi.h112
-rw-r--r--include/asm-ia64/agp.h21
-rw-r--r--include/asm-ia64/asmmacro.h111
-rw-r--r--include/asm-ia64/atomic.h183
-rw-r--r--include/asm-ia64/bitops.h410
-rw-r--r--include/asm-ia64/break.h21
-rw-r--r--include/asm-ia64/bug.h15
-rw-r--r--include/asm-ia64/bugs.h19
-rw-r--r--include/asm-ia64/byteorder.h42
-rw-r--r--include/asm-ia64/cache.h30
-rw-r--r--include/asm-ia64/cacheflush.h50
-rw-r--r--include/asm-ia64/checksum.h76
-rw-r--r--include/asm-ia64/compat.h198
-rw-r--r--include/asm-ia64/cpu.h22
-rw-r--r--include/asm-ia64/cputime.h6
-rw-r--r--include/asm-ia64/current.h17
-rw-r--r--include/asm-ia64/cyclone.h15
-rw-r--r--include/asm-ia64/delay.h97
-rw-r--r--include/asm-ia64/div64.h1
-rw-r--r--include/asm-ia64/dma-mapping.h70
-rw-r--r--include/asm-ia64/dma.h23
-rw-r--r--include/asm-ia64/elf.h259
-rw-r--r--include/asm-ia64/errno.h1
-rw-r--r--include/asm-ia64/fcntl.h84
-rw-r--r--include/asm-ia64/fpswa.h73
-rw-r--r--include/asm-ia64/fpu.h66
-rw-r--r--include/asm-ia64/gcc_intrin.h597
-rw-r--r--include/asm-ia64/hardirq.h38
-rw-r--r--include/asm-ia64/hdreg.h14
-rw-r--r--include/asm-ia64/hw_irq.h144
-rw-r--r--include/asm-ia64/ia32.h38
-rw-r--r--include/asm-ia64/ia64regs.h100
-rw-r--r--include/asm-ia64/ide.h71
-rw-r--r--include/asm-ia64/intel_intrin.h257
-rw-r--r--include/asm-ia64/intrinsics.h181
-rw-r--r--include/asm-ia64/io.h484
-rw-r--r--include/asm-ia64/ioctl.h77
-rw-r--r--include/asm-ia64/ioctl32.h1
-rw-r--r--include/asm-ia64/ioctls.h89
-rw-r--r--include/asm-ia64/iosapic.h110
-rw-r--r--include/asm-ia64/ipcbuf.h28
-rw-r--r--include/asm-ia64/irq.h43
-rw-r--r--include/asm-ia64/kmap_types.h31
-rw-r--r--include/asm-ia64/kregs.h163
-rw-r--r--include/asm-ia64/linkage.h6
-rw-r--r--include/asm-ia64/local.h50
-rw-r--r--include/asm-ia64/machvec.h390
-rw-r--r--include/asm-ia64/machvec_dig.h18
-rw-r--r--include/asm-ia64/machvec_hpsim.h18
-rw-r--r--include/asm-ia64/machvec_hpzx1.h38
-rw-r--r--include/asm-ia64/machvec_hpzx1_swiotlb.h43
-rw-r--r--include/asm-ia64/machvec_init.h32
-rw-r--r--include/asm-ia64/machvec_sn2.h126
-rw-r--r--include/asm-ia64/mc146818rtc.h10
-rw-r--r--include/asm-ia64/mca.h132
-rw-r--r--include/asm-ia64/mca_asm.h312
-rw-r--r--include/asm-ia64/meminit.h60
-rw-r--r--include/asm-ia64/mman.h51
-rw-r--r--include/asm-ia64/mmu.h11
-rw-r--r--include/asm-ia64/mmu_context.h170
-rw-r--r--include/asm-ia64/mmzone.h32
-rw-r--r--include/asm-ia64/module.h35
-rw-r--r--include/asm-ia64/msgbuf.h27
-rw-r--r--include/asm-ia64/msi.h20
-rw-r--r--include/asm-ia64/namei.h25
-rw-r--r--include/asm-ia64/nodedata.h52
-rw-r--r--include/asm-ia64/numa.h74
-rw-r--r--include/asm-ia64/numnodes.h15
-rw-r--r--include/asm-ia64/page.h207
-rw-r--r--include/asm-ia64/pal.h1564
-rw-r--r--include/asm-ia64/param.h42
-rw-r--r--include/asm-ia64/parport.h20
-rw-r--r--include/asm-ia64/patch.h25
-rw-r--r--include/asm-ia64/pci.h141
-rw-r--r--include/asm-ia64/percpu.h72
-rw-r--r--include/asm-ia64/perfmon.h259
-rw-r--r--include/asm-ia64/perfmon_default_smpl.h83
-rw-r--r--include/asm-ia64/pgalloc.h167
-rw-r--r--include/asm-ia64/pgtable.h593
-rw-r--r--include/asm-ia64/poll.h31
-rw-r--r--include/asm-ia64/posix_types.h126
-rw-r--r--include/asm-ia64/processor.h698
-rw-r--r--include/asm-ia64/ptrace.h337
-rw-r--r--include/asm-ia64/ptrace_offsets.h268
-rw-r--r--include/asm-ia64/resource.h8
-rw-r--r--include/asm-ia64/rse.h66
-rw-r--r--include/asm-ia64/rwsem.h188
-rw-r--r--include/asm-ia64/sal.h840
-rw-r--r--include/asm-ia64/scatterlist.h28
-rw-r--r--include/asm-ia64/sections.h22
-rw-r--r--include/asm-ia64/segment.h6
-rw-r--r--include/asm-ia64/semaphore.h102
-rw-r--r--include/asm-ia64/sembuf.h22
-rw-r--r--include/asm-ia64/serial.h19
-rw-r--r--include/asm-ia64/setup.h6
-rw-r--r--include/asm-ia64/shmbuf.h38
-rw-r--r--include/asm-ia64/shmparam.h12
-rw-r--r--include/asm-ia64/sigcontext.h70
-rw-r--r--include/asm-ia64/siginfo.h141
-rw-r--r--include/asm-ia64/signal.h185
-rw-r--r--include/asm-ia64/smp.h134
-rw-r--r--include/asm-ia64/sn/addrs.h238
-rw-r--r--include/asm-ia64/sn/arch.h52
-rw-r--r--include/asm-ia64/sn/bte.h148
-rw-r--r--include/asm-ia64/sn/clksupport.h28
-rw-r--r--include/asm-ia64/sn/fetchop.h85
-rw-r--r--include/asm-ia64/sn/geo.h124
-rw-r--r--include/asm-ia64/sn/intr.h56
-rw-r--r--include/asm-ia64/sn/io.h265
-rw-r--r--include/asm-ia64/sn/klconfig.h272
-rw-r--r--include/asm-ia64/sn/l1.h36
-rw-r--r--include/asm-ia64/sn/leds.h33
-rw-r--r--include/asm-ia64/sn/module.h127
-rw-r--r--include/asm-ia64/sn/nodepda.h86
-rw-r--r--include/asm-ia64/sn/pda.h80
-rw-r--r--include/asm-ia64/sn/rw_mmr.h74
-rw-r--r--include/asm-ia64/sn/shub_mmr.h441
-rw-r--r--include/asm-ia64/sn/shubio.h3476
-rw-r--r--include/asm-ia64/sn/simulator.h27
-rw-r--r--include/asm-ia64/sn/sn2/sn_hwperf.h226
-rw-r--r--include/asm-ia64/sn/sn_cpuid.h144
-rw-r--r--include/asm-ia64/sn/sn_fru.h44
-rw-r--r--include/asm-ia64/sn/sn_sal.h1015
-rw-r--r--include/asm-ia64/sn/sndrv.h47
-rw-r--r--include/asm-ia64/sn/types.h25
-rw-r--r--include/asm-ia64/socket.h59
-rw-r--r--include/asm-ia64/sockios.h19
-rw-r--r--include/asm-ia64/spinlock.h208
-rw-r--r--include/asm-ia64/stat.h51
-rw-r--r--include/asm-ia64/statfs.h62
-rw-r--r--include/asm-ia64/string.h22
-rw-r--r--include/asm-ia64/suspend.h1
-rw-r--r--include/asm-ia64/system.h295
-rw-r--r--include/asm-ia64/termbits.h182
-rw-r--r--include/asm-ia64/termios.h113
-rw-r--r--include/asm-ia64/thread_info.h94
-rw-r--r--include/asm-ia64/timex.h40
-rw-r--r--include/asm-ia64/tlb.h245
-rw-r--r--include/asm-ia64/tlbflush.h99
-rw-r--r--include/asm-ia64/topology.h90
-rw-r--r--include/asm-ia64/types.h75
-rw-r--r--include/asm-ia64/uaccess.h408
-rw-r--r--include/asm-ia64/ucontext.h12
-rw-r--r--include/asm-ia64/unaligned.h6
-rw-r--r--include/asm-ia64/unistd.h399
-rw-r--r--include/asm-ia64/unwind.h240
-rw-r--r--include/asm-ia64/user.h58
-rw-r--r--include/asm-ia64/ustack.h16
-rw-r--r--include/asm-ia64/vga.h22
-rw-r--r--include/asm-ia64/xor.h33
152 files changed, 22805 insertions, 0 deletions
diff --git a/include/asm-ia64/a.out.h b/include/asm-ia64/a.out.h
new file mode 100644
index 000000000000..7293ac1df3ab
--- /dev/null
+++ b/include/asm-ia64/a.out.h
@@ -0,0 +1,35 @@
1#ifndef _ASM_IA64_A_OUT_H
2#define _ASM_IA64_A_OUT_H
3
4/*
5 * No a.out format has been (or should be) defined so this file is
6 * just a dummy that allows us to get binfmt_elf compiled. It
7 * probably would be better to clean up binfmt_elf.c so it does not
8 * necessarily depend on there being a.out support.
9 *
10 * Modified 1998-2002
11 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
12 */
13
14#include <linux/types.h>
15
16struct exec {
17 unsigned long a_info;
18 unsigned long a_text;
19 unsigned long a_data;
20 unsigned long a_bss;
21 unsigned long a_entry;
22};
23
24#define N_TXTADDR(x) 0
25#define N_DATADDR(x) 0
26#define N_BSSADDR(x) 0
27#define N_DRSIZE(x) 0
28#define N_TRSIZE(x) 0
29#define N_SYMSIZE(x) 0
30#define N_TXTOFF(x) 0
31
32#ifdef __KERNEL__
33#include <asm/ustack.h>
34#endif
35#endif /* _ASM_IA64_A_OUT_H */
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
new file mode 100644
index 000000000000..9271d74c64cc
--- /dev/null
+++ b/include/asm-ia64/acpi-ext.h
@@ -0,0 +1,17 @@
1/*
2 * ia64/platform/hp/common/hp_acpi.h
3 *
4 * Copyright (C) 2003 Hewlett-Packard
5 * Copyright (C) Alex Williamson
6 * Copyright (C) Bjorn Helgaas
7 *
8 * Vendor specific extensions to ACPI.
9 */
10#ifndef _ASM_IA64_ACPI_EXT_H
11#define _ASM_IA64_ACPI_EXT_H
12
13#include <linux/types.h>
14
15extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
16
17#endif /* _ASM_IA64_ACPI_EXT_H */
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
new file mode 100644
index 000000000000..6a26a977f253
--- /dev/null
+++ b/include/asm-ia64/acpi.h
@@ -0,0 +1,112 @@
1/*
2 * asm-ia64/acpi.h
3 *
4 * Copyright (C) 1999 VA Linux Systems
5 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
6 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
7 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
8 *
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 */
27
28#ifndef _ASM_ACPI_H
29#define _ASM_ACPI_H
30
31#ifdef __KERNEL__
32
33#include <linux/init.h>
34#include <linux/numa.h>
35#include <asm/system.h>
36
37#define COMPILER_DEPENDENT_INT64 long
38#define COMPILER_DEPENDENT_UINT64 unsigned long
39
40/*
41 * Calling conventions:
42 *
43 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
44 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
45 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
46 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
47 */
48#define ACPI_SYSTEM_XFACE
49#define ACPI_EXTERNAL_XFACE
50#define ACPI_INTERNAL_XFACE
51#define ACPI_INTERNAL_VAR_XFACE
52
53/* Asm macros */
54
55#define ACPI_ASM_MACROS
56#define BREAKPOINT3
57#define ACPI_DISABLE_IRQS() local_irq_disable()
58#define ACPI_ENABLE_IRQS() local_irq_enable()
59#define ACPI_FLUSH_CPU_CACHE()
60
61static inline int
62ia64_acpi_acquire_global_lock (unsigned int *lock)
63{
64 unsigned int old, new, val;
65 do {
66 old = *lock;
67 new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
68 val = ia64_cmpxchg4_acq(lock, new, old);
69 } while (unlikely (val != old));
70 return (new < 3) ? -1 : 0;
71}
72
73static inline int
74ia64_acpi_release_global_lock (unsigned int *lock)
75{
76 unsigned int old, new, val;
77 do {
78 old = *lock;
79 new = old & ~0x3;
80 val = ia64_cmpxchg4_acq(lock, new, old);
81 } while (unlikely (val != old));
82 return old & 0x1;
83}
84
85#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
86 ((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
87
88#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
89 ((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
90
91#define acpi_disabled 0 /* ACPI always enabled on IA64 */
92#define acpi_noirq 0 /* ACPI always enabled on IA64 */
93#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
94#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
95static inline void disable_acpi(void) { }
96
97const char *acpi_get_sysname (void);
98int acpi_request_vector (u32 int_type);
99int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
100
101#ifdef CONFIG_ACPI_NUMA
102/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
103#define MAX_PXM_DOMAINS (256)
104extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
105extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
106#endif
107
108extern u16 ia64_acpiid_to_sapicid[];
109
110#endif /*__KERNEL__*/
111
112#endif /*_ASM_ACPI_H*/
diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h
new file mode 100644
index 000000000000..d1316f1e6ee1
--- /dev/null
+++ b/include/asm-ia64/agp.h
@@ -0,0 +1,21 @@
1#ifndef _ASM_IA64_AGP_H
2#define _ASM_IA64_AGP_H
3
4/*
5 * IA-64 specific AGP definitions.
6 *
7 * Copyright (C) 2002-2003 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11/*
12 * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
13 * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
14 * (unlike x86, where it gets mapped "write-coalescing").
15 */
16#define map_page_into_agp(page) /* nothing */
17#define unmap_page_from_agp(page) /* nothing */
18#define flush_agp_mappings() /* nothing */
19#define flush_agp_cache() mb()
20
21#endif /* _ASM_IA64_AGP_H */
diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h
new file mode 100644
index 000000000000..77af457f4ad7
--- /dev/null
+++ b/include/asm-ia64/asmmacro.h
@@ -0,0 +1,111 @@
1#ifndef _ASM_IA64_ASMMACRO_H
2#define _ASM_IA64_ASMMACRO_H
3
4/*
5 * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/config.h>
10
11#define ENTRY(name) \
12 .align 32; \
13 .proc name; \
14name:
15
16#define ENTRY_MIN_ALIGN(name) \
17 .align 16; \
18 .proc name; \
19name:
20
21#define GLOBAL_ENTRY(name) \
22 .global name; \
23 ENTRY(name)
24
25#define END(name) \
26 .endp name
27
28/*
29 * Helper macros to make unwind directives more readable:
30 */
31
32/* prologue_gr: */
33#define ASM_UNW_PRLG_RP 0x8
34#define ASM_UNW_PRLG_PFS 0x4
35#define ASM_UNW_PRLG_PSP 0x2
36#define ASM_UNW_PRLG_PR 0x1
37#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs))
38
39/*
40 * Helper macros for accessing user memory.
41 */
42
43 .section "__ex_table", "a" // declare section & section attributes
44 .previous
45
46# define EX(y,x...) \
47 .xdata4 "__ex_table", 99f-., y-.; \
48 [99:] x
49# define EXCLR(y,x...) \
50 .xdata4 "__ex_table", 99f-., y-.+4; \
51 [99:] x
52
53/*
54 * Mark instructions that need a load of a virtual address patched to be
55 * a load of a physical address. We use this either in critical performance
56 * path (ivt.S - TLB miss processing) or in places where it might not be
57 * safe to use a "tpa" instruction (mca_asm.S - error recovery).
58 */
59 .section ".data.patch.vtop", "a" // declare section & section attributes
60 .previous
61
62#define LOAD_PHYSICAL(pr, reg, obj) \
63[1:](pr)movl reg = obj; \
64 .xdata4 ".data.patch.vtop", 1b-.
65
66/*
67 * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
68 * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
69 */
70#define DO_MCKINLEY_E9_WORKAROUND
71
72#ifdef DO_MCKINLEY_E9_WORKAROUND
73 .section ".data.patch.mckinley_e9", "a"
74 .previous
75/* workaround for Itanium 2 Errata 9: */
76# define FSYS_RETURN \
77 .xdata4 ".data.patch.mckinley_e9", 1f-.; \
781:{ .mib; \
79 nop.m 0; \
80 mov r16=ar.pfs; \
81 br.call.sptk.many b7=2f;; \
82 }; \
832:{ .mib; \
84 nop.m 0; \
85 mov ar.pfs=r16; \
86 br.ret.sptk.many b6;; \
87 }
88#else
89# define FSYS_RETURN br.ret.sptk.many b6
90#endif
91
92/*
93 * Up until early 2004, use of .align within a function caused bad unwind info.
94 * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
95 * otherwise.
96 */
97#ifdef HAVE_WORKING_TEXT_ALIGN
98# define TEXT_ALIGN(n) .align n
99#else
100# define TEXT_ALIGN(n)
101#endif
102
103#ifdef HAVE_SERIALIZE_DIRECTIVE
104# define dv_serialize_data .serialize.data
105# define dv_serialize_instruction .serialize.instruction
106#else
107# define dv_serialize_data
108# define dv_serialize_instruction
109#endif
110
111#endif /* _ASM_IA64_ASMMACRO_H */
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
new file mode 100644
index 000000000000..874a6f890e75
--- /dev/null
+++ b/include/asm-ia64/atomic.h
@@ -0,0 +1,183 @@
1#ifndef _ASM_IA64_ATOMIC_H
2#define _ASM_IA64_ATOMIC_H
3
4/*
5 * Atomic operations that C can't guarantee us. Useful for
6 * resource counting etc..
7 *
8 * NOTE: don't mess with the types below! The "unsigned long" and
9 * "int" types were carefully placed so as to ensure proper operation
10 * of the macros.
11 *
12 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
13 * David Mosberger-Tang <davidm@hpl.hp.com>
14 */
15#include <linux/types.h>
16
17#include <asm/intrinsics.h>
18
19/*
20 * On IA-64, counter must always be volatile to ensure that that the
21 * memory accesses are ordered.
22 */
23typedef struct { volatile __s32 counter; } atomic_t;
24typedef struct { volatile __s64 counter; } atomic64_t;
25
26#define ATOMIC_INIT(i) ((atomic_t) { (i) })
27#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
28
29#define atomic_read(v) ((v)->counter)
30#define atomic64_read(v) ((v)->counter)
31
32#define atomic_set(v,i) (((v)->counter) = (i))
33#define atomic64_set(v,i) (((v)->counter) = (i))
34
35static __inline__ int
36ia64_atomic_add (int i, atomic_t *v)
37{
38 __s32 old, new;
39 CMPXCHG_BUGCHECK_DECL
40
41 do {
42 CMPXCHG_BUGCHECK(v);
43 old = atomic_read(v);
44 new = old + i;
45 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
46 return new;
47}
48
49static __inline__ int
50ia64_atomic64_add (__s64 i, atomic64_t *v)
51{
52 __s64 old, new;
53 CMPXCHG_BUGCHECK_DECL
54
55 do {
56 CMPXCHG_BUGCHECK(v);
57 old = atomic_read(v);
58 new = old + i;
59 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
60 return new;
61}
62
63static __inline__ int
64ia64_atomic_sub (int i, atomic_t *v)
65{
66 __s32 old, new;
67 CMPXCHG_BUGCHECK_DECL
68
69 do {
70 CMPXCHG_BUGCHECK(v);
71 old = atomic_read(v);
72 new = old - i;
73 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
74 return new;
75}
76
77static __inline__ int
78ia64_atomic64_sub (__s64 i, atomic64_t *v)
79{
80 __s64 old, new;
81 CMPXCHG_BUGCHECK_DECL
82
83 do {
84 CMPXCHG_BUGCHECK(v);
85 old = atomic_read(v);
86 new = old - i;
87 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
88 return new;
89}
90
91#define atomic_add_return(i,v) \
92({ \
93 int __ia64_aar_i = (i); \
94 (__builtin_constant_p(i) \
95 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
96 || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
97 || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
98 || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
99 ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
100 : ia64_atomic_add(__ia64_aar_i, v); \
101})
102
103#define atomic64_add_return(i,v) \
104({ \
105 long __ia64_aar_i = (i); \
106 (__builtin_constant_p(i) \
107 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
108 || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
109 || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
110 || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
111 ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
112 : ia64_atomic64_add(__ia64_aar_i, v); \
113})
114
115/*
116 * Atomically add I to V and return TRUE if the resulting value is
117 * negative.
118 */
119static __inline__ int
120atomic_add_negative (int i, atomic_t *v)
121{
122 return atomic_add_return(i, v) < 0;
123}
124
125static __inline__ int
126atomic64_add_negative (__s64 i, atomic64_t *v)
127{
128 return atomic64_add_return(i, v) < 0;
129}
130
131#define atomic_sub_return(i,v) \
132({ \
133 int __ia64_asr_i = (i); \
134 (__builtin_constant_p(i) \
135 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
136 || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
137 || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
138 || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
139 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
140 : ia64_atomic_sub(__ia64_asr_i, v); \
141})
142
143#define atomic64_sub_return(i,v) \
144({ \
145 long __ia64_asr_i = (i); \
146 (__builtin_constant_p(i) \
147 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
148 || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
149 || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
150 || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
151 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
152 : ia64_atomic64_sub(__ia64_asr_i, v); \
153})
154
155#define atomic_dec_return(v) atomic_sub_return(1, (v))
156#define atomic_inc_return(v) atomic_add_return(1, (v))
157#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
158#define atomic64_inc_return(v) atomic64_add_return(1, (v))
159
160#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
161#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
162#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
163#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
164#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
165#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
166
167#define atomic_add(i,v) atomic_add_return((i), (v))
168#define atomic_sub(i,v) atomic_sub_return((i), (v))
169#define atomic_inc(v) atomic_add(1, (v))
170#define atomic_dec(v) atomic_sub(1, (v))
171
172#define atomic64_add(i,v) atomic64_add_return((i), (v))
173#define atomic64_sub(i,v) atomic64_sub_return((i), (v))
174#define atomic64_inc(v) atomic64_add(1, (v))
175#define atomic64_dec(v) atomic64_sub(1, (v))
176
177/* Atomic operations are already serializing */
178#define smp_mb__before_atomic_dec() barrier()
179#define smp_mb__after_atomic_dec() barrier()
180#define smp_mb__before_atomic_inc() barrier()
181#define smp_mb__after_atomic_inc() barrier()
182
183#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
new file mode 100644
index 000000000000..925d54cee475
--- /dev/null
+++ b/include/asm-ia64/bitops.h
@@ -0,0 +1,410 @@
1#ifndef _ASM_IA64_BITOPS_H
2#define _ASM_IA64_BITOPS_H
3
4/*
5 * Copyright (C) 1998-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
9 * scheduler patch
10 */
11
12#include <linux/compiler.h>
13#include <linux/types.h>
14#include <asm/bitops.h>
15#include <asm/intrinsics.h>
16
17/**
18 * set_bit - Atomically set a bit in memory
19 * @nr: the bit to set
20 * @addr: the address to start counting from
21 *
22 * This function is atomic and may not be reordered. See __set_bit()
23 * if you do not require the atomic guarantees.
24 * Note that @nr may be almost arbitrarily large; this function is not
25 * restricted to acting on a single-word quantity.
26 *
27 * The address must be (at least) "long" aligned.
28 * Note that there are driver (e.g., eepro100) which use these operations to operate on
29 * hw-defined data-structures, so we can't easily change these operations to force a
30 * bigger alignment.
31 *
32 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
33 */
34static __inline__ void
35set_bit (int nr, volatile void *addr)
36{
37 __u32 bit, old, new;
38 volatile __u32 *m;
39 CMPXCHG_BUGCHECK_DECL
40
41 m = (volatile __u32 *) addr + (nr >> 5);
42 bit = 1 << (nr & 31);
43 do {
44 CMPXCHG_BUGCHECK(m);
45 old = *m;
46 new = old | bit;
47 } while (cmpxchg_acq(m, old, new) != old);
48}
49
50/**
51 * __set_bit - Set a bit in memory
52 * @nr: the bit to set
53 * @addr: the address to start counting from
54 *
55 * Unlike set_bit(), this function is non-atomic and may be reordered.
56 * If it's called on the same region of memory simultaneously, the effect
57 * may be that only one operation succeeds.
58 */
59static __inline__ void
60__set_bit (int nr, volatile void *addr)
61{
62 *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
63}
64
65/*
66 * clear_bit() has "acquire" semantics.
67 */
68#define smp_mb__before_clear_bit() smp_mb()
69#define smp_mb__after_clear_bit() do { /* skip */; } while (0)
70
71/**
72 * clear_bit - Clears a bit in memory
73 * @nr: Bit to clear
74 * @addr: Address to start counting from
75 *
76 * clear_bit() is atomic and may not be reordered. However, it does
77 * not contain a memory barrier, so if it is used for locking purposes,
78 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
79 * in order to ensure changes are visible on other processors.
80 */
81static __inline__ void
82clear_bit (int nr, volatile void *addr)
83{
84 __u32 mask, old, new;
85 volatile __u32 *m;
86 CMPXCHG_BUGCHECK_DECL
87
88 m = (volatile __u32 *) addr + (nr >> 5);
89 mask = ~(1 << (nr & 31));
90 do {
91 CMPXCHG_BUGCHECK(m);
92 old = *m;
93 new = old & mask;
94 } while (cmpxchg_acq(m, old, new) != old);
95}
96
97/**
98 * __clear_bit - Clears a bit in memory (non-atomic version)
99 */
100static __inline__ void
101__clear_bit (int nr, volatile void *addr)
102{
103 volatile __u32 *p = (__u32 *) addr + (nr >> 5);
104 __u32 m = 1 << (nr & 31);
105 *p &= ~m;
106}
107
108/**
109 * change_bit - Toggle a bit in memory
110 * @nr: Bit to clear
111 * @addr: Address to start counting from
112 *
113 * change_bit() is atomic and may not be reordered.
114 * Note that @nr may be almost arbitrarily large; this function is not
115 * restricted to acting on a single-word quantity.
116 */
117static __inline__ void
118change_bit (int nr, volatile void *addr)
119{
120 __u32 bit, old, new;
121 volatile __u32 *m;
122 CMPXCHG_BUGCHECK_DECL
123
124 m = (volatile __u32 *) addr + (nr >> 5);
125 bit = (1 << (nr & 31));
126 do {
127 CMPXCHG_BUGCHECK(m);
128 old = *m;
129 new = old ^ bit;
130 } while (cmpxchg_acq(m, old, new) != old);
131}
132
133/**
134 * __change_bit - Toggle a bit in memory
135 * @nr: the bit to set
136 * @addr: the address to start counting from
137 *
138 * Unlike change_bit(), this function is non-atomic and may be reordered.
139 * If it's called on the same region of memory simultaneously, the effect
140 * may be that only one operation succeeds.
141 */
142static __inline__ void
143__change_bit (int nr, volatile void *addr)
144{
145 *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
146}
147
148/**
149 * test_and_set_bit - Set a bit and return its old value
150 * @nr: Bit to set
151 * @addr: Address to count from
152 *
153 * This operation is atomic and cannot be reordered.
154 * It also implies a memory barrier.
155 */
156static __inline__ int
157test_and_set_bit (int nr, volatile void *addr)
158{
159 __u32 bit, old, new;
160 volatile __u32 *m;
161 CMPXCHG_BUGCHECK_DECL
162
163 m = (volatile __u32 *) addr + (nr >> 5);
164 bit = 1 << (nr & 31);
165 do {
166 CMPXCHG_BUGCHECK(m);
167 old = *m;
168 new = old | bit;
169 } while (cmpxchg_acq(m, old, new) != old);
170 return (old & bit) != 0;
171}
172
173/**
174 * __test_and_set_bit - Set a bit and return its old value
175 * @nr: Bit to set
176 * @addr: Address to count from
177 *
178 * This operation is non-atomic and can be reordered.
179 * If two examples of this operation race, one can appear to succeed
180 * but actually fail. You must protect multiple accesses with a lock.
181 */
182static __inline__ int
183__test_and_set_bit (int nr, volatile void *addr)
184{
185 __u32 *p = (__u32 *) addr + (nr >> 5);
186 __u32 m = 1 << (nr & 31);
187 int oldbitset = (*p & m) != 0;
188
189 *p |= m;
190 return oldbitset;
191}
192
193/**
194 * test_and_clear_bit - Clear a bit and return its old value
195 * @nr: Bit to set
196 * @addr: Address to count from
197 *
198 * This operation is atomic and cannot be reordered.
199 * It also implies a memory barrier.
200 */
201static __inline__ int
202test_and_clear_bit (int nr, volatile void *addr)
203{
204 __u32 mask, old, new;
205 volatile __u32 *m;
206 CMPXCHG_BUGCHECK_DECL
207
208 m = (volatile __u32 *) addr + (nr >> 5);
209 mask = ~(1 << (nr & 31));
210 do {
211 CMPXCHG_BUGCHECK(m);
212 old = *m;
213 new = old & mask;
214 } while (cmpxchg_acq(m, old, new) != old);
215 return (old & ~mask) != 0;
216}
217
218/**
219 * __test_and_clear_bit - Clear a bit and return its old value
220 * @nr: Bit to set
221 * @addr: Address to count from
222 *
223 * This operation is non-atomic and can be reordered.
224 * If two examples of this operation race, one can appear to succeed
225 * but actually fail. You must protect multiple accesses with a lock.
226 */
227static __inline__ int
228__test_and_clear_bit(int nr, volatile void * addr)
229{
230 __u32 *p = (__u32 *) addr + (nr >> 5);
231 __u32 m = 1 << (nr & 31);
232 int oldbitset = *p & m;
233
234 *p &= ~m;
235 return oldbitset;
236}
237
238/**
239 * test_and_change_bit - Change a bit and return its old value
240 * @nr: Bit to set
241 * @addr: Address to count from
242 *
243 * This operation is atomic and cannot be reordered.
244 * It also implies a memory barrier.
245 */
246static __inline__ int
247test_and_change_bit (int nr, volatile void *addr)
248{
249 __u32 bit, old, new;
250 volatile __u32 *m;
251 CMPXCHG_BUGCHECK_DECL
252
253 m = (volatile __u32 *) addr + (nr >> 5);
254 bit = (1 << (nr & 31));
255 do {
256 CMPXCHG_BUGCHECK(m);
257 old = *m;
258 new = old ^ bit;
259 } while (cmpxchg_acq(m, old, new) != old);
260 return (old & bit) != 0;
261}
262
263/*
264 * WARNING: non atomic version.
265 */
266static __inline__ int
267__test_and_change_bit (int nr, void *addr)
268{
269 __u32 old, bit = (1 << (nr & 31));
270 __u32 *m = (__u32 *) addr + (nr >> 5);
271
272 old = *m;
273 *m = old ^ bit;
274 return (old & bit) != 0;
275}
276
277static __inline__ int
278test_bit (int nr, const volatile void *addr)
279{
280 return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
281}
282
283/**
284 * ffz - find the first zero bit in a long word
285 * @x: The long word to find the bit in
286 *
287 * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if
288 * no zero exists, so code should check against ~0UL first...
289 */
290static inline unsigned long
291ffz (unsigned long x)
292{
293 unsigned long result;
294
295 result = ia64_popcnt(x & (~x - 1));
296 return result;
297}
298
299/**
300 * __ffs - find first bit in word.
301 * @x: The word to search
302 *
303 * Undefined if no bit exists, so code should check against 0 first.
304 */
305static __inline__ unsigned long
306__ffs (unsigned long x)
307{
308 unsigned long result;
309
310 result = ia64_popcnt((x-1) & ~x);
311 return result;
312}
313
314#ifdef __KERNEL__
315
316/*
317 * find_last_zero_bit - find the last zero bit in a 64 bit quantity
318 * @x: The value to search
319 */
320static inline unsigned long
321ia64_fls (unsigned long x)
322{
323 long double d = x;
324 long exp;
325
326 exp = ia64_getf_exp(d);
327 return exp - 0xffff;
328}
329
330static inline int
331fls (int x)
332{
333 return ia64_fls((unsigned int) x);
334}
335
336/*
337 * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
338 * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
339 * "int" values only and the result value is the bit number + 1. ffs(0) is defined to
340 * return zero.
341 */
342#define ffs(x) __builtin_ffs(x)
343
344/*
345 * hweightN: returns the hamming weight (i.e. the number
346 * of bits set) of a N-bit word
347 */
348static __inline__ unsigned long
349hweight64 (unsigned long x)
350{
351 unsigned long result;
352 result = ia64_popcnt(x);
353 return result;
354}
355
356#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
357#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
358#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
359
360#endif /* __KERNEL__ */
361
362extern int __find_next_zero_bit (const void *addr, unsigned long size,
363 unsigned long offset);
364extern int __find_next_bit(const void *addr, unsigned long size,
365 unsigned long offset);
366
367#define find_next_zero_bit(addr, size, offset) \
368 __find_next_zero_bit((addr), (size), (offset))
369#define find_next_bit(addr, size, offset) \
370 __find_next_bit((addr), (size), (offset))
371
372/*
373 * The optimizer actually does good code for this case..
374 */
375#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
376
377#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
378
379#ifdef __KERNEL__
380
381#define __clear_bit(nr, addr) clear_bit(nr, addr)
382
383#define ext2_set_bit test_and_set_bit
384#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
385#define ext2_clear_bit test_and_clear_bit
386#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
387#define ext2_test_bit test_bit
388#define ext2_find_first_zero_bit find_first_zero_bit
389#define ext2_find_next_zero_bit find_next_zero_bit
390
391/* Bitmap functions for the minix filesystem. */
392#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
393#define minix_set_bit(nr,addr) set_bit(nr,addr)
394#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
395#define minix_test_bit(nr,addr) test_bit(nr,addr)
396#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
397
398static inline int
399sched_find_first_bit (unsigned long *b)
400{
401 if (unlikely(b[0]))
402 return __ffs(b[0]);
403 if (unlikely(b[1]))
404 return 64 + __ffs(b[1]);
405 return __ffs(b[2]) + 128;
406}
407
408#endif /* __KERNEL__ */
409
410#endif /* _ASM_IA64_BITOPS_H */
diff --git a/include/asm-ia64/break.h b/include/asm-ia64/break.h
new file mode 100644
index 000000000000..97c7b2d79600
--- /dev/null
+++ b/include/asm-ia64/break.h
@@ -0,0 +1,21 @@
1#ifndef _ASM_IA64_BREAK_H
2#define _ASM_IA64_BREAK_H
3
4/*
5 * IA-64 Linux break numbers.
6 *
7 * Copyright (C) 1999 Hewlett-Packard Co
8 * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11/*
12 * OS-specific debug break numbers:
13 */
14#define __IA64_BREAK_KDB 0x80100
15
16/*
17 * OS-specific break numbers:
18 */
19#define __IA64_BREAK_SYSCALL 0x100000
20
21#endif /* _ASM_IA64_BREAK_H */
diff --git a/include/asm-ia64/bug.h b/include/asm-ia64/bug.h
new file mode 100644
index 000000000000..2c0cd51e8856
--- /dev/null
+++ b/include/asm-ia64/bug.h
@@ -0,0 +1,15 @@
1#ifndef _ASM_IA64_BUG_H
2#define _ASM_IA64_BUG_H
3
4#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
5# define ia64_abort() __builtin_trap()
6#else
7# define ia64_abort() (*(volatile int *) 0 = 0)
8#endif
9#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
10
11/* should this BUG be made generic? */
12#define HAVE_ARCH_BUG
13#include <asm-generic/bug.h>
14
15#endif
diff --git a/include/asm-ia64/bugs.h b/include/asm-ia64/bugs.h
new file mode 100644
index 000000000000..433523e3b2ed
--- /dev/null
+++ b/include/asm-ia64/bugs.h
@@ -0,0 +1,19 @@
1/*
2 * This is included by init/main.c to check for architecture-dependent bugs.
3 *
4 * Needs:
5 * void check_bugs(void);
6 *
7 * Based on <asm-alpha/bugs.h>.
8 *
9 * Modified 1998, 1999, 2003
10 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
11 */
12#ifndef _ASM_IA64_BUGS_H
13#define _ASM_IA64_BUGS_H
14
15#include <asm/processor.h>
16
17extern void check_bugs (void);
18
19#endif /* _ASM_IA64_BUGS_H */
diff --git a/include/asm-ia64/byteorder.h b/include/asm-ia64/byteorder.h
new file mode 100644
index 000000000000..69bd41d7c26e
--- /dev/null
+++ b/include/asm-ia64/byteorder.h
@@ -0,0 +1,42 @@
1#ifndef _ASM_IA64_BYTEORDER_H
2#define _ASM_IA64_BYTEORDER_H
3
4/*
5 * Modified 1998, 1999
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
7 */
8
9#include <asm/types.h>
10#include <asm/intrinsics.h>
11#include <linux/compiler.h>
12
13static __inline__ __attribute_const__ __u64
14__ia64_swab64 (__u64 x)
15{
16 __u64 result;
17
18 result = ia64_mux1(x, ia64_mux1_rev);
19 return result;
20}
21
22static __inline__ __attribute_const__ __u32
23__ia64_swab32 (__u32 x)
24{
25 return __ia64_swab64(x) >> 32;
26}
27
28static __inline__ __attribute_const__ __u16
29__ia64_swab16(__u16 x)
30{
31 return __ia64_swab64(x) >> 48;
32}
33
34#define __arch__swab64(x) __ia64_swab64(x)
35#define __arch__swab32(x) __ia64_swab32(x)
36#define __arch__swab16(x) __ia64_swab16(x)
37
38#define __BYTEORDER_HAS_U64__
39
40#include <linux/byteorder/little_endian.h>
41
42#endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
new file mode 100644
index 000000000000..666d8f175cb3
--- /dev/null
+++ b/include/asm-ia64/cache.h
@@ -0,0 +1,30 @@
1#ifndef _ASM_IA64_CACHE_H
2#define _ASM_IA64_CACHE_H
3
4#include <linux/config.h>
5
6/*
7 * Copyright (C) 1998-2000 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11/* Bytes per L1 (data) cache line. */
12#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
13#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
14
15#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
16
17#ifdef CONFIG_SMP
18# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
19# define SMP_CACHE_BYTES L1_CACHE_BYTES
20#else
21 /*
22 * The "aligned" directive can only _increase_ alignment, so this is
23 * safe and provides an easy way to avoid wasting space on a
24 * uni-processor:
25 */
26# define SMP_CACHE_SHIFT 3
27# define SMP_CACHE_BYTES (1 << 3)
28#endif
29
30#endif /* _ASM_IA64_CACHE_H */
diff --git a/include/asm-ia64/cacheflush.h b/include/asm-ia64/cacheflush.h
new file mode 100644
index 000000000000..f2dacb4245ec
--- /dev/null
+++ b/include/asm-ia64/cacheflush.h
@@ -0,0 +1,50 @@
1#ifndef _ASM_IA64_CACHEFLUSH_H
2#define _ASM_IA64_CACHEFLUSH_H
3
4/*
5 * Copyright (C) 2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/page-flags.h>
10
11#include <asm/bitops.h>
12#include <asm/page.h>
13
14/*
15 * Cache flushing routines. This is the kind of stuff that can be very expensive, so try
16 * to avoid them whenever possible.
17 */
18
19#define flush_cache_all() do { } while (0)
20#define flush_cache_mm(mm) do { } while (0)
21#define flush_cache_range(vma, start, end) do { } while (0)
22#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
23#define flush_icache_page(vma,page) do { } while (0)
24#define flush_cache_vmap(start, end) do { } while (0)
25#define flush_cache_vunmap(start, end) do { } while (0)
26
27#define flush_dcache_page(page) \
28do { \
29 clear_bit(PG_arch_1, &(page)->flags); \
30} while (0)
31
32#define flush_dcache_mmap_lock(mapping) do { } while (0)
33#define flush_dcache_mmap_unlock(mapping) do { } while (0)
34
35extern void flush_icache_range (unsigned long start, unsigned long end);
36
37#define flush_icache_user_range(vma, page, user_addr, len) \
38do { \
39 unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
40 flush_icache_range(_addr, _addr + (len)); \
41} while (0)
42
43#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
44do { memcpy(dst, src, len); \
45 flush_icache_user_range(vma, page, vaddr, len); \
46} while (0)
47#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
48 memcpy(dst, src, len)
49
50#endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/include/asm-ia64/checksum.h b/include/asm-ia64/checksum.h
new file mode 100644
index 000000000000..1f230ff8ea81
--- /dev/null
+++ b/include/asm-ia64/checksum.h
@@ -0,0 +1,76 @@
1#ifndef _ASM_IA64_CHECKSUM_H
2#define _ASM_IA64_CHECKSUM_H
3
4/*
5 * Modified 1998, 1999
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
7 */
8
9/*
10 * This is a version of ip_compute_csum() optimized for IP headers,
11 * which always checksum on 4 octet boundaries.
12 */
13extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
14
15/*
16 * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
17 * checksum, already complemented
18 */
19extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
20 unsigned long daddr,
21 unsigned short len,
22 unsigned short proto,
23 unsigned int sum);
24
25extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
26 unsigned long daddr,
27 unsigned short len,
28 unsigned short proto,
29 unsigned int sum);
30
31/*
32 * Computes the checksum of a memory block at buff, length len,
33 * and adds in "sum" (32-bit)
34 *
35 * returns a 32-bit number suitable for feeding into itself
36 * or csum_tcpudp_magic
37 *
38 * this function must be called with even lengths, except
39 * for the last fragment, which may be odd
40 *
41 * it's best to have buff aligned on a 32-bit boundary
42 */
43extern unsigned int csum_partial (const unsigned char * buff, int len,
44 unsigned int sum);
45
46/*
47 * Same as csum_partial, but copies from src while it checksums.
48 *
49 * Here it is even more important to align src and dst on a 32-bit (or
50 * even better 64-bit) boundary.
51 */
52extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
53 int len, unsigned int sum,
54 int *errp);
55
56extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
57 int len, unsigned int sum);
58
59/*
60 * This routine is used for miscellaneous IP-like checksums, mainly in
61 * icmp.c
62 */
63extern unsigned short ip_compute_csum (unsigned char *buff, int len);
64
65/*
66 * Fold a partial checksum without adding pseudo headers.
67 */
/*
 * Fold a 32-bit partial checksum down to 16 bits (no pseudo header
 * added) and return its one's complement.  Two folding passes are
 * needed because the first addition may itself carry into bit 16.
 */
static inline unsigned short
csum_fold (unsigned int sum)
{
	unsigned int folded = sum;

	folded = (folded >> 16) + (folded & 0xffff);
	folded = (folded >> 16) + (folded & 0xffff);
	return ~folded;
}
75
76#endif /* _ASM_IA64_CHECKSUM_H */
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
new file mode 100644
index 000000000000..cc0ff0a4bdd0
--- /dev/null
+++ b/include/asm-ia64/compat.h
@@ -0,0 +1,198 @@
1#ifndef _ASM_IA64_COMPAT_H
2#define _ASM_IA64_COMPAT_H
3/*
4 * Architecture specific compatibility types
5 */
6#include <linux/types.h>
7
8#define COMPAT_USER_HZ 100
9
10typedef u32 compat_size_t;
11typedef s32 compat_ssize_t;
12typedef s32 compat_time_t;
13typedef s32 compat_clock_t;
14typedef s32 compat_key_t;
15typedef s32 compat_pid_t;
16typedef u16 compat_uid_t;
17typedef u16 compat_gid_t;
18typedef u32 compat_uid32_t;
19typedef u32 compat_gid32_t;
20typedef u16 compat_mode_t;
21typedef u32 compat_ino_t;
22typedef u16 compat_dev_t;
23typedef s32 compat_off_t;
24typedef s64 compat_loff_t;
25typedef u16 compat_nlink_t;
26typedef u16 compat_ipc_pid_t;
27typedef s32 compat_daddr_t;
28typedef u32 compat_caddr_t;
29typedef __kernel_fsid_t compat_fsid_t;
30
31typedef s32 compat_int_t;
32typedef s32 compat_long_t;
33typedef u32 compat_uint_t;
34typedef u32 compat_ulong_t;
35
36struct compat_timespec {
37 compat_time_t tv_sec;
38 s32 tv_nsec;
39};
40
41struct compat_timeval {
42 compat_time_t tv_sec;
43 s32 tv_usec;
44};
45
46struct compat_stat {
47 compat_dev_t st_dev;
48 u16 __pad1;
49 compat_ino_t st_ino;
50 compat_mode_t st_mode;
51 compat_nlink_t st_nlink;
52 compat_uid_t st_uid;
53 compat_gid_t st_gid;
54 compat_dev_t st_rdev;
55 u16 __pad2;
56 u32 st_size;
57 u32 st_blksize;
58 u32 st_blocks;
59 u32 st_atime;
60 u32 st_atime_nsec;
61 u32 st_mtime;
62 u32 st_mtime_nsec;
63 u32 st_ctime;
64 u32 st_ctime_nsec;
65 u32 __unused4;
66 u32 __unused5;
67};
68
69struct compat_flock {
70 short l_type;
71 short l_whence;
72 compat_off_t l_start;
73 compat_off_t l_len;
74 compat_pid_t l_pid;
75};
76
77#define F_GETLK64 12
78#define F_SETLK64 13
79#define F_SETLKW64 14
80
81/*
82 * IA32 uses 4 byte alignment for 64 bit quantities,
83 * so we need to pack this structure.
84 */
85struct compat_flock64 {
86 short l_type;
87 short l_whence;
88 compat_loff_t l_start;
89 compat_loff_t l_len;
90 compat_pid_t l_pid;
91} __attribute__((packed));
92
93struct compat_statfs {
94 int f_type;
95 int f_bsize;
96 int f_blocks;
97 int f_bfree;
98 int f_bavail;
99 int f_files;
100 int f_ffree;
101 compat_fsid_t f_fsid;
102 int f_namelen; /* SunOS ignores this field. */
103 int f_frsize;
104 int f_spare[5];
105};
106
107#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
108#define COMPAT_RLIM_INFINITY 0xffffffff
109
110typedef u32 compat_old_sigset_t; /* at least 32 bits */
111
112#define _COMPAT_NSIG 64
113#define _COMPAT_NSIG_BPW 32
114
115typedef u32 compat_sigset_word;
116
117#define COMPAT_OFF_T_MAX 0x7fffffff
118#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
119
120struct compat_ipc64_perm {
121 compat_key_t key;
122 compat_uid32_t uid;
123 compat_gid32_t gid;
124 compat_uid32_t cuid;
125 compat_gid32_t cgid;
126 unsigned short mode;
127 unsigned short __pad1;
128 unsigned short seq;
129 unsigned short __pad2;
130 compat_ulong_t unused1;
131 compat_ulong_t unused2;
132};
133
134struct compat_semid64_ds {
135 struct compat_ipc64_perm sem_perm;
136 compat_time_t sem_otime;
137 compat_ulong_t __unused1;
138 compat_time_t sem_ctime;
139 compat_ulong_t __unused2;
140 compat_ulong_t sem_nsems;
141 compat_ulong_t __unused3;
142 compat_ulong_t __unused4;
143};
144
145struct compat_msqid64_ds {
146 struct compat_ipc64_perm msg_perm;
147 compat_time_t msg_stime;
148 compat_ulong_t __unused1;
149 compat_time_t msg_rtime;
150 compat_ulong_t __unused2;
151 compat_time_t msg_ctime;
152 compat_ulong_t __unused3;
153 compat_ulong_t msg_cbytes;
154 compat_ulong_t msg_qnum;
155 compat_ulong_t msg_qbytes;
156 compat_pid_t msg_lspid;
157 compat_pid_t msg_lrpid;
158 compat_ulong_t __unused4;
159 compat_ulong_t __unused5;
160};
161
162struct compat_shmid64_ds {
163 struct compat_ipc64_perm shm_perm;
164 compat_size_t shm_segsz;
165 compat_time_t shm_atime;
166 compat_ulong_t __unused1;
167 compat_time_t shm_dtime;
168 compat_ulong_t __unused2;
169 compat_time_t shm_ctime;
170 compat_ulong_t __unused3;
171 compat_pid_t shm_cpid;
172 compat_pid_t shm_lpid;
173 compat_ulong_t shm_nattch;
174 compat_ulong_t __unused4;
175 compat_ulong_t __unused5;
176};
177
178/*
179 * A pointer passed in from user mode. This should not be used for syscall parameters,
180 * just declare them as pointers because the syscall entry code will have appropriately
181 * converted them already.
182 */
183typedef u32 compat_uptr_t;
184
185static inline void __user *
186compat_ptr (compat_uptr_t uptr)
187{
188 return (void __user *) (unsigned long) uptr;
189}
190
191static __inline__ void __user *
192compat_alloc_user_space (long len)
193{
194 struct pt_regs *regs = ia64_task_regs(current);
195 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
196}
197
198#endif /* _ASM_IA64_COMPAT_H */
diff --git a/include/asm-ia64/cpu.h b/include/asm-ia64/cpu.h
new file mode 100644
index 000000000000..e87fa3210a2b
--- /dev/null
+++ b/include/asm-ia64/cpu.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_IA64_CPU_H_
2#define _ASM_IA64_CPU_H_
3
4#include <linux/device.h>
5#include <linux/cpu.h>
6#include <linux/topology.h>
7#include <linux/percpu.h>
8
9struct ia64_cpu {
10 struct cpu cpu;
11};
12
13DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
14
15DECLARE_PER_CPU(int, cpu_state);
16
17extern int arch_register_cpu(int num);
18#ifdef CONFIG_HOTPLUG_CPU
19extern void arch_unregister_cpu(int);
20#endif
21
22#endif /* _ASM_IA64_CPU_H_ */
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h
new file mode 100644
index 000000000000..72400a78002a
--- /dev/null
+++ b/include/asm-ia64/cputime.h
@@ -0,0 +1,6 @@
1#ifndef __IA64_CPUTIME_H
2#define __IA64_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __IA64_CPUTIME_H */
diff --git a/include/asm-ia64/current.h b/include/asm-ia64/current.h
new file mode 100644
index 000000000000..c659f90fbfd9
--- /dev/null
+++ b/include/asm-ia64/current.h
@@ -0,0 +1,17 @@
1#ifndef _ASM_IA64_CURRENT_H
2#define _ASM_IA64_CURRENT_H
3
4/*
5 * Modified 1998-2000
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
7 */
8
9#include <asm/intrinsics.h>
10
11/*
12 * In kernel mode, thread pointer (r13) is used to point to the current task
13 * structure.
14 */
15#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
16
17#endif /* _ASM_IA64_CURRENT_H */
diff --git a/include/asm-ia64/cyclone.h b/include/asm-ia64/cyclone.h
new file mode 100644
index 000000000000..88f6500e84ab
--- /dev/null
+++ b/include/asm-ia64/cyclone.h
@@ -0,0 +1,15 @@
1#ifndef ASM_IA64_CYCLONE_H
2#define ASM_IA64_CYCLONE_H
3
4#ifdef CONFIG_IA64_CYCLONE
5extern int use_cyclone;
6extern void __init cyclone_setup(void);
7#else /* CONFIG_IA64_CYCLONE */
8#define use_cyclone 0
9static inline void cyclone_setup(void)
10{
11 printk(KERN_ERR "Cyclone Counter: System not configured"
12 " w/ CONFIG_IA64_CYCLONE.\n");
13}
14#endif /* CONFIG_IA64_CYCLONE */
15#endif /* !ASM_IA64_CYCLONE_H */
diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h
new file mode 100644
index 000000000000..57182d6f2b9a
--- /dev/null
+++ b/include/asm-ia64/delay.h
@@ -0,0 +1,97 @@
1#ifndef _ASM_IA64_DELAY_H
2#define _ASM_IA64_DELAY_H
3
4/*
5 * Delay routines using a pre-computed "cycles/usec" value.
6 *
7 * Copyright (C) 1998, 1999 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 * Copyright (C) 1999 VA Linux Systems
10 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
11 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
12 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
13 */
14
15#include <linux/config.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/compiler.h>
19
20#include <asm/intrinsics.h>
21#include <asm/processor.h>
22
23static __inline__ void
24ia64_set_itm (unsigned long val)
25{
26 ia64_setreg(_IA64_REG_CR_ITM, val);
27 ia64_srlz_d();
28}
29
30static __inline__ unsigned long
31ia64_get_itm (void)
32{
33 unsigned long result;
34
35 result = ia64_getreg(_IA64_REG_CR_ITM);
36 ia64_srlz_d();
37 return result;
38}
39
40static __inline__ void
41ia64_set_itv (unsigned long val)
42{
43 ia64_setreg(_IA64_REG_CR_ITV, val);
44 ia64_srlz_d();
45}
46
47static __inline__ unsigned long
48ia64_get_itv (void)
49{
50 return ia64_getreg(_IA64_REG_CR_ITV);
51}
52
53static __inline__ void
54ia64_set_itc (unsigned long val)
55{
56 ia64_setreg(_IA64_REG_AR_ITC, val);
57 ia64_srlz_d();
58}
59
60static __inline__ unsigned long
61ia64_get_itc (void)
62{
63 unsigned long result;
64
65 result = ia64_getreg(_IA64_REG_AR_ITC);
66 ia64_barrier();
67#ifdef CONFIG_ITANIUM
68 while (unlikely((__s32) result == -1)) {
69 result = ia64_getreg(_IA64_REG_AR_ITC);
70 ia64_barrier();
71 }
72#endif
73 return result;
74}
75
76extern void ia64_delay_loop (unsigned long loops);
77
/* Spin for "loops" iterations; a zero count returns immediately. */
static __inline__ void
__delay (unsigned long loops)
{
	if (likely(loops >= 1))
		ia64_delay_loop (loops - 1);
}
86
87static __inline__ void
88udelay (unsigned long usecs)
89{
90 unsigned long start = ia64_get_itc();
91 unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
92
93 while (ia64_get_itc() - start < cycles)
94 cpu_relax();
95}
96
97#endif /* _ASM_IA64_DELAY_H */
diff --git a/include/asm-ia64/div64.h b/include/asm-ia64/div64.h
new file mode 100644
index 000000000000..6cd978cefb28
--- /dev/null
+++ b/include/asm-ia64/div64.h
@@ -0,0 +1 @@
#include <asm-generic/div64.h>
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
new file mode 100644
index 000000000000..6347c9845642
--- /dev/null
+++ b/include/asm-ia64/dma-mapping.h
@@ -0,0 +1,70 @@
1#ifndef _ASM_IA64_DMA_MAPPING_H
2#define _ASM_IA64_DMA_MAPPING_H
3
4/*
5 * Copyright (C) 2003-2004 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8#include <linux/config.h>
9#include <asm/machvec.h>
10
11#define dma_alloc_coherent platform_dma_alloc_coherent
12#define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
13#define dma_free_coherent platform_dma_free_coherent
14#define dma_free_noncoherent platform_dma_free_coherent
15#define dma_map_single platform_dma_map_single
16#define dma_map_sg platform_dma_map_sg
17#define dma_unmap_single platform_dma_unmap_single
18#define dma_unmap_sg platform_dma_unmap_sg
19#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
20#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
21#define dma_sync_single_for_device platform_dma_sync_single_for_device
22#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
23#define dma_mapping_error platform_dma_mapping_error
24
25#define dma_map_page(dev, pg, off, size, dir) \
26 dma_map_single(dev, page_address(pg) + (off), (size), (dir))
27#define dma_unmap_page(dev, dma_addr, size, dir) \
28 dma_unmap_single(dev, dma_addr, size, dir)
29
30/*
31 * Rest of this file is part of the "Advanced DMA API". Use at your own risk.
32 * See Documentation/DMA-API.txt for details.
33 */
34
35#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
36 dma_sync_single_for_cpu(dev, dma_handle, size, dir)
37#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
38 dma_sync_single_for_device(dev, dma_handle, size, dir)
39
40#define dma_supported platform_dma_supported
41
42static inline int
43dma_set_mask (struct device *dev, u64 mask)
44{
45 if (!dev->dma_mask || !dma_supported(dev, mask))
46 return -EIO;
47 *dev->dma_mask = mask;
48 return 0;
49}
50
/* Report the largest cache-line size the DMA code must align for. */
static inline int
dma_get_cache_alignment (void)
{
	/* Defined in arch code — NOTE(review): confirm where it is initialized. */
	extern int ia64_max_cacheline_size;

	return ia64_max_cacheline_size;
}
57
58static inline void
59dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
60{
61 /*
62 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
63 * ensure that dma_cache_sync() enforces order, hence the mb().
64 */
65 mb();
66}
67
68#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */
69
70#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
new file mode 100644
index 000000000000..3be1b4925e18
--- /dev/null
+++ b/include/asm-ia64/dma.h
@@ -0,0 +1,23 @@
1#ifndef _ASM_IA64_DMA_H
2#define _ASM_IA64_DMA_H
3
4/*
5 * Copyright (C) 1998-2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/config.h>
10
11#include <asm/io.h> /* need byte IO */
12
13extern unsigned long MAX_DMA_ADDRESS;
14
15#ifdef CONFIG_PCI
16 extern int isa_dma_bridge_buggy;
17#else
18# define isa_dma_bridge_buggy (0)
19#endif
20
21#define free_dma(x)
22
23#endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
new file mode 100644
index 000000000000..7d4ccc4b976e
--- /dev/null
+++ b/include/asm-ia64/elf.h
@@ -0,0 +1,259 @@
1#ifndef _ASM_IA64_ELF_H
2#define _ASM_IA64_ELF_H
3
4/*
5 * ELF-specific definitions.
6 *
7 * Copyright (C) 1998-1999, 2002-2004 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11#include <linux/config.h>
12
13#include <asm/fpu.h>
14#include <asm/page.h>
15
16/*
17 * This is used to ensure we don't load something for the wrong architecture.
18 */
19#define elf_check_arch(x) ((x)->e_machine == EM_IA_64)
20
21/*
22 * These are used to set parameters in the core dumps.
23 */
24#define ELF_CLASS ELFCLASS64
25#define ELF_DATA ELFDATA2LSB
26#define ELF_ARCH EM_IA_64
27
28#define USE_ELF_CORE_DUMP
29
30/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
31 interpreted as follows by Linux: */
32#define EF_IA_64_LINUX_EXECUTABLE_STACK 0x1 /* is stack (& heap) executable by default? */
33
34#define ELF_EXEC_PAGESIZE PAGE_SIZE
35
36/*
37 * This is the location that an ET_DYN program is loaded if exec'ed.
38 * Typical use of this is to invoke "./ld.so someprog" to test out a
39 * new version of the loader. We need to make sure that it is out of
40 * the way of the program that it will "exec", and that there is
41 * sufficient room for the brk.
42 */
43#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
44
45#define PT_IA_64_UNWIND 0x70000001
46
47/* IA-64 relocations: */
48#define R_IA64_NONE 0x00 /* none */
49#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
50#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
51#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
52#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
53#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
54#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
55#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
56#define R_IA64_GPREL22 0x2a /* @gprel(sym+add), add imm22 */
57#define R_IA64_GPREL64I 0x2b /* @gprel(sym+add), mov imm64 */
58#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym+add), data4 MSB */
59#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym+add), data4 LSB */
60#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym+add), data8 MSB */
61#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym+add), data8 LSB */
62#define R_IA64_LTOFF22 0x32 /* @ltoff(sym+add), add imm22 */
63#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym+add), mov imm64 */
64#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym+add), add imm22 */
65#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym+add), mov imm64 */
66#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym+add), data8 MSB */
67#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym+add), data8 LSB */
68#define R_IA64_FPTR64I 0x43 /* @fptr(sym+add), mov imm64 */
69#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym+add), data4 MSB */
70#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym+add), data4 LSB */
71#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym+add), data8 MSB */
72#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym+add), data8 LSB */
73#define R_IA64_PCREL60B 0x48 /* @pcrel(sym+add), brl */
74#define R_IA64_PCREL21B 0x49 /* @pcrel(sym+add), ptb, call */
75#define R_IA64_PCREL21M 0x4a /* @pcrel(sym+add), chk.s */
76#define R_IA64_PCREL21F 0x4b /* @pcrel(sym+add), fchkf */
77#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym+add), data4 MSB */
78#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym+add), data4 LSB */
79#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym+add), data8 MSB */
80#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym+add), data8 LSB */
81#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
82#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
83#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), 4 MSB */
84#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), 4 LSB */
85#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), 8 MSB */
86#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), 8 LSB */
87#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym+add), data4 MSB */
88#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym+add), data4 LSB */
89#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym+add), data8 MSB */
90#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym+add), data8 LSB */
91#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym+add), data4 MSB */
92#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym+add), data4 LSB */
93#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym+add), data8 MSB */
94#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym+add), data8 LSB */
95#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
96#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
97#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
98#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
99#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
100#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
101#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
102#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
103#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym+add), ptb, call */
104#define R_IA64_PCREL22 0x7a /* @pcrel(sym+add), imm22 */
105#define R_IA64_PCREL64I 0x7b /* @pcrel(sym+add), imm64 */
106#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
107#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
108#define R_IA64_COPY 0x84 /* dynamic reloc, data copy */
109#define R_IA64_SUB 0x85 /* -symbol + addend, add imm22 */
110#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
111#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
112#define R_IA64_TPREL14 0x91 /* @tprel(sym+add), add imm14 */
113#define R_IA64_TPREL22 0x92 /* @tprel(sym+add), add imm22 */
114#define R_IA64_TPREL64I 0x93 /* @tprel(sym+add), add imm64 */
115#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym+add), data8 MSB */
116#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym+add), data8 LSB */
117#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), add imm22 */
118#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym+add), data8 MSB */
119#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym+add), data8 LSB */
120#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(s+a)), imm22 */
121#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym+add), imm14 */
122#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym+add), imm22 */
123#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym+add), imm64 */
124#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym+add), data4 MSB */
125#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym+add), data4 LSB */
126#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym+add), data8 MSB */
127#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym+add), data8 LSB */
128#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
129
130/* IA-64 specific section flags: */
131#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
132
133/*
134 * We use (abuse?) this macro to insert the (empty) vm_area that is
135 * used to map the register backing store. I don't see any better
136 * place to do this, but we should discuss this with Linus once we can
137 * talk to him...
138 */
139extern void ia64_init_addr_space (void);
140#define ELF_PLAT_INIT(_r, load_addr) ia64_init_addr_space()
141
142/* ELF register definitions. This is needed for core dump support. */
143
144/*
145 * elf_gregset_t contains the application-level state in the following order:
146 * r0-r31
147 * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
148 * predicate registers (p0-p63)
149 * b0-b7
150 * ip cfm psr
151 * ar.rsc ar.bsp ar.bspstore ar.rnat
152 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
153 */
154#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
155#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
156
157typedef unsigned long elf_fpxregset_t;
158
159typedef unsigned long elf_greg_t;
160typedef elf_greg_t elf_gregset_t[ELF_NGREG];
161
162typedef struct ia64_fpreg elf_fpreg_t;
163typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
164
165
166
167struct pt_regs; /* forward declaration... */
168extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
169#define ELF_CORE_COPY_REGS(_dest,_regs) ia64_elf_core_copy_regs(_regs, _dest);
170
171/* This macro yields a bitmask that programs can use to figure out
172 what instruction set this CPU supports. */
173#define ELF_HWCAP 0
174
175/* This macro yields a string that ld.so will use to load
176 implementation specific libraries for optimization. Not terribly
177 relevant until we have real hardware to play with... */
178#define ELF_PLATFORM NULL
179
180/*
181 * Architecture-neutral AT_ values are in the range 0-17. Leave some room for more of
182 * them, start the architecture-specific ones at 32.
183 */
184#define AT_SYSINFO 32
185#define AT_SYSINFO_EHDR 33
186
187#ifdef __KERNEL__
188#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
189#define elf_read_implies_exec(ex, executable_stack) \
190 ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
191
192struct task_struct;
193
194extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
195extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
196
197#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
198#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
199
200#define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
201
202#define ARCH_DLINFO \
203do { \
204 extern char __kernel_syscall_via_epc[]; \
205 NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc); \
206 NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
207} while (0)
208
209
210/*
211 * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
212 * extra segments containing the gate DSO contents. Dumping its
213 * contents makes post-mortem fully interpretable later without matching up
214 * the same kernel and hardware config to see what PC values meant.
215 * Dumping its extra ELF program headers includes all the other information
216 * a debugger needs to easily find how the gate DSO was being used.
217 */
218#define ELF_CORE_EXTRA_PHDRS (GATE_EHDR->e_phnum)
219#define ELF_CORE_WRITE_EXTRA_PHDRS \
220do { \
221 const struct elf_phdr *const gate_phdrs = \
222 (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
223 int i; \
224 Elf64_Off ofs = 0; \
225 for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
226 struct elf_phdr phdr = gate_phdrs[i]; \
227 if (phdr.p_type == PT_LOAD) { \
228 phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
229 phdr.p_filesz = phdr.p_memsz; \
230 if (ofs == 0) { \
231 ofs = phdr.p_offset = offset; \
232 offset += phdr.p_filesz; \
233 } \
234 else \
235 phdr.p_offset = ofs; \
236 } \
237 else \
238 phdr.p_offset += ofs; \
239 phdr.p_paddr = 0; /* match other core phdrs */ \
240 DUMP_WRITE(&phdr, sizeof(phdr)); \
241 } \
242} while (0)
243#define ELF_CORE_WRITE_EXTRA_DATA \
244do { \
245 const struct elf_phdr *const gate_phdrs = \
246 (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
247 int i; \
248 for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
249 if (gate_phdrs[i].p_type == PT_LOAD) { \
250 DUMP_WRITE((void *) gate_phdrs[i].p_vaddr, \
251 PAGE_ALIGN(gate_phdrs[i].p_memsz)); \
252 break; \
253 } \
254 } \
255} while (0)
256
257#endif /* __KERNEL__ */
258
259#endif /* _ASM_IA64_ELF_H */
diff --git a/include/asm-ia64/errno.h b/include/asm-ia64/errno.h
new file mode 100644
index 000000000000..4c82b503d92f
--- /dev/null
+++ b/include/asm-ia64/errno.h
@@ -0,0 +1 @@
#include <asm-generic/errno.h>
diff --git a/include/asm-ia64/fcntl.h b/include/asm-ia64/fcntl.h
new file mode 100644
index 000000000000..d193981bb1d8
--- /dev/null
+++ b/include/asm-ia64/fcntl.h
@@ -0,0 +1,84 @@
1#ifndef _ASM_IA64_FCNTL_H
2#define _ASM_IA64_FCNTL_H
3/*
4 * Based on <asm-i386/fcntl.h>.
5 *
6 * Modified 1998-2000
7 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
8 */
9
10/*
11 * open/fcntl - O_SYNC is only implemented on block devices and on
12 * files located on an ext2 file system
13 */
14#define O_ACCMODE 0003
15#define O_RDONLY 00
16#define O_WRONLY 01
17#define O_RDWR 02
18#define O_CREAT 0100 /* not fcntl */
19#define O_EXCL 0200 /* not fcntl */
20#define O_NOCTTY 0400 /* not fcntl */
21#define O_TRUNC 01000 /* not fcntl */
22#define O_APPEND 02000
23#define O_NONBLOCK 04000
24#define O_NDELAY O_NONBLOCK
25#define O_SYNC 010000
26#define FASYNC 020000 /* fcntl, for BSD compatibility */
27#define O_DIRECT 040000 /* direct disk access hint - currently ignored */
28#define O_LARGEFILE 0100000
29#define O_DIRECTORY 0200000 /* must be a directory */
30#define O_NOFOLLOW 0400000 /* don't follow links */
31#define O_NOATIME 01000000
32
33#define F_DUPFD 0 /* dup */
34#define F_GETFD 1 /* get close_on_exec */
35#define F_SETFD 2 /* set/clear close_on_exec */
36#define F_GETFL 3 /* get file->f_flags */
37#define F_SETFL 4 /* set file->f_flags */
38#define F_GETLK 5
39#define F_SETLK 6
40#define F_SETLKW 7
41
42#define F_SETOWN 8 /* for sockets. */
43#define F_GETOWN 9 /* for sockets. */
44#define F_SETSIG 10 /* for sockets. */
45#define F_GETSIG 11 /* for sockets. */
46
47/* file-descriptor flags, queried/set with F_GETFD/F_SETFD (not F_[GET|SET]FL) */
48#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
49
50/* for posix fcntl() and lockf() */
51#define F_RDLCK 0
52#define F_WRLCK 1
53#define F_UNLCK 2
54
55/* for old implementation of BSD flock() */
56#define F_EXLCK 4 /* or 3 */
57#define F_SHLCK 8 /* or 4 */
58
59/* for leases */
60#define F_INPROGRESS 16
61
62/* operations for bsd flock(), also used by the kernel implementation */
63#define LOCK_SH 1 /* shared lock */
64#define LOCK_EX 2 /* exclusive lock */
65#define LOCK_NB 4 /* or'd with one of the above to prevent
66 blocking */
67#define LOCK_UN 8 /* remove lock */
68
69#define LOCK_MAND 32 /* This is a mandatory flock */
70#define LOCK_READ 64 /* ... Which allows concurrent read operations */
71#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
72#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
73
74struct flock {
75 short l_type;
76 short l_whence;
77 off_t l_start;
78 off_t l_len;
79 pid_t l_pid;
80};
81
82#define F_LINUX_SPECIFIC_BASE 1024
83
84#endif /* _ASM_IA64_FCNTL_H */
diff --git a/include/asm-ia64/fpswa.h b/include/asm-ia64/fpswa.h
new file mode 100644
index 000000000000..62edfceadaa6
--- /dev/null
+++ b/include/asm-ia64/fpswa.h
@@ -0,0 +1,73 @@
1#ifndef _ASM_IA64_FPSWA_H
2#define _ASM_IA64_FPSWA_H
3
4/*
5 * Floating-point Software Assist
6 *
7 * Copyright (C) 1999 Intel Corporation.
8 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
9 * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
10 */
11
12typedef struct {
13 /* 4 * 128 bits */
14 unsigned long fp_lp[4*2];
15} fp_state_low_preserved_t;
16
17typedef struct {
18 /* 10 * 128 bits */
19 unsigned long fp_lv[10 * 2];
20} fp_state_low_volatile_t;
21
22typedef struct {
23 /* 16 * 128 bits */
24 unsigned long fp_hp[16 * 2];
25} fp_state_high_preserved_t;
26
27typedef struct {
28 /* 96 * 128 bits */
29 unsigned long fp_hv[96 * 2];
30} fp_state_high_volatile_t;
31
32/**
33 * floating point state to be passed to the FP emulation library by
34 * the trap/fault handler
35 */
36typedef struct {
37 unsigned long bitmask_low64;
38 unsigned long bitmask_high64;
39 fp_state_low_preserved_t *fp_state_low_preserved;
40 fp_state_low_volatile_t *fp_state_low_volatile;
41 fp_state_high_preserved_t *fp_state_high_preserved;
42 fp_state_high_volatile_t *fp_state_high_volatile;
43} fp_state_t;
44
45typedef struct {
46 unsigned long status;
47 unsigned long err0;
48 unsigned long err1;
49 unsigned long err2;
50} fpswa_ret_t;
51
52/**
53 * function header for the Floating Point software assist
54 * library. This function is invoked by the Floating point software
55 * assist trap/fault handler.
56 */
57typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
58 unsigned long *fsr, unsigned long *isr, unsigned long *preds,
59 unsigned long *ifs, fp_state_t *fp_state);
60
61/**
62 * This is the FPSWA library interface as defined by EFI. We need to pass a
63 * pointer to the interface itself on a call to the assist library
64 */
65typedef struct {
66 unsigned int revision;
67 unsigned int reserved;
68 efi_fpswa_t fpswa;
69} fpswa_interface_t;
70
71extern fpswa_interface_t *fpswa_interface;
72
73#endif /* _ASM_IA64_FPSWA_H */
diff --git a/include/asm-ia64/fpu.h b/include/asm-ia64/fpu.h
new file mode 100644
index 000000000000..3859558ff0a4
--- /dev/null
+++ b/include/asm-ia64/fpu.h
@@ -0,0 +1,66 @@
1#ifndef _ASM_IA64_FPU_H
2#define _ASM_IA64_FPU_H
3
4/*
5 * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <asm/types.h>
10
11/* floating point status register: */
12#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
13#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
14#define FPSR_TRAP_ZD (1 << 2) /* zero-divide trap disabled */
15#define FPSR_TRAP_OD (1 << 3) /* overflow trap disabled */
16#define FPSR_TRAP_UD (1 << 4) /* underflow trap disabled */
17#define FPSR_TRAP_ID (1 << 5) /* inexact trap disabled */
18#define FPSR_S0(x) ((x) << 6)
19#define FPSR_S1(x) ((x) << 19)
20#define FPSR_S2(x) (__IA64_UL(x) << 32)
21#define FPSR_S3(x) (__IA64_UL(x) << 45)
22
23/* floating-point status field controls: */
24#define FPSF_FTZ (1 << 0) /* flush-to-zero */
25#define FPSF_WRE (1 << 1) /* widest-range exponent */
26#define FPSF_PC(x) (((x) & 0x3) << 2) /* precision control */
27#define FPSF_RC(x) (((x) & 0x3) << 4) /* rounding control */
28#define FPSF_TD (1 << 6) /* trap disabled */
29
30/* floating-point status field flags: */
31#define FPSF_V (1 << 7) /* invalid operation flag */
32#define FPSF_D (1 << 8) /* denormal/unnormal operand flag */
33#define FPSF_Z (1 << 9) /* zero divide (IEEE) flag */
34#define FPSF_O (1 << 10) /* overflow (IEEE) flag */
35#define FPSF_U (1 << 11) /* underflow (IEEE) flag */
36#define FPSF_I (1 << 12) /* inexact (IEEE) flag) */
37
38/* floating-point rounding control: */
39#define FPRC_NEAREST 0x0
40#define FPRC_NEGINF 0x1
41#define FPRC_POSINF 0x2
42#define FPRC_TRUNC 0x3
43
44#define FPSF_DEFAULT (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
45
46/* This default value is the same as HP-UX uses. Don't change it
47 without a very good reason. */
48#define FPSR_DEFAULT (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD \
49 | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID \
50 | FPSR_S0 (FPSF_DEFAULT) \
51 | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE) \
52 | FPSR_S2 (FPSF_DEFAULT | FPSF_TD) \
53 | FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
54
55# ifndef __ASSEMBLY__
56
57struct ia64_fpreg {
58 union {
59 unsigned long bits[2];
60 long double __dummy; /* force 16-byte alignment */
61 } u;
62};
63
64# endif /* __ASSEMBLY__ */
65
66#endif /* _ASM_IA64_FPU_H */
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
new file mode 100644
index 000000000000..7c357dfbae50
--- /dev/null
+++ b/include/asm-ia64/gcc_intrin.h
@@ -0,0 +1,597 @@
1#ifndef _ASM_IA64_GCC_INTRIN_H
2#define _ASM_IA64_GCC_INTRIN_H
3/*
4 *
5 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
6 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
7 */
8
9#include <linux/compiler.h>
10
11/* define this macro to get some asm stmts included in 'c' files */
12#define ASM_SUPPORTED
13
14/* Optimization barrier */
15/* The "volatile" is due to gcc bugs */
16#define ia64_barrier() asm volatile ("":::"memory")
17
18#define ia64_stop() asm volatile (";;"::)
19
20#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
21
22#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
23
24extern void ia64_bad_param_for_setreg (void);
25extern void ia64_bad_param_for_getreg (void);
26
27register unsigned long ia64_r13 asm ("r13") __attribute_used__;
28
29#define ia64_setreg(regnum, val) \
30({ \
31 switch (regnum) { \
32 case _IA64_REG_PSR_L: \
33 asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
34 break; \
35 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
36 asm volatile ("mov ar%0=%1" :: \
37 "i" (regnum - _IA64_REG_AR_KR0), \
38 "r"(val): "memory"); \
39 break; \
40 case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
41 asm volatile ("mov cr%0=%1" :: \
42 "i" (regnum - _IA64_REG_CR_DCR), \
43 "r"(val): "memory" ); \
44 break; \
45 case _IA64_REG_SP: \
46 asm volatile ("mov r12=%0" :: \
47 "r"(val): "memory"); \
48 break; \
49 case _IA64_REG_GP: \
50 asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
51 break; \
52 default: \
53 ia64_bad_param_for_setreg(); \
54 break; \
55 } \
56})
57
58#define ia64_getreg(regnum) \
59({ \
60 __u64 ia64_intri_res; \
61 \
62 switch (regnum) { \
63 case _IA64_REG_GP: \
64 asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
65 break; \
66 case _IA64_REG_IP: \
67 asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
68 break; \
69 case _IA64_REG_PSR: \
70 asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
71 break; \
72 case _IA64_REG_TP: /* for current() */ \
73 ia64_intri_res = ia64_r13; \
74 break; \
75 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
76 asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
77 : "i"(regnum - _IA64_REG_AR_KR0)); \
78 break; \
79 case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
80 asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
81 : "i" (regnum - _IA64_REG_CR_DCR)); \
82 break; \
83 case _IA64_REG_SP: \
84 asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
85 break; \
86 default: \
87 ia64_bad_param_for_getreg(); \
88 break; \
89 } \
90 ia64_intri_res; \
91})
92
93#define ia64_hint_pause 0
94
95#define ia64_hint(mode) \
96({ \
97 switch (mode) { \
98 case ia64_hint_pause: \
99 asm volatile ("hint @pause" ::: "memory"); \
100 break; \
101 } \
102})
103
104
105/* Integer values for mux1 instruction */
106#define ia64_mux1_brcst 0
107#define ia64_mux1_mix 8
108#define ia64_mux1_shuf 9
109#define ia64_mux1_alt 10
110#define ia64_mux1_rev 11
111
112#define ia64_mux1(x, mode) \
113({ \
114 __u64 ia64_intri_res; \
115 \
116 switch (mode) { \
117 case ia64_mux1_brcst: \
118 asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
119 break; \
120 case ia64_mux1_mix: \
121 asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
122 break; \
123 case ia64_mux1_shuf: \
124 asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
125 break; \
126 case ia64_mux1_alt: \
127 asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
128 break; \
129 case ia64_mux1_rev: \
130 asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
131 break; \
132 } \
133 ia64_intri_res; \
134})
135
136#define ia64_popcnt(x) \
137({ \
138 __u64 ia64_intri_res; \
139 asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
140 \
141 ia64_intri_res; \
142})
143
144#define ia64_getf_exp(x) \
145({ \
146 long ia64_intri_res; \
147 \
148 asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
149 \
150 ia64_intri_res; \
151})
152
153#define ia64_shrp(a, b, count) \
154({ \
155 __u64 ia64_intri_res; \
156 asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
157 ia64_intri_res; \
158})
159
160#define ia64_ldfs(regnum, x) \
161({ \
162 register double __f__ asm ("f"#regnum); \
163 asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
164})
165
166#define ia64_ldfd(regnum, x) \
167({ \
168 register double __f__ asm ("f"#regnum); \
169 asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
170})
171
172#define ia64_ldfe(regnum, x) \
173({ \
174 register double __f__ asm ("f"#regnum); \
175 asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
176})
177
178#define ia64_ldf8(regnum, x) \
179({ \
180 register double __f__ asm ("f"#regnum); \
181 asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
182})
183
184#define ia64_ldf_fill(regnum, x) \
185({ \
186 register double __f__ asm ("f"#regnum); \
187 asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
188})
189
190#define ia64_stfs(x, regnum) \
191({ \
192 register double __f__ asm ("f"#regnum); \
193 asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
194})
195
196#define ia64_stfd(x, regnum) \
197({ \
198 register double __f__ asm ("f"#regnum); \
199 asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
200})
201
202#define ia64_stfe(x, regnum) \
203({ \
204 register double __f__ asm ("f"#regnum); \
205 asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
206})
207
208#define ia64_stf8(x, regnum) \
209({ \
210 register double __f__ asm ("f"#regnum); \
211 asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
212})
213
214#define ia64_stf_spill(x, regnum) \
215({ \
216 register double __f__ asm ("f"#regnum); \
217 asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
218})
219
220#define ia64_fetchadd4_acq(p, inc) \
221({ \
222 \
223 __u64 ia64_intri_res; \
224 asm volatile ("fetchadd4.acq %0=[%1],%2" \
225 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
226 : "memory"); \
227 \
228 ia64_intri_res; \
229})
230
231#define ia64_fetchadd4_rel(p, inc) \
232({ \
233 __u64 ia64_intri_res; \
234 asm volatile ("fetchadd4.rel %0=[%1],%2" \
235 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
236 : "memory"); \
237 \
238 ia64_intri_res; \
239})
240
241#define ia64_fetchadd8_acq(p, inc) \
242({ \
243 \
244 __u64 ia64_intri_res; \
245 asm volatile ("fetchadd8.acq %0=[%1],%2" \
246 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
247 : "memory"); \
248 \
249 ia64_intri_res; \
250})
251
252#define ia64_fetchadd8_rel(p, inc) \
253({ \
254 __u64 ia64_intri_res; \
255 asm volatile ("fetchadd8.rel %0=[%1],%2" \
256 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
257 : "memory"); \
258 \
259 ia64_intri_res; \
260})
261
262#define ia64_xchg1(ptr,x) \
263({ \
264 __u64 ia64_intri_res; \
265 asm volatile ("xchg1 %0=[%1],%2" \
266 : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
267 ia64_intri_res; \
268})
269
270#define ia64_xchg2(ptr,x) \
271({ \
272 __u64 ia64_intri_res; \
273 asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
274 : "r" (ptr), "r" (x) : "memory"); \
275 ia64_intri_res; \
276})
277
278#define ia64_xchg4(ptr,x) \
279({ \
280 __u64 ia64_intri_res; \
281 asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
282 : "r" (ptr), "r" (x) : "memory"); \
283 ia64_intri_res; \
284})
285
286#define ia64_xchg8(ptr,x) \
287({ \
288 __u64 ia64_intri_res; \
289 asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
290 : "r" (ptr), "r" (x) : "memory"); \
291 ia64_intri_res; \
292})
293
294#define ia64_cmpxchg1_acq(ptr, new, old) \
295({ \
296 __u64 ia64_intri_res; \
297 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
298 asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
299 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
300 ia64_intri_res; \
301})
302
303#define ia64_cmpxchg1_rel(ptr, new, old) \
304({ \
305 __u64 ia64_intri_res; \
306 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
307 asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
308 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
309 ia64_intri_res; \
310})
311
312#define ia64_cmpxchg2_acq(ptr, new, old) \
313({ \
314 __u64 ia64_intri_res; \
315 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
316 asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
317 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
318 ia64_intri_res; \
319})
320
321#define ia64_cmpxchg2_rel(ptr, new, old) \
322({ \
323 __u64 ia64_intri_res; \
324 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
325 \
326 asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
327 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
328 ia64_intri_res; \
329})
330
331#define ia64_cmpxchg4_acq(ptr, new, old) \
332({ \
333 __u64 ia64_intri_res; \
334 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
335 asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
336 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
337 ia64_intri_res; \
338})
339
340#define ia64_cmpxchg4_rel(ptr, new, old) \
341({ \
342 __u64 ia64_intri_res; \
343 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
344 asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
345 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
346 ia64_intri_res; \
347})
348
349#define ia64_cmpxchg8_acq(ptr, new, old) \
350({ \
351 __u64 ia64_intri_res; \
352 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
353 asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
354 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
355 ia64_intri_res; \
356})
357
358#define ia64_cmpxchg8_rel(ptr, new, old) \
359({ \
360 __u64 ia64_intri_res; \
361 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
362 \
363 asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
364 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
365 ia64_intri_res; \
366})
367
368#define ia64_mf() asm volatile ("mf" ::: "memory")
369#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
370
371#define ia64_invala() asm volatile ("invala" ::: "memory")
372
373#define ia64_thash(addr) \
374({ \
375 __u64 ia64_intri_res; \
376 asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
377 ia64_intri_res; \
378})
379
/*
 * Serialization: srlz.i / srlz.d make the side effects of earlier
 * system-resource writes visible to subsequent instruction fetches /
 * data references, respectively.
 *
 * No trailing semicolon in the expansion: callers write "ia64_srlz_d();",
 * and a semicolon baked into the macro would add an empty extra statement,
 * breaking use as the sole body of an unbraced if/else.
 */
#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")
382
/*
 * Emit assembler data/instruction serialization directives when the
 * toolchain supports them; otherwise expand to nothing.  The expansions
 * deliberately omit the trailing semicolon so call sites supply it and
 * each use remains a single statement (safe in unbraced if/else bodies).
 */
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif
390
/* Issue a nop with immediate operand x.  No trailing semicolon in the
   expansion -- callers terminate the statement themselves. */
#define ia64_nop(x)	asm volatile ("nop %0" :: "i"(x))
392
393#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
394
395#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
396
397
398#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
399 :: "r"(trnum), "r"(addr) : "memory")
400
401#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
402 :: "r"(trnum), "r"(addr) : "memory")
403
404#define ia64_tpa(addr) \
405({ \
406 __u64 ia64_pa; \
407 asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
408 ia64_pa; \
409})
410
411#define __ia64_set_dbr(index, val) \
412 asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
413
414#define ia64_set_ibr(index, val) \
415 asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
416
417#define ia64_set_pkr(index, val) \
418 asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
419
420#define ia64_set_pmc(index, val) \
421 asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
422
423#define ia64_set_pmd(index, val) \
424 asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
425
/*
 * Write region register rr[index].  The trailing semicolon was dropped
 * from the expansion for consistency with the other ia64_set_* helpers
 * (ia64_set_pmc/pmd/pkr/ibr) -- callers terminate the statement.
 */
#define ia64_set_rr(index, val)							\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")
428
429#define ia64_get_cpuid(index) \
430({ \
431 __u64 ia64_intri_res; \
432 asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
433 ia64_intri_res; \
434})
435
436#define __ia64_get_dbr(index) \
437({ \
438 __u64 ia64_intri_res; \
439 asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
440 ia64_intri_res; \
441})
442
443#define ia64_get_ibr(index) \
444({ \
445 __u64 ia64_intri_res; \
446 asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
447 ia64_intri_res; \
448})
449
450#define ia64_get_pkr(index) \
451({ \
452 __u64 ia64_intri_res; \
453 asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
454 ia64_intri_res; \
455})
456
457#define ia64_get_pmc(index) \
458({ \
459 __u64 ia64_intri_res; \
460 asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
461 ia64_intri_res; \
462})
463
464
465#define ia64_get_pmd(index) \
466({ \
467 __u64 ia64_intri_res; \
468 asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
469 ia64_intri_res; \
470})
471
472#define ia64_get_rr(index) \
473({ \
474 __u64 ia64_intri_res; \
475 asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
476 ia64_intri_res; \
477})
478
479#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
480
481
482#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
483
484#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
485#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
486#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
487#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
488
489#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
490
491#define ia64_ptcga(addr, size) \
492do { \
493 asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
494 ia64_dv_serialize_data(); \
495} while (0)
496
497#define ia64_ptcl(addr, size) \
498do { \
499 asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
500 ia64_dv_serialize_data(); \
501} while (0)
502
503#define ia64_ptri(addr, size) \
504 asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
505
506#define ia64_ptrd(addr, size) \
507 asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
508
509/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
510
511#define ia64_lfhint_none 0
512#define ia64_lfhint_nt1 1
513#define ia64_lfhint_nt2 2
514#define ia64_lfhint_nta 3
515
516#define ia64_lfetch(lfhint, y) \
517({ \
518 switch (lfhint) { \
519 case ia64_lfhint_none: \
520 asm volatile ("lfetch [%0]" : : "r"(y)); \
521 break; \
522 case ia64_lfhint_nt1: \
523 asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
524 break; \
525 case ia64_lfhint_nt2: \
526 asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
527 break; \
528 case ia64_lfhint_nta: \
529 asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
530 break; \
531 } \
532})
533
534#define ia64_lfetch_excl(lfhint, y) \
535({ \
536 switch (lfhint) { \
537 case ia64_lfhint_none: \
538 asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
539 break; \
540 case ia64_lfhint_nt1: \
541 asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
542 break; \
543 case ia64_lfhint_nt2: \
544 asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
545 break; \
546 case ia64_lfhint_nta: \
547 asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
548 break; \
549 } \
550})
551
552#define ia64_lfetch_fault(lfhint, y) \
553({ \
554 switch (lfhint) { \
555 case ia64_lfhint_none: \
556 asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
557 break; \
558 case ia64_lfhint_nt1: \
559 asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
560 break; \
561 case ia64_lfhint_nt2: \
562 asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
563 break; \
564 case ia64_lfhint_nta: \
565 asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
566 break; \
567 } \
568})
569
570#define ia64_lfetch_fault_excl(lfhint, y) \
571({ \
572 switch (lfhint) { \
573 case ia64_lfhint_none: \
574 asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
575 break; \
576 case ia64_lfhint_nt1: \
577 asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
578 break; \
579 case ia64_lfhint_nt2: \
580 asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
581 break; \
582 case ia64_lfhint_nta: \
583 asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
584 break; \
585 } \
586})
587
588#define ia64_intrin_local_irq_restore(x) \
589do { \
590 asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
591 "(p6) ssm psr.i;" \
592 "(p7) rsm psr.i;;" \
593 "(p6) srlz.d" \
594 :: "r"((x)) : "p6", "p7", "memory"); \
595} while (0)
596
597#endif /* _ASM_IA64_GCC_INTRIN_H */
diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h
new file mode 100644
index 000000000000..33ef8f096d95
--- /dev/null
+++ b/include/asm-ia64/hardirq.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_IA64_HARDIRQ_H
2#define _ASM_IA64_HARDIRQ_H
3
4/*
5 * Modified 1998-2002, 2004 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/config.h>
10
11#include <linux/threads.h>
12#include <linux/irq.h>
13
14#include <asm/processor.h>
15
16/*
17 * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
18 */
19
20#define __ARCH_IRQ_STAT 1
21
22#define local_softirq_pending() (local_cpu_data->softirq_pending)
23
24#define HARDIRQ_BITS 14
25
26/*
27 * The hardirq mask has to be large enough to have space for potentially all IRQ sources
28 * in the system nesting on a single CPU:
29 */
30#if (1 << HARDIRQ_BITS) < NR_IRQS
31# error HARDIRQ_BITS is too low!
32#endif
33
34extern void __iomem *ipi_base_addr;
35
36void ack_bad_irq(unsigned int irq);
37
38#endif /* _ASM_IA64_HARDIRQ_H */
diff --git a/include/asm-ia64/hdreg.h b/include/asm-ia64/hdreg.h
new file mode 100644
index 000000000000..83b5161d2678
--- /dev/null
+++ b/include/asm-ia64/hdreg.h
@@ -0,0 +1,14 @@
1/*
2 * linux/include/asm-ia64/hdreg.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7#warning this file is obsolete, please do not use it
8
9#ifndef __ASM_IA64_HDREG_H
10#define __ASM_IA64_HDREG_H
11
12typedef unsigned short ide_ioreg_t;
13
14#endif /* __ASM_IA64_HDREG_H */
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
new file mode 100644
index 000000000000..041ab8c51a64
--- /dev/null
+++ b/include/asm-ia64/hw_irq.h
@@ -0,0 +1,144 @@
1#ifndef _ASM_IA64_HW_IRQ_H
2#define _ASM_IA64_HW_IRQ_H
3
4/*
5 * Copyright (C) 2001-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/interrupt.h>
10#include <linux/sched.h>
11#include <linux/types.h>
12#include <linux/profile.h>
13
14#include <asm/machvec.h>
15#include <asm/ptrace.h>
16#include <asm/smp.h>
17
18typedef u8 ia64_vector;
19
20/*
21 * 0 special
22 *
23 * 1,3-14 are reserved from firmware
24 *
25 * 16-255 (vectored external interrupts) are available
26 *
27 * 15 spurious interrupt (see IVR)
28 *
29 * 16 lowest priority, 255 highest priority
30 *
31 * 15 classes of 16 interrupts each.
32 */
33#define IA64_MIN_VECTORED_IRQ 16
34#define IA64_MAX_VECTORED_IRQ 255
35#define IA64_NUM_VECTORS 256
36
37#define AUTO_ASSIGN -1
38
39#define IA64_SPURIOUS_INT_VECTOR 0x0f
40
41/*
42 * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
43 */
44#define IA64_CPEP_VECTOR 0x1c /* corrected platform error polling vector */
45#define IA64_CMCP_VECTOR 0x1d /* corrected machine-check polling vector */
46#define IA64_CPE_VECTOR 0x1e /* corrected platform error interrupt vector */
47#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */
48/*
49 * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
50 */
51#define IA64_FIRST_DEVICE_VECTOR 0x30
52#define IA64_LAST_DEVICE_VECTOR 0xe7
53#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
54
55#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
56#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */
57#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
58#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
59#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */
60#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */
61
62/* Used for encoding redirected irqs */
63
64#define IA64_IRQ_REDIRECTED (1 << 31)
65
66/* IA64 inter-cpu interrupt related definitions */
67
68#define IA64_IPI_DEFAULT_BASE_ADDR 0xfee00000
69
70/* Delivery modes for inter-cpu interrupts */
71enum {
72 IA64_IPI_DM_INT = 0x0, /* pend an external interrupt */
73 IA64_IPI_DM_PMI = 0x2, /* pend a PMI */
74 IA64_IPI_DM_NMI = 0x4, /* pend an NMI (vector 2) */
75 IA64_IPI_DM_INIT = 0x5, /* pend an INIT interrupt */
76 IA64_IPI_DM_EXTINT = 0x7, /* pend an 8259-compatible interrupt. */
77};
78
79extern __u8 isa_irq_to_vector_map[16];
80#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
81
82extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
83
84extern int assign_irq_vector (int irq); /* allocate a free vector */
85extern void free_irq_vector (int vector);
86extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
87extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
88
89static inline void
90hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector)
91{
92 platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
93}
94
95/*
96 * Default implementations for the irq-descriptor API:
97 */
98
99extern irq_desc_t irq_desc[NR_IRQS];
100
101#ifndef CONFIG_IA64_GENERIC
102static inline unsigned int
103__ia64_local_vector_to_irq (ia64_vector vec)
104{
105 return (unsigned int) vec;
106}
107#endif
108
109/*
110 * Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt
111 * vectors. On smaller systems, there is a one-to-one correspondence between interrupt
112 * vectors and the Linux irq numbers. However, larger systems may have multiple interrupt
113 * domains meaning that the translation from vector number to irq number depends on the
114 * interrupt domain that a CPU belongs to. This API abstracts such platform-dependent
115 * differences and provides a uniform means to translate between vector and irq numbers
116 * and to obtain the irq descriptor for a given irq number.
117 */
118
119/* Return a pointer to the irq descriptor for IRQ. */
120static inline irq_desc_t *
121irq_descp (int irq)
122{
123 return irq_desc + irq;
124}
125
126/* Extract the IA-64 vector that corresponds to IRQ. */
127static inline ia64_vector
128irq_to_vector (int irq)
129{
130 return (ia64_vector) irq;
131}
132
133/*
134 * Convert the local IA-64 vector to the corresponding irq number. This translation is
135 * done in the context of the interrupt domain that the currently executing CPU belongs
136 * to.
137 */
138static inline unsigned int
139local_vector_to_irq (ia64_vector vec)
140{
141 return platform_local_vector_to_irq(vec);
142}
143
144#endif /* _ASM_IA64_HW_IRQ_H */
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
new file mode 100644
index 000000000000..8e746b2413a6
--- /dev/null
+++ b/include/asm-ia64/ia32.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_IA64_IA32_H
2#define _ASM_IA64_IA32_H
3
4#include <linux/config.h>
5
6#include <asm/ptrace.h>
7#include <asm/signal.h>
8
9#define IA32_NR_syscalls 285 /* length of syscall table */
10#define IA32_PAGE_SHIFT 12 /* 4KB pages */
11
12#ifndef __ASSEMBLY__
13
14# ifdef CONFIG_IA32_SUPPORT
15
16extern void ia32_cpu_init (void);
17extern void ia32_mem_init (void);
18extern void ia32_gdt_init (void);
19extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
20extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
21extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
22
23# endif /* CONFIG_IA32_SUPPORT */
24
25/* Declare this unconditionally, so we don't get warnings for unreachable code. */
26extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
27 sigset_t *set, struct pt_regs *regs);
28#if PAGE_SHIFT > IA32_PAGE_SHIFT
29extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long);
30extern void ia32_drop_partial_page_list (struct task_struct *);
31#else
32# define ia32_copy_partial_page_list(a1, a2) 0
33# define ia32_drop_partial_page_list(a1) do { ; } while (0)
34#endif
35
36#endif /* !__ASSEMBLY__ */
37
38#endif /* _ASM_IA64_IA32_H */
diff --git a/include/asm-ia64/ia64regs.h b/include/asm-ia64/ia64regs.h
new file mode 100644
index 000000000000..1757f1c11ad4
--- /dev/null
+++ b/include/asm-ia64/ia64regs.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright (C) 2002,2003 Intel Corp.
3 * Jun Nakajima <jun.nakajima@intel.com>
4 * Suresh Siddha <suresh.b.siddha@intel.com>
5 */
6
7#ifndef _ASM_IA64_IA64REGS_H
8#define _ASM_IA64_IA64REGS_H
9
10/*
11 * Register Names for getreg() and setreg().
12 *
13 * The "magic" numbers happen to match the values used by the Intel compiler's
14 * getreg()/setreg() intrinsics.
15 */
16
17/* Special Registers */
18
19#define _IA64_REG_IP 1016 /* getreg only */
20#define _IA64_REG_PSR 1019
21#define _IA64_REG_PSR_L 1019
22
23/* General Integer Registers */
24
25#define _IA64_REG_GP 1025 /* R1 */
26#define _IA64_REG_R8 1032 /* R8 */
27#define _IA64_REG_R9 1033 /* R9 */
28#define _IA64_REG_SP 1036 /* R12 */
29#define _IA64_REG_TP 1037 /* R13 */
30
31/* Application Registers */
32
33#define _IA64_REG_AR_KR0 3072
34#define _IA64_REG_AR_KR1 3073
35#define _IA64_REG_AR_KR2 3074
36#define _IA64_REG_AR_KR3 3075
37#define _IA64_REG_AR_KR4 3076
38#define _IA64_REG_AR_KR5 3077
39#define _IA64_REG_AR_KR6 3078
40#define _IA64_REG_AR_KR7 3079
41#define _IA64_REG_AR_RSC 3088
42#define _IA64_REG_AR_BSP 3089
43#define _IA64_REG_AR_BSPSTORE 3090
44#define _IA64_REG_AR_RNAT 3091
45#define _IA64_REG_AR_FCR 3093
46#define _IA64_REG_AR_EFLAG 3096
47#define _IA64_REG_AR_CSD 3097
48#define _IA64_REG_AR_SSD 3098
49#define _IA64_REG_AR_CFLAG 3099
50#define _IA64_REG_AR_FSR 3100
51#define _IA64_REG_AR_FIR 3101
52#define _IA64_REG_AR_FDR 3102
53#define _IA64_REG_AR_CCV 3104
54#define _IA64_REG_AR_UNAT 3108
55#define _IA64_REG_AR_FPSR 3112
56#define _IA64_REG_AR_ITC 3116
57#define _IA64_REG_AR_PFS 3136
58#define _IA64_REG_AR_LC 3137
59#define _IA64_REG_AR_EC 3138
60
61/* Control Registers */
62
63#define _IA64_REG_CR_DCR 4096
64#define _IA64_REG_CR_ITM 4097
65#define _IA64_REG_CR_IVA 4098
66#define _IA64_REG_CR_PTA 4104
67#define _IA64_REG_CR_IPSR 4112
68#define _IA64_REG_CR_ISR 4113
69#define _IA64_REG_CR_IIP 4115
70#define _IA64_REG_CR_IFA 4116
71#define _IA64_REG_CR_ITIR 4117
72#define _IA64_REG_CR_IIPA 4118
73#define _IA64_REG_CR_IFS 4119
74#define _IA64_REG_CR_IIM 4120
75#define _IA64_REG_CR_IHA 4121
76#define _IA64_REG_CR_LID 4160
77#define _IA64_REG_CR_IVR 4161 /* getreg only */
78#define _IA64_REG_CR_TPR 4162
79#define _IA64_REG_CR_EOI 4163
80#define _IA64_REG_CR_IRR0 4164 /* getreg only */
81#define _IA64_REG_CR_IRR1 4165 /* getreg only */
82#define _IA64_REG_CR_IRR2 4166 /* getreg only */
83#define _IA64_REG_CR_IRR3 4167 /* getreg only */
84#define _IA64_REG_CR_ITV 4168
85#define _IA64_REG_CR_PMV 4169
86#define _IA64_REG_CR_CMCV 4170
87#define _IA64_REG_CR_LRR0 4176
88#define _IA64_REG_CR_LRR1 4177
89
90/* Indirect Registers for getindreg() and setindreg() */
91
92#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
93#define _IA64_REG_INDR_DBR 9001
94#define _IA64_REG_INDR_IBR 9002
95#define _IA64_REG_INDR_PKR 9003
96#define _IA64_REG_INDR_PMC 9004
97#define _IA64_REG_INDR_PMD 9005
98#define _IA64_REG_INDR_RR 9006
99
100#endif /* _ASM_IA64_IA64REGS_H */
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
new file mode 100644
index 000000000000..e62b95301d51
--- /dev/null
+++ b/include/asm-ia64/ide.h
@@ -0,0 +1,71 @@
1/*
2 * linux/include/asm-ia64/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7/*
8 * This file contains the ia64 architecture specific IDE code.
9 */
10
11#ifndef __ASM_IA64_IDE_H
12#define __ASM_IA64_IDE_H
13
14#ifdef __KERNEL__
15
16#include <linux/config.h>
17
18#include <linux/irq.h>
19
20#ifndef MAX_HWIFS
21# ifdef CONFIG_PCI
22#define MAX_HWIFS 10
23# else
24#define MAX_HWIFS 6
25# endif
26#endif
27
28#define IDE_ARCH_OBSOLETE_DEFAULTS
29
30static inline int ide_default_irq(unsigned long base)
31{
32 switch (base) {
33 case 0x1f0: return isa_irq_to_vector(14);
34 case 0x170: return isa_irq_to_vector(15);
35 case 0x1e8: return isa_irq_to_vector(11);
36 case 0x168: return isa_irq_to_vector(10);
37 case 0x1e0: return isa_irq_to_vector(8);
38 case 0x160: return isa_irq_to_vector(12);
39 default:
40 return 0;
41 }
42}
43
44static inline unsigned long ide_default_io_base(int index)
45{
46 switch (index) {
47 case 0: return 0x1f0;
48 case 1: return 0x170;
49 case 2: return 0x1e8;
50 case 3: return 0x168;
51 case 4: return 0x1e0;
52 case 5: return 0x160;
53 default:
54 return 0;
55 }
56}
57
58#define IDE_ARCH_OBSOLETE_INIT
59#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
60
61#ifdef CONFIG_PCI
62#define ide_init_default_irq(base) (0)
63#else
64#define ide_init_default_irq(base) ide_default_irq(base)
65#endif
66
67#include <asm-generic/ide_iops.h>
68
69#endif /* __KERNEL__ */
70
71#endif /* __ASM_IA64_IDE_H */
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
new file mode 100644
index 000000000000..a7122d850177
--- /dev/null
+++ b/include/asm-ia64/intel_intrin.h
@@ -0,0 +1,257 @@
1#ifndef _ASM_IA64_INTEL_INTRIN_H
2#define _ASM_IA64_INTEL_INTRIN_H
3/*
4 * Intel Compiler Intrinsics
5 *
6 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
7 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
8 *
9 */
10#include <asm/types.h>
11
12void __lfetch(int lfhint, void *y);
13void __lfetch_excl(int lfhint, void *y);
14void __lfetch_fault(int lfhint, void *y);
15void __lfetch_fault_excl(int lfhint, void *y);
16
17/* In the following, whichFloatReg should be an integer from 0-127 */
18void __ldfs(const int whichFloatReg, void *src);
19void __ldfd(const int whichFloatReg, void *src);
20void __ldfe(const int whichFloatReg, void *src);
21void __ldf8(const int whichFloatReg, void *src);
22void __ldf_fill(const int whichFloatReg, void *src);
23void __stfs(void *dst, const int whichFloatReg);
24void __stfd(void *dst, const int whichFloatReg);
25void __stfe(void *dst, const int whichFloatReg);
26void __stf8(void *dst, const int whichFloatReg);
27void __stf_spill(void *dst, const int whichFloatReg);
28
29void __st1_rel(void *dst, const __s8 value);
30void __st2_rel(void *dst, const __s16 value);
31void __st4_rel(void *dst, const __s32 value);
32void __st8_rel(void *dst, const __s64 value);
33__u8 __ld1_acq(void *src);
34__u16 __ld2_acq(void *src);
35__u32 __ld4_acq(void *src);
36__u64 __ld8_acq(void *src);
37
38__u64 __fetchadd4_acq(__u32 *addend, const int increment);
39__u64 __fetchadd4_rel(__u32 *addend, const int increment);
40__u64 __fetchadd8_acq(__u64 *addend, const int increment);
41__u64 __fetchadd8_rel(__u64 *addend, const int increment);
42
43__u64 __getf_exp(double d);
44
45/* OS Related Itanium(R) Intrinsics */
46
47/* The names to use for whichReg and whichIndReg below come from
48 the include file asm/ia64regs.h */
49
50__u64 __getIndReg(const int whichIndReg, __s64 index);
51__u64 __getReg(const int whichReg);
52
53void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
54void __setReg(const int whichReg, __u64 value);
55
56void __mf(void);
57void __mfa(void);
58void __synci(void);
59void __itcd(__s64 pa);
60void __itci(__s64 pa);
61void __itrd(__s64 whichTransReg, __s64 pa);
62void __itri(__s64 whichTransReg, __s64 pa);
63void __ptce(__s64 va);
64void __ptcl(__s64 va, __s64 pagesz);
65void __ptcg(__s64 va, __s64 pagesz);
66void __ptcga(__s64 va, __s64 pagesz);
67void __ptri(__s64 va, __s64 pagesz);
68void __ptrd(__s64 va, __s64 pagesz);
69void __invala (void);
70void __invala_gr(const int whichGeneralReg /* 0-127 */ );
71void __invala_fr(const int whichFloatReg /* 0-127 */ );
72void __nop(const int);
73void __fc(__u64 *addr);
74void __sum(int mask);
75void __rum(int mask);
76void __ssm(int mask);
77void __rsm(int mask);
78__u64 __thash(__s64);
79__u64 __ttag(__s64);
80__s64 __tpa(__s64);
81
82/* Intrinsics for implementing get/put_user macros */
83void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
84void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
85
86/* This intrinsic does not generate code, it creates a barrier across which
87 * the compiler will not schedule data access instructions.
88 */
89void __memory_barrier(void);
90
91void __isrlz(void);
92void __dsrlz(void);
93
94__u64 _m64_mux1(__u64 a, const int n);
95__u64 __thash(__u64);
96
97/* Lock and Atomic Operation Related Intrinsics */
98__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
99__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
100__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
101__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
102
103__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
104__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
105__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
106__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
107__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
108__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
109__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
110__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
111
112__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
113__s64 _m64_shrp(__s64 a, __s64 b, const int count);
114__s64 _m64_popcnt(__s64 a);
115
116#define ia64_barrier() __memory_barrier()
117
118#define ia64_stop() /* Nothing: As of now stop bit is generated for each
119 * intrinsic
120 */
121
122#define ia64_getreg __getReg
123#define ia64_setreg __setReg
124
125#define ia64_hint(x)
126
127#define ia64_mux1_brcst 0
128#define ia64_mux1_mix 8
129#define ia64_mux1_shuf 9
130#define ia64_mux1_alt 10
131#define ia64_mux1_rev 11
132
133#define ia64_mux1 _m64_mux1
134#define ia64_popcnt _m64_popcnt
135#define ia64_getf_exp __getf_exp
136#define ia64_shrp _m64_shrp
137
138#define ia64_tpa __tpa
139#define ia64_invala __invala
140#define ia64_invala_gr __invala_gr
141#define ia64_invala_fr __invala_fr
142#define ia64_nop __nop
143#define ia64_sum __sum
144#define ia64_ssm __ssm
145#define ia64_rum __rum
146#define ia64_rsm __rsm
147#define ia64_fc __fc
148
149#define ia64_ldfs __ldfs
150#define ia64_ldfd __ldfd
151#define ia64_ldfe __ldfe
152#define ia64_ldf8 __ldf8
153#define ia64_ldf_fill __ldf_fill
154
155#define ia64_stfs __stfs
156#define ia64_stfd __stfd
157#define ia64_stfe __stfe
158#define ia64_stf8 __stf8
159#define ia64_stf_spill __stf_spill
160
161#define ia64_mf __mf
162#define ia64_mfa __mfa
163
164#define ia64_fetchadd4_acq __fetchadd4_acq
165#define ia64_fetchadd4_rel __fetchadd4_rel
166#define ia64_fetchadd8_acq __fetchadd8_acq
167#define ia64_fetchadd8_rel __fetchadd8_rel
168
169#define ia64_xchg1 _InterlockedExchange8
170#define ia64_xchg2 _InterlockedExchange16
171#define ia64_xchg4 _InterlockedExchange
172#define ia64_xchg8 _InterlockedExchange64
173
174#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
175#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
176#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
177#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
178#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
179#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
180#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
181#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
182
183#define __ia64_set_dbr(index, val) \
184 __setIndReg(_IA64_REG_INDR_DBR, index, val)
185#define ia64_set_ibr(index, val) \
186 __setIndReg(_IA64_REG_INDR_IBR, index, val)
187#define ia64_set_pkr(index, val) \
188 __setIndReg(_IA64_REG_INDR_PKR, index, val)
189#define ia64_set_pmc(index, val) \
190 __setIndReg(_IA64_REG_INDR_PMC, index, val)
191#define ia64_set_pmd(index, val) \
192 __setIndReg(_IA64_REG_INDR_PMD, index, val)
193#define ia64_set_rr(index, val) \
194 __setIndReg(_IA64_REG_INDR_RR, index, val)
195
196#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
197#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
198#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
199#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
200#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
201#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
202#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
203
204#define ia64_srlz_d __dsrlz
205#define ia64_srlz_i __isrlz
206
207#define ia64_dv_serialize_data()
208#define ia64_dv_serialize_instruction()
209
210#define ia64_st1_rel __st1_rel
211#define ia64_st2_rel __st2_rel
212#define ia64_st4_rel __st4_rel
213#define ia64_st8_rel __st8_rel
214
215#define ia64_ld1_acq __ld1_acq
216#define ia64_ld2_acq __ld2_acq
217#define ia64_ld4_acq __ld4_acq
218#define ia64_ld8_acq __ld8_acq
219
220#define ia64_sync_i __synci
221#define ia64_thash __thash
222#define ia64_ttag __ttag
223#define ia64_itcd __itcd
224#define ia64_itci __itci
225#define ia64_itrd __itrd
226#define ia64_itri __itri
227#define ia64_ptce __ptce
228#define ia64_ptcl __ptcl
229#define ia64_ptcg __ptcg
230#define ia64_ptcga __ptcga
231#define ia64_ptri __ptri
232#define ia64_ptrd __ptrd
233#define ia64_dep_mi _m64_dep_mi
234
235/* Values for lfhint in __lfetch and __lfetch_fault */
236
237#define ia64_lfhint_none 0
238#define ia64_lfhint_nt1 1
239#define ia64_lfhint_nt2 2
240#define ia64_lfhint_nta 3
241
242#define ia64_lfetch __lfetch
243#define ia64_lfetch_excl __lfetch_excl
244#define ia64_lfetch_fault __lfetch_fault
245#define ia64_lfetch_fault_excl __lfetch_fault_excl
246
247#define ia64_intrin_local_irq_restore(x) \
248do { \
249 if ((x) != 0) { \
250 ia64_ssm(IA64_PSR_I); \
251 ia64_srlz_d(); \
252 } else { \
253 ia64_rsm(IA64_PSR_I); \
254 } \
255} while (0)
256
257#endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
new file mode 100644
index 000000000000..8089f955e5d2
--- /dev/null
+++ b/include/asm-ia64/intrinsics.h
@@ -0,0 +1,181 @@
1#ifndef _ASM_IA64_INTRINSICS_H
2#define _ASM_IA64_INTRINSICS_H
3
4/*
5 * Compiler-dependent intrinsics.
6 *
7 * Copyright (C) 2002-2003 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11#ifndef __ASSEMBLY__
12#include <linux/config.h>
13
14/* include compiler specific intrinsics */
15#include <asm/ia64regs.h>
16#ifdef __INTEL_COMPILER
17# include <asm/intel_intrin.h>
18#else
19# include <asm/gcc_intrin.h>
20#endif
21
22/*
23 * Force an unresolved reference if someone tries to use
24 * ia64_fetch_and_add() with a bad value.
25 */
26extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
27extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
28
29#define IA64_FETCHADD(tmp,v,n,sz,sem) \
30({ \
31 switch (sz) { \
32 case 4: \
33 tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
34 break; \
35 \
36 case 8: \
37 tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
38 break; \
39 \
40 default: \
41 __bad_size_for_ia64_fetch_and_add(); \
42 } \
43})
44
45#define ia64_fetchadd(i,v,sem) \
46({ \
47 __u64 _tmp; \
48 volatile __typeof__(*(v)) *_v = (v); \
49 /* Can't use a switch () here: gcc isn't always smart enough for that... */ \
50 if ((i) == -16) \
51 IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem); \
52 else if ((i) == -8) \
53 IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem); \
54 else if ((i) == -4) \
55 IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem); \
56 else if ((i) == -1) \
57 IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem); \
58 else if ((i) == 1) \
59 IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem); \
60 else if ((i) == 4) \
61 IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem); \
62 else if ((i) == 8) \
63 IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem); \
64 else if ((i) == 16) \
65 IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem); \
66 else \
67 _tmp = __bad_increment_for_ia64_fetch_and_add(); \
68 (__typeof__(*(v))) (_tmp); /* return old value */ \
69})
70
71#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
72
73/*
74 * This function doesn't exist, so you'll get a linker error if
75 * something tries to do an invalid xchg().
76 */
77extern void ia64_xchg_called_with_bad_pointer (void);
78
79#define __xchg(x,ptr,size) \
80({ \
81 unsigned long __xchg_result; \
82 \
83 switch (size) { \
84 case 1: \
85 __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
86 break; \
87 \
88 case 2: \
89 __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
90 break; \
91 \
92 case 4: \
93 __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
94 break; \
95 \
96 case 8: \
97 __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
98 break; \
99 default: \
100 ia64_xchg_called_with_bad_pointer(); \
101 } \
102 __xchg_result; \
103})
104
105#define xchg(ptr,x) \
106 ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
107
108/*
109 * Atomic compare and exchange. Compare OLD with MEM, if identical,
110 * store NEW in MEM. Return the initial value in MEM. Success is
111 * indicated by comparing RETURN with OLD.
112 */
113
114#define __HAVE_ARCH_CMPXCHG 1
115
116/*
117 * This function doesn't exist, so you'll get a linker error
118 * if something tries to do an invalid cmpxchg().
119 */
120extern long ia64_cmpxchg_called_with_bad_pointer (void);
121
122#define ia64_cmpxchg(sem,ptr,old,new,size) \
123({ \
124 __u64 _o_, _r_; \
125 \
126 switch (size) { \
127 case 1: _o_ = (__u8 ) (long) (old); break; \
128 case 2: _o_ = (__u16) (long) (old); break; \
129 case 4: _o_ = (__u32) (long) (old); break; \
130 case 8: _o_ = (__u64) (long) (old); break; \
131 default: break; \
132 } \
133 switch (size) { \
134 case 1: \
135 _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
136 break; \
137 \
138 case 2: \
139 _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
140 break; \
141 \
142 case 4: \
143 _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
144 break; \
145 \
146 case 8: \
147 _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
148 break; \
149 \
150 default: \
151 _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
152 break; \
153 } \
154 (__typeof__(old)) _r_; \
155})
156
157#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
158#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
159
160/* for compatibility with other platforms: */
161#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
162
163#ifdef CONFIG_IA64_DEBUG_CMPXCHG
164# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
165# define CMPXCHG_BUGCHECK(v) \
166 do { \
167 if (_cmpxchg_bugcheck_count-- <= 0) { \
168 void *ip; \
169 extern int printk(const char *fmt, ...); \
170 ip = (void *) ia64_getreg(_IA64_REG_IP); \
171 printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
172 break; \
173 } \
174 } while (0)
175#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
176# define CMPXCHG_BUGCHECK_DECL
177# define CMPXCHG_BUGCHECK(v)
178#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
179
180#endif
181#endif /* _ASM_IA64_INTRINSICS_H */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
new file mode 100644
index 000000000000..491e9d1fc538
--- /dev/null
+++ b/include/asm-ia64/io.h
@@ -0,0 +1,484 @@
1#ifndef _ASM_IA64_IO_H
2#define _ASM_IA64_IO_H
3
4/*
5 * This file contains the definitions for the emulated IO instructions
6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
8 * versions of the single-IO instructions (inb_p/inw_p/..).
9 *
10 * This file is not meant to be obfuscating: it's just complicated to
11 * (a) handle it all in a way that makes gcc able to optimize it as
12 * well as possible and (b) trying to avoid writing the same thing
13 * over and over again with slight variations and possibly making a
14 * mistake somewhere.
15 *
16 * Copyright (C) 1998-2003 Hewlett-Packard Co
17 * David Mosberger-Tang <davidm@hpl.hp.com>
18 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
19 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
20 */
21
22/* We don't use IO slowdowns on the ia64, but.. */
23#define __SLOW_DOWN_IO do { } while (0)
24#define SLOW_DOWN_IO do { } while (0)
25
26#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
27
28/*
29 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
30 * large machines may have multiple other I/O spaces so we can't place any a priori limit
31 * on IO_SPACE_LIMIT. These additional spaces are described in ACPI.
32 */
33#define IO_SPACE_LIMIT 0xffffffffffffffffUL
34
35#define MAX_IO_SPACES_BITS 4
36#define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS)
37#define IO_SPACE_BITS 24
38#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS)
39
40#define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS)
41#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS)
42#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1))
43
44#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | (p & 0xfff))
45
46struct io_space {
47 unsigned long mmio_base; /* base in MMIO space */
48 int sparse;
49};
50
51extern struct io_space io_space[];
52extern unsigned int num_io_spaces;
53
54# ifdef __KERNEL__
55
56/*
57 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
58 * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap)
59 * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port)
60 *
61 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
62 * code that uses bare port numbers without the prerequisite pci_iomap().
63 */
64#define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
65#define PIO_MASK (PIO_OFFSET - 1)
66#define PIO_RESERVED __IA64_UNCACHED_OFFSET
67#define HAVE_ARCH_PIO_SIZE
68
69#include <asm/intrinsics.h>
70#include <asm/machvec.h>
71#include <asm/page.h>
72#include <asm/system.h>
73#include <asm-generic/iomap.h>
74
75/*
76 * Change virtual addresses to physical addresses and vv.
77 */
78static inline unsigned long
79virt_to_phys (volatile void *address)
80{
81 return (unsigned long) address - PAGE_OFFSET;
82}
83
84static inline void*
85phys_to_virt (unsigned long address)
86{
87 return (void *) (address + PAGE_OFFSET);
88}
89
90#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
91extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
92
93/*
94 * The following two macros are deprecated and scheduled for removal.
95 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
96 */
97#define bus_to_virt phys_to_virt
98#define virt_to_bus virt_to_phys
99#define page_to_bus page_to_phys
100
101# endif /* KERNEL */
102
103/*
104 * Memory fence w/accept. This should never be used in code that is
105 * not IA-64 specific.
106 */
107#define __ia64_mf_a() ia64_mfa()
108
109/**
110 * ___ia64_mmiowb - I/O write barrier
111 *
112 * Ensure ordering of I/O space writes. This will make sure that writes
113 * following the barrier will arrive after all previous writes. For most
114 * ia64 platforms, this is a simple 'mf.a' instruction.
115 *
116 * See Documentation/DocBook/deviceiobook.tmpl for more information.
117 */
118static inline void ___ia64_mmiowb(void)
119{
120 ia64_mfa();
121}
122
123static inline const unsigned long
124__ia64_get_io_port_base (void)
125{
126 extern unsigned long ia64_iobase;
127
128 return ia64_iobase;
129}
130
131static inline void*
132__ia64_mk_io_addr (unsigned long port)
133{
134 struct io_space *space;
135 unsigned long offset;
136
137 space = &io_space[IO_SPACE_NR(port)];
138 port = IO_SPACE_PORT(port);
139 if (space->sparse)
140 offset = IO_SPACE_SPARSE_ENCODING(port);
141 else
142 offset = port;
143
144 return (void *) (space->mmio_base | offset);
145}
146
147#define __ia64_inb ___ia64_inb
148#define __ia64_inw ___ia64_inw
149#define __ia64_inl ___ia64_inl
150#define __ia64_outb ___ia64_outb
151#define __ia64_outw ___ia64_outw
152#define __ia64_outl ___ia64_outl
153#define __ia64_readb ___ia64_readb
154#define __ia64_readw ___ia64_readw
155#define __ia64_readl ___ia64_readl
156#define __ia64_readq ___ia64_readq
157#define __ia64_readb_relaxed ___ia64_readb
158#define __ia64_readw_relaxed ___ia64_readw
159#define __ia64_readl_relaxed ___ia64_readl
160#define __ia64_readq_relaxed ___ia64_readq
161#define __ia64_writeb ___ia64_writeb
162#define __ia64_writew ___ia64_writew
163#define __ia64_writel ___ia64_writel
164#define __ia64_writeq ___ia64_writeq
165#define __ia64_mmiowb ___ia64_mmiowb
166
167/*
168 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
169 * that the access has completed before executing other I/O accesses. Since we're doing
170 * the accesses through an uncachable (UC) translation, the CPU will execute them in
171 * program order. However, we still need to tell the compiler not to shuffle them around
172 * during optimization, which is why we use "volatile" pointers.
173 */
174
175static inline unsigned int
176___ia64_inb (unsigned long port)
177{
178 volatile unsigned char *addr = __ia64_mk_io_addr(port);
179 unsigned char ret;
180
181 ret = *addr;
182 __ia64_mf_a();
183 return ret;
184}
185
186static inline unsigned int
187___ia64_inw (unsigned long port)
188{
189 volatile unsigned short *addr = __ia64_mk_io_addr(port);
190 unsigned short ret;
191
192 ret = *addr;
193 __ia64_mf_a();
194 return ret;
195}
196
197static inline unsigned int
198___ia64_inl (unsigned long port)
199{
200 volatile unsigned int *addr = __ia64_mk_io_addr(port);
201 unsigned int ret;
202
203 ret = *addr;
204 __ia64_mf_a();
205 return ret;
206}
207
208static inline void
209___ia64_outb (unsigned char val, unsigned long port)
210{
211 volatile unsigned char *addr = __ia64_mk_io_addr(port);
212
213 *addr = val;
214 __ia64_mf_a();
215}
216
217static inline void
218___ia64_outw (unsigned short val, unsigned long port)
219{
220 volatile unsigned short *addr = __ia64_mk_io_addr(port);
221
222 *addr = val;
223 __ia64_mf_a();
224}
225
226static inline void
227___ia64_outl (unsigned int val, unsigned long port)
228{
229 volatile unsigned int *addr = __ia64_mk_io_addr(port);
230
231 *addr = val;
232 __ia64_mf_a();
233}
234
235static inline void
236__insb (unsigned long port, void *dst, unsigned long count)
237{
238 unsigned char *dp = dst;
239
240 while (count--)
241 *dp++ = platform_inb(port);
242}
243
244static inline void
245__insw (unsigned long port, void *dst, unsigned long count)
246{
247 unsigned short *dp = dst;
248
249 while (count--)
250 *dp++ = platform_inw(port);
251}
252
253static inline void
254__insl (unsigned long port, void *dst, unsigned long count)
255{
256 unsigned int *dp = dst;
257
258 while (count--)
259 *dp++ = platform_inl(port);
260}
261
262static inline void
263__outsb (unsigned long port, const void *src, unsigned long count)
264{
265 const unsigned char *sp = src;
266
267 while (count--)
268 platform_outb(*sp++, port);
269}
270
271static inline void
272__outsw (unsigned long port, const void *src, unsigned long count)
273{
274 const unsigned short *sp = src;
275
276 while (count--)
277 platform_outw(*sp++, port);
278}
279
280static inline void
281__outsl (unsigned long port, const void *src, unsigned long count)
282{
283 const unsigned int *sp = src;
284
285 while (count--)
286 platform_outl(*sp++, port);
287}
288
289/*
290 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
291 * specification regarding legacy I/O support. Thus, we have to make these operations
292 * platform dependent...
293 */
294#define __inb platform_inb
295#define __inw platform_inw
296#define __inl platform_inl
297#define __outb platform_outb
298#define __outw platform_outw
299#define __outl platform_outl
300#define __mmiowb platform_mmiowb
301
302#define inb(p) __inb(p)
303#define inw(p) __inw(p)
304#define inl(p) __inl(p)
305#define insb(p,d,c) __insb(p,d,c)
306#define insw(p,d,c) __insw(p,d,c)
307#define insl(p,d,c) __insl(p,d,c)
308#define outb(v,p) __outb(v,p)
309#define outw(v,p) __outw(v,p)
310#define outl(v,p) __outl(v,p)
311#define outsb(p,s,c) __outsb(p,s,c)
312#define outsw(p,s,c) __outsw(p,s,c)
313#define outsl(p,s,c) __outsl(p,s,c)
314#define mmiowb() __mmiowb()
315
316/*
317 * The address passed to these functions are ioremap()ped already.
318 *
319 * We need these to be machine vectors since some platforms don't provide
320 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
321 * a good idea). Writes are ok though for all existing ia64 platforms (and
322 * hopefully it'll stay that way).
323 */
324static inline unsigned char
325___ia64_readb (const volatile void __iomem *addr)
326{
327 return *(volatile unsigned char __force *)addr;
328}
329
330static inline unsigned short
331___ia64_readw (const volatile void __iomem *addr)
332{
333 return *(volatile unsigned short __force *)addr;
334}
335
336static inline unsigned int
337___ia64_readl (const volatile void __iomem *addr)
338{
339 return *(volatile unsigned int __force *) addr;
340}
341
342static inline unsigned long
343___ia64_readq (const volatile void __iomem *addr)
344{
345 return *(volatile unsigned long __force *) addr;
346}
347
348static inline void
349__writeb (unsigned char val, volatile void __iomem *addr)
350{
351 *(volatile unsigned char __force *) addr = val;
352}
353
354static inline void
355__writew (unsigned short val, volatile void __iomem *addr)
356{
357 *(volatile unsigned short __force *) addr = val;
358}
359
360static inline void
361__writel (unsigned int val, volatile void __iomem *addr)
362{
363 *(volatile unsigned int __force *) addr = val;
364}
365
366static inline void
367__writeq (unsigned long val, volatile void __iomem *addr)
368{
369 *(volatile unsigned long __force *) addr = val;
370}
371
372#define __readb platform_readb
373#define __readw platform_readw
374#define __readl platform_readl
375#define __readq platform_readq
376#define __readb_relaxed platform_readb_relaxed
377#define __readw_relaxed platform_readw_relaxed
378#define __readl_relaxed platform_readl_relaxed
379#define __readq_relaxed platform_readq_relaxed
380
381#define readb(a) __readb((a))
382#define readw(a) __readw((a))
383#define readl(a) __readl((a))
384#define readq(a) __readq((a))
385#define readb_relaxed(a) __readb_relaxed((a))
386#define readw_relaxed(a) __readw_relaxed((a))
387#define readl_relaxed(a) __readl_relaxed((a))
388#define readq_relaxed(a) __readq_relaxed((a))
389#define __raw_readb readb
390#define __raw_readw readw
391#define __raw_readl readl
392#define __raw_readq readq
393#define __raw_readb_relaxed readb_relaxed
394#define __raw_readw_relaxed readw_relaxed
395#define __raw_readl_relaxed readl_relaxed
396#define __raw_readq_relaxed readq_relaxed
397#define writeb(v,a) __writeb((v), (a))
398#define writew(v,a) __writew((v), (a))
399#define writel(v,a) __writel((v), (a))
400#define writeq(v,a) __writeq((v), (a))
401#define __raw_writeb writeb
402#define __raw_writew writew
403#define __raw_writel writel
404#define __raw_writeq writeq
405
406#ifndef inb_p
407# define inb_p inb
408#endif
409#ifndef inw_p
410# define inw_p inw
411#endif
412#ifndef inl_p
413# define inl_p inl
414#endif
415
416#ifndef outb_p
417# define outb_p outb
418#endif
419#ifndef outw_p
420# define outw_p outw
421#endif
422#ifndef outl_p
423# define outl_p outl
424#endif
425
426/*
427 * An "address" in IO memory space is not clearly either an integer or a pointer. We will
428 * accept both, thus the casts.
429 *
430 * On ia-64, we access the physical I/O memory space through the uncached kernel region.
431 */
432static inline void __iomem *
433ioremap (unsigned long offset, unsigned long size)
434{
435 return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
436}
437
438static inline void
439iounmap (volatile void __iomem *addr)
440{
441}
442
443#define ioremap_nocache(o,s) ioremap(o,s)
444
445# ifdef __KERNEL__
446
447/*
448 * String version of IO memory access ops:
449 */
450extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
451extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
452extern void memset_io(volatile void __iomem *s, int c, long n);
453
454#define dma_cache_inv(_start,_size) do { } while (0)
455#define dma_cache_wback(_start,_size) do { } while (0)
456#define dma_cache_wback_inv(_start,_size) do { } while (0)
457
458# endif /* __KERNEL__ */
459
460/*
461 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
462 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
463 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
464 * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
465 * over BIO-level virtual merging.
466 */
467extern unsigned long ia64_max_iommu_merge_mask;
468#if 1
469#define BIO_VMERGE_BOUNDARY 0
470#else
471/*
472 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
473 * replaced by dma_merge_mask() or something of that sort. Note: the only way
474 * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
475 * expanded into:
476 *
477 * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
478 *
479 * which is precisely what we want.
480 */
481#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
482#endif
483
484#endif /* _ASM_IA64_IO_H */
diff --git a/include/asm-ia64/ioctl.h b/include/asm-ia64/ioctl.h
new file mode 100644
index 000000000000..be9cc2403d2a
--- /dev/null
+++ b/include/asm-ia64/ioctl.h
@@ -0,0 +1,77 @@
1#ifndef _ASM_IA64_IOCTL_H
2#define _ASM_IA64_IOCTL_H
3
4/*
5 * Based on <asm-i386/ioctl.h>.
6 *
7 * Modified 1998, 1999
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */
10
11/* ioctl command encoding: 32 bits total, command in lower 16 bits,
12 * size of the parameter structure in the lower 14 bits of the
13 * upper 16 bits.
14 * Encoding the size of the parameter structure in the ioctl request
15 * is useful for catching programs compiled with old versions
16 * and to avoid overwriting user space outside the user buffer area.
17 * The highest 2 bits are reserved for indicating the ``access mode''.
18 * NOTE: This limits the max parameter size to 16kB -1 !
19 */
20
21/*
22 * The following is for compatibility across the various Linux
23 * platforms. The ia64 ioctl numbering scheme doesn't really enforce
24 * a type field. De facto, however, the top 8 bits of the lower 16
25 * bits are indeed used as a type field, so we might just as well make
26 * this explicit here. Please be sure to use the decoding macros
27 * below from now on.
28 */
29#define _IOC_NRBITS 8
30#define _IOC_TYPEBITS 8
31#define _IOC_SIZEBITS 14
32#define _IOC_DIRBITS 2
33
34#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
35#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
36#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
37#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
38
39#define _IOC_NRSHIFT 0
40#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
41#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
42#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
43
44/*
45 * Direction bits.
46 */
47#define _IOC_NONE 0U
48#define _IOC_WRITE 1U
49#define _IOC_READ 2U
50
51#define _IOC(dir,type,nr,size) \
52 (((dir) << _IOC_DIRSHIFT) | \
53 ((type) << _IOC_TYPESHIFT) | \
54 ((nr) << _IOC_NRSHIFT) | \
55 ((size) << _IOC_SIZESHIFT))
56
57/* used to create numbers */
58#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
59#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
60#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
61#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
62
63/* used to decode ioctl numbers.. */
64#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
65#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
66#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
67#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
68
69/* ...and for the drivers/sound files... */
70
71#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
72#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
73#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
74#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
75#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
76
77#endif /* _ASM_IA64_IOCTL_H */
diff --git a/include/asm-ia64/ioctl32.h b/include/asm-ia64/ioctl32.h
new file mode 100644
index 000000000000..d0d227f45e05
--- /dev/null
+++ b/include/asm-ia64/ioctl32.h
@@ -0,0 +1 @@
#include <linux/ioctl32.h>
diff --git a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h
new file mode 100644
index 000000000000..31ee521aeb7a
--- /dev/null
+++ b/include/asm-ia64/ioctls.h
@@ -0,0 +1,89 @@
1#ifndef _ASM_IA64_IOCTLS_H
2#define _ASM_IA64_IOCTLS_H
3
4/*
5 * Based on <asm-i386/ioctls.h>
6 *
7 * Modified 1998, 1999, 2002
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */
10
11#include <asm/ioctl.h>
12
13/* 0x54 is just a magic number to make these relatively unique ('T') */
14
15#define TCGETS 0x5401
16#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
17#define TCSETSW 0x5403
18#define TCSETSF 0x5404
19#define TCGETA 0x5405
20#define TCSETA 0x5406
21#define TCSETAW 0x5407
22#define TCSETAF 0x5408
23#define TCSBRK 0x5409
24#define TCXONC 0x540A
25#define TCFLSH 0x540B
26#define TIOCEXCL 0x540C
27#define TIOCNXCL 0x540D
28#define TIOCSCTTY 0x540E
29#define TIOCGPGRP 0x540F
30#define TIOCSPGRP 0x5410
31#define TIOCOUTQ 0x5411
32#define TIOCSTI 0x5412
33#define TIOCGWINSZ 0x5413
34#define TIOCSWINSZ 0x5414
35#define TIOCMGET 0x5415
36#define TIOCMBIS 0x5416
37#define TIOCMBIC 0x5417
38#define TIOCMSET 0x5418
39#define TIOCGSOFTCAR 0x5419
40#define TIOCSSOFTCAR 0x541A
41#define FIONREAD 0x541B
42#define TIOCINQ FIONREAD
43#define TIOCLINUX 0x541C
44#define TIOCCONS 0x541D
45#define TIOCGSERIAL 0x541E
46#define TIOCSSERIAL 0x541F
47#define TIOCPKT 0x5420
48#define FIONBIO 0x5421
49#define TIOCNOTTY 0x5422
50#define TIOCSETD 0x5423
51#define TIOCGETD 0x5424
52#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
53#define TIOCSBRK 0x5427 /* BSD compatibility */
54#define TIOCCBRK 0x5428 /* BSD compatibility */
55#define TIOCGSID 0x5429 /* Return the session ID of FD */
56#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
57#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
58
59#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
60#define FIOCLEX 0x5451
61#define FIOASYNC 0x5452
62#define TIOCSERCONFIG 0x5453
63#define TIOCSERGWILD 0x5454
64#define TIOCSERSWILD 0x5455
65#define TIOCGLCKTRMIOS 0x5456
66#define TIOCSLCKTRMIOS 0x5457
67#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
68#define TIOCSERGETLSR 0x5459 /* Get line status register */
69#define TIOCSERGETMULTI 0x545A /* Get multiport config */
70#define TIOCSERSETMULTI 0x545B /* Set multiport config */
71
72#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
73#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
74#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
75#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
76#define FIOQSIZE 0x5460
77
78/* Used for packet mode */
79#define TIOCPKT_DATA 0
80#define TIOCPKT_FLUSHREAD 1
81#define TIOCPKT_FLUSHWRITE 2
82#define TIOCPKT_STOP 4
83#define TIOCPKT_START 8
84#define TIOCPKT_NOSTOP 16
85#define TIOCPKT_DOSTOP 32
86
87#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
88
89#endif /* _ASM_IA64_IOCTLS_H */
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
new file mode 100644
index 000000000000..38a7a72791cc
--- /dev/null
+++ b/include/asm-ia64/iosapic.h
@@ -0,0 +1,110 @@
1#ifndef __ASM_IA64_IOSAPIC_H
2#define __ASM_IA64_IOSAPIC_H
3
4#define IOSAPIC_REG_SELECT 0x0
5#define IOSAPIC_WINDOW 0x10
6#define IOSAPIC_EOI 0x40
7
8#define IOSAPIC_VERSION 0x1
9
10/*
11 * Redirection table entry
12 */
13#define IOSAPIC_RTE_LOW(i) (0x10+i*2)
14#define IOSAPIC_RTE_HIGH(i) (0x11+i*2)
15
16#define IOSAPIC_DEST_SHIFT 16
17
18/*
19 * Delivery mode
20 */
21#define IOSAPIC_DELIVERY_SHIFT 8
22#define IOSAPIC_FIXED 0x0
23#define IOSAPIC_LOWEST_PRIORITY 0x1
24#define IOSAPIC_PMI 0x2
25#define IOSAPIC_NMI 0x4
26#define IOSAPIC_INIT 0x5
27#define IOSAPIC_EXTINT 0x7
28
29/*
30 * Interrupt polarity
31 */
32#define IOSAPIC_POLARITY_SHIFT 13
33#define IOSAPIC_POL_HIGH 0
34#define IOSAPIC_POL_LOW 1
35
36/*
37 * Trigger mode
38 */
39#define IOSAPIC_TRIGGER_SHIFT 15
40#define IOSAPIC_EDGE 0
41#define IOSAPIC_LEVEL 1
42
43/*
44 * Mask bit
45 */
46
47#define IOSAPIC_MASK_SHIFT 16
48#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT)
49
50#ifndef __ASSEMBLY__
51
52#ifdef CONFIG_IOSAPIC
53
54#define NR_IOSAPICS 256
55
56static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
57{
58 writel(reg, iosapic + IOSAPIC_REG_SELECT);
59 return readl(iosapic + IOSAPIC_WINDOW);
60}
61
62static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
63{
64 writel(reg, iosapic + IOSAPIC_REG_SELECT);
65 writel(val, iosapic + IOSAPIC_WINDOW);
66}
67
68static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
69{
70 writel(vector, iosapic + IOSAPIC_EOI);
71}
72
73extern void __init iosapic_system_init (int pcat_compat);
74extern void __init iosapic_init (unsigned long address,
75 unsigned int gsi_base);
76extern int gsi_to_vector (unsigned int gsi);
77extern int gsi_to_irq (unsigned int gsi);
78extern void iosapic_enable_intr (unsigned int vector);
79extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
80 unsigned long trigger);
81#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
82extern void iosapic_unregister_intr (unsigned int irq);
83#endif
84extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
85 unsigned long polarity,
86 unsigned long trigger);
87extern int __init iosapic_register_platform_intr (u32 int_type,
88 unsigned int gsi,
89 int pmi_vector,
90 u16 eid, u16 id,
91 unsigned long polarity,
92 unsigned long trigger);
93extern unsigned int iosapic_version (char __iomem *addr);
94
95extern void iosapic_pci_fixup (int);
96#ifdef CONFIG_NUMA
97extern void __init map_iosapic_to_node (unsigned int, int);
98#endif
99#else
100#define iosapic_system_init(pcat_compat) do { } while (0)
101#define iosapic_init(address,gsi_base) do { } while (0)
102#define iosapic_register_intr(gsi,polarity,trigger) (gsi)
103#define iosapic_unregister_intr(irq) do { } while (0)
104#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0)
105#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \
106 polarity,trigger) (gsi)
107#endif
108
109# endif /* !__ASSEMBLY__ */
110#endif /* __ASM_IA64_IOSAPIC_H */
diff --git a/include/asm-ia64/ipcbuf.h b/include/asm-ia64/ipcbuf.h
new file mode 100644
index 000000000000..079899ae7d32
--- /dev/null
+++ b/include/asm-ia64/ipcbuf.h
@@ -0,0 +1,28 @@
1#ifndef _ASM_IA64_IPCBUF_H
2#define _ASM_IA64_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for IA-64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit seq
11 * - 2 miscellaneous 64-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid_t uid;
18 __kernel_gid_t gid;
19 __kernel_uid_t cuid;
20 __kernel_gid_t cgid;
21 __kernel_mode_t mode;
22 unsigned short seq;
23 unsigned short __pad1;
24 unsigned long __unused1;
25 unsigned long __unused2;
26};
27
28#endif /* _ASM_IA64_IPCBUF_H */
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
new file mode 100644
index 000000000000..bd07d11d9f37
--- /dev/null
+++ b/include/asm-ia64/irq.h
@@ -0,0 +1,43 @@
1#ifndef _ASM_IA64_IRQ_H
2#define _ASM_IA64_IRQ_H
3
4/*
5 * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 * Stephane Eranian <eranian@hpl.hp.com>
8 *
9 * 11/24/98 S.Eranian updated TIMER_IRQ and irq_canonicalize
10 * 01/20/99 S.Eranian added keyboard interrupt
11 * 02/29/00 D.Mosberger moved most things into hw_irq.h
12 */
13
14#define NR_IRQS 256
15#define NR_IRQ_VECTORS NR_IRQS
16
/*
 * irq_canonicalize - map a raw irq number to its canonical value.
 *
 * Legacy 8259-compatibility quirk: the cascade line (irq 2) is
 * reported as irq 9.  This really shouldn't be necessary on ia64 at
 * all, but it is kept because serial.c still depends on it.
 */
static __inline__ int
irq_canonicalize (int irq)
{
	if (irq == 2)
		return 9;
	return irq;
}
27
28extern void disable_irq (unsigned int);
29extern void disable_irq_nosync (unsigned int);
30extern void enable_irq (unsigned int);
31extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
32
33#ifdef CONFIG_SMP
34extern void move_irq(int irq);
35#else
36#define move_irq(irq)
37#endif
38
39struct irqaction;
40struct pt_regs;
41int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
42
43#endif /* _ASM_IA64_IRQ_H */
diff --git a/include/asm-ia64/kmap_types.h b/include/asm-ia64/kmap_types.h
new file mode 100644
index 000000000000..bc777525fa12
--- /dev/null
+++ b/include/asm-ia64/kmap_types.h
@@ -0,0 +1,31 @@
1#ifndef _ASM_IA64_KMAP_TYPES_H
2#define _ASM_IA64_KMAP_TYPES_H
3
4#include <linux/config.h>
5
6#ifdef CONFIG_DEBUG_HIGHMEM
7# define D(n) __KM_FENCE_##n ,
8#else
9# define D(n)
10#endif
11
12enum km_type {
13D(0) KM_BOUNCE_READ,
14D(1) KM_SKB_SUNRPC_DATA,
15D(2) KM_SKB_DATA_SOFTIRQ,
16D(3) KM_USER0,
17D(4) KM_USER1,
18D(5) KM_BIO_SRC_IRQ,
19D(6) KM_BIO_DST_IRQ,
20D(7) KM_PTE0,
21D(8) KM_PTE1,
22D(9) KM_IRQ0,
23D(10) KM_IRQ1,
24D(11) KM_SOFTIRQ0,
25D(12) KM_SOFTIRQ1,
26D(13) KM_TYPE_NR
27};
28
29#undef D
30
31#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
new file mode 100644
index 000000000000..221b5cb564b2
--- /dev/null
+++ b/include/asm-ia64/kregs.h
@@ -0,0 +1,163 @@
1#ifndef _ASM_IA64_KREGS_H
2#define _ASM_IA64_KREGS_H
3
4/*
5 * Copyright (C) 2001-2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8/*
9 * This file defines the kernel register usage convention used by Linux/ia64.
10 */
11
12/*
13 * Kernel registers:
14 */
15#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
16#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
17#define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */
18#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
19#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
20#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
21#define IA64_KR_PT_BASE 7 /* ar.k7: page table base address (physical) */
22
23#define _IA64_KR_PASTE(x,y) x##y
24#define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n)
25#define IA64_KR(n) _IA64_KR_PREFIX(IA64_KR_##n)
26
27/*
28 * Translation registers:
29 */
30#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */
31#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
32#define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */
33#define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */
34
35/* Processor status register bits: */
36#define IA64_PSR_BE_BIT 1
37#define IA64_PSR_UP_BIT 2
38#define IA64_PSR_AC_BIT 3
39#define IA64_PSR_MFL_BIT 4
40#define IA64_PSR_MFH_BIT 5
41#define IA64_PSR_IC_BIT 13
42#define IA64_PSR_I_BIT 14
43#define IA64_PSR_PK_BIT 15
44#define IA64_PSR_DT_BIT 17
45#define IA64_PSR_DFL_BIT 18
46#define IA64_PSR_DFH_BIT 19
47#define IA64_PSR_SP_BIT 20
48#define IA64_PSR_PP_BIT 21
49#define IA64_PSR_DI_BIT 22
50#define IA64_PSR_SI_BIT 23
51#define IA64_PSR_DB_BIT 24
52#define IA64_PSR_LP_BIT 25
53#define IA64_PSR_TB_BIT 26
54#define IA64_PSR_RT_BIT 27
55/* The following are not affected by save_flags()/restore_flags(): */
56#define IA64_PSR_CPL0_BIT 32
57#define IA64_PSR_CPL1_BIT 33
58#define IA64_PSR_IS_BIT 34
59#define IA64_PSR_MC_BIT 35
60#define IA64_PSR_IT_BIT 36
61#define IA64_PSR_ID_BIT 37
62#define IA64_PSR_DA_BIT 38
63#define IA64_PSR_DD_BIT 39
64#define IA64_PSR_SS_BIT 40
65#define IA64_PSR_RI_BIT 41
66#define IA64_PSR_ED_BIT 43
67#define IA64_PSR_BN_BIT 44
68#define IA64_PSR_IA_BIT 45
69
70/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
71 execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
72 execve(). */
73#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
74 IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
75 IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
76#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP)
77
78#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
79#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
80#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT)
81#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT)
82#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT)
83#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT)
84#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT)
85#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT)
86#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT)
87#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT)
88#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT)
89#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT)
90#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT)
91#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT)
92#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT)
93#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT)
94#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT)
95#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT)
96#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT)
97/* The following are not affected by save_flags()/restore_flags(): */
98#define IA64_PSR_CPL (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
99#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT)
100#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT)
101#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT)
102#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT)
103#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT)
104#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT)
105#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT)
106#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT)
107#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
108#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
109#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
110
111/* User mask bits: */
112#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
113
114/* Default Control Register */
115#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */
116#define IA64_DCR_BE_BIT 1 /* big-endian default */
117#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */
118#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */
119#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */
120#define IA64_DCR_DK_BIT 10 /* defer key miss faults */
121#define IA64_DCR_DX_BIT 11 /* defer key permission faults */
122#define IA64_DCR_DR_BIT 12 /* defer access right faults */
123#define IA64_DCR_DA_BIT 13 /* defer access bit faults */
124#define IA64_DCR_DD_BIT 14 /* defer debug faults */
125
126#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT)
127#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT)
128#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT)
129#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT)
130#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT)
131#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT)
132#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT)
133#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT)
134#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT)
135#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT)
136
137/* Interrupt Status Register */
138#define IA64_ISR_X_BIT 32 /* execute access */
139#define IA64_ISR_W_BIT 33 /* write access */
140#define IA64_ISR_R_BIT 34 /* read access */
141#define IA64_ISR_NA_BIT 35 /* non-access */
142#define IA64_ISR_SP_BIT 36 /* speculative load exception */
143#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */
144#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */
145#define IA64_ISR_CODE_MASK 0xf
146
147#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT)
148#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT)
149#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT)
150#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT)
151#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT)
152#define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT)
153#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT)
154
155/* ISR code field for non-access instructions */
156#define IA64_ISR_CODE_TPA 0
157#define IA64_ISR_CODE_FC 1
158#define IA64_ISR_CODE_PROBE 2
159#define IA64_ISR_CODE_TAK 3
160#define IA64_ISR_CODE_LFETCH 4
161#define IA64_ISR_CODE_PROBEF 5
162
163#endif /* _ASM_IA64_KREGS_H */
diff --git a/include/asm-ia64/linkage.h b/include/asm-ia64/linkage.h
new file mode 100644
index 000000000000..14cd72cd8007
--- /dev/null
+++ b/include/asm-ia64/linkage.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H
3
4#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
5
6#endif
diff --git a/include/asm-ia64/local.h b/include/asm-ia64/local.h
new file mode 100644
index 000000000000..1dbd584ad851
--- /dev/null
+++ b/include/asm-ia64/local.h
@@ -0,0 +1,50 @@
1#ifndef _ASM_IA64_LOCAL_H
2#define _ASM_IA64_LOCAL_H
3
4/*
5 * Copyright (C) 2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/percpu.h>
10
11typedef struct {
12 atomic64_t val;
13} local_t;
14
15#define LOCAL_INIT(i) ((local_t) { { (i) } })
16#define local_read(l) atomic64_read(&(l)->val)
17#define local_set(l, i) atomic64_set(&(l)->val, i)
18#define local_inc(l) atomic64_inc(&(l)->val)
19#define local_dec(l) atomic64_dec(&(l)->val)
20#define local_add(l) atomic64_add(&(l)->val)
21#define local_sub(l) atomic64_sub(&(l)->val)
22
23/* Non-atomic variants, i.e., preemption disabled and won't be touched in interrupt, etc. */
24
25#define __local_inc(l) (++(l)->val.counter)
26#define __local_dec(l) (--(l)->val.counter)
27#define __local_add(i,l) ((l)->val.counter += (i))
28#define __local_sub(i,l) ((l)->val.counter -= (i))
29
30/*
31 * Use these for per-cpu local_t variables. Note they take a variable (eg. mystruct.foo),
32 * not an address.
33 */
34#define cpu_local_read(v) local_read(&__ia64_per_cpu_var(v))
35#define cpu_local_set(v, i) local_set(&__ia64_per_cpu_var(v), (i))
36#define cpu_local_inc(v) local_inc(&__ia64_per_cpu_var(v))
37#define cpu_local_dec(v) local_dec(&__ia64_per_cpu_var(v))
38#define cpu_local_add(i, v) local_add((i), &__ia64_per_cpu_var(v))
39#define cpu_local_sub(i, v) local_sub((i), &__ia64_per_cpu_var(v))
40
41/*
42 * Non-atomic increments, i.e., preemption disabled and won't be touched in interrupt,
43 * etc.
44 */
45#define __cpu_local_inc(v) __local_inc(&__ia64_per_cpu_var(v))
46#define __cpu_local_dec(v) __local_dec(&__ia64_per_cpu_var(v))
47#define __cpu_local_add(i, v) __local_add((i), &__ia64_per_cpu_var(v))
48#define __cpu_local_sub(i, v) __local_sub((i), &__ia64_per_cpu_var(v))
49
50#endif /* _ASM_IA64_LOCAL_H */
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
new file mode 100644
index 000000000000..79e89a7db566
--- /dev/null
+++ b/include/asm-ia64/machvec.h
@@ -0,0 +1,390 @@
1/*
2 * Machine vector for IA-64.
3 *
4 * Copyright (C) 1999 Silicon Graphics, Inc.
5 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
6 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
7 * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10#ifndef _ASM_IA64_MACHVEC_H
11#define _ASM_IA64_MACHVEC_H
12
13#include <linux/config.h>
14#include <linux/types.h>
15
16/* forward declarations: */
17struct device;
18struct pt_regs;
19struct scatterlist;
20struct page;
21struct mm_struct;
22struct pci_bus;
23
24typedef void ia64_mv_setup_t (char **);
25typedef void ia64_mv_cpu_init_t (void);
26typedef void ia64_mv_irq_init_t (void);
27typedef void ia64_mv_send_ipi_t (int, int, int, int);
28typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
29typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
30typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
31typedef unsigned int ia64_mv_local_vector_to_irq (u8);
32typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
33typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
34 u8 size);
35typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
36 u8 size);
37
38/* DMA-mapping interface: */
39typedef void ia64_mv_dma_init (void);
40typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
41typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
42typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
43typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
44typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
45typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
46typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
47typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
48typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
49typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
50typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
51typedef int ia64_mv_dma_supported (struct device *, u64);
52
53/*
54 * WARNING: The legacy I/O space is _architected_. Platforms are
55 * expected to follow this architected model (see Section 10.7 in the
56 * IA-64 Architecture Software Developer's Manual). Unfortunately,
57 * some broken machines do not follow that model, which is why we have
58 * to make the inX/outX operations part of the machine vector.
59 * Platform designers should follow the architected model whenever
60 * possible.
61 */
62typedef unsigned int ia64_mv_inb_t (unsigned long);
63typedef unsigned int ia64_mv_inw_t (unsigned long);
64typedef unsigned int ia64_mv_inl_t (unsigned long);
65typedef void ia64_mv_outb_t (unsigned char, unsigned long);
66typedef void ia64_mv_outw_t (unsigned short, unsigned long);
67typedef void ia64_mv_outl_t (unsigned int, unsigned long);
68typedef void ia64_mv_mmiowb_t (void);
69typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
70typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
71typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
72typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
73typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
74typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
75typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
76typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
77
/*
 * machvec_noop - do-nothing stand-in for optional machine-vector hooks
 * that take no arguments (e.g. cpu_init, irq_init on platforms that
 * need no special handling).
 */
static inline void
machvec_noop (void)
{
	/* intentionally empty */
}
82
/*
 * machvec_noop_mm - do-nothing stand-in for optional machine-vector
 * hooks that take an mm_struct pointer (e.g. tlb_migrate_finish on
 * platforms that need no special handling).  @mm is ignored.
 */
static inline void
machvec_noop_mm (struct mm_struct *mm)
{
	/* intentionally empty */
}
87
88extern void machvec_setup (char **);
89extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
90extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
91extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
92extern void machvec_tlb_migrate_finish (struct mm_struct *);
93
94# if defined (CONFIG_IA64_HP_SIM)
95# include <asm/machvec_hpsim.h>
96# elif defined (CONFIG_IA64_DIG)
97# include <asm/machvec_dig.h>
98# elif defined (CONFIG_IA64_HP_ZX1)
99# include <asm/machvec_hpzx1.h>
100# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
101# include <asm/machvec_hpzx1_swiotlb.h>
102# elif defined (CONFIG_IA64_SGI_SN2)
103# include <asm/machvec_sn2.h>
104# elif defined (CONFIG_IA64_GENERIC)
105
106# ifdef MACHVEC_PLATFORM_HEADER
107# include MACHVEC_PLATFORM_HEADER
108# else
109# define platform_name ia64_mv.name
110# define platform_setup ia64_mv.setup
111# define platform_cpu_init ia64_mv.cpu_init
112# define platform_irq_init ia64_mv.irq_init
113# define platform_send_ipi ia64_mv.send_ipi
114# define platform_timer_interrupt ia64_mv.timer_interrupt
115# define platform_global_tlb_purge ia64_mv.global_tlb_purge
116# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
117# define platform_dma_init ia64_mv.dma_init
118# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
119# define platform_dma_free_coherent ia64_mv.dma_free_coherent
120# define platform_dma_map_single ia64_mv.dma_map_single
121# define platform_dma_unmap_single ia64_mv.dma_unmap_single
122# define platform_dma_map_sg ia64_mv.dma_map_sg
123# define platform_dma_unmap_sg ia64_mv.dma_unmap_sg
124# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
125# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
126# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
127# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
128# define platform_dma_mapping_error ia64_mv.dma_mapping_error
129# define platform_dma_supported ia64_mv.dma_supported
130# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
131# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
132# define platform_pci_legacy_read ia64_mv.pci_legacy_read
133# define platform_pci_legacy_write ia64_mv.pci_legacy_write
134# define platform_inb ia64_mv.inb
135# define platform_inw ia64_mv.inw
136# define platform_inl ia64_mv.inl
137# define platform_outb ia64_mv.outb
138# define platform_outw ia64_mv.outw
139# define platform_outl ia64_mv.outl
140# define platform_mmiowb ia64_mv.mmiowb
141# define platform_readb ia64_mv.readb
142# define platform_readw ia64_mv.readw
143# define platform_readl ia64_mv.readl
144# define platform_readq ia64_mv.readq
145# define platform_readb_relaxed ia64_mv.readb_relaxed
146# define platform_readw_relaxed ia64_mv.readw_relaxed
147# define platform_readl_relaxed ia64_mv.readl_relaxed
148# define platform_readq_relaxed ia64_mv.readq_relaxed
149# endif
150
151/* __attribute__((__aligned__(16))) is required to make size of the
152 * structure multiple of 16 bytes.
153 * This will fillup the holes created because of section 3.3.1 in
154 * Software Conventions guide.
155 */
156struct ia64_machine_vector {
157 const char *name;
158 ia64_mv_setup_t *setup;
159 ia64_mv_cpu_init_t *cpu_init;
160 ia64_mv_irq_init_t *irq_init;
161 ia64_mv_send_ipi_t *send_ipi;
162 ia64_mv_timer_interrupt_t *timer_interrupt;
163 ia64_mv_global_tlb_purge_t *global_tlb_purge;
164 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
165 ia64_mv_dma_init *dma_init;
166 ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
167 ia64_mv_dma_free_coherent *dma_free_coherent;
168 ia64_mv_dma_map_single *dma_map_single;
169 ia64_mv_dma_unmap_single *dma_unmap_single;
170 ia64_mv_dma_map_sg *dma_map_sg;
171 ia64_mv_dma_unmap_sg *dma_unmap_sg;
172 ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
173 ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
174 ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
175 ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
176 ia64_mv_dma_mapping_error *dma_mapping_error;
177 ia64_mv_dma_supported *dma_supported;
178 ia64_mv_local_vector_to_irq *local_vector_to_irq;
179 ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
180 ia64_mv_pci_legacy_read_t *pci_legacy_read;
181 ia64_mv_pci_legacy_write_t *pci_legacy_write;
182 ia64_mv_inb_t *inb;
183 ia64_mv_inw_t *inw;
184 ia64_mv_inl_t *inl;
185 ia64_mv_outb_t *outb;
186 ia64_mv_outw_t *outw;
187 ia64_mv_outl_t *outl;
188 ia64_mv_mmiowb_t *mmiowb;
189 ia64_mv_readb_t *readb;
190 ia64_mv_readw_t *readw;
191 ia64_mv_readl_t *readl;
192 ia64_mv_readq_t *readq;
193 ia64_mv_readb_relaxed_t *readb_relaxed;
194 ia64_mv_readw_relaxed_t *readw_relaxed;
195 ia64_mv_readl_relaxed_t *readl_relaxed;
196 ia64_mv_readq_relaxed_t *readq_relaxed;
197} __attribute__((__aligned__(16))); /* align attrib? see above comment */
198
/*
 * MACHVEC_INIT(name) expands to a positional initializer for
 * struct ia64_machine_vector.
 *
 * NOTE(review): the entries are positional, so their order must match
 * the member order of struct ia64_machine_vector exactly; keep the two
 * in sync when adding hooks.
 */
#define MACHVEC_INIT(name)			\
{						\
	#name,					\
	platform_setup,				\
	platform_cpu_init,			\
	platform_irq_init,			\
	platform_send_ipi,			\
	platform_timer_interrupt,		\
	platform_global_tlb_purge,		\
	platform_tlb_migrate_finish,		\
	platform_dma_init,			\
	platform_dma_alloc_coherent,		\
	platform_dma_free_coherent,		\
	platform_dma_map_single,		\
	platform_dma_unmap_single,		\
	platform_dma_map_sg,			\
	platform_dma_unmap_sg,			\
	platform_dma_sync_single_for_cpu,	\
	platform_dma_sync_sg_for_cpu,		\
	platform_dma_sync_single_for_device,	\
	platform_dma_sync_sg_for_device,	\
	platform_dma_mapping_error,		\
	platform_dma_supported,			\
	platform_local_vector_to_irq,		\
	platform_pci_get_legacy_mem,		\
	platform_pci_legacy_read,		\
	platform_pci_legacy_write,		\
	platform_inb,				\
	platform_inw,				\
	platform_inl,				\
	platform_outb,				\
	platform_outw,				\
	platform_outl,				\
	platform_mmiowb,			\
	platform_readb,				\
	platform_readw,				\
	platform_readl,				\
	platform_readq,				\
	platform_readb_relaxed,			\
	platform_readw_relaxed,			\
	platform_readl_relaxed,			\
	platform_readq_relaxed,			\
}
242
243extern struct ia64_machine_vector ia64_mv;
244extern void machvec_init (const char *name);
245
246# else
247# error Unknown configuration. Update asm-ia64/machvec.h.
248# endif /* CONFIG_IA64_GENERIC */
249
250/*
251 * Declare default routines which aren't declared anywhere else:
252 */
253extern ia64_mv_dma_init swiotlb_init;
254extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
255extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
256extern ia64_mv_dma_map_single swiotlb_map_single;
257extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
258extern ia64_mv_dma_map_sg swiotlb_map_sg;
259extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
260extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
261extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu;
262extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
263extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device;
264extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
265extern ia64_mv_dma_supported swiotlb_dma_supported;
266
267/*
268 * Define default versions so we can extend machvec for new platforms without having
269 * to update the machvec files for all existing platforms.
270 */
271#ifndef platform_setup
272# define platform_setup machvec_setup
273#endif
274#ifndef platform_cpu_init
275# define platform_cpu_init machvec_noop
276#endif
277#ifndef platform_irq_init
278# define platform_irq_init machvec_noop
279#endif
280
281#ifndef platform_send_ipi
282# define platform_send_ipi ia64_send_ipi /* default to architected version */
283#endif
284#ifndef platform_timer_interrupt
285# define platform_timer_interrupt machvec_timer_interrupt
286#endif
287#ifndef platform_global_tlb_purge
288# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
289#endif
290#ifndef platform_tlb_migrate_finish
291# define platform_tlb_migrate_finish machvec_noop_mm
292#endif
293#ifndef platform_dma_init
294# define platform_dma_init swiotlb_init
295#endif
296#ifndef platform_dma_alloc_coherent
297# define platform_dma_alloc_coherent swiotlb_alloc_coherent
298#endif
299#ifndef platform_dma_free_coherent
300# define platform_dma_free_coherent swiotlb_free_coherent
301#endif
302#ifndef platform_dma_map_single
303# define platform_dma_map_single swiotlb_map_single
304#endif
305#ifndef platform_dma_unmap_single
306# define platform_dma_unmap_single swiotlb_unmap_single
307#endif
308#ifndef platform_dma_map_sg
309# define platform_dma_map_sg swiotlb_map_sg
310#endif
311#ifndef platform_dma_unmap_sg
312# define platform_dma_unmap_sg swiotlb_unmap_sg
313#endif
314#ifndef platform_dma_sync_single_for_cpu
315# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu
316#endif
317#ifndef platform_dma_sync_sg_for_cpu
318# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu
319#endif
320#ifndef platform_dma_sync_single_for_device
321# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device
322#endif
323#ifndef platform_dma_sync_sg_for_device
324# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device
325#endif
326#ifndef platform_dma_mapping_error
327# define platform_dma_mapping_error swiotlb_dma_mapping_error
328#endif
329#ifndef platform_dma_supported
330# define platform_dma_supported swiotlb_dma_supported
331#endif
332#ifndef platform_local_vector_to_irq
333# define platform_local_vector_to_irq __ia64_local_vector_to_irq
334#endif
335#ifndef platform_pci_get_legacy_mem
336# define platform_pci_get_legacy_mem ia64_pci_get_legacy_mem
337#endif
338#ifndef platform_pci_legacy_read
339# define platform_pci_legacy_read ia64_pci_legacy_read
340#endif
341#ifndef platform_pci_legacy_write
342# define platform_pci_legacy_write ia64_pci_legacy_write
343#endif
344#ifndef platform_inb
345# define platform_inb __ia64_inb
346#endif
347#ifndef platform_inw
348# define platform_inw __ia64_inw
349#endif
350#ifndef platform_inl
351# define platform_inl __ia64_inl
352#endif
353#ifndef platform_outb
354# define platform_outb __ia64_outb
355#endif
356#ifndef platform_outw
357# define platform_outw __ia64_outw
358#endif
359#ifndef platform_outl
360# define platform_outl __ia64_outl
361#endif
362#ifndef platform_mmiowb
363# define platform_mmiowb __ia64_mmiowb
364#endif
365#ifndef platform_readb
366# define platform_readb __ia64_readb
367#endif
368#ifndef platform_readw
369# define platform_readw __ia64_readw
370#endif
371#ifndef platform_readl
372# define platform_readl __ia64_readl
373#endif
374#ifndef platform_readq
375# define platform_readq __ia64_readq
376#endif
377#ifndef platform_readb_relaxed
378# define platform_readb_relaxed __ia64_readb_relaxed
379#endif
380#ifndef platform_readw_relaxed
381# define platform_readw_relaxed __ia64_readw_relaxed
382#endif
383#ifndef platform_readl_relaxed
384# define platform_readl_relaxed __ia64_readl_relaxed
385#endif
386#ifndef platform_readq_relaxed
387# define platform_readq_relaxed __ia64_readq_relaxed
388#endif
389
390#endif /* _ASM_IA64_MACHVEC_H */
diff --git a/include/asm-ia64/machvec_dig.h b/include/asm-ia64/machvec_dig.h
new file mode 100644
index 000000000000..4dc8522c974f
--- /dev/null
+++ b/include/asm-ia64/machvec_dig.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_IA64_MACHVEC_DIG_h
2#define _ASM_IA64_MACHVEC_DIG_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_irq_init_t dig_irq_init;
6
7/*
8 * This stuff has dual use!
9 *
10 * For a generic kernel, the macros are used to initialize the
11 * platform's machvec structure. When compiling a non-generic kernel,
12 * the macros are used directly.
13 */
14#define platform_name "dig"
15#define platform_setup dig_setup
16#define platform_irq_init dig_irq_init
17
18#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/include/asm-ia64/machvec_hpsim.h b/include/asm-ia64/machvec_hpsim.h
new file mode 100644
index 000000000000..cf72fc87fdfe
--- /dev/null
+++ b/include/asm-ia64/machvec_hpsim.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_IA64_MACHVEC_HPSIM_h
2#define _ASM_IA64_MACHVEC_HPSIM_h
3
4extern ia64_mv_setup_t hpsim_setup;
5extern ia64_mv_irq_init_t hpsim_irq_init;
6
7/*
8 * This stuff has dual use!
9 *
10 * For a generic kernel, the macros are used to initialize the
11 * platform's machvec structure. When compiling a non-generic kernel,
12 * the macros are used directly.
13 */
14#define platform_name "hpsim"
15#define platform_setup hpsim_setup
16#define platform_irq_init hpsim_irq_init
17
18#endif /* _ASM_IA64_MACHVEC_HPSIM_h */
diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h
new file mode 100644
index 000000000000..daafe504c5f4
--- /dev/null
+++ b/include/asm-ia64/machvec_hpzx1.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_IA64_MACHVEC_HPZX1_h
2#define _ASM_IA64_MACHVEC_HPZX1_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_setup_t sba_setup;
6extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
7extern ia64_mv_dma_free_coherent sba_free_coherent;
8extern ia64_mv_dma_map_single sba_map_single;
9extern ia64_mv_dma_unmap_single sba_unmap_single;
10extern ia64_mv_dma_map_sg sba_map_sg;
11extern ia64_mv_dma_unmap_sg sba_unmap_sg;
12extern ia64_mv_dma_supported sba_dma_supported;
13extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
14
15/*
16 * This stuff has dual use!
17 *
18 * For a generic kernel, the macros are used to initialize the
19 * platform's machvec structure. When compiling a non-generic kernel,
20 * the macros are used directly.
21 */
22#define platform_name "hpzx1"
23#define platform_setup sba_setup
24#define platform_dma_init machvec_noop
25#define platform_dma_alloc_coherent sba_alloc_coherent
26#define platform_dma_free_coherent sba_free_coherent
27#define platform_dma_map_single sba_map_single
28#define platform_dma_unmap_single sba_unmap_single
29#define platform_dma_map_sg sba_map_sg
30#define platform_dma_unmap_sg sba_unmap_sg
31#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
32#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
33#define platform_dma_sync_single_for_device machvec_dma_sync_single
34#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
35#define platform_dma_supported sba_dma_supported
36#define platform_dma_mapping_error sba_dma_mapping_error
37
38#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h
new file mode 100644
index 000000000000..9924b1b00a6c
--- /dev/null
+++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h
@@ -0,0 +1,43 @@
1#ifndef _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
2#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_dma_init hwsw_init;
6extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent;
7extern ia64_mv_dma_free_coherent hwsw_free_coherent;
8extern ia64_mv_dma_map_single hwsw_map_single;
9extern ia64_mv_dma_unmap_single hwsw_unmap_single;
10extern ia64_mv_dma_map_sg hwsw_map_sg;
11extern ia64_mv_dma_unmap_sg hwsw_unmap_sg;
12extern ia64_mv_dma_supported hwsw_dma_supported;
13extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error;
14extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu;
15extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu;
16extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device;
17extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
18
19/*
20 * This stuff has dual use!
21 *
22 * For a generic kernel, the macros are used to initialize the
23 * platform's machvec structure. When compiling a non-generic kernel,
24 * the macros are used directly.
25 */
26#define platform_name "hpzx1_swiotlb"
27
28#define platform_setup dig_setup
29#define platform_dma_init hwsw_init
30#define platform_dma_alloc_coherent hwsw_alloc_coherent
31#define platform_dma_free_coherent hwsw_free_coherent
32#define platform_dma_map_single hwsw_map_single
33#define platform_dma_unmap_single hwsw_unmap_single
34#define platform_dma_map_sg hwsw_map_sg
35#define platform_dma_unmap_sg hwsw_unmap_sg
36#define platform_dma_supported hwsw_dma_supported
37#define platform_dma_mapping_error hwsw_dma_mapping_error
38#define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu
39#define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu
40#define platform_dma_sync_single_for_device hwsw_sync_single_for_device
41#define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device
42
43#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/include/asm-ia64/machvec_init.h b/include/asm-ia64/machvec_init.h
new file mode 100644
index 000000000000..2d36f6840f0b
--- /dev/null
+++ b/include/asm-ia64/machvec_init.h
@@ -0,0 +1,32 @@
1#include <asm/machvec.h>
2
3extern ia64_mv_send_ipi_t ia64_send_ipi;
4extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
5extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
6extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
7extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
8extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
9
10extern ia64_mv_inb_t __ia64_inb;
11extern ia64_mv_inw_t __ia64_inw;
12extern ia64_mv_inl_t __ia64_inl;
13extern ia64_mv_outb_t __ia64_outb;
14extern ia64_mv_outw_t __ia64_outw;
15extern ia64_mv_outl_t __ia64_outl;
16extern ia64_mv_mmiowb_t __ia64_mmiowb;
17extern ia64_mv_readb_t __ia64_readb;
18extern ia64_mv_readw_t __ia64_readw;
19extern ia64_mv_readl_t __ia64_readl;
20extern ia64_mv_readq_t __ia64_readq;
21extern ia64_mv_readb_t __ia64_readb_relaxed;
22extern ia64_mv_readw_t __ia64_readw_relaxed;
23extern ia64_mv_readl_t __ia64_readl_relaxed;
24extern ia64_mv_readq_t __ia64_readq_relaxed;
25
26#define MACHVEC_HELPER(name) \
27 struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
28 = MACHVEC_INIT(name);
29
30#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
31
32MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
new file mode 100644
index 000000000000..e1b6cd63f49e
--- /dev/null
+++ b/include/asm-ia64/machvec_sn2.h
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public
20 * License along with this program; if not, write the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
31 */
32
33#ifndef _ASM_IA64_MACHVEC_SN2_H
34#define _ASM_IA64_MACHVEC_SN2_H
35
36extern ia64_mv_setup_t sn_setup;
37extern ia64_mv_cpu_init_t sn_cpu_init;
38extern ia64_mv_irq_init_t sn_irq_init;
39extern ia64_mv_send_ipi_t sn2_send_IPI;
40extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
41extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
42extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
43extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
44extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
45extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
46extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
47extern ia64_mv_inb_t __sn_inb;
48extern ia64_mv_inw_t __sn_inw;
49extern ia64_mv_inl_t __sn_inl;
50extern ia64_mv_outb_t __sn_outb;
51extern ia64_mv_outw_t __sn_outw;
52extern ia64_mv_outl_t __sn_outl;
53extern ia64_mv_mmiowb_t __sn_mmiowb;
54extern ia64_mv_readb_t __sn_readb;
55extern ia64_mv_readw_t __sn_readw;
56extern ia64_mv_readl_t __sn_readl;
57extern ia64_mv_readq_t __sn_readq;
58extern ia64_mv_readb_t __sn_readb_relaxed;
59extern ia64_mv_readw_t __sn_readw_relaxed;
60extern ia64_mv_readl_t __sn_readl_relaxed;
61extern ia64_mv_readq_t __sn_readq_relaxed;
62extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
63extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
64extern ia64_mv_dma_map_single sn_dma_map_single;
65extern ia64_mv_dma_unmap_single sn_dma_unmap_single;
66extern ia64_mv_dma_map_sg sn_dma_map_sg;
67extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg;
68extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
69extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu;
70extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
71extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
72extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
73extern ia64_mv_dma_supported sn_dma_supported;
74
75/*
76 * This stuff has dual use!
77 *
78 * For a generic kernel, the macros are used to initialize the
79 * platform's machvec structure. When compiling a non-generic kernel,
80 * the macros are used directly.
81 */
82#define platform_name "sn2"
83#define platform_setup sn_setup
84#define platform_cpu_init sn_cpu_init
85#define platform_irq_init sn_irq_init
86#define platform_send_ipi sn2_send_IPI
87#define platform_timer_interrupt sn_timer_interrupt
88#define platform_global_tlb_purge sn2_global_tlb_purge
89#define platform_tlb_migrate_finish sn_tlb_migrate_finish
90#define platform_pci_fixup sn_pci_fixup
91#define platform_inb __sn_inb
92#define platform_inw __sn_inw
93#define platform_inl __sn_inl
94#define platform_outb __sn_outb
95#define platform_outw __sn_outw
96#define platform_outl __sn_outl
97#define platform_mmiowb __sn_mmiowb
98#define platform_readb __sn_readb
99#define platform_readw __sn_readw
100#define platform_readl __sn_readl
101#define platform_readq __sn_readq
102#define platform_readb_relaxed __sn_readb_relaxed
103#define platform_readw_relaxed __sn_readw_relaxed
104#define platform_readl_relaxed __sn_readl_relaxed
105#define platform_readq_relaxed __sn_readq_relaxed
106#define platform_local_vector_to_irq sn_local_vector_to_irq
107#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
108#define platform_pci_legacy_read sn_pci_legacy_read
109#define platform_pci_legacy_write sn_pci_legacy_write
110#define platform_dma_init machvec_noop
111#define platform_dma_alloc_coherent sn_dma_alloc_coherent
112#define platform_dma_free_coherent sn_dma_free_coherent
113#define platform_dma_map_single sn_dma_map_single
114#define platform_dma_unmap_single sn_dma_unmap_single
115#define platform_dma_map_sg sn_dma_map_sg
116#define platform_dma_unmap_sg sn_dma_unmap_sg
117#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
118#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu
119#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
120#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
121#define platform_dma_mapping_error sn_dma_mapping_error
122#define platform_dma_supported sn_dma_supported
123
124#include <asm/sn/io.h>
125
126#endif /* _ASM_IA64_MACHVEC_SN2_H */
diff --git a/include/asm-ia64/mc146818rtc.h b/include/asm-ia64/mc146818rtc.h
new file mode 100644
index 000000000000..407787a237ba
--- /dev/null
+++ b/include/asm-ia64/mc146818rtc.h
@@ -0,0 +1,10 @@
#ifndef _ASM_IA64_MC146818RTC_H
#define _ASM_IA64_MC146818RTC_H

/*
 * Machine dependent access functions for RTC registers.
 */

/* empty include file to satisfy the include in genrtc.c */

#endif /* _ASM_IA64_MC146818RTC_H */
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
new file mode 100644
index 000000000000..149ad0118455
--- /dev/null
+++ b/include/asm-ia64/mca.h
@@ -0,0 +1,132 @@
1/*
2 * File: mca.h
3 * Purpose: Machine check handling specific defines
4 *
5 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
6 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
7 * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
8 * Copyright (C) Russ Anderson (rja@sgi.com)
9 */
10
11#ifndef _ASM_IA64_MCA_H
12#define _ASM_IA64_MCA_H
13
14#define IA64_MCA_STACK_SIZE 8192
15
16#if !defined(__ASSEMBLY__)
17
18#include <linux/interrupt.h>
19#include <linux/types.h>
20
21#include <asm/param.h>
22#include <asm/sal.h>
23#include <asm/processor.h>
24#include <asm/mca_asm.h>
25
26#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
27
28typedef struct ia64_fptr {
29 unsigned long fp;
30 unsigned long gp;
31} ia64_fptr_t;
32
33typedef union cmcv_reg_u {
34 u64 cmcv_regval;
35 struct {
36 u64 cmcr_vector : 8;
37 u64 cmcr_reserved1 : 4;
38 u64 cmcr_ignored1 : 1;
39 u64 cmcr_reserved2 : 3;
40 u64 cmcr_mask : 1;
41 u64 cmcr_ignored2 : 47;
42 } cmcv_reg_s;
43
44} cmcv_reg_t;
45
46#define cmcv_mask cmcv_reg_s.cmcr_mask
47#define cmcv_vector cmcv_reg_s.cmcr_vector
48
49enum {
50 IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0,
51 IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1
52};
53
54/* Information maintained by the MC infrastructure */
55typedef struct ia64_mc_info_s {
56 u64 imi_mca_handler;
57 size_t imi_mca_handler_size;
58 u64 imi_monarch_init_handler;
59 size_t imi_monarch_init_handler_size;
60 u64 imi_slave_init_handler;
61 size_t imi_slave_init_handler_size;
62 u8 imi_rendez_checkin[NR_CPUS];
63
64} ia64_mc_info_t;
65
66typedef struct ia64_mca_sal_to_os_state_s {
67 u64 imsto_os_gp; /* GP of the os registered with the SAL */
68 u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */
69 u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */
70 u64 imsto_sal_gp; /* GP of the SAL - physical */
71 u64 imsto_rendez_state; /* Rendez state information */
72 u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going
73 * back to SAL from OS after MCA handling.
74 */
75 u64 pal_min_state; /* from PAL in r17 */
76 u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */
77} ia64_mca_sal_to_os_state_t;
78
79enum {
80 IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */
81 IA64_MCA_WARM_BOOT = -1, /* Warm boot of the system need from SAL */
82 IA64_MCA_COLD_BOOT = -2, /* Cold boot of the system need from SAL */
83 IA64_MCA_HALT = -3 /* System to be halted by SAL */
84};
85
86enum {
87 IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */
88 IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */
89};
90
91typedef struct ia64_mca_os_to_sal_state_s {
92 u64 imots_os_status; /* OS status to SAL as to what happened
93 * with the MCA handling.
94 */
95 u64 imots_sal_gp; /* GP of the SAL - physical */
96 u64 imots_context; /* 0 if return to same context
97 1 if return to new context */
98 u64 *imots_new_min_state; /* Pointer to structure containing
99 * new values of registers in the min state
100 * save area.
101 */
102 u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going
103 * back to SAL from OS after MCA handling.
104 */
105} ia64_mca_os_to_sal_state_t;
106
107/* Per-CPU MCA state that is too big for normal per-CPU variables. */
108
109struct ia64_mca_cpu {
110 u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */
111 u64 proc_state_dump[512];
112 u64 stackframe[32];
113 u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */
114 u64 init_stack[KERNEL_STACK_SIZE/8];
115} __attribute__ ((aligned(16)));
116
117/* Array of physical addresses of each CPU's MCA area. */
118extern unsigned long __per_cpu_mca[NR_CPUS];
119
120extern void ia64_mca_init(void);
121extern void ia64_mca_cpu_init(void *);
122extern void ia64_os_mca_dispatch(void);
123extern void ia64_os_mca_dispatch_end(void);
124extern void ia64_mca_ucmc_handler(void);
125extern void ia64_monarch_init_handler(void);
126extern void ia64_slave_init_handler(void);
127extern void ia64_mca_cmc_vector_setup(void);
128extern int ia64_reg_MCA_extension(void*);
129extern void ia64_unreg_MCA_extension(void);
130
131#endif /* !__ASSEMBLY__ */
132#endif /* _ASM_IA64_MCA_H */
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
new file mode 100644
index 000000000000..836953e0f91f
--- /dev/null
+++ b/include/asm-ia64/mca_asm.h
@@ -0,0 +1,312 @@
/*
 * File:	mca_asm.h
 *
 * Assembly-language macros used by the ia64 machine-check (MCA) entry
 * paths: physical/virtual mode switching and RSE context save/restore.
 *
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
 * Copyright (C) 2000 Hewlett-Packard Co.
 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
 */
#ifndef _ASM_IA64_MCA_ASM_H
#define _ASM_IA64_MCA_ASM_H

/* Bit positions within the ia64 processor status register (psr). */
#define PSR_IC		13
#define PSR_I		14
#define	PSR_DT		17
#define PSR_RT		27
#define PSR_MC		35
#define PSR_IT		36
#define PSR_BN		44

/*
 * This macro converts a instruction virtual address to a physical address
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define INST_VA_TO_PA(addr)							\
	dep	addr	= 0, addr, 61, 3
/*
 * This macro converts a data virtual address to a physical address
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define DATA_VA_TO_PA(addr)							\
	tpa	addr	= addr
/*
 * This macro converts a data physical address to a virtual address
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Put 0x7 in bits 61 thru 63.
 */
#define DATA_PA_TO_VA(addr,temp)						\
	mov	temp	= 0x7	;;						\
	dep	addr	= temp, addr, 61, 3

/* Load into REG the physical address of this CPU's copy of per-CPU VAR. */
#define GET_THIS_PADDR(reg, var)		\
	mov	reg = IA64_KR(PER_CPU_DATA);;	\
        addl	reg = THIS_CPU(var), reg

/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in physical mode with all the address
 * translations turned off.
 *	1.	Save the current psr
 *	2.	Make sure that all the upper 32 bits are off
 *
 *	3.	Clear the interrupt enable and interrupt state collection bits
 *		in the psr before updating the ipsr and iip.
 *
 *	4.	Turn off the instruction, data and rse translation bits of the psr
 *		and store the new value into ipsr
 *		Also make sure that the interrupts are disabled.
 *		Ensure that we are in little endian mode.
 *		[psr.{rt, it, dt, i, be} = 0]
 *
 *	5.	Get the physical address corresponding to the virtual address
 *		of the next instruction bundle and put it in iip.
 *		(Using magic numbers 24 and 40 in the deposit instruction since
 *		 the IA64_SDK code directly maps to lower 24bits as physical address
 *		 from a virtual address).
 *
 *	6.	Do an rfi to move the values from ipsr to psr and iip to ip.
 */
#define  PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)				\
	mov	old_psr = psr;								\
	;;										\
	dep	old_psr = 0, old_psr, 32, 32;						\
											\
	mov	ar.rsc = 0 ;								\
	;;										\
	srlz.d;										\
	mov	temp2 = ar.bspstore;							\
	;;										\
	DATA_VA_TO_PA(temp2);								\
	;;										\
	mov	temp1 = ar.rnat;							\
	;;										\
	mov	ar.bspstore = temp2;							\
	;;										\
	mov	ar.rnat = temp1;							\
	mov	temp1 = psr;								\
	mov	temp2 = psr;								\
	;;										\
											\
	dep	temp2 = 0, temp2, PSR_IC, 2;						\
	;;										\
	mov	psr.l = temp2;								\
	;;										\
	srlz.d;										\
	dep	temp1 = 0, temp1, 32, 32;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_IT, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_DT, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_RT, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_I, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_IC, 1;						\
	;;										\
	dep	temp1 = -1, temp1, PSR_MC, 1;						\
	;;										\
	mov	cr.ipsr = temp1;							\
	;;										\
	LOAD_PHYSICAL(p0, temp2, start_addr);						\
	;;										\
	mov	cr.iip = temp2;								\
	mov	cr.ifs = r0;								\
	DATA_VA_TO_PA(sp);								\
	DATA_VA_TO_PA(gp);								\
	;;										\
	srlz.i;										\
	;;										\
	nop	1;									\
	nop	2;									\
	nop	1;									\
	nop	2;									\
	rfi;										\
	;;

/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in virtual mode with all the address
 * translations turned on.
 *	1.	Get the old saved psr
 *
 *	2.	Clear the interrupt state collection bit in the current psr.
 *
 *	3.	Set the instruction translation bit back in the old psr
 *		Note we have to do this since we are right now saving only the
 *		lower 32-bits of old psr.(Also the old psr has the data and
 *		rse translation bits on)
 *
 *	4.	Set ipsr to this old_psr with "it" bit set and "bn" = 1.
 *
 *	5.	Reset the current thread pointer (r13).
 *
 *	6.	Set iip to the virtual address of the next instruction bundle.
 *
 *	7.	Do an rfi to move ipsr to psr and iip to ip.
 */

#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
	mov	temp2 = psr;					\
	;;							\
	mov	old_psr = temp2;				\
	;;							\
	dep	temp2 = 0, temp2, PSR_IC, 2;			\
	;;							\
	mov	psr.l = temp2;					\
	mov	ar.rsc = 0;					\
	;;							\
	srlz.d;							\
	mov	r13 = ar.k6;					\
	mov	temp2 = ar.bspstore;				\
	;;							\
	DATA_PA_TO_VA(temp2,temp1);				\
	;;							\
	mov	temp1 = ar.rnat;				\
	;;							\
	mov	ar.bspstore = temp2;				\
	;;							\
	mov	ar.rnat = temp1;				\
	;;							\
	mov	temp1 = old_psr;				\
	;;							\
	mov	temp2 = 1;					\
	;;							\
	dep	temp1 = temp2, temp1, PSR_IC, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_IT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_DT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_RT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_BN, 1;		\
	;;							\
								\
	mov     cr.ipsr = temp1;				\
	movl	temp2 = start_addr;				\
	;;							\
	mov	cr.iip = temp2;					\
	;;							\
	DATA_PA_TO_VA(sp, temp1);				\
	DATA_PA_TO_VA(gp, temp2);				\
	srlz.i;							\
	;;							\
	nop	1;						\
	nop	2;						\
	nop	1;						\
	rfi							\
	;;

/*
 * The following offsets capture the order in which the
 * RSE related registers from the old context are
 * saved onto the new stack frame.
 *
 *	+-----------------------+
 *	|NDIRTY [BSP - BSPSTORE]|
 *	+-----------------------+
 *	|	RNAT		|
 *	+-----------------------+
 *	|	BSPSTORE	|
 *	+-----------------------+
 *	|	IFS		|
 *	+-----------------------+
 *	|	PFS		|
 *	+-----------------------+
 *	|	RSC		|
 *	+-----------------------+ <-------- Bottom of new stack frame
 */
#define  rse_rsc_offset		0
#define  rse_pfs_offset		(rse_rsc_offset+0x08)
#define  rse_ifs_offset		(rse_pfs_offset+0x08)
#define  rse_bspstore_offset	(rse_ifs_offset+0x08)
#define  rse_rnat_offset	(rse_bspstore_offset+0x08)
#define  rse_ndirty_offset	(rse_rnat_offset+0x08)

/*
 * rse_switch_context
 *
 *	1. Save old RSC onto the new stack frame
 *	2. Save PFS onto new stack frame
 *	3. Cover the old frame and start a new frame.
 *	4. Save IFS onto new stack frame
 *	5. Save the old BSPSTORE on the new stack frame
 *	6. Save the old RNAT on the new stack frame
 *	7. Write BSPSTORE with the new backing store pointer
 *	8. Read and save the new BSP to calculate the #dirty registers
 * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
 */
#define rse_switch_context(temp,p_stackframe,p_bspstore)			\
	;;									\
	mov     temp=ar.rsc;;							\
	st8     [p_stackframe]=temp,8;;					\
	mov     temp=ar.pfs;;							\
	st8     [p_stackframe]=temp,8;						\
	cover ;;								\
	mov     temp=cr.ifs;;							\
	st8     [p_stackframe]=temp,8;;						\
	mov     temp=ar.bspstore;;						\
	st8     [p_stackframe]=temp,8;;					\
	mov     temp=ar.rnat;;							\
	st8     [p_stackframe]=temp,8;						\
	mov     ar.bspstore=p_bspstore;;					\
	mov     temp=ar.bsp;;							\
	sub     temp=temp,p_bspstore;;						\
	st8     [p_stackframe]=temp,8;;

/*
 * rse_return_context
 *	1. Allocate a zero-sized frame
 *	2. Store the number of dirty registers RSC.loadrs field
 *	3. Issue a loadrs to insure that any registers from the interrupted
 *	   context which were saved on the new stack frame have been loaded
 *	   back into the stacked registers
 *	4. Restore BSPSTORE
 *	5. Restore RNAT
 *	6. Restore PFS
 *	7. Restore IFS
 *	8. Restore RSC
 *	9. Issue an RFI
 */
#define rse_return_context(psr_mask_reg,temp,p_stackframe)			\
	;;									\
	alloc   temp=ar.pfs,0,0,0,0;						\
	add     p_stackframe=rse_ndirty_offset,p_stackframe;;			\
	ld8     temp=[p_stackframe];;						\
	shl     temp=temp,16;;							\
	mov     ar.rsc=temp;;							\
	loadrs;;								\
	add     p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
	ld8     temp=[p_stackframe];;						\
	mov     ar.bspstore=temp;;						\
	add     p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
	ld8     temp=[p_stackframe];;						\
	mov     ar.rnat=temp;;							\
	add     p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;	\
	ld8     temp=[p_stackframe];;						\
	mov     ar.pfs=temp;;							\
	add     p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;	\
	ld8     temp=[p_stackframe];;						\
	mov     cr.ifs=temp;;							\
	add     p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;	\
	ld8     temp=[p_stackframe];;						\
	mov     ar.rsc=temp ;							\
	mov     temp=psr;;							\
	or      temp=temp,psr_mask_reg;;					\
	mov     cr.ipsr=temp;;							\
	mov     temp=ip;;							\
	add     temp=0x30,temp;;						\
	mov     cr.iip=temp;;							\
	srlz.i;;								\
	rfi;;

#endif /* _ASM_IA64_MCA_ASM_H */
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
new file mode 100644
index 000000000000..1590dc65b30b
--- /dev/null
+++ b/include/asm-ia64/meminit.h
@@ -0,0 +1,60 @@
1#ifndef meminit_h
2#define meminit_h
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 */
9
10#include <linux/config.h>
11
12/*
13 * Entries defined so far:
14 * - boot param structure itself
15 * - memory map
16 * - initrd (optional)
17 * - command line string
18 * - kernel code & data
19 *
20 * More could be added if necessary
21 */
22#define IA64_MAX_RSVD_REGIONS 5
23
24struct rsvd_region {
25 unsigned long start; /* virtual address of beginning of element */
26 unsigned long end; /* virtual address of end of element + 1 */
27};
28
29extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
30extern int num_rsvd_regions;
31
32extern void find_memory (void);
33extern void reserve_memory (void);
34extern void find_initrd (void);
35extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
36
37/*
38 * For rounding an address to the next IA64_GRANULE_SIZE or order
39 */
40#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
41#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
42#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
43
44#ifdef CONFIG_DISCONTIGMEM
45 extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
46#else
47# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
48#endif
49
50#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
51
52#ifdef CONFIG_VIRTUAL_MEM_MAP
53# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
54 extern unsigned long vmalloc_end;
55 extern struct page *vmem_map;
56 extern int find_largest_hole (u64 start, u64 end, void *arg);
57 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
58#endif
59
60#endif /* meminit_h */
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
new file mode 100644
index 000000000000..1c0a73af1461
--- /dev/null
+++ b/include/asm-ia64/mman.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_IA64_MMAN_H
2#define _ASM_IA64_MMAN_H
3
4/*
5 * Based on <asm-i386/mman.h>.
6 *
7 * Modified 1998-2000, 2002
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */
10
11#define PROT_READ 0x1 /* page can be read */
12#define PROT_WRITE 0x2 /* page can be written */
13#define PROT_EXEC 0x4 /* page can be executed */
14#define PROT_SEM 0x8 /* page may be used for atomic ops */
15#define PROT_NONE 0x0 /* page can not be accessed */
16#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
17#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
18
19#define MAP_SHARED 0x01 /* Share changes */
20#define MAP_PRIVATE 0x02 /* Changes are private */
21#define MAP_TYPE 0x0f /* Mask for type of mapping */
22#define MAP_FIXED 0x10 /* Interpret addr exactly */
23#define MAP_ANONYMOUS 0x20 /* don't use a file */
24
25#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
26#define MAP_GROWSUP 0x00200 /* register stack-like segment */
27#define MAP_DENYWRITE 0x00800 /* ETXTBSY */
28#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */
29#define MAP_LOCKED 0x02000 /* pages are locked */
30#define MAP_NORESERVE 0x04000 /* don't check for reservations */
31#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
32#define MAP_NONBLOCK 0x10000 /* do not block on IO */
33
34#define MS_ASYNC 1 /* sync memory asynchronously */
35#define MS_INVALIDATE 2 /* invalidate the caches */
36#define MS_SYNC 4 /* synchronous memory sync */
37
38#define MCL_CURRENT 1 /* lock all current mappings */
39#define MCL_FUTURE 2 /* lock all future mappings */
40
41#define MADV_NORMAL 0x0 /* default page-in behavior */
42#define MADV_RANDOM 0x1 /* page-in minimum required */
43#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
44#define MADV_WILLNEED 0x3 /* pre-fault pages */
45#define MADV_DONTNEED 0x4 /* discard these pages */
46
47/* compatibility flags */
48#define MAP_ANON MAP_ANONYMOUS
49#define MAP_FILE 0
50
51#endif /* _ASM_IA64_MMAN_H */
diff --git a/include/asm-ia64/mmu.h b/include/asm-ia64/mmu.h
new file mode 100644
index 000000000000..ae1525352a25
--- /dev/null
+++ b/include/asm-ia64/mmu.h
@@ -0,0 +1,11 @@
1#ifndef __MMU_H
2#define __MMU_H
3
4/*
5 * Type for a context number. We declare it volatile to ensure proper ordering when it's
6 * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
7 * init_new_context()).
8 */
9typedef volatile unsigned long mm_context_t;
10
11#endif
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
new file mode 100644
index 000000000000..0096e7e05012
--- /dev/null
+++ b/include/asm-ia64/mmu_context.h
@@ -0,0 +1,170 @@
1#ifndef _ASM_IA64_MMU_CONTEXT_H
2#define _ASM_IA64_MMU_CONTEXT_H
3
4/*
5 * Copyright (C) 1998-2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9/*
10 * Routines to manage the allocation of task context numbers. Task context numbers are
11 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
12 * Context numbers are implemented using ia-64 region ids. Since the IA-64 TLB does not
13 * consider the region number when performing a TLB lookup, we need to assign a unique
14 * region id to each region in a process. We use the least significant three bits in a
15 * region id for this purpose.
16 */
17
18#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */
19
20#define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61))
21
22# ifndef __ASSEMBLY__
23
24#include <linux/compiler.h>
25#include <linux/percpu.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28
29#include <asm/processor.h>
30
31struct ia64_ctx {
32 spinlock_t lock;
33 unsigned int next; /* next context number to use */
34 unsigned int limit; /* next >= limit => must call wrap_mmu_context() */
35 unsigned int max_ctx; /* max. context value supported by all CPUs */
36};
37
38extern struct ia64_ctx ia64_ctx;
39DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
40
41extern void wrap_mmu_context (struct mm_struct *mm);
42
43static inline void
44enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
45{
46}
47
48/*
49 * When the context counter wraps around all TLBs need to be flushed because an old
50 * context number might have been reused. This is signalled by the ia64_need_tlb_flush
51 * per-CPU variable, which is checked in the routine below. Called by activate_mm().
52 * <efocht@ess.nec.de>
53 */
54static inline void
55delayed_tlb_flush (void)
56{
57 extern void local_flush_tlb_all (void);
58
59 if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
60 local_flush_tlb_all();
61 __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
62 }
63}
64
65static inline mm_context_t
66get_mmu_context (struct mm_struct *mm)
67{
68 unsigned long flags;
69 mm_context_t context = mm->context;
70
71 if (context)
72 return context;
73
74 spin_lock_irqsave(&ia64_ctx.lock, flags);
75 {
76 /* re-check, now that we've got the lock: */
77 context = mm->context;
78 if (context == 0) {
79 cpus_clear(mm->cpu_vm_mask);
80 if (ia64_ctx.next >= ia64_ctx.limit)
81 wrap_mmu_context(mm);
82 mm->context = context = ia64_ctx.next++;
83 }
84 }
85 spin_unlock_irqrestore(&ia64_ctx.lock, flags);
86 return context;
87}
88
89/*
90 * Initialize context number to some sane value. MM is guaranteed to be a brand-new
91 * address-space, so no TLB flushing is needed, ever.
92 */
93static inline int
94init_new_context (struct task_struct *p, struct mm_struct *mm)
95{
96 mm->context = 0;
97 return 0;
98}
99
100static inline void
101destroy_context (struct mm_struct *mm)
102{
103 /* Nothing to do. */
104}
105
106static inline void
107reload_context (mm_context_t context)
108{
109 unsigned long rid;
110 unsigned long rid_incr = 0;
111 unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
112
113 old_rr4 = ia64_get_rr(0x8000000000000000UL);
114 rid = context << 3; /* make space for encoding the region number */
115 rid_incr = 1 << 8;
116
117 /* encode the region id, preferred page size, and VHPT enable bit: */
118 rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
119 rr1 = rr0 + 1*rid_incr;
120 rr2 = rr0 + 2*rid_incr;
121 rr3 = rr0 + 3*rid_incr;
122 rr4 = rr0 + 4*rid_incr;
123#ifdef CONFIG_HUGETLB_PAGE
124 rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
125#endif
126
127 ia64_set_rr(0x0000000000000000UL, rr0);
128 ia64_set_rr(0x2000000000000000UL, rr1);
129 ia64_set_rr(0x4000000000000000UL, rr2);
130 ia64_set_rr(0x6000000000000000UL, rr3);
131 ia64_set_rr(0x8000000000000000UL, rr4);
132 ia64_srlz_i(); /* srlz.i implies srlz.d */
133}
134
135static inline void
136activate_context (struct mm_struct *mm)
137{
138 mm_context_t context;
139
140 do {
141 context = get_mmu_context(mm);
142 if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
143 cpu_set(smp_processor_id(), mm->cpu_vm_mask);
144 reload_context(context);
145 /* in the unlikely event of a TLB-flush by another thread, redo the load: */
146 } while (unlikely(context != mm->context));
147}
148
149#define deactivate_mm(tsk,mm) do { } while (0)
150
151/*
152 * Switch from address space PREV to address space NEXT.
153 */
154static inline void
155activate_mm (struct mm_struct *prev, struct mm_struct *next)
156{
157 delayed_tlb_flush();
158
159 /*
160 * We may get interrupts here, but that's OK because interrupt handlers cannot
161 * touch user-space.
162 */
163 ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
164 activate_context(next);
165}
166
167#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)
168
169# endif /* ! __ASSEMBLY__ */
170#endif /* _ASM_IA64_MMU_CONTEXT_H */
diff --git a/include/asm-ia64/mmzone.h b/include/asm-ia64/mmzone.h
new file mode 100644
index 000000000000..9491dacc89cf
--- /dev/null
+++ b/include/asm-ia64/mmzone.h
@@ -0,0 +1,32 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2000,2003 Silicon Graphics, Inc. All rights reserved.
7 * Copyright (c) 2002 NEC Corp.
8 * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
9 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
10 */
11#ifndef _ASM_IA64_MMZONE_H
12#define _ASM_IA64_MMZONE_H
13
14#include <linux/numa.h>
15#include <asm/page.h>
16#include <asm/meminit.h>
17
18#ifdef CONFIG_DISCONTIGMEM
19
20#ifdef CONFIG_IA64_DIG /* DIG systems are small */
21# define MAX_PHYSNODE_ID 8
22# define NR_NODE_MEMBLKS (MAX_NUMNODES * 8)
23#else /* sn2 is the biggest case, so we use that if !DIG */
24# define MAX_PHYSNODE_ID 2048
25# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
26#endif
27
28#else /* CONFIG_DISCONTIGMEM */
29# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
30#endif /* CONFIG_DISCONTIGMEM */
31
32#endif /* _ASM_IA64_MMZONE_H */
diff --git a/include/asm-ia64/module.h b/include/asm-ia64/module.h
new file mode 100644
index 000000000000..85c82bd819f2
--- /dev/null
+++ b/include/asm-ia64/module.h
@@ -0,0 +1,35 @@
1#ifndef _ASM_IA64_MODULE_H
2#define _ASM_IA64_MODULE_H
3
4/*
5 * IA-64-specific support for kernel module loader.
6 *
7 * Copyright (C) 2003 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11struct elf64_shdr; /* forward declration */
12
13struct mod_arch_specific {
14 struct elf64_shdr *core_plt; /* core PLT section */
15 struct elf64_shdr *init_plt; /* init PLT section */
16 struct elf64_shdr *got; /* global offset table */
17 struct elf64_shdr *opd; /* official procedure descriptors */
18 struct elf64_shdr *unwind; /* unwind-table section */
19 unsigned long gp; /* global-pointer for module */
20
21 void *core_unw_table; /* core unwind-table cookie returned by unwinder */
22 void *init_unw_table; /* init unwind-table cookie returned by unwinder */
23 unsigned int next_got_entry; /* index of next available got entry */
24};
25
26#define Elf_Shdr Elf64_Shdr
27#define Elf_Sym Elf64_Sym
28#define Elf_Ehdr Elf64_Ehdr
29
30#define MODULE_PROC_FAMILY "ia64"
31#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
32
33#define ARCH_SHF_SMALL SHF_IA_64_SHORT
34
35#endif /* _ASM_IA64_MODULE_H */
diff --git a/include/asm-ia64/msgbuf.h b/include/asm-ia64/msgbuf.h
new file mode 100644
index 000000000000..6c64c0d2aae1
--- /dev/null
+++ b/include/asm-ia64/msgbuf.h
@@ -0,0 +1,27 @@
1#ifndef _ASM_IA64_MSGBUF_H
2#define _ASM_IA64_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for IA-64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct msqid64_ds {
14 struct ipc64_perm msg_perm;
15 __kernel_time_t msg_stime; /* last msgsnd time */
16 __kernel_time_t msg_rtime; /* last msgrcv time */
17 __kernel_time_t msg_ctime; /* last change time */
18 unsigned long msg_cbytes; /* current number of bytes on queue */
19 unsigned long msg_qnum; /* number of messages in queue */
20 unsigned long msg_qbytes; /* max number of bytes on queue */
21 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
22 __kernel_pid_t msg_lrpid; /* last receive pid */
23 unsigned long __unused1;
24 unsigned long __unused2;
25};
26
27#endif /* _ASM_IA64_MSGBUF_H */
diff --git a/include/asm-ia64/msi.h b/include/asm-ia64/msi.h
new file mode 100644
index 000000000000..60f2137f9278
--- /dev/null
+++ b/include/asm-ia64/msi.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (C) 2003-2004 Intel
3 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
4 */
5
6#ifndef ASM_MSI_H
7#define ASM_MSI_H
8
9#define NR_VECTORS NR_IRQS
10#define FIRST_DEVICE_VECTOR IA64_FIRST_DEVICE_VECTOR
11#define LAST_DEVICE_VECTOR IA64_LAST_DEVICE_VECTOR
12static inline void set_intr_gate (int nr, void *func) {}
13#define IO_APIC_VECTOR(irq) (irq)
14#define ack_APIC_irq ia64_eoi
15#define cpu_mask_to_apicid(mask) cpu_physical_id(first_cpu(mask))
16#define MSI_DEST_MODE MSI_PHYSICAL_MODE
17#define MSI_TARGET_CPU ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
18#define MSI_TARGET_CPU_SHIFT 4
19
20#endif /* ASM_MSI_H */
diff --git a/include/asm-ia64/namei.h b/include/asm-ia64/namei.h
new file mode 100644
index 000000000000..78e768079083
--- /dev/null
+++ b/include/asm-ia64/namei.h
@@ -0,0 +1,25 @@
1#ifndef _ASM_IA64_NAMEI_H
2#define _ASM_IA64_NAMEI_H
3
4/*
5 * Modified 1998, 1999, 2001
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
7 */
8
9#include <asm/ptrace.h>
10#include <asm/system.h>
11
12#define EMUL_PREFIX_LINUX_IA32 "/emul/ia32-linux/"
13
14static inline char *
15__emul_prefix (void)
16{
17 switch (current->personality) {
18 case PER_LINUX32:
19 return EMUL_PREFIX_LINUX_IA32;
20 default:
21 return NULL;
22 }
23}
24
25#endif /* _ASM_IA64_NAMEI_H */
diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h
new file mode 100644
index 000000000000..6b0f3ed89b7e
--- /dev/null
+++ b/include/asm-ia64/nodedata.h
@@ -0,0 +1,52 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
7 * Copyright (c) 2002 NEC Corp.
8 * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
9 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
10 */
11#ifndef _ASM_IA64_NODEDATA_H
12#define _ASM_IA64_NODEDATA_H
13
14#include <linux/config.h>
15#include <linux/numa.h>
16
17#include <asm/percpu.h>
18#include <asm/mmzone.h>
19
20#ifdef CONFIG_DISCONTIGMEM
21
22/*
23 * Node Data. One of these structures is located on each node of a NUMA system.
24 */
25
26struct pglist_data;
27struct ia64_node_data {
28 short active_cpu_count;
29 short node;
30 struct pglist_data *pg_data_ptrs[MAX_NUMNODES];
31};
32
33
34/*
35 * Return a pointer to the node_data structure for the executing cpu.
36 */
37#define local_node_data (local_cpu_data->node_data)
38
39/*
40 * Given a node id, return a pointer to the pg_data_t for the node.
41 *
42 * NODE_DATA - should be used in all code not related to system
43 * initialization. It uses pernode data structures to minimize
44 * offnode memory references. However, these structure are not
45 * present during boot. This macro can be used once cpu_init
46 * completes.
47 */
48#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
49
50#endif /* CONFIG_DISCONTIGMEM */
51
52#endif /* _ASM_IA64_NODEDATA_H */
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
new file mode 100644
index 000000000000..3ae128fe0823
--- /dev/null
+++ b/include/asm-ia64/numa.h
@@ -0,0 +1,74 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * This file contains NUMA specific prototypes and definitions.
7 *
8 * 2002/08/05 Erich Focht <efocht@ess.nec.de>
9 *
10 */
11#ifndef _ASM_IA64_NUMA_H
12#define _ASM_IA64_NUMA_H
13
14#include <linux/config.h>
15
16#ifdef CONFIG_NUMA
17
18#include <linux/cache.h>
19#include <linux/cpumask.h>
20#include <linux/numa.h>
21#include <linux/smp.h>
22#include <linux/threads.h>
23
24#include <asm/mmzone.h>
25
26extern u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
27extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
28
29/* Stuff below this line could be architecture independent */
30
31extern int num_node_memblks; /* total number of memory chunks */
32
33/*
34 * List of node memory chunks. Filled when parsing SRAT table to
35 * obtain information about memory nodes.
36*/
37
38struct node_memblk_s {
39 unsigned long start_paddr;
40 unsigned long size;
41 int nid; /* which logical node contains this chunk? */
42 int bank; /* which mem bank on this node */
43};
44
45struct node_cpuid_s {
46 u16 phys_id; /* id << 8 | eid */
47 int nid; /* logical node containing this CPU */
48};
49
50extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
51extern struct node_cpuid_s node_cpuid[NR_CPUS];
52
53/*
54 * ACPI 2.0 SLIT (System Locality Information Table)
55 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
56 *
57 * This is a matrix with "distances" between nodes, they should be
58 * proportional to the memory access latency ratios.
59 */
60
61extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
62#define node_distance(from,to) (numa_slit[(from) * num_online_nodes() + (to)])
63
64extern int paddr_to_nid(unsigned long paddr);
65
66#define local_nodeid (cpu_to_node_map[smp_processor_id()])
67
68#else /* !CONFIG_NUMA */
69
70#define paddr_to_nid(addr) 0
71
72#endif /* CONFIG_NUMA */
73
74#endif /* _ASM_IA64_NUMA_H */
diff --git a/include/asm-ia64/numnodes.h b/include/asm-ia64/numnodes.h
new file mode 100644
index 000000000000..21cff4da5485
--- /dev/null
+++ b/include/asm-ia64/numnodes.h
@@ -0,0 +1,15 @@
1#ifndef _ASM_MAX_NUMNODES_H
2#define _ASM_MAX_NUMNODES_H
3
4#ifdef CONFIG_IA64_DIG
5/* Max 8 Nodes */
6#define NODES_SHIFT 3
7#elif defined(CONFIG_IA64_HP_ZX1) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
8/* Max 32 Nodes */
9#define NODES_SHIFT 5
10#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
11/* Max 256 Nodes */
12#define NODES_SHIFT 8
13#endif
14
15#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
new file mode 100644
index 000000000000..24aab801a8ca
--- /dev/null
+++ b/include/asm-ia64/page.h
@@ -0,0 +1,207 @@
1#ifndef _ASM_IA64_PAGE_H
2#define _ASM_IA64_PAGE_H
3/*
4 * Pagetable related stuff.
5 *
6 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 */
9
10#include <linux/config.h>
11
12#include <asm/intrinsics.h>
13#include <asm/types.h>
14
15/*
16 * PAGE_SHIFT determines the actual kernel page size.
17 */
18#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
19# define PAGE_SHIFT 12
20#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
21# define PAGE_SHIFT 13
22#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
23# define PAGE_SHIFT 14
24#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
25# define PAGE_SHIFT 16
26#else
27# error Unsupported page size!
28#endif
29
30#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
31#define PAGE_MASK (~(PAGE_SIZE - 1))
32#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
33
34#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
35#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
36
37#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
38
39#ifdef CONFIG_HUGETLB_PAGE
40# define REGION_HPAGE (4UL) /* note: this is hardcoded in reload_context()!*/
41# define REGION_SHIFT 61
42# define HPAGE_REGION_BASE (REGION_HPAGE << REGION_SHIFT)
43# define HPAGE_SHIFT hpage_shift
44# define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */
45# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
46# define HPAGE_MASK (~(HPAGE_SIZE - 1))
47
48# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
49# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
50#endif /* CONFIG_HUGETLB_PAGE */
51
52#ifdef __ASSEMBLY__
53# define __pa(x) ((x) - PAGE_OFFSET)
54# define __va(x) ((x) + PAGE_OFFSET)
55#else /* !__ASSEMBLY */
56# ifdef __KERNEL__
57# define STRICT_MM_TYPECHECKS
58
59extern void clear_page (void *page);
60extern void copy_page (void *to, void *from);
61
62/*
63 * clear_user_page() and copy_user_page() can't be inline functions because
64 * flush_dcache_page() can't be defined until later...
65 */
66#define clear_user_page(addr, vaddr, page) \
67do { \
68 clear_page(addr); \
69 flush_dcache_page(page); \
70} while (0)
71
72#define copy_user_page(to, from, vaddr, page) \
73do { \
74 copy_page((to), (from)); \
75 flush_dcache_page(page); \
76} while (0)
77
78
79#define alloc_zeroed_user_highpage(vma, vaddr) \
80({ \
81 struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
82 if (page) \
83 flush_dcache_page(page); \
84 page; \
85})
86
87#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
88
89#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
90
91#ifdef CONFIG_VIRTUAL_MEM_MAP
92extern int ia64_pfn_valid (unsigned long pfn);
93#else
94# define ia64_pfn_valid(pfn) 1
95#endif
96
97#ifndef CONFIG_DISCONTIGMEM
98# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
99# define page_to_pfn(page) ((unsigned long) (page - mem_map))
100# define pfn_to_page(pfn) (mem_map + (pfn))
101#else
102extern struct page *vmem_map;
103extern unsigned long max_low_pfn;
104# define pfn_valid(pfn) (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
105# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
106# define pfn_to_page(pfn) (vmem_map + (pfn))
107#endif
108
109#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
110#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
111
112typedef union ia64_va {
113 struct {
114 unsigned long off : 61; /* intra-region offset */
115 unsigned long reg : 3; /* region number */
116 } f;
117 unsigned long l;
118 void *p;
119} ia64_va;
120
121/*
122 * Note: These macros depend on the fact that PAGE_OFFSET has all
123 * region bits set to 1 and all other bits set to zero. They are
124 * expressed in this way to ensure they result in a single "dep"
125 * instruction.
126 */
127#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
128#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
129
130#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
131#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
132
133#define REGION_SIZE REGION_NUMBER(1)
134#define REGION_KERNEL 7
135
136#ifdef CONFIG_HUGETLB_PAGE
137# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
138 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
139# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
140# define is_hugepage_only_range(mm, addr, len) \
141 (REGION_NUMBER(addr) == REGION_HPAGE && \
142 REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
143extern unsigned int hpage_shift;
144#endif
145
146static __inline__ int
147get_order (unsigned long size)
148{
149 long double d = size - 1;
150 long order;
151
152 order = ia64_getf_exp(d);
153 order = order - PAGE_SHIFT - 0xffff + 1;
154 if (order < 0)
155 order = 0;
156 return order;
157}
158
159# endif /* __KERNEL__ */
160#endif /* !__ASSEMBLY__ */
161
162#ifdef STRICT_MM_TYPECHECKS
163 /*
164 * These are used to make use of C type-checking..
165 */
166 typedef struct { unsigned long pte; } pte_t;
167 typedef struct { unsigned long pmd; } pmd_t;
168 typedef struct { unsigned long pgd; } pgd_t;
169 typedef struct { unsigned long pgprot; } pgprot_t;
170
171# define pte_val(x) ((x).pte)
172# define pmd_val(x) ((x).pmd)
173# define pgd_val(x) ((x).pgd)
174# define pgprot_val(x) ((x).pgprot)
175
176# define __pte(x) ((pte_t) { (x) } )
177# define __pgprot(x) ((pgprot_t) { (x) } )
178
179#else /* !STRICT_MM_TYPECHECKS */
180 /*
181 * .. while these make it easier on the compiler
182 */
183# ifndef __ASSEMBLY__
184 typedef unsigned long pte_t;
185 typedef unsigned long pmd_t;
186 typedef unsigned long pgd_t;
187 typedef unsigned long pgprot_t;
188# endif
189
190# define pte_val(x) (x)
191# define pmd_val(x) (x)
192# define pgd_val(x) (x)
193# define pgprot_val(x) (x)
194
195# define __pte(x) (x)
196# define __pgd(x) (x)
197# define __pgprot(x) (x)
198#endif /* !STRICT_MM_TYPECHECKS */
199
200#define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000)
201
202#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
203 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
204 (((current->personality & READ_IMPLIES_EXEC) != 0) \
205 ? VM_EXEC : 0))
206
207#endif /* _ASM_IA64_PAGE_H */
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
new file mode 100644
index 000000000000..5dd477ffb88e
--- /dev/null
+++ b/include/asm-ia64/pal.h
@@ -0,0 +1,1564 @@
1#ifndef _ASM_IA64_PAL_H
2#define _ASM_IA64_PAL_H
3
4/*
5 * Processor Abstraction Layer definitions.
6 *
7 * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
8 * chapter 11 IA-64 Processor Abstraction Layer
9 *
10 * Copyright (C) 1998-2001 Hewlett-Packard Co
11 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * Stephane Eranian <eranian@hpl.hp.com>
13 * Copyright (C) 1999 VA Linux Systems
14 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
15 * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
16 *
17 * 99/10/01 davidm Make sure we pass zero for reserved parameters.
18 * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
19 * 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info
20 * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added
21 * 00/05/25 eranian Support for stack calls, and static physical calls
22 * 00/06/18 eranian Support for stacked physical calls
23 */
24
25/*
26 * Note that some of these calls use a static-register only calling
27 * convention which has nothing to do with the regular calling
28 * convention.
29 */
/* PAL procedure index numbers, passed as the first argument of a PAL call. */
#define PAL_CACHE_FLUSH		1	/* flush i/d cache */
#define PAL_CACHE_INFO		2	/* get detailed i/d cache info */
#define PAL_CACHE_INIT		3	/* initialize i/d cache */
#define PAL_CACHE_SUMMARY	4	/* get summary of cache hierarchy */
#define PAL_MEM_ATTRIB		5	/* list supported memory attributes */
#define PAL_PTCE_INFO		6	/* purge TLB info */
#define PAL_VM_INFO		7	/* return supported virtual memory features */
#define PAL_VM_SUMMARY		8	/* return summary on supported vm features */
#define PAL_BUS_GET_FEATURES	9	/* return processor bus interface features settings */
#define PAL_BUS_SET_FEATURES	10	/* set processor bus features */
#define PAL_DEBUG_INFO		11	/* get number of debug registers */
#define PAL_FIXED_ADDR		12	/* get fixed component of processor's directed address */
#define PAL_FREQ_BASE		13	/* base frequency of the platform */
#define PAL_FREQ_RATIOS		14	/* ratio of processor, bus and ITC frequency */
#define PAL_PERF_MON_INFO	15	/* return performance monitor info */
#define PAL_PLATFORM_ADDR	16	/* set processor interrupt block and IO port space addr */
#define PAL_PROC_GET_FEATURES	17	/* get configurable processor features & settings */
#define PAL_PROC_SET_FEATURES	18	/* enable/disable configurable processor features */
#define PAL_RSE_INFO		19	/* return rse information */
#define PAL_VERSION		20	/* return version of PAL code */
#define PAL_MC_CLEAR_LOG	21	/* clear all processor log info */
#define PAL_MC_DRAIN		22	/* drain operations which could result in an MCA */
#define PAL_MC_EXPECTED		23	/* set/reset expected MCA indicator */
#define PAL_MC_DYNAMIC_STATE	24	/* get processor dynamic state */
#define PAL_MC_ERROR_INFO	25	/* get processor MCA info and static state */
#define PAL_MC_RESUME		26	/* Return to interrupted process */
#define PAL_MC_REGISTER_MEM	27	/* Register memory for PAL to use during MCAs and inits */
#define PAL_HALT		28	/* enter the low power HALT state */
#define PAL_HALT_LIGHT		29	/* enter the low power light halt state*/
#define PAL_COPY_INFO		30	/* returns info needed to relocate PAL */
#define PAL_CACHE_LINE_INIT	31	/* init tags & data of cache line */
#define PAL_PMI_ENTRYPOINT	32	/* register PMI memory entry points with the processor */
#define PAL_ENTER_IA_32_ENV	33	/* enter IA-32 system environment */
#define PAL_VM_PAGE_SIZE	34	/* return vm TC and page walker page sizes */

#define PAL_MEM_FOR_TEST	37	/* get amount of memory needed for late processor test */
#define PAL_CACHE_PROT_INFO	38	/* get i/d cache protection info */
#define PAL_REGISTER_INFO	39	/* return AR and CR register information*/
#define PAL_SHUTDOWN		40	/* enter processor shutdown state */
#define PAL_PREFETCH_VISIBILITY	41	/* Make Processor Prefetches Visible */

#define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
#define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
#define PAL_TEST_PROC		258	/* perform late processor self-test */
#define PAL_CACHE_READ		259	/* read tag & data of cacheline for diagnostic testing */
#define PAL_CACHE_WRITE		260	/* write tag & data of cacheline for diagnostic testing */
#define PAL_VM_TR_READ		261	/* read contents of translation register */
77
78#ifndef __ASSEMBLY__
79
80#include <linux/types.h>
81#include <asm/fpu.h>
82
83/*
84 * Data types needed to pass information into PAL procedures and
85 * interpret information returned by them.
86 */
87
88/* Return status from the PAL procedure */
89typedef s64 pal_status_t;
90
91#define PAL_STATUS_SUCCESS 0 /* No error */
92#define PAL_STATUS_UNIMPLEMENTED (-1) /* Unimplemented procedure */
93#define PAL_STATUS_EINVAL (-2) /* Invalid argument */
94#define PAL_STATUS_ERROR (-3) /* Error */
95#define PAL_STATUS_CACHE_INIT_FAIL (-4) /* Could not initialize the
96 * specified level and type of
97 * cache without sideeffects
98 * and "restrict" was 1
99 */
100
/* Processor cache level in the hierarchy */
102typedef u64 pal_cache_level_t;
103#define PAL_CACHE_LEVEL_L0 0 /* L0 */
104#define PAL_CACHE_LEVEL_L1 1 /* L1 */
105#define PAL_CACHE_LEVEL_L2 2 /* L2 */
106
107
/* Processor cache type at a particular level in the hierarchy */
109
110typedef u64 pal_cache_type_t;
111#define PAL_CACHE_TYPE_INSTRUCTION 1 /* Instruction cache */
112#define PAL_CACHE_TYPE_DATA 2 /* Data or unified cache */
113#define PAL_CACHE_TYPE_INSTRUCTION_DATA 3 /* Both Data & Instruction */
114
115
116#define PAL_CACHE_FLUSH_INVALIDATE 1 /* Invalidate clean lines */
117#define PAL_CACHE_FLUSH_CHK_INTRS 2 /* check for interrupts/mc while flushing */
118
119/* Processor cache line size in bytes */
120typedef int pal_cache_line_size_t;
121
122/* Processor cache line state */
123typedef u64 pal_cache_line_state_t;
124#define PAL_CACHE_LINE_STATE_INVALID 0 /* Invalid */
125#define PAL_CACHE_LINE_STATE_SHARED 1 /* Shared */
126#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2 /* Exclusive */
127#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
128
/*
 * A frequency ratio expressed as num/den (see PAL_FREQ_RATIOS).
 * The same layout is typedef'd under two names, itc_ratio and
 * proc_ratio, for the ITC and processor ratios respectively.
 */
typedef struct pal_freq_ratio {
	u64 den : 32, num : 32;	/* numerator & denominator */
} itc_ratio, proc_ratio;
132
133typedef union pal_cache_config_info_1_s {
134 struct {
135 u64 u : 1, /* 0 Unified cache ? */
136 at : 2, /* 2-1 Cache mem attr*/
137 reserved : 5, /* 7-3 Reserved */
138 associativity : 8, /* 16-8 Associativity*/
139 line_size : 8, /* 23-17 Line size */
140 stride : 8, /* 31-24 Stride */
141 store_latency : 8, /*39-32 Store latency*/
142 load_latency : 8, /* 47-40 Load latency*/
143 store_hints : 8, /* 55-48 Store hints*/
144 load_hints : 8; /* 63-56 Load hints */
145 } pcci1_bits;
146 u64 pcci1_data;
147} pal_cache_config_info_1_t;
148
/*
 * Second word of cache configuration information; the raw value
 * (pcci2_data) is returned in v1 by PAL_CACHE_INFO -- see
 * ia64_pal_cache_config_info().
 */
typedef union pal_cache_config_info_2_s {
	struct {
		u64 cache_size : 32, /* 31-0 cache size in bytes */

		alias_boundary : 8, /* 39-32 aliased addr
				     * separation for max
				     * performance.
				     */
		tag_ls_bit : 8, /* 47-40 LSb of addr*/
		tag_ms_bit : 8, /* 55-48 MSb of addr*/
		reserved : 8; /* 63-56 Reserved */
	} pcci2_bits;
	u64 pcci2_data;
} pal_cache_config_info_2_t;
164
165
166typedef struct pal_cache_config_info_s {
167 pal_status_t pcci_status;
168 pal_cache_config_info_1_t pcci_info_1;
169 pal_cache_config_info_2_t pcci_info_2;
170 u64 pcci_reserved;
171} pal_cache_config_info_t;
172
173#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints
174#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints
175#define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency
176#define pcci_st_latency pcci_info_1.pcci1_bits.store_latency
177#define pcci_stride pcci_info_1.pcci1_bits.stride
178#define pcci_line_size pcci_info_1.pcci1_bits.line_size
179#define pcci_assoc pcci_info_1.pcci1_bits.associativity
180#define pcci_cache_attr pcci_info_1.pcci1_bits.at
181#define pcci_unified pcci_info_1.pcci1_bits.u
182#define pcci_tag_msb pcci_info_2.pcci2_bits.tag_ms_bit
183#define pcci_tag_lsb pcci_info_2.pcci2_bits.tag_ls_bit
184#define pcci_alias_boundary pcci_info_2.pcci2_bits.alias_boundary
185#define pcci_cache_size pcci_info_2.pcci2_bits.cache_size
186
187
188
189/* Possible values for cache attributes */
190
191#define PAL_CACHE_ATTR_WT 0 /* Write through cache */
192#define PAL_CACHE_ATTR_WB 1 /* Write back cache */
193#define PAL_CACHE_ATTR_WT_OR_WB 2 /* Either write thru or write
194 * back depending on TLB
195 * memory attributes
196 */
197
198
199/* Possible values for cache hints */
200
201#define PAL_CACHE_HINT_TEMP_1 0 /* Temporal level 1 */
202#define PAL_CACHE_HINT_NTEMP_1 1 /* Non-temporal level 1 */
203#define PAL_CACHE_HINT_NTEMP_ALL 3 /* Non-temporal all levels */
204
205/* Processor cache protection information */
206typedef union pal_cache_protection_element_u {
207 u32 pcpi_data;
208 struct {
209 u32 data_bits : 8, /* # data bits covered by
210 * each unit of protection
211 */
212
213 tagprot_lsb : 6, /* Least -do- */
214 tagprot_msb : 6, /* Most Sig. tag address
215 * bit that this
216 * protection covers.
217 */
218 prot_bits : 6, /* # of protection bits */
219 method : 4, /* Protection method */
220 t_d : 2; /* Indicates which part
221 * of the cache this
222 * protection encoding
223 * applies.
224 */
225 } pcp_info;
226} pal_cache_protection_element_t;
227
228#define pcpi_cache_prot_part pcp_info.t_d
229#define pcpi_prot_method pcp_info.method
230#define pcpi_prot_bits pcp_info.prot_bits
231#define pcpi_tagprot_msb pcp_info.tagprot_msb
232#define pcpi_tagprot_lsb pcp_info.tagprot_lsb
233#define pcpi_data_bits pcp_info.data_bits
234
235/* Processor cache part encodings */
236#define PAL_CACHE_PROT_PART_DATA 0 /* Data protection */
237#define PAL_CACHE_PROT_PART_TAG 1 /* Tag protection */
238#define PAL_CACHE_PROT_PART_TAG_DATA 2 /* Tag+data protection (tag is
239 * more significant )
240 */
241#define PAL_CACHE_PROT_PART_DATA_TAG 3 /* Data+tag protection (data is
242 * more significant )
243 */
244#define PAL_CACHE_PROT_PART_MAX 6
245
246
247typedef struct pal_cache_protection_info_s {
248 pal_status_t pcpi_status;
249 pal_cache_protection_element_t pcp_info[PAL_CACHE_PROT_PART_MAX];
250} pal_cache_protection_info_t;
251
252
253/* Processor cache protection method encodings */
254#define PAL_CACHE_PROT_METHOD_NONE 0 /* No protection */
255#define PAL_CACHE_PROT_METHOD_ODD_PARITY 1 /* Odd parity */
256#define PAL_CACHE_PROT_METHOD_EVEN_PARITY 2 /* Even parity */
257#define PAL_CACHE_PROT_METHOD_ECC 3 /* ECC protection */
258
259
/* Processor cache line identification in the hierarchy */
261typedef union pal_cache_line_id_u {
262 u64 pclid_data;
263 struct {
264 u64 cache_type : 8, /* 7-0 cache type */
265 level : 8, /* 15-8 level of the
266 * cache in the
267 * heirarchy.
268 */
269 way : 8, /* 23-16 way in the set
270 */
271 part : 8, /* 31-24 part of the
272 * cache
273 */
274 reserved : 32; /* 63-32 is reserved*/
275 } pclid_info_read;
276 struct {
277 u64 cache_type : 8, /* 7-0 cache type */
278 level : 8, /* 15-8 level of the
279 * cache in the
280 * heirarchy.
281 */
282 way : 8, /* 23-16 way in the set
283 */
284 part : 8, /* 31-24 part of the
285 * cache
286 */
287 mesi : 8, /* 39-32 cache line
288 * state
289 */
290 start : 8, /* 47-40 lsb of data to
291 * invert
292 */
293 length : 8, /* 55-48 #bits to
294 * invert
295 */
296 trigger : 8; /* 63-56 Trigger error
297 * by doing a load
298 * after the write
299 */
300
301 } pclid_info_write;
302} pal_cache_line_id_u_t;
303
304#define pclid_read_part pclid_info_read.part
305#define pclid_read_way pclid_info_read.way
306#define pclid_read_level pclid_info_read.level
307#define pclid_read_cache_type pclid_info_read.cache_type
308
309#define pclid_write_trigger pclid_info_write.trigger
310#define pclid_write_length pclid_info_write.length
311#define pclid_write_start pclid_info_write.start
312#define pclid_write_mesi pclid_info_write.mesi
313#define pclid_write_part pclid_info_write.part
314#define pclid_write_way pclid_info_write.way
315#define pclid_write_level pclid_info_write.level
316#define pclid_write_cache_type pclid_info_write.cache_type
317
318/* Processor cache line part encodings */
319#define PAL_CACHE_LINE_ID_PART_DATA 0 /* Data */
320#define PAL_CACHE_LINE_ID_PART_TAG 1 /* Tag */
321#define PAL_CACHE_LINE_ID_PART_DATA_PROT 2 /* Data protection */
322#define PAL_CACHE_LINE_ID_PART_TAG_PROT 3 /* Tag protection */
323#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT 4 /* Data+tag
324 * protection
325 */
326typedef struct pal_cache_line_info_s {
327 pal_status_t pcli_status; /* Return status of the read cache line
328 * info call.
329 */
330 u64 pcli_data; /* 64-bit data, tag, protection bits .. */
331 u64 pcli_data_len; /* data length in bits */
332 pal_cache_line_state_t pcli_cache_line_state; /* mesi state */
333
334} pal_cache_line_info_t;
335
336
/* Machine-check related definitions */
338
339/* Pending event status bits */
340typedef u64 pal_mc_pending_events_t;
341
342#define PAL_MC_PENDING_MCA (1 << 0)
343#define PAL_MC_PENDING_INIT (1 << 1)
344
345/* Error information type */
346typedef u64 pal_mc_info_index_t;
347
348#define PAL_MC_INFO_PROCESSOR 0 /* Processor */
349#define PAL_MC_INFO_CACHE_CHECK 1 /* Cache check */
350#define PAL_MC_INFO_TLB_CHECK 2 /* Tlb check */
351#define PAL_MC_INFO_BUS_CHECK 3 /* Bus check */
352#define PAL_MC_INFO_REQ_ADDR 4 /* Requestor address */
353#define PAL_MC_INFO_RESP_ADDR 5 /* Responder address */
354#define PAL_MC_INFO_TARGET_ADDR 6 /* Target address */
355#define PAL_MC_INFO_IMPL_DEP 7 /* Implementation
356 * dependent
357 */
358
359
360typedef struct pal_process_state_info_s {
361 u64 reserved1 : 2,
362 rz : 1, /* PAL_CHECK processor
363 * rendezvous
364 * successful.
365 */
366
367 ra : 1, /* PAL_CHECK attempted
368 * a rendezvous.
369 */
370 me : 1, /* Distinct multiple
371 * errors occurred
372 */
373
374 mn : 1, /* Min. state save
375 * area has been
376 * registered with PAL
377 */
378
379 sy : 1, /* Storage integrity
380 * synched
381 */
382
383
384 co : 1, /* Continuable */
385 ci : 1, /* MC isolated */
386 us : 1, /* Uncontained storage
387 * damage.
388 */
389
390
391 hd : 1, /* Non-essential hw
392 * lost (no loss of
393 * functionality)
394 * causing the
395 * processor to run in
396 * degraded mode.
397 */
398
399 tl : 1, /* 1 => MC occurred
400 * after an instr was
401 * executed but before
402 * the trap that
403 * resulted from instr
404 * execution was
405 * generated.
406 * (Trap Lost )
407 */
408 mi : 1, /* More information available
409 * call PAL_MC_ERROR_INFO
410 */
411 pi : 1, /* Precise instruction pointer */
412 pm : 1, /* Precise min-state save area */
413
414 dy : 1, /* Processor dynamic
415 * state valid
416 */
417
418
419 in : 1, /* 0 = MC, 1 = INIT */
420 rs : 1, /* RSE valid */
421 cm : 1, /* MC corrected */
422 ex : 1, /* MC is expected */
423 cr : 1, /* Control regs valid*/
424 pc : 1, /* Perf cntrs valid */
425 dr : 1, /* Debug regs valid */
426 tr : 1, /* Translation regs
427 * valid
428 */
429 rr : 1, /* Region regs valid */
430 ar : 1, /* App regs valid */
431 br : 1, /* Branch regs valid */
432 pr : 1, /* Predicate registers
433 * valid
434 */
435
436 fp : 1, /* fp registers valid*/
437 b1 : 1, /* Preserved bank one
438 * general registers
439 * are valid
440 */
441 b0 : 1, /* Preserved bank zero
442 * general registers
443 * are valid
444 */
445 gr : 1, /* General registers
446 * are valid
447 * (excl. banked regs)
448 */
449 dsize : 16, /* size of dynamic
450 * state returned
451 * by the processor
452 */
453
454 reserved2 : 11,
455 cc : 1, /* Cache check */
456 tc : 1, /* TLB check */
457 bc : 1, /* Bus check */
458 rc : 1, /* Register file check */
459 uc : 1; /* Uarch check */
460
461} pal_processor_state_info_t;
462
463typedef struct pal_cache_check_info_s {
464 u64 op : 4, /* Type of cache
465 * operation that
466 * caused the machine
467 * check.
468 */
469 level : 2, /* Cache level */
470 reserved1 : 2,
471 dl : 1, /* Failure in data part
472 * of cache line
473 */
474 tl : 1, /* Failure in tag part
475 * of cache line
476 */
477 dc : 1, /* Failure in dcache */
478 ic : 1, /* Failure in icache */
479 mesi : 3, /* Cache line state */
480 mv : 1, /* mesi valid */
481 way : 5, /* Way in which the
482 * error occurred
483 */
484 wiv : 1, /* Way field valid */
485 reserved2 : 10,
486
487 index : 20, /* Cache line index */
488 reserved3 : 2,
489
490 is : 1, /* instruction set (1 == ia32) */
491 iv : 1, /* instruction set field valid */
492 pl : 2, /* privilege level */
493 pv : 1, /* privilege level field valid */
494 mcc : 1, /* Machine check corrected */
495 tv : 1, /* Target address
496 * structure is valid
497 */
498 rq : 1, /* Requester identifier
499 * structure is valid
500 */
501 rp : 1, /* Responder identifier
502 * structure is valid
503 */
504 pi : 1; /* Precise instruction pointer
505 * structure is valid
506 */
507} pal_cache_check_info_t;
508
509typedef struct pal_tlb_check_info_s {
510
511 u64 tr_slot : 8, /* Slot# of TR where
512 * error occurred
513 */
514 trv : 1, /* tr_slot field is valid */
515 reserved1 : 1,
516 level : 2, /* TLB level where failure occurred */
517 reserved2 : 4,
518 dtr : 1, /* Fail in data TR */
519 itr : 1, /* Fail in inst TR */
520 dtc : 1, /* Fail in data TC */
521 itc : 1, /* Fail in inst. TC */
522 op : 4, /* Cache operation */
523 reserved3 : 30,
524
525 is : 1, /* instruction set (1 == ia32) */
526 iv : 1, /* instruction set field valid */
527 pl : 2, /* privilege level */
528 pv : 1, /* privilege level field valid */
529 mcc : 1, /* Machine check corrected */
530 tv : 1, /* Target address
531 * structure is valid
532 */
533 rq : 1, /* Requester identifier
534 * structure is valid
535 */
536 rp : 1, /* Responder identifier
537 * structure is valid
538 */
539 pi : 1; /* Precise instruction pointer
540 * structure is valid
541 */
542} pal_tlb_check_info_t;
543
544typedef struct pal_bus_check_info_s {
545 u64 size : 5, /* Xaction size */
546 ib : 1, /* Internal bus error */
547 eb : 1, /* External bus error */
548 cc : 1, /* Error occurred
549 * during cache-cache
550 * transfer.
551 */
552 type : 8, /* Bus xaction type*/
553 sev : 5, /* Bus error severity*/
554 hier : 2, /* Bus hierarchy level */
555 reserved1 : 1,
556 bsi : 8, /* Bus error status
557 * info
558 */
559 reserved2 : 22,
560
561 is : 1, /* instruction set (1 == ia32) */
562 iv : 1, /* instruction set field valid */
563 pl : 2, /* privilege level */
564 pv : 1, /* privilege level field valid */
565 mcc : 1, /* Machine check corrected */
566 tv : 1, /* Target address
567 * structure is valid
568 */
569 rq : 1, /* Requester identifier
570 * structure is valid
571 */
572 rp : 1, /* Responder identifier
573 * structure is valid
574 */
575 pi : 1; /* Precise instruction pointer
576 * structure is valid
577 */
578} pal_bus_check_info_t;
579
580typedef struct pal_reg_file_check_info_s {
581 u64 id : 4, /* Register file identifier */
582 op : 4, /* Type of register
583 * operation that
584 * caused the machine
585 * check.
586 */
587 reg_num : 7, /* Register number */
588 rnv : 1, /* reg_num valid */
589 reserved2 : 38,
590
591 is : 1, /* instruction set (1 == ia32) */
592 iv : 1, /* instruction set field valid */
593 pl : 2, /* privilege level */
594 pv : 1, /* privilege level field valid */
595 mcc : 1, /* Machine check corrected */
596 reserved3 : 3,
597 pi : 1; /* Precise instruction pointer
598 * structure is valid
599 */
600} pal_reg_file_check_info_t;
601
602typedef struct pal_uarch_check_info_s {
603 u64 sid : 5, /* Structure identification */
604 level : 3, /* Level of failure */
605 array_id : 4, /* Array identification */
606 op : 4, /* Type of
607 * operation that
608 * caused the machine
609 * check.
610 */
611 way : 6, /* Way of structure */
612 wv : 1, /* way valid */
613 xv : 1, /* index valid */
614 reserved1 : 8,
615 index : 8, /* Index or set of the uarch
616 * structure that failed.
617 */
618 reserved2 : 24,
619
620 is : 1, /* instruction set (1 == ia32) */
621 iv : 1, /* instruction set field valid */
622 pl : 2, /* privilege level */
623 pv : 1, /* privilege level field valid */
624 mcc : 1, /* Machine check corrected */
625 tv : 1, /* Target address
626 * structure is valid
627 */
628 rq : 1, /* Requester identifier
629 * structure is valid
630 */
631 rp : 1, /* Responder identifier
632 * structure is valid
633 */
634 pi : 1; /* Precise instruction pointer
635 * structure is valid
636 */
637} pal_uarch_check_info_t;
638
639typedef union pal_mc_error_info_u {
640 u64 pmei_data;
641 pal_processor_state_info_t pme_processor;
642 pal_cache_check_info_t pme_cache;
643 pal_tlb_check_info_t pme_tlb;
644 pal_bus_check_info_t pme_bus;
645 pal_reg_file_check_info_t pme_reg_file;
646 pal_uarch_check_info_t pme_uarch;
647} pal_mc_error_info_t;
648
/*
 * Convenience accessors for the members of pal_mc_error_info_t.
 * Note: the *_mc accessors expand to the "mcc" (machine check
 * corrected) bit -- the structs declare no field named "mc".
 */
#define pmci_proc_unknown_check			pme_processor.uc
#define pmci_proc_bus_check			pme_processor.bc
#define pmci_proc_tlb_check			pme_processor.tc
#define pmci_proc_cache_check			pme_processor.cc
#define pmci_proc_dynamic_state_size		pme_processor.dsize
#define pmci_proc_gpr_valid			pme_processor.gr
#define pmci_proc_preserved_bank0_gpr_valid	pme_processor.b0
#define pmci_proc_preserved_bank1_gpr_valid	pme_processor.b1
#define pmci_proc_fp_valid			pme_processor.fp
#define pmci_proc_predicate_regs_valid		pme_processor.pr
#define pmci_proc_branch_regs_valid		pme_processor.br
#define pmci_proc_app_regs_valid		pme_processor.ar
#define pmci_proc_region_regs_valid		pme_processor.rr
#define pmci_proc_translation_regs_valid	pme_processor.tr
#define pmci_proc_debug_regs_valid		pme_processor.dr
#define pmci_proc_perf_counters_valid		pme_processor.pc
#define pmci_proc_control_regs_valid		pme_processor.cr
#define pmci_proc_machine_check_expected	pme_processor.ex
#define pmci_proc_machine_check_corrected	pme_processor.cm
#define pmci_proc_rse_valid			pme_processor.rs
#define pmci_proc_machine_check_or_init		pme_processor.in
#define pmci_proc_dynamic_state_valid		pme_processor.dy
/* FIXME: pal_processor_state_info_t declares no "op" field; any use of
 * this accessor fails to compile.  Kept for source compatibility. */
#define pmci_proc_operation			pme_processor.op
#define pmci_proc_trap_lost			pme_processor.tl
#define pmci_proc_hardware_damage		pme_processor.hd
#define pmci_proc_uncontained_storage_damage	pme_processor.us
#define pmci_proc_machine_check_isolated	pme_processor.ci
#define pmci_proc_continuable			pme_processor.co
#define pmci_proc_storage_intergrity_synced	pme_processor.sy
#define pmci_proc_min_state_save_area_regd	pme_processor.mn
#define pmci_proc_distinct_multiple_errors	pme_processor.me
#define pmci_proc_pal_attempted_rendezvous	pme_processor.ra
#define pmci_proc_pal_rendezvous_complete	pme_processor.rz


#define pmci_cache_level			pme_cache.level
#define pmci_cache_line_state			pme_cache.mesi
#define pmci_cache_line_state_valid		pme_cache.mv
#define pmci_cache_line_index			pme_cache.index
#define pmci_cache_instr_cache_fail		pme_cache.ic
#define pmci_cache_data_cache_fail		pme_cache.dc
#define pmci_cache_line_tag_fail		pme_cache.tl
#define pmci_cache_line_data_fail		pme_cache.dl
#define pmci_cache_operation			pme_cache.op
#define pmci_cache_way_valid			pme_cache.wv
#define pmci_cache_target_address_valid		pme_cache.tv
#define pmci_cache_way				pme_cache.way
#define pmci_cache_mc				pme_cache.mcc	/* was ".mc": no such field */

#define pmci_tlb_instr_translation_cache_fail	pme_tlb.itc
#define pmci_tlb_data_translation_cache_fail	pme_tlb.dtc
#define pmci_tlb_instr_translation_reg_fail	pme_tlb.itr
#define pmci_tlb_data_translation_reg_fail	pme_tlb.dtr
#define pmci_tlb_translation_reg_slot		pme_tlb.tr_slot
#define pmci_tlb_mc				pme_tlb.mcc	/* was ".mc": no such field */

#define pmci_bus_status_info			pme_bus.bsi
#define pmci_bus_req_address_valid		pme_bus.rq
#define pmci_bus_resp_address_valid		pme_bus.rp
#define pmci_bus_target_address_valid		pme_bus.tv
#define pmci_bus_error_severity			pme_bus.sev
#define pmci_bus_transaction_type		pme_bus.type
#define pmci_bus_cache_cache_transfer		pme_bus.cc
#define pmci_bus_transaction_size		pme_bus.size
#define pmci_bus_internal_error			pme_bus.ib
#define pmci_bus_external_error			pme_bus.eb
#define pmci_bus_mc				pme_bus.mcc	/* was ".mc": no such field */
716
717/*
718 * NOTE: this min_state_save area struct only includes the 1KB
719 * architectural state save area. The other 3 KB is scratch space
720 * for PAL.
721 */
722
723typedef struct pal_min_state_area_s {
724 u64 pmsa_nat_bits; /* nat bits for saved GRs */
725 u64 pmsa_gr[15]; /* GR1 - GR15 */
726 u64 pmsa_bank0_gr[16]; /* GR16 - GR31 */
727 u64 pmsa_bank1_gr[16]; /* GR16 - GR31 */
728 u64 pmsa_pr; /* predicate registers */
729 u64 pmsa_br0; /* branch register 0 */
730 u64 pmsa_rsc; /* ar.rsc */
731 u64 pmsa_iip; /* cr.iip */
732 u64 pmsa_ipsr; /* cr.ipsr */
733 u64 pmsa_ifs; /* cr.ifs */
734 u64 pmsa_xip; /* previous iip */
735 u64 pmsa_xpsr; /* previous psr */
736 u64 pmsa_xfs; /* previous ifs */
737 u64 pmsa_br1; /* branch register 1 */
738 u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */
739} pal_min_state_area_t;
740
741
742struct ia64_pal_retval {
743 /*
744 * A zero status value indicates call completed without error.
745 * A negative status value indicates reason of call failure.
746 * A positive status value indicates success but an
747 * informational value should be printed (e.g., "reboot for
748 * change to take effect").
749 */
750 s64 status;
751 u64 v0;
752 u64 v1;
753 u64 v2;
754};
755
756/*
757 * Note: Currently unused PAL arguments are generally labeled
758 * "reserved" so the value specified in the PAL documentation
759 * (generally 0) MUST be passed. Reserved parameters are not optional
760 * parameters.
761 */
762extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64, u64);
763extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
764extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
765extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
766extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
767extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
768
/*
 * Wrappers for the PAL calling conventions.  Each saves and restores
 * the six scratch FP registers around the call (PAL may clobber them)
 * and stores the result in the caller-supplied 'iprv'
 * (struct ia64_pal_retval).  The trailing 0/1 argument of
 * ia64_pal_call_static is what distinguishes PAL_CALL from
 * PAL_CALL_IC_OFF; the _STK and _PHYS* variants route through the
 * stacked and physical-mode entry points respectively.
 */
#define PAL_CALL(iprv,a0,a1,a2,a3) do {			\
	struct ia64_fpreg fr[6];			\
	ia64_save_scratch_fpregs(fr);			\
	iprv = ia64_pal_call_static(a0, a1, a2, a3, 0);	\
	ia64_load_scratch_fpregs(fr);			\
} while (0)

#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) do {		\
	struct ia64_fpreg fr[6];			\
	ia64_save_scratch_fpregs(fr);			\
	iprv = ia64_pal_call_static(a0, a1, a2, a3, 1);	\
	ia64_load_scratch_fpregs(fr);			\
} while (0)

#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do {		\
	struct ia64_fpreg fr[6];			\
	ia64_save_scratch_fpregs(fr);			\
	iprv = ia64_pal_call_stacked(a0, a1, a2, a3);	\
	ia64_load_scratch_fpregs(fr);			\
} while (0)

#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do {		\
	struct ia64_fpreg fr[6];			\
	ia64_save_scratch_fpregs(fr);			\
	iprv = ia64_pal_call_phys_static(a0, a1, a2, a3); \
	ia64_load_scratch_fpregs(fr);			\
} while (0)

#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do {	\
	struct ia64_fpreg fr[6];			\
	ia64_save_scratch_fpregs(fr);			\
	iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3); \
	ia64_load_scratch_fpregs(fr);			\
} while (0)
803
804typedef int (*ia64_pal_handler) (u64, ...);
805extern ia64_pal_handler ia64_pal;
806extern void ia64_pal_handler_init (void *);
807
808extern ia64_pal_handler ia64_pal;
809
810extern pal_cache_config_info_t l0d_cache_config_info;
811extern pal_cache_config_info_t l0i_cache_config_info;
812extern pal_cache_config_info_t l1_cache_config_info;
813extern pal_cache_config_info_t l2_cache_config_info;
814
815extern pal_cache_protection_info_t l0d_cache_protection_info;
816extern pal_cache_protection_info_t l0i_cache_protection_info;
817extern pal_cache_protection_info_t l1_cache_protection_info;
818extern pal_cache_protection_info_t l2_cache_protection_info;
819
820extern pal_cache_config_info_t pal_cache_config_info_get(pal_cache_level_t,
821 pal_cache_type_t);
822
823extern pal_cache_protection_info_t pal_cache_protection_info_get(pal_cache_level_t,
824 pal_cache_type_t);
825
826
827extern void pal_error(int);
828
829
830/* Useful wrappers for the current list of pal procedures */
831
832typedef union pal_bus_features_u {
833 u64 pal_bus_features_val;
834 struct {
835 u64 pbf_reserved1 : 29;
836 u64 pbf_req_bus_parking : 1;
837 u64 pbf_bus_lock_mask : 1;
838 u64 pbf_enable_half_xfer_rate : 1;
839 u64 pbf_reserved2 : 22;
840 u64 pbf_disable_xaction_queueing : 1;
841 u64 pbf_disable_resp_err_check : 1;
842 u64 pbf_disable_berr_check : 1;
843 u64 pbf_disable_bus_req_internal_err_signal : 1;
844 u64 pbf_disable_bus_req_berr_signal : 1;
845 u64 pbf_disable_bus_init_event_check : 1;
846 u64 pbf_disable_bus_init_event_signal : 1;
847 u64 pbf_disable_bus_addr_err_check : 1;
848 u64 pbf_disable_bus_addr_err_signal : 1;
849 u64 pbf_disable_bus_data_err_check : 1;
850 } pal_bus_features_s;
851} pal_bus_features_u_t;
852
853extern void pal_bus_features_print (u64);
854
855/* Provide information about configurable processor bus features */
856static inline s64
857ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
858 pal_bus_features_u_t *features_status,
859 pal_bus_features_u_t *features_control)
860{
861 struct ia64_pal_retval iprv;
862 PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
863 if (features_avail)
864 features_avail->pal_bus_features_val = iprv.v0;
865 if (features_status)
866 features_status->pal_bus_features_val = iprv.v1;
867 if (features_control)
868 features_control->pal_bus_features_val = iprv.v2;
869 return iprv.status;
870}
871
872/* Enables/disables specific processor bus features */
873static inline s64
874ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
875{
876 struct ia64_pal_retval iprv;
877 PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
878 return iprv.status;
879}
880
881/* Get detailed cache information */
882static inline s64
883ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
884{
885 struct ia64_pal_retval iprv;
886
887 PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
888
889 if (iprv.status == 0) {
890 conf->pcci_status = iprv.status;
891 conf->pcci_info_1.pcci1_data = iprv.v0;
892 conf->pcci_info_2.pcci2_data = iprv.v1;
893 conf->pcci_reserved = iprv.v2;
894 }
895 return iprv.status;
896
897}
898
/* Get detailed cache protection information */
900static inline s64
901ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
902{
903 struct ia64_pal_retval iprv;
904
905 PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
906
907 if (iprv.status == 0) {
908 prot->pcpi_status = iprv.status;
909 prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
910 prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
911 prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
912 prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
913 prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
914 prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
915 }
916 return iprv.status;
917}
918
919/*
920 * Flush the processor instruction or data caches. *PROGRESS must be
921 * initialized to zero before calling this for the first time..
922 */
923static inline s64
924ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
925{
926 struct ia64_pal_retval iprv;
927 PAL_CALL_IC_OFF(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
928 if (vector)
929 *vector = iprv.v0;
930 *progress = iprv.v1;
931 return iprv.status;
932}
933
934
935/* Initialize the processor controlled caches */
936static inline s64
937ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
938{
939 struct ia64_pal_retval iprv;
940 PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
941 return iprv.status;
942}
943
944/* Initialize the tags and data of a data or unified cache line of
945 * processor controlled cache to known values without the availability
946 * of backing memory.
947 */
948static inline s64
949ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
950{
951 struct ia64_pal_retval iprv;
952 PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
953 return iprv.status;
954}
955
956
957/* Read the data and tag of a processor controlled cache line for diags */
958static inline s64
959ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
960{
	/* Only the PAL status is returned; the line contents are presumably
	 * deposited at physical_addr -- confirm against the PAL_CACHE_READ spec. */
961 struct ia64_pal_retval iprv;
962 PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
963 return iprv.status;
964}
965
966/* Return summary information about the hierarchy of caches controlled by the processor */
967static inline s64
968ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
969{
	/* Out-parameters are optional: each is written only when non-NULL. */
970 struct ia64_pal_retval iprv;
971 PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
972 if (cache_levels)
973 *cache_levels = iprv.v0;
974 if (unique_caches)
975 *unique_caches = iprv.v1;
976 return iprv.status;
977}
978
979/* Write the data and tag of a processor-controlled cache line for diags */
980static inline s64
981ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
982{
983 struct ia64_pal_retval iprv;
984 PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
985 return iprv.status;
986}
987
988
989/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
990static inline s64
991ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
992 u64 *buffer_size, u64 *buffer_align)
993{
994 struct ia64_pal_retval iprv;
995 PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
996 if (buffer_size)
997 *buffer_size = iprv.v0;
998 if (buffer_align)
999 *buffer_align = iprv.v1;
1000 return iprv.status;
1001}
1002
1003/* Copy relocatable PAL procedures from ROM to memory */
1004static inline s64
1005ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
1006{
1007 struct ia64_pal_retval iprv;
1008 PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
1009 if (pal_proc_offset)
1010 *pal_proc_offset = iprv.v0;
1011 return iprv.status;
1012}
1013
1014/* Return the number of instruction and data debug register pairs */
1015static inline s64
1016ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs)
1017{
1018 struct ia64_pal_retval iprv;
1019 PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
1020 if (inst_regs)
1021 *inst_regs = iprv.v0;
1022 if (data_regs)
1023 *data_regs = iprv.v1;
1024
1025 return iprv.status;
1026}
1027
1028#ifdef TBD
1029/* Switch from IA64-system environment to IA-32 system environment */
/* NOTE(review): dead code -- compiled only if TBD is defined, and the
 * K&R-style parameter list carries no types (implicit int). Would need
 * proper prototypes before ever being enabled. */
1030static inline s64
1031ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3)
1032{
1033 struct ia64_pal_retval iprv;
1034 PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
1035 return iprv.status;
1036}
1037#endif
1038
1039/* Get unique geographical address of this processor on its bus */
1040static inline s64
1041ia64_pal_fixed_addr (u64 *global_unique_addr)
1042{
1043 struct ia64_pal_retval iprv;
1044 PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
1045 if (global_unique_addr)
1046 *global_unique_addr = iprv.v0;
1047 return iprv.status;
1048}
1049
1050/* Get base frequency of the platform if generated by the processor */
1051static inline s64
1052ia64_pal_freq_base (u64 *platform_base_freq)
1053{
	/* Frequency is returned in v0; written only when the caller supplied
	 * a buffer. Units are defined by the PAL spec (presumably Hz -- confirm). */
1054 struct ia64_pal_retval iprv;
1055 PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
1056 if (platform_base_freq)
1057 *platform_base_freq = iprv.v0;
1058 return iprv.status;
1059}
1060
1061/*
1062 * Get the ratios for processor frequency, bus frequency and interval timer to
1063 * base frequency of the platform
1064 */
1065static inline s64
1066ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
1067 struct pal_freq_ratio *itc_ratio)
1068{
1069 struct ia64_pal_retval iprv;
1070 PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
	/* NOTE(review): stores the raw 64-bit PAL return words through a
	 * (u64 *) cast -- assumes struct pal_freq_ratio is exactly 64 bits
	 * with matching layout; a strict-aliasing concern outside kernel
	 * compile flags. */
1071 if (proc_ratio)
1072 *(u64 *)proc_ratio = iprv.v0;
1073 if (bus_ratio)
1074 *(u64 *)bus_ratio = iprv.v1;
1075 if (itc_ratio)
1076 *(u64 *)itc_ratio = iprv.v2;
1077 return iprv.status;
1078}
1079
1080/* Make the processor enter HALT or one of the implementation dependent low
1081 * power states where prefetching and execution are suspended and cache and
1082 * TLB coherency is not maintained.
1083 */
1084static inline s64
1085ia64_pal_halt (u64 halt_state)
1086{
1087 struct ia64_pal_retval iprv;
1088 PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
1089 return iprv.status;
1090}
1091
/* Per-power-state descriptor returned by PAL_HALT_INFO (bit-field overlay
 * on one 64-bit word). */
1092typedef union pal_power_mgmt_info_u {
1093 u64 ppmi_data;
1094 struct {
1095 u64 exit_latency : 16,
1096 entry_latency : 16,
1097 power_consumption : 28,
1098 im : 1,
1099 co : 1,
1100 reserved : 2;
1101 } pal_power_mgmt_info_s;
1102} pal_power_mgmt_info_u_t;
1103
1104/* Return information about processor's optional power management capabilities. */
1105static inline s64
1106ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
1107{
	/* Buffer address is passed by value; PAL fills it in. NOTE(review):
	 * required buffer length (entries per halt state) is defined by the
	 * PAL_HALT_INFO spec -- confirm callers size it accordingly. */
1108 struct ia64_pal_retval iprv;
1109 PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
1110 return iprv.status;
1111}
1112
1113/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
1114 * suspended, but cache and TLB coherency is maintained.
1115 */
1116static inline s64
1117ia64_pal_halt_light (void)
1118{
1119 struct ia64_pal_retval iprv;
1120 PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
1121 return iprv.status;
1122}
1123
1124/* Clear all the processor error logging registers and reset the indicator that allows
1125 * the error logging registers to be written. This procedure also checks the pending
1126 * machine check bit and pending INIT bit and reports their states.
1127 */
1128static inline s64
1129ia64_pal_mc_clear_log (u64 *pending_vector)
1130{
1131 struct ia64_pal_retval iprv;
1132 PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
1133 if (pending_vector)
1134 *pending_vector = iprv.v0;
1135 return iprv.status;
1136}
1137
1138/* Ensure that all outstanding transactions in a processor are completed or that any
1139 * MCA due to these outstanding transactions is taken.
1140 */
1141static inline s64
1142ia64_pal_mc_drain (void)
1143{
1144 struct ia64_pal_retval iprv;
1145 PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
1146 return iprv.status;
1147}
1148
1149/* Return the machine check dynamic processor state */
1150static inline s64
1151ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
1152{
	/* size/pds are optional out-parameters (written only when non-NULL). */
1153 struct ia64_pal_retval iprv;
1154 PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
1155 if (size)
1156 *size = iprv.v0;
1157 if (pds)
1158 *pds = iprv.v1;
1159 return iprv.status;
1160}
1161
1162/* Return processor machine check information */
1163static inline s64
1164ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
1165{
1166 struct ia64_pal_retval iprv;
1167 PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
1168 if (size)
1169 *size = iprv.v0;
1170 if (error_info)
1171 *error_info = iprv.v1;
1172 return iprv.status;
1173}
1174
1175/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
1176 * attempt to correct any expected machine checks.
1177 */
1178static inline s64
1179ia64_pal_mc_expected (u64 expected, u64 *previous)
1180{
	/* The previous "expected" setting is reported in v0; optional out-param. */
1181 struct ia64_pal_retval iprv;
1182 PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
1183 if (previous)
1184 *previous = iprv.v0;
1185 return iprv.status;
1186}
1187
1188/* Register a platform dependent location with PAL to which it can save
1189 * minimal processor state in the event of a machine check or initialization
1190 * event.
1191 */
1192static inline s64
1193ia64_pal_mc_register_mem (u64 physical_addr)
1194{
1195 struct ia64_pal_retval iprv;
1196 PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
1197 return iprv.status;
1198}
1199
1200/* Restore minimal architectural processor state, set CMC interrupt if necessary
1201 * and resume execution
1202 */
1203static inline s64
1204ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
1205{
1206 struct ia64_pal_retval iprv;
1207 PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
1208 return iprv.status;
1209}
1210
1211/* Return the memory attributes implemented by the processor */
1212static inline s64
1213ia64_pal_mem_attrib (u64 *mem_attrib)
1214{
	/* Only the low 8 bits of v0 are meaningful (one bit per attribute). */
1215 struct ia64_pal_retval iprv;
1216 PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
1217 if (mem_attrib)
1218 *mem_attrib = iprv.v0 & 0xff;
1219 return iprv.status;
1220}
1221
1222/* Return the amount of memory needed for second phase of processor
1223 * self-test and the required alignment of memory.
1224 */
1225static inline s64
1226ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
1227{
1228 struct ia64_pal_retval iprv;
1229 PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
1230 if (bytes_needed)
1231 *bytes_needed = iprv.v0;
1232 if (alignment)
1233 *alignment = iprv.v1;
1234 return iprv.status;
1235}
1236
/* PMU capability summary returned by PAL_PERF_MON_INFO (bit-field overlay). */
1237typedef union pal_perf_mon_info_u {
1238 u64 ppmi_data;
1239 struct {
1240 u64 generic : 8,
1241 width : 8,
1242 cycles : 8,
1243 retired : 8,
1244 reserved : 32;
1245 } pal_perf_mon_info_s;
1246} pal_perf_mon_info_u_t;
1247
1248/* Return the performance monitor information about what can be counted
1249 * and how to configure the monitors to count the desired events.
1250 */
1251static inline s64
1252ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
1253{
	/* pm_buffer address is passed by value for PAL to fill; pm_info is an
	 * optional out-param. NOTE(review): pm_buffer is NOT NULL-checked here. */
1254 struct ia64_pal_retval iprv;
1255 PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
1256 if (pm_info)
1257 pm_info->ppmi_data = iprv.v0;
1258 return iprv.status;
1259}
1260
1261/* Specifies the physical address of the processor interrupt block
1262 * and I/O port space.
1263 */
1264static inline s64
1265ia64_pal_platform_addr (u64 type, u64 physical_addr)
1266{
1267 struct ia64_pal_retval iprv;
1268 PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
1269 return iprv.status;
1270}
1271
1272/* Set the SAL PMI entrypoint in memory */
1273static inline s64
1274ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
1275{
1276 struct ia64_pal_retval iprv;
1277 PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
1278 return iprv.status;
1279}
1280
1281struct pal_features_s;
1282/* Provide information about configurable processor features */
1283static inline s64
1284ia64_pal_proc_get_features (u64 *features_avail,
1285 u64 *features_status,
1286 u64 *features_control)
1287{
	/* Made in physical mode (PAL_CALL_PHYS). NOTE(review): unlike the other
	 * wrappers in this file, the three out-pointers are dereferenced without
	 * NULL checks on success -- all must be valid. */
1288 struct ia64_pal_retval iprv;
1289 PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0);
1290 if (iprv.status == 0) {
1291 *features_avail = iprv.v0;
1292 *features_status = iprv.v1;
1293 *features_control = iprv.v2;
1294 }
1295 return iprv.status;
1296}
1297
1298/* Enable/disable processor dependent features */
1299static inline s64
1300ia64_pal_proc_set_features (u64 feature_select)
1301{
1302 struct ia64_pal_retval iprv;
1303 PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
1304 return iprv.status;
1305}
1306
1307/*
1308 * Put everything in a struct so we avoid the global offset table whenever
1309 * possible.
1310 */
1311typedef struct ia64_ptce_info_s {
1312 u64 base;
1313 u32 count[2];
1314 u32 stride[2];
1315} ia64_ptce_info_t;
1316
1317/* Return the information required for the architected loop used to purge
1318 * (initialize) the entire TC
1319 */
1320static inline s64
1321ia64_get_ptce (ia64_ptce_info_t *ptce)
1322{
1323 struct ia64_pal_retval iprv;
1324
	/* NOTE(review): -1 here is a local sentinel, not a PAL status code --
	 * callers only distinguish zero/non-zero, but the mixing is worth knowing. */
1325 if (!ptce)
1326 return -1;
1327
1328 PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
	/* counts and strides come back packed two u32s per 64-bit word,
	 * high half in element [0]. */
1329 if (iprv.status == 0) {
1330 ptce->base = iprv.v0;
1331 ptce->count[0] = iprv.v1 >> 32;
1332 ptce->count[1] = iprv.v1 & 0xffffffff;
1333 ptce->stride[0] = iprv.v2 >> 32;
1334 ptce->stride[1] = iprv.v2 & 0xffffffff;
1335 }
1336 return iprv.status;
1337}
1338
1339/* Return info about implemented application and control registers. */
1340static inline s64
1341ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
1342{
1343 struct ia64_pal_retval iprv;
1344 PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
1345 if (reg_info_1)
1346 *reg_info_1 = iprv.v0;
1347 if (reg_info_2)
1348 *reg_info_2 = iprv.v1;
1349 return iprv.status;
1350}
1351
/* RSE implementation hints returned by PAL_RSE_INFO (bit-field overlay). */
1352typedef union pal_hints_u {
1353 u64 ph_data;
1354 struct {
1355 u64 si : 1,
1356 li : 1,
1357 reserved : 62;
1358 } pal_hints_s;
1359} pal_hints_u_t;
1360
1361/* Return information about the register stack and RSE for this processor
1362 * implementation.
1363 */
1364static inline s64
1365ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
1366{
	/* Both out-parameters are optional (written only when non-NULL). */
1367 struct ia64_pal_retval iprv;
1368 PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
1369 if (num_phys_stacked)
1370 *num_phys_stacked = iprv.v0;
1371 if (hints)
1372 hints->ph_data = iprv.v1;
1373 return iprv.status;
1374}
1375
1376/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
1377 * suspended, but cache and TLB coherency is maintained.
1378 * This is usually called in IA-32 mode.
1379 */
1380static inline s64
1381ia64_pal_shutdown (void)
1382{
1383 struct ia64_pal_retval iprv;
1384 PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
1385 return iprv.status;
1386}
1387
1388/* Perform the second phase of processor self-test. */
1389static inline s64
1390ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
1391{
	/* self_test_state is an optional out-param holding the result word. */
1392 struct ia64_pal_retval iprv;
1393 PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
1394 if (self_test_state)
1395 *self_test_state = iprv.v0;
1396 return iprv.status;
1397}
1398
/* PAL_A/PAL_B revision words as returned by PAL_VERSION (bit-field overlay). */
1399typedef union pal_version_u {
1400 u64 pal_version_val;
1401 struct {
1402 u64 pv_pal_b_rev : 8;
1403 u64 pv_pal_b_model : 8;
1404 u64 pv_reserved1 : 8;
1405 u64 pv_pal_vendor : 8;
1406 u64 pv_pal_a_rev : 8;
1407 u64 pv_pal_a_model : 8;
1408 u64 pv_reserved2 : 16;
1409 } pal_version_s;
1410} pal_version_u_t;
1411
1412
1413/* Return PAL version information */
1414static inline s64
1415ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
1416{
	/* Physical-mode call; v0 = minimum supported version, v1 = current
	 * version. Both out-params are optional. */
1417 struct ia64_pal_retval iprv;
1418 PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
1419 if (pal_min_version)
1420 pal_min_version->pal_version_val = iprv.v0;
1421
1422 if (pal_cur_version)
1423 pal_cur_version->pal_version_val = iprv.v1;
1424
1425 return iprv.status;
1426}
1427
/* TC (translation cache) level description from PAL_VM_INFO (bit-field overlay). */
1428typedef union pal_tc_info_u {
1429 u64 pti_val;
1430 struct {
1431 u64 num_sets : 8,
1432 associativity : 8,
1433 num_entries : 16,
1434 pf : 1,
1435 unified : 1,
1436 reduce_tr : 1,
1437 reserved : 29;
1438 } pal_tc_info_s;
1439} pal_tc_info_u_t;
1440
/* Shorthand accessors into pal_tc_info_u_t (used as e.g. info.tc_num_sets). */
1441#define tc_reduce_tr pal_tc_info_s.reduce_tr
1442#define tc_unified pal_tc_info_s.unified
1443#define tc_pf pal_tc_info_s.pf
1444#define tc_num_entries pal_tc_info_s.num_entries
1445#define tc_associativity pal_tc_info_s.associativity
1446#define tc_num_sets pal_tc_info_s.num_sets
1447
1448
1449/* Return information about the virtual memory characteristics of the processor
1450 * implementation.
1451 */
1452static inline s64
1453ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
1454{
	/* v0 = TC description word, v1 = supported page-size bit-vector;
	 * both out-params are optional. */
1455 struct ia64_pal_retval iprv;
1456 PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
1457 if (tc_info)
1458 tc_info->pti_val = iprv.v0;
1459 if (tc_pages)
1460 *tc_pages = iprv.v1;
1461 return iprv.status;
1462}
1463
1464/* Get page size information about the virtual memory characteristics of the processor
1465 * implementation.
1466 */
1467static inline s64
1468ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
1469{
	/* v0 = TR-supported page sizes, v1 = VHPT-walker page sizes;
	 * both out-params are optional. */
1470 struct ia64_pal_retval iprv;
1471 PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
1472 if (tr_pages)
1473 *tr_pages = iprv.v0;
1474 if (vw_pages)
1475 *vw_pages = iprv.v1;
1476 return iprv.status;
1477}
1478
/* First VM summary word from PAL_VM_SUMMARY (bit-field overlay). */
1479typedef union pal_vm_info_1_u {
1480 u64 pvi1_val;
1481 struct {
1482 u64 vw : 1,
1483 phys_add_size : 7,
1484 key_size : 8,
1485 max_pkr : 8,
1486 hash_tag_id : 8,
1487 max_dtr_entry : 8,
1488 max_itr_entry : 8,
1489 max_unique_tcs : 8,
1490 num_tc_levels : 8;
1491 } pal_vm_info_1_s;
1492} pal_vm_info_1_u_t;
1493
/* Second VM summary word from PAL_VM_SUMMARY (bit-field overlay). */
1494typedef union pal_vm_info_2_u {
1495 u64 pvi2_val;
1496 struct {
1497 u64 impl_va_msb : 8,
1498 rid_size : 8,
1499 reserved : 48;
1500 } pal_vm_info_2_s;
1501} pal_vm_info_2_u_t;
1502
1503/* Get summary information about the virtual memory characteristics of the processor
1504 * implementation.
1505 */
1506static inline s64
1507ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
1508{
1509 struct ia64_pal_retval iprv;
1510 PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
1511 if (vm_info_1)
1512 vm_info_1->pvi1_val = iprv.v0;
1513 if (vm_info_2)
1514 vm_info_2->pvi2_val = iprv.v1;
1515 return iprv.status;
1516}
1517
/* Validity bits for a translation register read. NOTE(review): the union
 * tag is pal_itr_valid_u but the typedef is pal_tr_valid_u_t -- the names
 * are inconsistent (itr vs tr) though harmless. */
1518typedef union pal_itr_valid_u {
1519 u64 piv_val;
1520 struct {
1521 u64 access_rights_valid : 1,
1522 priv_level_valid : 1,
1523 dirty_bit_valid : 1,
1524 mem_attr_valid : 1,
1525 reserved : 60;
1526 } pal_tr_valid_s;
1527} pal_tr_valid_u_t;
1528
1529/* Read a translation register */
1530static inline s64
1531ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
1532{
	/* Physical-mode, stacked-register call; tr_buffer is converted to a
	 * physical address via ia64_tpa() before being handed to PAL. */
1533 struct ia64_pal_retval iprv;
1534 PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type,(u64)ia64_tpa(tr_buffer));
1535 if (tr_valid)
1536 tr_valid->piv_val = iprv.v0;
1537 return iprv.status;
1538}
1539
1540/*
1541 * PAL_PREFETCH_VISIBILITY transaction types
1542 */
1543#define PAL_VISIBILITY_VIRTUAL 0
1544#define PAL_VISIBILITY_PHYSICAL 1
1545
1546/*
1547 * PAL_PREFETCH_VISIBILITY return codes
1548 */
1549#define PAL_VISIBILITY_OK 1
1550#define PAL_VISIBILITY_OK_REMOTE_NEEDED 0
1551#define PAL_VISIBILITY_INVAL_ARG -2
1552#define PAL_VISIBILITY_ERROR -3
1553
/* Make prefetches to a memory region visible/invisible; trans_type is one
 * of the PAL_VISIBILITY_* transaction types above, and the status maps to
 * the PAL_VISIBILITY_* return codes. */
1554static inline s64
1555ia64_pal_prefetch_visibility (s64 trans_type)
1556{
1557 struct ia64_pal_retval iprv;
1558 PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
1559 return iprv.status;
1560}
1561
1562#endif /* __ASSEMBLY__ */
1563
1564#endif /* _ASM_IA64_PAL_H */
diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h
new file mode 100644
index 000000000000..6c6b679b7a9e
--- /dev/null
+++ b/include/asm-ia64/param.h
@@ -0,0 +1,42 @@
1#ifndef _ASM_IA64_PARAM_H
2#define _ASM_IA64_PARAM_H
3
4/*
5 * Fundamental kernel parameters.
6 *
7 * Based on <asm-i386/param.h>.
8 *
9 * Modified 1998, 1999, 2002-2003
10 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
11 */
12
13#define EXEC_PAGESIZE 65536
14
15#ifndef NOGROUP
16# define NOGROUP (-1)
17#endif
18
19#define MAXHOSTNAMELEN 64 /* max length of hostname */
20
21#ifdef __KERNEL__
22# include <linux/config.h> /* mustn't include <linux/config.h> outside of #ifdef __KERNEL__ */
23# ifdef CONFIG_IA64_HP_SIM
24 /*
25 * Yeah, simulating stuff is slow, so let us catch some breath between
26 * timer interrupts...
27 */
28# define HZ 32
29# else
30# define HZ 1024
31# endif
32# define USER_HZ HZ
33# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
34#else
35 /*
36 * Technically, this is wrong, but some old apps still refer to it. The proper way to
37 * get the HZ value is via sysconf(_SC_CLK_TCK).
38 */
39# define HZ 1024
40#endif
41
42#endif /* _ASM_IA64_PARAM_H */
diff --git a/include/asm-ia64/parport.h b/include/asm-ia64/parport.h
new file mode 100644
index 000000000000..67e16adfcd25
--- /dev/null
+++ b/include/asm-ia64/parport.h
@@ -0,0 +1,20 @@
1/*
2 * parport.h: platform-specific PC-style parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_IA64_PARPORT_H
10#define _ASM_IA64_PARPORT_H 1
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13
/* On ia64 the only non-PCI PC-style parallel ports are ISA ones, so this
 * simply forwards to the ISA probe (defined in parport_pc.c, which is the
 * sole includer of this header). */
14static int __devinit
15parport_pc_find_nonpci_ports (int autoirq, int autodma)
16{
17 return parport_pc_find_isa_ports(autoirq, autodma);
18}
19
20#endif /* _ASM_IA64_PARPORT_H */
diff --git a/include/asm-ia64/patch.h b/include/asm-ia64/patch.h
new file mode 100644
index 000000000000..4797f3535e6d
--- /dev/null
+++ b/include/asm-ia64/patch.h
@@ -0,0 +1,25 @@
1#ifndef _ASM_IA64_PATCH_H
2#define _ASM_IA64_PATCH_H
3
4/*
5 * Copyright (C) 2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * There are a number of reasons for patching instructions. Rather than duplicating code
9 * all over the place, we put the common stuff here. Reasons for patching: in-kernel
10 * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
11 * shared library. Undoubtedly, some of these reasons will disappear and others will
12 * be added over time.
13 */
14#include <linux/elf.h>
15#include <linux/types.h>
16
17extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
18extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/
19extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
20
21extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
22extern void ia64_patch_vtop (unsigned long start, unsigned long end);
23extern void ia64_patch_gate (void);
24
25#endif /* _ASM_IA64_PATCH_H */
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
new file mode 100644
index 000000000000..a8314ee4e7d2
--- /dev/null
+++ b/include/asm-ia64/pci.h
@@ -0,0 +1,141 @@
1#ifndef _ASM_IA64_PCI_H
2#define _ASM_IA64_PCI_H
3
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/spinlock.h>
7#include <linux/string.h>
8#include <linux/types.h>
9
10#include <asm/io.h>
11#include <asm/scatterlist.h>
12
13/*
14 * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
15 * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
16 * loader.
17 */
18#define pcibios_assign_all_busses() 0
19#define pcibios_scan_all_fns(a, b) 0
20
21#define PCIBIOS_MIN_IO 0x1000
22#define PCIBIOS_MIN_MEM 0x10000000
23
24void pcibios_config_init(void);
25
26struct pci_dev;
27
28/*
29 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
30 * between device bus addresses and CPU physical addresses. Platforms with a hardware I/O
31 * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
32 * network device layers. Platforms with separate bus address spaces _must_ turn this off
33 * and provide a device DMA mapping implementation that takes care of the necessary
34 * address translation.
35 *
36 * For now, the ia64 platforms which may have separate/multiple bus address spaces all
37 * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
38 * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
39 */
40extern unsigned long ia64_max_iommu_merge_mask;
41#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
42
/* Arch hook invoked by the PCI core when a device is made bus master;
 * ia64 needs no extra setup, so this is intentionally a no-op. */
43static inline void
44pcibios_set_master (struct pci_dev *dev)
45{
46 /* No special bus mastering setup handling */
47}
48
/* Arch hook for penalizing an ISA IRQ during PCI IRQ routing;
 * intentionally a no-op on ia64. */
49static inline void
50pcibios_penalize_isa_irq (int irq)
51{
52 /* We don't do dynamic PCI IRQ allocation */
53}
54
55#define HAVE_ARCH_PCI_MWI 1
56extern int pcibios_prep_mwi (struct pci_dev *);
57
58#include <asm-generic/pci-dma-compat.h>
59
60/* pci_unmap_{single,page} is not a nop, thus... */
61#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
62 dma_addr_t ADDR_NAME;
63#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
64 __u32 LEN_NAME;
65#define pci_unmap_addr(PTR, ADDR_NAME) \
66 ((PTR)->ADDR_NAME)
67#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
68 (((PTR)->ADDR_NAME) = (VAL))
69#define pci_unmap_len(PTR, LEN_NAME) \
70 ((PTR)->LEN_NAME)
71#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
72 (((PTR)->LEN_NAME) = (VAL))
73
74/* The ia64 platform always supports 64-bit addressing. */
75#define pci_dac_dma_supported(pci_dev, mask) (1)
76#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off))
77#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr)))
78#define pci_dac_dma_to_offset(dev,dma_addr) offset_in_page(dma_addr)
79#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0)
80#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0)
81
82#define sg_dma_len(sg) ((sg)->dma_length)
83#define sg_dma_address(sg) ((sg)->dma_address)
84
85#define HAVE_PCI_MMAP
86extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
87 enum pci_mmap_state mmap_state, int write_combine);
88#define HAVE_PCI_LEGACY
89extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
90 struct vm_area_struct *vma);
91extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
92 size_t count);
93extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
94 size_t count);
95extern int pci_mmap_legacy_mem(struct kobject *kobj,
96 struct bin_attribute *attr,
97 struct vm_area_struct *vma);
98
99#define pci_get_legacy_mem platform_pci_get_legacy_mem
100#define pci_legacy_read platform_pci_legacy_read
101#define pci_legacy_write platform_pci_legacy_write
102
/* One host-bridge address window: the CPU-visible resource plus the
 * offset to convert between CPU and PCI bus addresses. */
103struct pci_window {
104 struct resource resource;
105 u64 offset;
106};
107
/* Per-host-bridge bookkeeping, stored in pci_bus->sysdata
 * (see PCI_CONTROLLER() below). */
108struct pci_controller {
109 void *acpi_handle;
110 void *iommu;
111 int segment;
112
113 unsigned int windows;
114 struct pci_window *window;
115
116 void *platform_data;
117};
118
119#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
120#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
121
122extern struct pci_ops pci_root_ops;
123
/* Nonzero if the domain (segment) number should appear in /proc PCI paths;
 * only domain 0 is hidden for backward compatibility. */
124static inline int pci_proc_domain(struct pci_bus *bus)
125{
126 return (pci_domain_nr(bus) != 0);
127}
128
/* Arch hook for adding sysfs/proc entries per device; no-op on ia64. */
129static inline void pcibios_add_platform_entries(struct pci_dev *dev)
130{
131}
132
133extern void pcibios_resource_to_bus(struct pci_dev *dev,
134 struct pci_bus_region *region, struct resource *res);
135
136extern void pcibios_bus_to_resource(struct pci_dev *dev,
137 struct resource *res, struct pci_bus_region *region);
138
139#define pcibios_scan_all_fns(a, b) 0
140
141#endif /* _ASM_IA64_PCI_H */
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
new file mode 100644
index 000000000000..1e87f19dad56
--- /dev/null
+++ b/include/asm-ia64/percpu.h
@@ -0,0 +1,72 @@
1#ifndef _ASM_IA64_PERCPU_H
2#define _ASM_IA64_PERCPU_H
3
4/*
5 * Copyright (C) 2002-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
10
11#ifdef __ASSEMBLY__
12# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
13#else /* !__ASSEMBLY__ */
14
15#include <linux/config.h>
16
17#include <linux/threads.h>
18
19#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
20# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
21#else
22# define __SMALL_ADDR_AREA
23#endif
24
25#define DECLARE_PER_CPU(type, name) \
26 extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
27
28/* Separate out the type, so (int[3], foo) works. */
29#define DEFINE_PER_CPU(type, name) \
30 __attribute__((__section__(".data.percpu"))) \
31 __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
32
33/*
34 * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
35 * external routine, to avoid include-hell.
36 */
37#ifdef CONFIG_SMP
38
39extern unsigned long __per_cpu_offset[NR_CPUS];
40
41/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
42DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
43
44#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
45#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
46
47extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
48extern void setup_per_cpu_areas (void);
49extern void *per_cpu_init(void);
50
51#else /* ! SMP */
52
53#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
54#define __get_cpu_var(var) per_cpu__##var
55#define per_cpu_init() (__phys_per_cpu_start)
56
57#endif /* SMP */
58
59#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
60#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
61
62/*
63 * Be extremely careful when taking the address of this variable! Due to virtual
64 * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
65 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
66 * more efficient.
67 */
68#define __ia64_per_cpu_var(var) (per_cpu__##var)
69
70#endif /* !__ASSEMBLY__ */
71
72#endif /* _ASM_IA64_PERCPU_H */
diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h
new file mode 100644
index 000000000000..136c60e6bfcc
--- /dev/null
+++ b/include/asm-ia64/perfmon.h
@@ -0,0 +1,259 @@
1/*
2 * Copyright (C) 2001-2003 Hewlett-Packard Co
3 * Stephane Eranian <eranian@hpl.hp.com>
4 */
5
6#ifndef _ASM_IA64_PERFMON_H
7#define _ASM_IA64_PERFMON_H
8
/*
 * perfmon commands supported on all CPU models
 */
#define PFM_WRITE_PMCS		0x01
#define PFM_WRITE_PMDS		0x02
#define PFM_READ_PMDS		0x03
#define PFM_STOP		0x04
#define PFM_START		0x05
#define PFM_ENABLE		0x06	/* obsolete */
#define PFM_DISABLE		0x07	/* obsolete */
#define PFM_CREATE_CONTEXT	0x08
#define PFM_DESTROY_CONTEXT	0x09	/* obsolete use close() */
#define PFM_RESTART		0x0a
#define PFM_PROTECT_CONTEXT	0x0b	/* obsolete */
#define PFM_GET_FEATURES	0x0c
#define PFM_DEBUG		0x0d
#define PFM_UNPROTECT_CONTEXT	0x0e	/* obsolete */
#define PFM_GET_PMC_RESET_VAL	0x0f
#define PFM_LOAD_CONTEXT	0x10
#define PFM_UNLOAD_CONTEXT	0x11

/*
 * PMU model specific commands (may not be supported on all PMU models)
 */
#define PFM_WRITE_IBRS		0x20
#define PFM_WRITE_DBRS		0x21
35
/*
 * context flags
 */
#define PFM_FL_NOTIFY_BLOCK	0x01	/* block task on user level notifications */
#define PFM_FL_SYSTEM_WIDE	0x02	/* create a system wide context */
#define PFM_FL_OVFL_NO_MSG	0x80	/* do not post overflow/end messages for notification */

/*
 * event set flags
 */
#define PFM_SETFL_EXCL_IDLE	0x01	/* exclude idle task (syswide only) XXX: DO NOT USE YET */

/*
 * PMC flags
 */
#define PFM_REGFL_OVFL_NOTIFY	0x1	/* send notification on overflow */
#define PFM_REGFL_RANDOM	0x2	/* randomize sampling interval */

/*
 * PMD/PMC/IBR/DBR return flags (ignored on input)
 *
 * Those flags are used on output and must be checked in case EAGAIN is returned
 * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
 */
#define PFM_REG_RETFL_NOTAVAIL	(1UL<<31) /* set if register is implemented but not available */
#define PFM_REG_RETFL_EINVAL	(1UL<<30) /* set if register entry is invalid */
#define PFM_REG_RETFL_MASK	(PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)

/* true when a register's return flags carry any error bit */
#define PFM_REG_HAS_ERROR(flag)	(((flag) & PFM_REG_RETFL_MASK) != 0)

typedef unsigned char pfm_uuid_t[16];	/* custom sampling buffer identifier type */

/*
 * Request structure used to define a context
 */
typedef struct {
	pfm_uuid_t     ctx_smpl_buf_id;	 /* which buffer format to use (if needed) */
	unsigned long  ctx_flags;	 /* noblock/block */
	unsigned short ctx_nextra_sets;	 /* number of extra event sets (you always get 1) */
	unsigned short ctx_reserved1;	 /* for future use */
	int	       ctx_fd;		 /* return arg: unique identification for context */
	void	       *ctx_smpl_vaddr;	 /* return arg: virtual address of sampling buffer, is used */
	unsigned long  ctx_reserved2[11];/* for future use */
} pfarg_context_t;
80
/*
 * Request structure used to write/read a PMC or PMD
 */
typedef struct {
	unsigned int	reg_num;	   /* which register */
	unsigned short	reg_set;	   /* event set for this register */
	unsigned short	reg_reserved1;	   /* for future use */

	unsigned long	reg_value;	   /* initial pmc/pmd value */
	unsigned long	reg_flags;	   /* input: pmc/pmd flags, return: reg error */

	unsigned long	reg_long_reset;	   /* reset after buffer overflow notification */
	unsigned long	reg_short_reset;   /* reset after counter overflow */

	unsigned long	reg_reset_pmds[4]; /* which other counters to reset on overflow */
	unsigned long	reg_random_seed;   /* seed value when randomization is used */
	unsigned long	reg_random_mask;   /* bitmask used to limit random value */
	unsigned long	reg_last_reset_val;/* return: PMD last reset value */

	unsigned long	reg_smpl_pmds[4];  /* which pmds are accessed when PMC overflows */
	unsigned long	reg_smpl_eventid;  /* opaque sampling event identifier */

	unsigned long	reg_reserved2[3];  /* for future use */
} pfarg_reg_t;

/* Request structure used to write/read a debug register (see PFM_WRITE_IBRS/PFM_WRITE_DBRS) */
typedef struct {
	unsigned int	dbreg_num;		/* which debug register */
	unsigned short	dbreg_set;		/* event set for this register */
	unsigned short	dbreg_reserved1;	/* for future use */
	unsigned long	dbreg_value;		/* value for debug register */
	unsigned long	dbreg_flags;		/* return: dbreg error */
	unsigned long	dbreg_reserved2[1];	/* for future use */
} pfarg_dbreg_t;

/* perfmon version/feature information (see PFM_GET_FEATURES) */
typedef struct {
	unsigned int	ft_version;	/* perfmon: major [16-31], minor [0-15] */
	unsigned int	ft_reserved;	/* reserved for future use */
	unsigned long	reserved[4];	/* for future use */
} pfarg_features_t;
120
/* Request structure for PFM_LOAD_CONTEXT */
typedef struct {
	pid_t		load_pid;	   /* process to load the context into */
	unsigned short	load_set;	   /* first event set to load */
	unsigned short	load_reserved1;	   /* for future use */
	unsigned long	load_reserved2[3]; /* for future use */
} pfarg_load_t;

/* overflow notification message (msg_type == PFM_MSG_OVFL) */
typedef struct {
	int		msg_type;		/* generic message header */
	int		msg_ctx_fd;		/* generic message header */
	unsigned long	msg_ovfl_pmds[4];	/* which PMDs overflowed */
	unsigned short	msg_active_set;		/* active set at the time of overflow */
	unsigned short	msg_reserved1;		/* for future use */
	unsigned int	msg_reserved2;		/* for future use */
	unsigned long	msg_tstamp;		/* for perf tuning/debug */
} pfm_ovfl_msg_t;

/* end-of-session message (msg_type == PFM_MSG_END) */
typedef struct {
	int		msg_type;		/* generic message header */
	int		msg_ctx_fd;		/* generic message header */
	unsigned long	msg_tstamp;		/* for perf tuning */
} pfm_end_msg_t;

/* generic message: the common prefix shared by all message layouts */
typedef struct {
	int		msg_type;		/* type of the message */
	int		msg_ctx_fd;		/* unique identifier for the context */
	unsigned long	msg_tstamp;		/* for perf tuning */
} pfm_gen_msg_t;

#define PFM_MSG_OVFL	1	/* an overflow happened */
#define PFM_MSG_END	2	/* task to which context was attached ended */

/* union of all message layouts */
typedef union {
	pfm_ovfl_msg_t	pfm_ovfl_msg;
	pfm_end_msg_t	pfm_end_msg;
	pfm_gen_msg_t	pfm_gen_msg;
} pfm_msg_t;

/*
 * Define the version numbers for both perfmon as a whole and the sampling buffer format.
 */
#define PFM_VERSION_MAJ		 2U
#define PFM_VERSION_MIN		 0U
#define PFM_VERSION		 (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
#define PFM_VERSION_MAJOR(x)	 (((x)>>16) & 0xffff)
#define PFM_VERSION_MINOR(x)	 ((x) & 0xffff)
167
168
/*
 * miscellaneous architected definitions
 */
#define PMU_FIRST_COUNTER	4	/* first counting monitor (PMC/PMD) */
#define PMU_MAX_PMCS		256	/* maximum architected number of PMC registers */
#define PMU_MAX_PMDS		256	/* maximum architected number of PMD registers */

#ifdef __KERNEL__

/* kernel-internal perfmon entry points (implemented by the perfmon core) */
extern long perfmonctl(int fd, int cmd, void *arg, int narg);

extern void pfm_save_regs (struct task_struct *);
extern void pfm_load_regs (struct task_struct *);

extern void pfm_exit_thread(struct task_struct *);
extern int  pfm_use_debug_registers(struct task_struct *);
extern int  pfm_release_debug_registers(struct task_struct *);
extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
extern void pfm_init_percpu(void);
extern void pfm_handle_work(void);
190
/*
 * Reset PMD register flags
 */
#define PFM_PMD_SHORT_RESET	0
#define PFM_PMD_LONG_RESET	1

/* controls returned by an overflow handler, telling perfmon what to do next */
typedef union {
	unsigned int val;
	struct {
		unsigned int notify_user:1;	/* notify user program of overflow */
		unsigned int reset_ovfl_pmds:1;	/* reset overflowed PMDs */
		unsigned int block_task:1;	/* block monitored task on kernel exit */
		unsigned int mask_monitoring:1;	/* mask monitors via PMCx.plm */
		unsigned int reserved:28;	/* for future use */
	} bits;
} pfm_ovfl_ctrl_t;

/* argument passed to a sampling format's overflow handler (fmt_handler) */
typedef struct {
	unsigned char	ovfl_pmd;	/* index of overflowed PMD */
	unsigned char	ovfl_notify;	/* =1 if monitor requested overflow notification */
	unsigned short	active_set;	/* event set active at the time of the overflow */
	pfm_ovfl_ctrl_t	ovfl_ctrl;	/* return: perfmon controls to set by handler */

	unsigned long	pmd_last_reset;	/* last reset value of the PMD */
	unsigned long	smpl_pmds[4];	/* bitmask of other PMD of interest on overflow */
	unsigned long	smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
	unsigned long	pmd_value;	/* current 64-bit value of the PMD */
	unsigned long	pmd_eventid;	/* eventid associated with PMD */
} pfm_ovfl_arg_t;


/*
 * A sampling-buffer format, identified by UUID; registered formats are kept on
 * a list (fmt_list) and their callbacks are invoked by the perfmon core.
 */
typedef struct {
	char		*fmt_name;	/* human-readable format name */
	pfm_uuid_t	fmt_uuid;	/* unique format identifier */
	size_t		fmt_arg_size;	/* size of format-specific argument */
	unsigned long	fmt_flags;

	int		(*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
	int		(*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
	int		(*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
	int		(*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
	int		(*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
	int		(*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
	int		(*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);

	struct list_head fmt_list;	/* linkage on the list of registered formats */
} pfm_buffer_fmt_t;

extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);

/*
 * perfmon interface exported to modules
 */
extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);

/*
 * describe the content of the local_cpu_data->pfm_syst_info field
 */
#define PFM_CPUINFO_SYST_WIDE	0x1	/* if set a system wide session exists */
#define PFM_CPUINFO_DCR_PP	0x2	/* if set the system wide session has started */
#define PFM_CPUINFO_EXCL_IDLE	0x4	/* the system wide session excludes the idle task */

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_PERFMON_H */
diff --git a/include/asm-ia64/perfmon_default_smpl.h b/include/asm-ia64/perfmon_default_smpl.h
new file mode 100644
index 000000000000..48822c0811d8
--- /dev/null
+++ b/include/asm-ia64/perfmon_default_smpl.h
@@ -0,0 +1,83 @@
1/*
2 * Copyright (C) 2002-2003 Hewlett-Packard Co
3 * Stephane Eranian <eranian@hpl.hp.com>
4 *
5 * This file implements the default sampling buffer format
6 * for Linux/ia64 perfmon subsystem.
7 */
#ifndef __PERFMON_DEFAULT_SMPL_H__
#define __PERFMON_DEFAULT_SMPL_H__ 1

/* UUID identifying the default sampling-buffer format (passed in ctx_smpl_buf_id) */
#define PFM_DEFAULT_SMPL_UUID { \
		0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}

/*
 * format specific parameters (passed at context creation)
 */
typedef struct {
	unsigned long buf_size;		/* size of the buffer in bytes */
	unsigned int  flags;		/* buffer specific flags */
	unsigned int  res1;		/* for future use */
	unsigned long reserved[2];	/* for future use */
} pfm_default_smpl_arg_t;

/*
 * combined context+format specific structure. Can be passed
 * to PFM_CONTEXT_CREATE
 */
typedef struct {
	pfarg_context_t		ctx_arg;	/* generic context request */
	pfm_default_smpl_arg_t	buf_arg;	/* default-format parameters */
} pfm_default_smpl_ctx_arg_t;
32
/*
 * This header is at the beginning of the sampling buffer returned to the user.
 * It is directly followed by the first record.
 */
typedef struct {
	unsigned long	hdr_count;		/* how many valid entries */
	unsigned long	hdr_cur_offs;		/* current offset from top of buffer */
	unsigned long	hdr_reserved2;		/* reserved for future use */

	unsigned long	hdr_overflows;		/* how many times the buffer overflowed */
	unsigned long	hdr_buf_size;		/* how many bytes in the buffer */

	unsigned int	hdr_version;		/* contains perfmon version (smpl format diffs) */
	unsigned int	hdr_reserved1;		/* for future use */
	unsigned long	hdr_reserved[10];	/* for future use */
} pfm_default_smpl_hdr_t;

/*
 * Entry header in the sampling buffer.  The header is directly followed
 * with the values of the PMD registers of interest saved in increasing
 * index order: PMD4, PMD5, and so on.  How many PMDs are present depends
 * on how the session was programmed.
 *
 * In the case where multiple counters overflow at the same time, multiple
 * entries are written consecutively.
 *
 * last_reset_value member indicates the initial value of the overflowed PMD.
 */
typedef struct {
	int		pid;			/* thread id (for NPTL, this is gettid()) */
	unsigned char	reserved1[3];		/* reserved for future use */
	unsigned char	ovfl_pmd;		/* index of overflowed PMD */

	unsigned long	last_reset_val;		/* initial value of overflowed PMD */
	unsigned long	ip;			/* where did the overflow interrupt happen */
	unsigned long	tstamp;			/* ar.itc when entering perfmon intr. handler */

	unsigned short	cpu;			/* cpu on which the overflow occurred */
	unsigned short	set;			/* event set active when overflow occurred */
	int		tgid;			/* thread group id (for NPTL, this is getpid()) */
} pfm_default_smpl_entry_t;

#define PFM_DEFAULT_MAX_PMDS		64 /* how many pmds supported by data structures (sizeof(unsigned long)*8) */
#define PFM_DEFAULT_MAX_ENTRY_SIZE	(sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE	(sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)

/* version of this sampling format, encoded as major<<16 | minor */
#define PFM_DEFAULT_SMPL_VERSION_MAJ	2U
#define PFM_DEFAULT_SMPL_VERSION_MIN	0U
#define PFM_DEFAULT_SMPL_VERSION	(((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))

#endif /* __PERFMON_DEFAULT_SMPL_H__ */
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
new file mode 100644
index 000000000000..0f05dc8bd460
--- /dev/null
+++ b/include/asm-ia64/pgalloc.h
@@ -0,0 +1,167 @@
1#ifndef _ASM_IA64_PGALLOC_H
2#define _ASM_IA64_PGALLOC_H
3
4/*
5 * This file contains the functions and defines necessary to allocate
6 * page tables.
7 *
8 * This hopefully works with any (fixed) ia-64 page-size, as defined
9 * in <asm/page.h> (currently 8192).
10 *
11 * Copyright (C) 1998-2001 Hewlett-Packard Co
12 * David Mosberger-Tang <davidm@hpl.hp.com>
13 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
14 */
15
16#include <linux/config.h>
17
18#include <linux/compiler.h>
19#include <linux/mm.h>
20#include <linux/page-flags.h>
21#include <linux/threads.h>
22
23#include <asm/mmu_context.h>
24
/*
 * Very stupidly, we used to get new pgd's and pmd's, init their contents
 * to point to the NULL versions of the next level page table, later on
 * completely re-init them the same way, then free them up.  This wasted
 * a lot of work and caused unnecessary memory traffic.  How broken...
 * We fix this by caching them.
 */
/* per-CPU quicklists of free pgd/pmd pages, and their combined length */
#define pgd_quicklist		(local_cpu_data->pgd_quick)
#define pmd_quicklist		(local_cpu_data->pmd_quick)
#define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)
35
36static inline pgd_t*
37pgd_alloc_one_fast (struct mm_struct *mm)
38{
39 unsigned long *ret = NULL;
40
41 preempt_disable();
42
43 ret = pgd_quicklist;
44 if (likely(ret != NULL)) {
45 pgd_quicklist = (unsigned long *)(*ret);
46 ret[0] = 0;
47 --pgtable_cache_size;
48 } else
49 ret = NULL;
50
51 preempt_enable();
52
53 return (pgd_t *) ret;
54}
55
56static inline pgd_t*
57pgd_alloc (struct mm_struct *mm)
58{
59 /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
60 pgd_t *pgd = pgd_alloc_one_fast(mm);
61
62 if (unlikely(pgd == NULL)) {
63 pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
64 }
65 return pgd;
66}
67
/*
 * Return a pgd page to this CPU's quicklist; the first word of the page is
 * reused as the list link.  Preemption is disabled around the list update.
 */
static inline void
pgd_free (pgd_t *pgd)
{
	preempt_disable();
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	++pgtable_cache_size;
	preempt_enable();
}

/* Install 'pmd' into *pud_entry; the entry stores the physical address. */
static inline void
pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}
83
/*
 * Pop a pmd page off this CPU's quicklist, or return NULL when the list is
 * empty.  Preemption is disabled while the per-CPU list is manipulated.
 */
static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = (unsigned long *)pmd_quicklist;
	if (likely(ret != NULL)) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;		/* re-clear the word used as the list link */
		--pgtable_cache_size;
	}

	preempt_enable();

	return (pmd_t *)ret;
}
102
103static inline pmd_t*
104pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
105{
106 pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
107
108 return pmd;
109}
110
/* Return a pmd page to this CPU's quicklist (first word used as list link). */
static inline void
pmd_free (pmd_t *pmd)
{
	preempt_disable();
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	++pgtable_cache_size;
	preempt_enable();
}

/* Freeing a pmd during a TLB shootdown just goes through pmd_free(). */
#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

/* Install a user pte page (a struct page) into *pmd_entry as a physical address. */
static inline void
pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

/* Same, but for a kernel pte page given by its direct-mapped virtual address. */
static inline void
pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}
134
135static inline struct page *
136pte_alloc_one (struct mm_struct *mm, unsigned long addr)
137{
138 struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
139
140 return pte;
141}
142
143static inline pte_t *
144pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
145{
146 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
147
148 return pte;
149}
150
/* Release a user pte page. */
static inline void
pte_free (struct page *pte)
{
	__free_page(pte);
}

/* Release a kernel pte page (direct-mapped address). */
static inline void
pte_free_kernel (pte_t *pte)
{
	free_page((unsigned long) pte);
}

/* Freeing a pte during a TLB shootdown must go through the mmu-gather code. */
#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))

/* Trim the per-CPU page-table quicklists back to an acceptable size. */
extern void check_pgt_cache (void);

#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
new file mode 100644
index 000000000000..1757a811f436
--- /dev/null
+++ b/include/asm-ia64/pgtable.h
@@ -0,0 +1,593 @@
1#ifndef _ASM_IA64_PGTABLE_H
2#define _ASM_IA64_PGTABLE_H
3
4/*
5 * This file contains the functions and defines necessary to modify and use
6 * the IA-64 page table tree.
7 *
8 * This hopefully works with any (fixed) IA-64 page-size, as defined
9 * in <asm/page.h>.
10 *
11 * Copyright (C) 1998-2004 Hewlett-Packard Co
12 * David Mosberger-Tang <davidm@hpl.hp.com>
13 */
14
15#include <linux/config.h>
16
17#include <asm/mman.h>
18#include <asm/page.h>
19#include <asm/processor.h>
20#include <asm/system.h>
21#include <asm/types.h>
22
#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPD long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
/* access rights: "a / b" means rights a at the PTE's level, b at more-privileged levels */
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE		(1 << 1)	/* see swap & file pte remarks below */

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

/* log2 of the architected page sizes */
#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32
81
/*
 * Common bit combinations for building page protections.  Parenthesized so
 * the macros are safe inside larger expressions (e.g. masking with '&'); the
 * values are unchanged for the existing '|'-context uses (CERT PRE02-C).
 */
#define __ACCESS_BITS		(_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB)
#define __DIRTY_BITS_NO_ED	(_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB)
#define __DIRTY_BITS		(_PAGE_ED | __DIRTY_BITS_NO_ED)
85
/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_SHIFT		(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD		(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_PGD_NR	0

/*
 * Definitions for second level:
 *
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))

/*
 * Definitions for third level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PAGE_SHIFT-3))

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
127
128# ifndef __ASSEMBLY__
129
130#include <asm/bitops.h>
131#include <asm/cacheflush.h>
132#include <asm/mmu_context.h>
133#include <asm/processor.h>
134
/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private shared memory segment, the _S
 * version gets used for a shared memory segment with MAP_SHARED on.
 * In a private shared memory segment, we do a copy-on-write if a task
 * attempts to write to the page.
 */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
#define __P011	PAGE_READONLY	/* ditto */
#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
#define __S011	PAGE_SHARED
#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)

/* Report a corrupted page-table entry at the given level. */
#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
165
166
/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	/* any bit set in this CPU's unimplemented-physical-address mask makes ADDR invalid */
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)

194
/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

/*
 * On some architectures, special things need to be done when setting
 * the PTE in a page table.  Nothing special needs to be on IA-64.
 */
#define set_pte(ptep, pteval)	(*(ptep) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval)	set_pte(ptep,pteval)

#define RGN_SIZE	(1UL << 61)	/* size of one virtual region */
#define RGN_KERNEL	7		/* region number used by the kernel */

#define VMALLOC_START		0xa000000200000000UL
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT	(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END		vmalloc_end	/* runtime-adjustable end of vmalloc area */
  extern unsigned long vmalloc_end;
#else
# define VMALLOC_END		(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
#endif

/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
#define	kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
222
/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

/* Replace a pte's protection bits, keeping the pfn and other _PAGE_CHG_MASK bits. */
#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)			page_pte_prot(page, __pgprot(0))
244
#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page_kernel(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)		(pud_val(pud) != 0UL)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)

#define pud_page(pud)			((unsigned long) __va(pud_val(pud) & _PFN_MASK))

/*
 * The following only have defined behavior if pte_present() is true.
 */
#define pte_user(pte)		((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
#define pte_read(pte)		(((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte)		((pte_val(pte) & _PAGE_FILE) != 0)
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte)		(__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
286
/*
 * Macro to mark a page protection value as "uncacheable".  Note that "protection" is really a
 * misnomer here as the protection value contains the memory attribute bits, dirty bits,
 * and various other bits as well.
 */
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but do allow for consecutive writes to
 * be combined into single (but larger) write transactions.
 */
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
303
304static inline unsigned long
305pgd_index (unsigned long address)
306{
307 unsigned long region = address >> 61;
308 unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
309
310 return (region << (PAGE_SHIFT - 6)) | l1index;
311}
312
/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits. */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the third-level page table.  This looks more complicated than it
 * should be because some platforms place page tables in high memory.
 */
#define pte_index(addr)			(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
/* no highmem mapping was created above, so unmapping is a no-op: */
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
345
/* atomic versions of some of the PTE manipulations: */

/*
 * Test and clear the "accessed" (A) bit of *PTEP; returns 1 iff the bit
 * was set.  On SMP the clear must be atomic with respect to other CPUs
 * updating the same pte, hence test_and_clear_bit() on _PAGE_A_BIT; on
 * UP a plain read/modify/write via set_pte_at() suffices.
 */
static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}
363
/*
 * Test and clear the "dirty" (D) bit of *PTEP; returns 1 iff the bit
 * was set.  Mirrors ptep_test_and_clear_young(): atomic bit-clear on
 * SMP, plain set_pte_at() on UP.
 */
static inline int
ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
	return 1;
#endif
}
379
/*
 * Atomically read and zero *PTEP, returning the old pte.  On SMP the
 * read-and-clear must be a single xchg so a concurrent hardware/CPU
 * update of the A/D bits cannot be lost between the read and the clear.
 */
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}
391
/*
 * Clear the write bit of *PTEP.  On SMP this is done with a
 * cmpxchg loop so concurrent updates of the other pte bits (e.g.,
 * hardware setting A/D) are not overwritten by the read-modify-write.
 */
static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}
407
408static inline int
409pte_same (pte_t a, pte_t b)
410{
411 return pte_val(a) == pte_val(b);
412}
413
/* No external MMU caches to refresh on a PTE update, hence a no-op: */
#define update_mmu_cache(vma, address, pte) do { } while (0)

/* Boot-time top-level page directory: */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
418
/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be zero)
 *	bits  2- 8: swap-type
 *	bits  9-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be one)
 *	bits  2-62: file_offset/PAGE_SIZE
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 2) & 0x7f)
/* shift left once first to drop the _PAGE_PROTNONE bit (bit 63): */
#define __swp_offset(entry)		(((entry).val << 1) >> 10)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

/* 61 usable offset bits: bits 0-1 and 63 are reserved (see format above) */
#define PTE_FILE_MAX_BITS		61
/* recover bits 2..62 of the pte as the file offset: */
#define pte_to_pgoff(pte)		((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off)		((pte_t) { ((off) << 2) | _PAGE_FILE })
447
/* XXX is this right? */
#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
		remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

/* I/O mappings are ordinary pfn mappings on ia64: */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

/* No separate I/O spaces: the identity transformations below: */
#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
/* Geometry of the pgd covering the huge-page region: */
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
	struct vm_area_struct * prev, unsigned long start, unsigned long end);
#endif
478
/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this routine to take care of any (delayed) i-cache
 * flushing that may be necessary.
 */
extern void lazy_mmu_prot_update (pte_t pte);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
/* See the comment above: when !__safely_writable the update is simply dropped. */
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
do {											\
	if (__safely_writable) {							\
		set_pte(__ptep, __entry);						\
		flush_tlb_page(__vma, __addr);						\
	}										\
} while (0)
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
	ptep_establish(__vma, __addr, __ptep, __entry)
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
#  define __HAVE_ARCH_MEMMAP_INIT
   extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			    unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
528# endif /* !__ASSEMBLY__ */
529
/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START	GATE_ADDR
#define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)

/* Advertise the arch-specific pte helpers defined above: */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE
#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
563
/*
 * Override for pgd_addr_end() to deal with the virtual address space holes
 * in each region.  In regions 0..4 virtual address bits are used like this:
 *	+--------+------+--------+-----+-----+--------+
 *	| pgdhi3 | rsvd | pgdlow | pmd | pte | offset |
 *	+--------+------+--------+-----+-----+--------+
 * 'pgdlow' overflows to pgdhi3 (a.k.a. region bits) leaving rsvd==0
 */
#define IA64_PGD_OVERFLOW	(PGDIR_SIZE << (PAGE_SHIFT-6))

/*
 * If advancing by PGDIR_SIZE carried into the reserved bits (i.e. left
 * region 0..4 address space), skip ahead to the start of the next region.
 */
#define pgd_addr_end(addr, end)					\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	if (REGION_NUMBER(__boundary) < 5 &&			\
	    __boundary & IA64_PGD_OVERFLOW)			\
		__boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\
	(__boundary - 1 < (end) - 1)? __boundary: (end);	\
})

/* Same hole-skipping logic for the pmd level: */
#define pmd_addr_end(addr, end)					\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	if (REGION_NUMBER(__boundary) < 5 &&			\
	    __boundary & IA64_PGD_OVERFLOW)			\
		__boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\
	(__boundary - 1 < (end) - 1)? __boundary: (end);	\
})
589
590#include <asm-generic/pgtable-nopud.h>
591#include <asm-generic/pgtable.h>
592
593#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/include/asm-ia64/poll.h b/include/asm-ia64/poll.h
new file mode 100644
index 000000000000..160258a0528d
--- /dev/null
+++ b/include/asm-ia64/poll.h
@@ -0,0 +1,31 @@
1#ifndef _ASM_IA64_POLL_H
2#define _ASM_IA64_POLL_H
3
4/*
5 * poll(2) bit definitions. Based on <asm-i386/poll.h>.
6 *
7 * Modified 1998, 1999, 2002
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */
10
/* Basic poll(2) event bits (input, urgent, output, error, hangup, bad fd): */
#define POLLIN		0x0001
#define POLLPRI		0x0002
#define POLLOUT		0x0004
#define POLLERR		0x0008
#define POLLHUP		0x0010
#define POLLNVAL	0x0020

/* XOPEN/extended event bits: */
#define POLLRDNORM	0x0040
#define POLLRDBAND	0x0080
#define POLLWRNORM	0x0100
#define POLLWRBAND	0x0200
#define POLLMSG		0x0400
#define POLLREMOVE	0x1000

/* User-visible poll(2) descriptor/event record (part of the ABI): */
struct pollfd {
	int fd;
	short events;
	short revents;
};
30
31#endif /* _ASM_IA64_POLL_H */
diff --git a/include/asm-ia64/posix_types.h b/include/asm-ia64/posix_types.h
new file mode 100644
index 000000000000..adb62272694f
--- /dev/null
+++ b/include/asm-ia64/posix_types.h
@@ -0,0 +1,126 @@
1#ifndef _ASM_IA64_POSIX_TYPES_H
2#define _ASM_IA64_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 *
9 * Based on <asm-alpha/posix_types.h>.
10 *
11 * Modified 1998-2000, 2003
12 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
13 */
14
/* Kernel-side POSIX primitive types (part of the user/kernel ABI --
 * do not change sizes or signedness): */
typedef unsigned long	__kernel_ino_t;
typedef unsigned int	__kernel_mode_t;
typedef unsigned int	__kernel_nlink_t;
typedef long		__kernel_off_t;
typedef long long	__kernel_loff_t;
typedef int		__kernel_pid_t;
typedef int		__kernel_ipc_pid_t;
typedef unsigned int	__kernel_uid_t;
typedef unsigned int	__kernel_gid_t;
typedef unsigned long	__kernel_size_t;
typedef long		__kernel_ssize_t;
typedef long		__kernel_ptrdiff_t;
typedef long		__kernel_time_t;
typedef long		__kernel_suseconds_t;
typedef long		__kernel_clock_t;
typedef int		__kernel_timer_t;
typedef int		__kernel_clockid_t;
typedef int		__kernel_daddr_t;
typedef char *		__kernel_caddr_t;
typedef unsigned long	__kernel_sigset_t;	/* at least 32 bits */
typedef unsigned short	__kernel_uid16_t;
typedef unsigned short	__kernel_gid16_t;

typedef struct {
	int	val[2];
} __kernel_fsid_t;

/* "old" and 32-bit uid/gid variants are full-width on ia64: */
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
typedef __kernel_uid_t __kernel_uid32_t;
typedef __kernel_gid_t __kernel_gid32_t;

typedef unsigned int	__kernel_old_dev_t;
48
49# ifdef __KERNEL__
50
# ifndef __GNUC__

/* Macro versions for non-GNU compilers; note these may evaluate
 * their arguments more than once: */
#define __FD_SET(d, set)	((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
#define __FD_CLR(d, set)	((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#define __FD_ISSET(d, set)	(((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
#define __FD_ZERO(set)	\
  ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
59# else /* !__GNUC__ */
60
61/* With GNU C, use inline functions instead so args are evaluated only once: */
62
63#undef __FD_SET
64static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
65{
66 unsigned long _tmp = fd / __NFDBITS;
67 unsigned long _rem = fd % __NFDBITS;
68 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
69}
70
71#undef __FD_CLR
72static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
73{
74 unsigned long _tmp = fd / __NFDBITS;
75 unsigned long _rem = fd % __NFDBITS;
76 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
77}
78
79#undef __FD_ISSET
80static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
81{
82 unsigned long _tmp = fd / __NFDBITS;
83 unsigned long _rem = fd % __NFDBITS;
84 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
85}
86
/*
 * This will unroll the loop for the normal constant case (8 ints,
 * for a 256-bit fd_set)
 */
#undef __FD_ZERO
static __inline__ void __FD_ZERO(__kernel_fd_set *p)
{
	unsigned long *tmp = p->fds_bits;
	int i;

	/* Unrolled stores for the common compile-time-constant sizes;
	 * fall through to the generic loop otherwise. */
	if (__builtin_constant_p(__FDSET_LONGS)) {
		switch (__FDSET_LONGS) {
		      case 16:
			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
			return;

		      case 8:
			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
			return;

		      case 4:
			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
			return;
		}
	}
	i = __FDSET_LONGS;
	while (i) {
		i--;
		*tmp = 0;
		tmp++;
	}
}
123
124# endif /* !__GNUC__ */
125# endif /* __KERNEL__ */
126#endif /* _ASM_IA64_POSIX_TYPES_H */
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
new file mode 100644
index 000000000000..8769dd9df369
--- /dev/null
+++ b/include/asm-ia64/processor.h
@@ -0,0 +1,698 @@
1#ifndef _ASM_IA64_PROCESSOR_H
2#define _ASM_IA64_PROCESSOR_H
3
4/*
5 * Copyright (C) 1998-2004 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 * Stephane Eranian <eranian@hpl.hp.com>
8 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
9 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
10 *
11 * 11/24/98 S.Eranian added ia64_set_iva()
12 * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API
13 * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support
14 */
15
16#include <linux/config.h>
17
18#include <asm/intrinsics.h>
19#include <asm/kregs.h>
20#include <asm/ptrace.h>
21#include <asm/ustack.h>
22
/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
#define ARCH_HAS_SCHED_DOMAIN

#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than maximum architected values
 * but should be sufficient for a while
 */
#define IA64_NUM_PMC_REGS	32
#define IA64_NUM_PMD_REGS	32

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE is really misnamed.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)

/*
 * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
 * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
 * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
 * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
 */
#define MM_VM_SIZE(mm)		DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

/* Per-thread flag bits (stored in thread_struct.flags): */
#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
							/* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

/* Shifts/masks for extracting the UAC and FPEMU flag groups above: */
#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)


/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
80
81#ifndef __ASSEMBLY__
82
83#include <linux/cache.h>
84#include <linux/compiler.h>
85#include <linux/threads.h>
86#include <linux/types.h>
87
88#include <asm/fpu.h>
89#include <asm/page.h>
90#include <asm/percpu.h>
91#include <asm/rse.h>
92#include <asm/unwind.h>
93#include <asm/atomic.h>
94#ifdef CONFIG_NUMA
95#include <asm/nodedata.h>
96#endif
97
/* like above but expressed as bitfields for more efficient access: */
/* Bit-field view of the IA-64 Processor Status Register; field order
 * follows the architected PSR layout (see the IA-64 architecture
 * manual for the meaning of each bit). */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
136
/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 *pgd_quick;	/* quicklist of free page directories */
	__u64 *pmd_quick;	/* quicklist of free pmd pages */
	__u64 pgtable_cache_sz;
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;	/* ptc.e parameters (from PAL) */
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};
178
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))

extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

/* Store/fetch the per-task unaligned-access control bits (see
 * IA64_THREAD_UAC_*); the SET variants evaluate to 0 for success. */
#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

/* Same pattern for the floating-point emulation control bits: */
#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
220
#ifdef CONFIG_IA32_SUPPORT
/* An x86 segment-descriptor entry (two 32-bit halves), cached for TLS: */
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

struct partial_page_list;
#endif

/* Architecture-specific per-task state, embedded in task_struct: */
struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 old_k1;			/* old value of ar.k1 */
	__u64 old_iob;			/* old IOBase value */
	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
        /* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	__u64 pmcs[IA64_NUM_PMC_REGS];
	__u64 pmds[IA64_NUM_PMD_REGS];
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pmcs =			{0UL, },  \
				.pmds =			{0UL, },  \
				.pfm_context =		NULL,     \
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};
288
/* Static initializer for the boot task's thread_struct: */
#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =  -1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}
303
/* Set up user-mode register state for exec(): user psr bits, new ip,
 * fresh register/memory stacks, and (for non-dumpable tasks) scratch
 * registers cleared so no kernel bits leak to user-level. */
#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!current->mm->dumpable)) {							\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)
325
/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().  Nothing arch-specific to release on ia64, hence a no-op.
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (ie the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be free'd until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  The low bits hold
 * the slot number (psr.ri) within the bundle at cr_iip. */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = ia64_task_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
368
/* Deliberately undefined symbols: referencing them at compile time with
 * a non-constant/out-of-range register number produces a link error. */
extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

/* Read kernel register ar.k0..ar.k7 selected by compile-time REGNUM: */
#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

/* Write kernel register ar.k0..ar.k7 selected by compile-time REGNUM: */
#define ia64_set_kr(regnum, r) 					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})
404
/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/* Mark task T as owning the fph partition of the CPU we're running on. */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

/* Enable/disable access to the high floating-point partition by
 * clearing/setting psr.dfh (with a data serialization afterwards): */
#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
441
/* load fp 0.0 into fph */
/* Each helper temporarily enables fph access around the low-level
 * assembly routine, then disables it again. */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}
465
/*
 * Disable interrupts and interruption collection (psr.i and psr.ic),
 * returning the previous psr value so it can be restored with
 * ia64_set_psr().  The srlz.i serializes the change.
 */
static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_d();
}
487
/*
 * Insert a translation into an instruction and/or data translation
 * register.  TARGET_MASK bit 0 selects the instruction TR, bit 1 the
 * data TR.  Caller is expected to have interruption collection
 * disabled (see ia64_clear_ic()).
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.  Same TARGET_MASK convention as ia64_itr().
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).  Same TARGET_MASK convention as ia64_itr().
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}
536
/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

/* Signal end-of-interrupt by writing cr.eoi: */
static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

/* Busy-wait hint for spin loops: */
#define cpu_relax()	ia64_hint(ia64_hint_pause)

/* Program local redirection registers cr.lrr0/cr.lrr1: */
static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}
576
577
578/*
579 * Given the address to which a spill occurred, return the unat bit
580 * number that corresponds to this address.
581 */
582static inline __u64
583ia64_unat_pos (void *spill_addr)
584{
585 return ((__u64) spill_addr >> 3) & 0x3f;
586}
587
588/*
589 * Set the NaT bit of an integer register which was spilled at address
590 * SPILL_ADDR. UNAT is the mask to be updated.
591 */
592static inline void
593ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
594{
595 __u64 bit = ia64_unat_pos(spill_addr);
596 __u64 mask = 1UL << bit;
597
598 *unat = (*unat & ~mask) | (nat << bit);
599}
600
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 * Uses the kernel unwinder to walk up one frame from the blocked state;
 * returns 0 if the unwind fails.
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}
617
/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

/* Read the interrupt vector register (acknowledges the highest-priority
 * pending interrupt); serialized before and after the read. */
static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

/* Write data breakpoint register REGNUM.  The extra serialization is
 * only needed on Itanium (CONFIG_ITANIUM). */
static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

/* Read data breakpoint register REGNUM (same Itanium caveat). */
static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}
654
655static inline __u64
656ia64_rotr (__u64 w, __u64 n)
657{
658 return (w >> n) | (w << (64 - n));
659}
660
661#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
662
/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area: translate to the
 * physical address first, then map it back through __va().
 */
static inline void *
ia64_imva (void *addr)
{
	return __va((void *) ia64_tpa(addr));
}
674
/* Advertise hardware prefetch support to the generic kernel code: */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE			L1_CACHE_BYTES

/* Hint-only line fetch (no fault) for an expected read: */
static inline void
prefetch (const void *x)
{
	 ia64_lfetch(ia64_lfhint_none, x);
}

/* Hint-only line fetch in exclusive state for an expected write: */
static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)
693
694extern unsigned long boot_option_idle_override;
695
696#endif /* !__ASSEMBLY__ */
697
698#endif /* _ASM_IA64_PROCESSOR_H */
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
new file mode 100644
index 000000000000..0bef19538406
--- /dev/null
+++ b/include/asm-ia64/ptrace.h
@@ -0,0 +1,337 @@
1#ifndef _ASM_IA64_PTRACE_H
2#define _ASM_IA64_PTRACE_H
3
4/*
5 * Copyright (C) 1998-2004 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 * Stephane Eranian <eranian@hpl.hp.com>
8 * Copyright (C) 2003 Intel Co
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Fenghua Yu <fenghua.yu@intel.com>
11 * Arun Sharma <arun.sharma@intel.com>
12 *
13 * 12/07/98 S. Eranian added pt_regs & switch_stack
14 * 12/21/98 D. Mosberger updated to match latest code
15 * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
16 *
17 */
18/*
19 * When a user process is blocked, its state looks as follows:
20 *
21 * +----------------------+ ------- IA64_STK_OFFSET
22 * | | ^
23 * | struct pt_regs | |
24 * | | |
25 * +----------------------+ |
26 * | | |
27 * | memory stack | |
28 * | (growing downwards) | |
29 * //.....................// |
30 * |
31 * //.....................// |
32 * | | |
33 * +----------------------+ |
34 * | struct switch_stack | |
35 * | | |
36 * +----------------------+ |
37 * | | |
38 * //.....................// |
39 * |
40 * //.....................// |
41 * | | |
42 * | register stack | |
43 * | (growing upwards) | |
44 * | | |
45 * +----------------------+ | --- IA64_RBS_OFFSET
46 * | struct thread_info | | ^
47 * +----------------------+ | |
48 * | | | |
49 * | struct task_struct | | |
50 * current -> | | | |
51 * +----------------------+ -------
52 *
 * Note that ar.ec is not saved explicitly in pt_regs or switch_stack.
54 * This is because ar.ec is saved as part of ar.pfs.
55 */
56
57#include <linux/config.h>
58
59#include <asm/fpu.h>
60#include <asm/offsets.h>
61
62/*
63 * Base-2 logarithm of number of pages to allocate per task structure
64 * (including register backing store and memory stack):
65 */
66#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
67# define KERNEL_STACK_SIZE_ORDER 3
68#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
69# define KERNEL_STACK_SIZE_ORDER 2
70#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
71# define KERNEL_STACK_SIZE_ORDER 1
72#else
73# define KERNEL_STACK_SIZE_ORDER 0
74#endif
75
76#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
77#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
78
79#define KERNEL_STACK_SIZE IA64_STK_OFFSET
80
81#ifndef __ASSEMBLY__
82
83#include <asm/current.h>
84#include <asm/page.h>
85
86/*
87 * This struct defines the way the registers are saved on system
88 * calls.
89 *
90 * We don't save all floating point register because the kernel
91 * is compiled to use only a very small subset, so the other are
92 * untouched.
93 *
94 * THIS STRUCTURE MUST BE A MULTIPLE 16-BYTE IN SIZE
95 * (because the memory stack pointer MUST ALWAYS be aligned this way)
96 *
97 */
98struct pt_regs {
99 /* The following registers are saved by SAVE_MIN: */
100 unsigned long b6; /* scratch */
101 unsigned long b7; /* scratch */
102
103 unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
104 unsigned long ar_ssd; /* reserved for future use (scratch) */
105
106 unsigned long r8; /* scratch (return value register 0) */
107 unsigned long r9; /* scratch (return value register 1) */
108 unsigned long r10; /* scratch (return value register 2) */
109 unsigned long r11; /* scratch (return value register 3) */
110
111 unsigned long cr_ipsr; /* interrupted task's psr */
112 unsigned long cr_iip; /* interrupted task's instruction pointer */
113 /*
114 * interrupted task's function state; if bit 63 is cleared, it
115 * contains syscall's ar.pfs.pfm:
116 */
117 unsigned long cr_ifs;
118
119 unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
120 unsigned long ar_pfs; /* prev function state */
121 unsigned long ar_rsc; /* RSE configuration */
122 /* The following two are valid only if cr_ipsr.cpl > 0: */
123 unsigned long ar_rnat; /* RSE NaT */
124 unsigned long ar_bspstore; /* RSE bspstore */
125
126 unsigned long pr; /* 64 predicate registers (1 bit each) */
127 unsigned long b0; /* return pointer (bp) */
128 unsigned long loadrs; /* size of dirty partition << 16 */
129
130 unsigned long r1; /* the gp pointer */
131 unsigned long r12; /* interrupted task's memory stack pointer */
132 unsigned long r13; /* thread pointer */
133
134 unsigned long ar_fpsr; /* floating point status (preserved) */
135 unsigned long r15; /* scratch */
136
137 /* The remaining registers are NOT saved for system calls. */
138
139 unsigned long r14; /* scratch */
140 unsigned long r2; /* scratch */
141 unsigned long r3; /* scratch */
142
143 /* The following registers are saved by SAVE_REST: */
144 unsigned long r16; /* scratch */
145 unsigned long r17; /* scratch */
146 unsigned long r18; /* scratch */
147 unsigned long r19; /* scratch */
148 unsigned long r20; /* scratch */
149 unsigned long r21; /* scratch */
150 unsigned long r22; /* scratch */
151 unsigned long r23; /* scratch */
152 unsigned long r24; /* scratch */
153 unsigned long r25; /* scratch */
154 unsigned long r26; /* scratch */
155 unsigned long r27; /* scratch */
156 unsigned long r28; /* scratch */
157 unsigned long r29; /* scratch */
158 unsigned long r30; /* scratch */
159 unsigned long r31; /* scratch */
160
161 unsigned long ar_ccv; /* compare/exchange value (scratch) */
162
163 /*
164 * Floating point registers that the kernel considers scratch:
165 */
166 struct ia64_fpreg f6; /* scratch */
167 struct ia64_fpreg f7; /* scratch */
168 struct ia64_fpreg f8; /* scratch */
169 struct ia64_fpreg f9; /* scratch */
170 struct ia64_fpreg f10; /* scratch */
171 struct ia64_fpreg f11; /* scratch */
172};
173
174/*
175 * This structure contains the addition registers that need to
176 * preserved across a context switch. This generally consists of
177 * "preserved" registers.
178 */
179struct switch_stack {
180 unsigned long caller_unat; /* user NaT collection register (preserved) */
181 unsigned long ar_fpsr; /* floating-point status register */
182
183 struct ia64_fpreg f2; /* preserved */
184 struct ia64_fpreg f3; /* preserved */
185 struct ia64_fpreg f4; /* preserved */
186 struct ia64_fpreg f5; /* preserved */
187
188 struct ia64_fpreg f12; /* scratch, but untouched by kernel */
189 struct ia64_fpreg f13; /* scratch, but untouched by kernel */
190 struct ia64_fpreg f14; /* scratch, but untouched by kernel */
191 struct ia64_fpreg f15; /* scratch, but untouched by kernel */
192 struct ia64_fpreg f16; /* preserved */
193 struct ia64_fpreg f17; /* preserved */
194 struct ia64_fpreg f18; /* preserved */
195 struct ia64_fpreg f19; /* preserved */
196 struct ia64_fpreg f20; /* preserved */
197 struct ia64_fpreg f21; /* preserved */
198 struct ia64_fpreg f22; /* preserved */
199 struct ia64_fpreg f23; /* preserved */
200 struct ia64_fpreg f24; /* preserved */
201 struct ia64_fpreg f25; /* preserved */
202 struct ia64_fpreg f26; /* preserved */
203 struct ia64_fpreg f27; /* preserved */
204 struct ia64_fpreg f28; /* preserved */
205 struct ia64_fpreg f29; /* preserved */
206 struct ia64_fpreg f30; /* preserved */
207 struct ia64_fpreg f31; /* preserved */
208
209 unsigned long r4; /* preserved */
210 unsigned long r5; /* preserved */
211 unsigned long r6; /* preserved */
212 unsigned long r7; /* preserved */
213
214 unsigned long b0; /* so we can force a direct return in copy_thread */
215 unsigned long b1;
216 unsigned long b2;
217 unsigned long b3;
218 unsigned long b4;
219 unsigned long b5;
220
221 unsigned long ar_pfs; /* previous function state */
222 unsigned long ar_lc; /* loop counter (preserved) */
223 unsigned long ar_unat; /* NaT bits for r4-r7 */
224 unsigned long ar_rnat; /* RSE NaT collection register */
225 unsigned long ar_bspstore; /* RSE dirty base (preserved) */
226 unsigned long pr; /* 64 predicate registers (1 bit each) */
227};
228
#ifdef __KERNEL__
/*
 * We use the ia64_psr(regs)->ri to determine which of the three
 * instructions in bundle (16 bytes) took the sample. Generate
 * the canonical representation by adding to instruction pointer.
 */
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
/* Conserve space in histogram by encoding slot bits in address
 * bits 2 and 3 rather than bits 0 and 1.
 */
#define profile_pc(regs)						\
({									\
	unsigned long __ip = instruction_pointer(regs);			\
	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
})

  /* given a pointer to a task_struct, return the user's pt_regs */
  /* (pt_regs lives at the very top of the per-task stack area) */
# define ia64_task_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs)			((struct ia64_psr *) &(regs)->cr_ipsr)
  /* user mode <=> current privilege level is not 0 */
# define user_mode(regs)		(((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
  /* true iff REGS is the user-level pt_regs of TASK */
# define user_stack(task,regs)	((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
  /* in fsys mode: kernel privilege level but still on the user's pt_regs */
# define fsys_mode(task,regs)					\
  ({								\
	  struct task_struct *_task = (task);			\
	  struct pt_regs *_regs = (regs);			\
	  !user_mode(_regs) && user_stack(_task, _regs);	\
  })

  /*
   * System call handlers that, upon successful completion, need to return a negative value
   * should call force_successful_syscall_return() right before returning.  On architectures
   * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
   * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
   * flag will not get set.  On architectures which do not support a separate error flag,
   * the macro is a no-op and the spurious error condition needs to be filtered out by some
   * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
   * or something along those lines).
   *
   * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
   */
# define force_successful_syscall_return()	(ia64_task_regs(current)->r8 = 0)

  struct task_struct;			/* forward decl */
  struct unw_frame_info;		/* forward decl */

  extern void show_regs (struct pt_regs *);
  extern void ia64_do_show_stack (struct unw_frame_info *, void *);
  extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
					      unsigned long *);
  extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
			 unsigned long, long *);
  extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
			 unsigned long, long);
  extern void ia64_flush_fph (struct task_struct *);
  extern void ia64_sync_fph (struct task_struct *);
  extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
				  unsigned long, unsigned long);

  /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
  extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
  /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
  extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);

  extern void ia64_increment_ip (struct pt_regs *pt);
  extern void ia64_decrement_ip (struct pt_regs *pt);

#endif /* __KERNEL__ */
296
/* pt_all_user_regs is used for PTRACE_GETREGS and PTRACE_SETREGS */
struct pt_all_user_regs {
	unsigned long nat;		/* NaT collection bits */
	unsigned long cr_iip;
	unsigned long cfm;
	unsigned long cr_ipsr;
	unsigned long pr;

	unsigned long gr[32];		/* general registers r0-r31 */
	unsigned long br[8];		/* branch registers b0-b7 */
	unsigned long ar[128];		/* application registers; indexed via PT_AUR_* below */
	struct ia64_fpreg fr[128];	/* floating-point registers f0-f127 */
};
310
311#endif /* !__ASSEMBLY__ */
312
/* indices to application-registers array in pt_all_user_regs */
#define PT_AUR_RSC	16
#define PT_AUR_BSP	17
#define PT_AUR_BSPSTORE	18
#define PT_AUR_RNAT	19
#define PT_AUR_CCV	32
#define PT_AUR_UNAT	36
#define PT_AUR_FPSR	40
#define PT_AUR_PFS	64
#define PT_AUR_LC	65
#define PT_AUR_EC	66

/*
 * The numbers chosen here are somewhat arbitrary but absolutely MUST
 * not overlap with any of the numbers assigned in <linux/ptrace.h>.
 */
#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */
#define PTRACE_OLD_GETSIGINFO	13	/* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>)  */
#define PTRACE_OLD_SETSIGINFO	14	/* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>)  */
#define PTRACE_GETREGS		18	/* get all registers (pt_all_user_regs) in one shot */
#define PTRACE_SETREGS		19	/* set all registers (pt_all_user_regs) in one shot */

#define PTRACE_OLDSETOPTIONS	21	/* old request number — NOTE(review): presumably kept for compatibility */
336
337#endif /* _ASM_IA64_PTRACE_H */
diff --git a/include/asm-ia64/ptrace_offsets.h b/include/asm-ia64/ptrace_offsets.h
new file mode 100644
index 000000000000..b712773c759e
--- /dev/null
+++ b/include/asm-ia64/ptrace_offsets.h
@@ -0,0 +1,268 @@
1#ifndef _ASM_IA64_PTRACE_OFFSETS_H
2#define _ASM_IA64_PTRACE_OFFSETS_H
3
4/*
5 * Copyright (C) 1999, 2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8/*
9 * The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
10 * virtual structure that would have the following definition:
11 *
12 * struct uarea {
13 * struct ia64_fpreg fph[96]; // f32-f127
14 * unsigned long nat_bits;
15 * unsigned long empty1;
16 * struct ia64_fpreg f2; // f2-f5
17 * :
18 * struct ia64_fpreg f5;
19 * struct ia64_fpreg f10; // f10-f31
20 * :
21 * struct ia64_fpreg f31;
22 * unsigned long r4; // r4-r7
23 * :
24 * unsigned long r7;
25 * unsigned long b1; // b1-b5
26 * :
27 * unsigned long b5;
28 * unsigned long ar_ec;
29 * unsigned long ar_lc;
30 * unsigned long empty2[5];
31 * unsigned long cr_ipsr;
32 * unsigned long cr_iip;
33 * unsigned long cfm;
34 * unsigned long ar_unat;
35 * unsigned long ar_pfs;
36 * unsigned long ar_rsc;
37 * unsigned long ar_rnat;
38 * unsigned long ar_bspstore;
39 * unsigned long pr;
40 * unsigned long b6;
41 * unsigned long ar_bsp;
42 * unsigned long r1;
43 * unsigned long r2;
44 * unsigned long r3;
45 * unsigned long r12;
46 * unsigned long r13;
47 * unsigned long r14;
48 * unsigned long r15;
49 * unsigned long r8;
50 * unsigned long r9;
51 * unsigned long r10;
52 * unsigned long r11;
53 * unsigned long r16;
54 * :
55 * unsigned long r31;
56 * unsigned long ar_ccv;
57 * unsigned long ar_fpsr;
58 * unsigned long b0;
59 * unsigned long b7;
60 * unsigned long f6;
61 * unsigned long f7;
62 * unsigned long f8;
63 * unsigned long f9;
64 * unsigned long ar_csd;
65 * unsigned long ar_ssd;
66 * unsigned long rsvd1[710];
67 * unsigned long dbr[8];
68 * unsigned long rsvd2[504];
69 * unsigned long ibr[8];
70 * unsigned long rsvd3[504];
71 * unsigned long pmd[4];
72 * }
73 */
74
/* fph: f32-f127, one 16-byte ia64_fpreg slot each */
#define PT_F32			0x0000
#define PT_F33			0x0010
#define PT_F34			0x0020
#define PT_F35			0x0030
#define PT_F36			0x0040
#define PT_F37			0x0050
#define PT_F38			0x0060
#define PT_F39			0x0070
#define PT_F40			0x0080
#define PT_F41			0x0090
#define PT_F42			0x00a0
#define PT_F43			0x00b0
#define PT_F44			0x00c0
#define PT_F45			0x00d0
#define PT_F46			0x00e0
#define PT_F47			0x00f0
#define PT_F48			0x0100
#define PT_F49			0x0110
#define PT_F50			0x0120
#define PT_F51			0x0130
#define PT_F52			0x0140
#define PT_F53			0x0150
#define PT_F54			0x0160
#define PT_F55			0x0170
#define PT_F56			0x0180
#define PT_F57			0x0190
#define PT_F58			0x01a0
#define PT_F59			0x01b0
#define PT_F60			0x01c0
#define PT_F61			0x01d0
#define PT_F62			0x01e0
#define PT_F63			0x01f0
#define PT_F64			0x0200
#define PT_F65			0x0210
#define PT_F66			0x0220
#define PT_F67			0x0230
#define PT_F68			0x0240
#define PT_F69			0x0250
#define PT_F70			0x0260
#define PT_F71			0x0270
#define PT_F72			0x0280
#define PT_F73			0x0290
#define PT_F74			0x02a0
#define PT_F75			0x02b0
#define PT_F76			0x02c0
#define PT_F77			0x02d0
#define PT_F78			0x02e0
#define PT_F79			0x02f0
#define PT_F80			0x0300
#define PT_F81			0x0310
#define PT_F82			0x0320
#define PT_F83			0x0330
#define PT_F84			0x0340
#define PT_F85			0x0350
#define PT_F86			0x0360
#define PT_F87			0x0370
#define PT_F88			0x0380
#define PT_F89			0x0390
#define PT_F90			0x03a0
#define PT_F91			0x03b0
#define PT_F92			0x03c0
#define PT_F93			0x03d0
#define PT_F94			0x03e0
#define PT_F95			0x03f0
#define PT_F96			0x0400
#define PT_F97			0x0410
#define PT_F98			0x0420
#define PT_F99			0x0430
#define PT_F100			0x0440
#define PT_F101			0x0450
#define PT_F102			0x0460
#define PT_F103			0x0470
#define PT_F104			0x0480
#define PT_F105			0x0490
#define PT_F106			0x04a0
#define PT_F107			0x04b0
#define PT_F108			0x04c0
#define PT_F109			0x04d0
#define PT_F110			0x04e0
#define PT_F111			0x04f0
#define PT_F112			0x0500
#define PT_F113			0x0510
#define PT_F114			0x0520
#define PT_F115			0x0530
#define PT_F116			0x0540
#define PT_F117			0x0550
#define PT_F118			0x0560
#define PT_F119			0x0570
#define PT_F120			0x0580
#define PT_F121			0x0590
#define PT_F122			0x05a0
#define PT_F123			0x05b0
#define PT_F124			0x05c0
#define PT_F125			0x05d0
#define PT_F126			0x05e0
#define PT_F127			0x05f0

#define PT_NAT_BITS		0x0600

/* preserved and low scratch fp registers (16 bytes each) */
#define PT_F2			0x0610
#define PT_F3			0x0620
#define PT_F4			0x0630
#define PT_F5			0x0640
#define PT_F10			0x0650
#define PT_F11			0x0660
#define PT_F12			0x0670
#define PT_F13			0x0680
#define PT_F14			0x0690
#define PT_F15			0x06a0
#define PT_F16			0x06b0
#define PT_F17			0x06c0
#define PT_F18			0x06d0
#define PT_F19			0x06e0
#define PT_F20			0x06f0
#define PT_F21			0x0700
#define PT_F22			0x0710
#define PT_F23			0x0720
#define PT_F24			0x0730
#define PT_F25			0x0740
#define PT_F26			0x0750
#define PT_F27			0x0760
#define PT_F28			0x0770
#define PT_F29			0x0780
#define PT_F30			0x0790
#define PT_F31			0x07a0
#define PT_R4			0x07b0
#define PT_R5			0x07b8
#define PT_R6			0x07c0
#define PT_R7			0x07c8

/* note: 0x07d0 is unused (b1 starts at 0x07d8) */
#define PT_B1			0x07d8
#define PT_B2			0x07e0
#define PT_B3			0x07e8
#define PT_B4			0x07f0
#define PT_B5			0x07f8

#define PT_AR_EC		0x0800
#define PT_AR_LC		0x0808

/* 0x0810-0x0828 correspond to empty2[5] in the uarea layout above */
#define PT_CR_IPSR		0x0830
#define PT_CR_IIP		0x0838
#define PT_CFM			0x0840
#define PT_AR_UNAT		0x0848
#define PT_AR_PFS		0x0850
#define PT_AR_RSC		0x0858
#define PT_AR_RNAT		0x0860
#define PT_AR_BSPSTORE		0x0868
#define PT_PR			0x0870
#define PT_B6			0x0878
#define PT_AR_BSP		0x0880	/* note: this points to the *end* of the backing store! */
#define PT_R1			0x0888
#define PT_R2			0x0890
#define PT_R3			0x0898
#define PT_R12			0x08a0
#define PT_R13			0x08a8
#define PT_R14			0x08b0
#define PT_R15			0x08b8
#define PT_R8 			0x08c0
#define PT_R9			0x08c8
#define PT_R10			0x08d0
#define PT_R11			0x08d8
#define PT_R16			0x08e0
#define PT_R17			0x08e8
#define PT_R18			0x08f0
#define PT_R19			0x08f8
#define PT_R20			0x0900
#define PT_R21			0x0908
#define PT_R22			0x0910
#define PT_R23			0x0918
#define PT_R24			0x0920
#define PT_R25			0x0928
#define PT_R26			0x0930
#define PT_R27			0x0938
#define PT_R28			0x0940
#define PT_R29			0x0948
#define PT_R30			0x0950
#define PT_R31			0x0958
#define PT_AR_CCV		0x0960
#define PT_AR_FPSR		0x0968
#define PT_B0			0x0970
#define PT_B7			0x0978
#define PT_F6			0x0980
#define PT_F7			0x0990
#define PT_F8			0x09a0
#define PT_F9			0x09b0
#define PT_AR_CSD		0x09c0
#define PT_AR_SSD		0x09c8

#define PT_DBR			0x2000	/* data breakpoint registers */
#define PT_IBR			0x3000	/* instruction breakpoint registers */
#define PT_PMD			0x4000	/* performance monitoring counters */
267
268#endif /* _ASM_IA64_PTRACE_OFFSETS_H */
diff --git a/include/asm-ia64/resource.h b/include/asm-ia64/resource.h
new file mode 100644
index 000000000000..77b1eee01f30
--- /dev/null
+++ b/include/asm-ia64/resource.h
@@ -0,0 +1,8 @@
#ifndef _ASM_IA64_RESOURCE_H
#define _ASM_IA64_RESOURCE_H

#include <asm/ustack.h>
/* Override the generic stack-rlimit maximum with the ia64 default user
 * stack size before pulling in the generic resource definitions. */
#define _STK_LIM_MAX	DEFAULT_USER_STACK_SIZE
#include <asm-generic/resource.h>

#endif /* _ASM_IA64_RESOURCE_H */
diff --git a/include/asm-ia64/rse.h b/include/asm-ia64/rse.h
new file mode 100644
index 000000000000..02830a3b0196
--- /dev/null
+++ b/include/asm-ia64/rse.h
@@ -0,0 +1,66 @@
1#ifndef _ASM_IA64_RSE_H
2#define _ASM_IA64_RSE_H
3
4/*
5 * Copyright (C) 1998, 1999 Hewlett-Packard Co
6 * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * Register stack engine related helper functions. This file may be
9 * used in applications, so be careful about the name-space and give
10 * some consideration to non-GNU C compilers (though __inline__ is
11 * fine).
12 */
13
/*
 * Return the slot number (0-63) that ADDR occupies within its 64-slot
 * register-stack group.  Each slot is 8 bytes wide.
 */
static __inline__ unsigned long
ia64_rse_slot_num (unsigned long *addr)
{
	unsigned long byte_addr = (unsigned long) addr;

	return (byte_addr / 8) % 64;
}
19
20/*
21 * Return TRUE if ADDR is the address of an RNAT slot.
22 */
23static __inline__ unsigned long
24ia64_rse_is_rnat_slot (unsigned long *addr)
25{
26 return ia64_rse_slot_num(addr) == 0x3f;
27}
28
29/*
30 * Returns the address of the RNAT slot that covers the slot at
31 * address SLOT_ADDR.
32 */
33static __inline__ unsigned long *
34ia64_rse_rnat_addr (unsigned long *slot_addr)
35{
36 return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
37}
38
39/*
40 * Calculate the number of registers in the dirty partition starting at BSPSTORE and
41 * ending at BSP. This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
42 * ar.rnat.
43 */
44static __inline__ unsigned long
45ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
46{
47 unsigned long slots = (bsp - bspstore);
48
49 return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
50}
51
52/*
53 * The inverse of the above: given bspstore and the number of
54 * registers, calculate ar.bsp.
55 */
56static __inline__ unsigned long *
57ia64_rse_skip_regs (unsigned long *addr, long num_regs)
58{
59 long delta = ia64_rse_slot_num(addr) + num_regs;
60
61 if (num_regs < 0)
62 delta -= 0x3e;
63 return addr + num_regs + delta/0x3f;
64}
65
66#endif /* _ASM_IA64_RSE_H */
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
new file mode 100644
index 000000000000..6ece5061dc19
--- /dev/null
+++ b/include/asm-ia64/rwsem.h
@@ -0,0 +1,188 @@
1/*
2 * asm-ia64/rwsem.h: R/W semaphores for ia64
3 *
4 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
5 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
6 *
7 * Based on asm-i386/rwsem.h and other architecture implementation.
8 *
9 * The MSW of the count is the negated number of active writers and
10 * waiting lockers, and the LSW is the total number of active locks.
11 *
12 * The lock count is initialized to 0 (no active and no waiting lockers).
13 *
14 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
15 * of an uncontended lock. Readers increment by 1 and see a positive value
 * when uncontended, negative if there are writers (and maybe readers)
 * waiting (in which case it goes to sleep).
18 */
19
20#ifndef _ASM_IA64_RWSEM_H
21#define _ASM_IA64_RWSEM_H
22
23#include <linux/list.h>
24#include <linux/spinlock.h>
25
26#include <asm/intrinsics.h>
27
28/*
29 * the semaphore definition
30 */
31struct rw_semaphore {
32 signed int count;
33 spinlock_t wait_lock;
34 struct list_head wait_list;
35#if RWSEM_DEBUG
36 int debug;
37#endif
38};
39
40#define RWSEM_UNLOCKED_VALUE 0x00000000
41#define RWSEM_ACTIVE_BIAS 0x00000001
42#define RWSEM_ACTIVE_MASK 0x0000ffff
43#define RWSEM_WAITING_BIAS (-0x00010000)
44#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
45#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
46
47/*
48 * initialization
49 */
50#if RWSEM_DEBUG
51#define __RWSEM_DEBUG_INIT , 0
52#else
53#define __RWSEM_DEBUG_INIT /* */
54#endif
55
56#define __RWSEM_INITIALIZER(name) \
57 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
58 LIST_HEAD_INIT((name).wait_list) \
59 __RWSEM_DEBUG_INIT }
60
61#define DECLARE_RWSEM(name) \
62 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
63
64extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
65extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
66extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
67extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
68
/* Run-time initialization of SEM to the unlocked state. */
static inline void
init_rwsem (struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}
79
80/*
81 * lock for reading
82 */
83static inline void
84__down_read (struct rw_semaphore *sem)
85{
86 int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
87
88 if (result < 0)
89 rwsem_down_read_failed(sem);
90}
91
92/*
93 * lock for writing
94 */
95static inline void
96__down_write (struct rw_semaphore *sem)
97{
98 int old, new;
99
100 do {
101 old = sem->count;
102 new = old + RWSEM_ACTIVE_WRITE_BIAS;
103 } while (cmpxchg_acq(&sem->count, old, new) != old);
104
105 if (old != 0)
106 rwsem_down_write_failed(sem);
107}
108
109/*
110 * unlock after reading
111 */
112static inline void
113__up_read (struct rw_semaphore *sem)
114{
115 int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
116
117 if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
118 rwsem_wake(sem);
119}
120
121/*
122 * unlock after writing
123 */
124static inline void
125__up_write (struct rw_semaphore *sem)
126{
127 int old, new;
128
129 do {
130 old = sem->count;
131 new = old - RWSEM_ACTIVE_WRITE_BIAS;
132 } while (cmpxchg_rel(&sem->count, old, new) != old);
133
134 if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
135 rwsem_wake(sem);
136}
137
138/*
139 * trylock for reading -- returns 1 if successful, 0 if contention
140 */
141static inline int
142__down_read_trylock (struct rw_semaphore *sem)
143{
144 int tmp;
145 while ((tmp = sem->count) >= 0) {
146 if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
147 return 1;
148 }
149 }
150 return 0;
151}
152
153/*
154 * trylock for writing -- returns 1 if successful, 0 if contention
155 */
156static inline int
157__down_write_trylock (struct rw_semaphore *sem)
158{
159 int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
160 RWSEM_ACTIVE_WRITE_BIAS);
161 return tmp == RWSEM_UNLOCKED_VALUE;
162}
163
164/*
165 * downgrade write lock to read lock
166 */
167static inline void
168__downgrade_write (struct rw_semaphore *sem)
169{
170 int old, new;
171
172 do {
173 old = sem->count;
174 new = old - RWSEM_WAITING_BIAS;
175 } while (cmpxchg_rel(&sem->count, old, new) != old);
176
177 if (old < 0)
178 rwsem_downgrade_wake(sem);
179}
180
181/*
182 * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
183 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
184 */
185#define rwsem_atomic_add(delta, sem) atomic_add(delta, (atomic_t *)(&(sem)->count))
186#define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count))
187
188#endif /* _ASM_IA64_RWSEM_H */
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
new file mode 100644
index 000000000000..ea1ed377de4c
--- /dev/null
+++ b/include/asm-ia64/sal.h
@@ -0,0 +1,840 @@
1#ifndef _ASM_IA64_SAL_H
2#define _ASM_IA64_SAL_H
3
4/*
5 * System Abstraction Layer definitions.
6 *
7 * This is based on version 2.5 of the manual "IA-64 System
8 * Abstraction Layer".
9 *
10 * Copyright (C) 2001 Intel
11 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
12 * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@intel.com>
13 * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
14 * David Mosberger-Tang <davidm@hpl.hp.com>
15 * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
16 *
17 * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
18 * revision of the SAL spec.
19 * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
20 * revision of the SAL spec.
21 * 99/09/29 davidm Updated for SAL 2.6.
22 * 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6)
23 * (plus examples of platform error info structures from smariset @ Intel)
24 */
25
26#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT 0
27#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT 1
28#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT 2
29#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT 3
30
31#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
32#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
33#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
34#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
35
36#ifndef __ASSEMBLY__
37
38#include <linux/bcd.h>
39#include <linux/spinlock.h>
40#include <linux/efi.h>
41
42#include <asm/pal.h>
43#include <asm/system.h>
44#include <asm/fpu.h>
45
46extern spinlock_t sal_lock;
47
/* SAL spec _requires_ eight args for each call. */
#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7)	\
	result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)

/*
 * Standard SAL call: serializes against all other SAL calls by taking
 * the global sal_lock with interrupts disabled.  The scratch FP
 * registers are saved/restored around the call — presumably because
 * SAL may clobber them (see the IA-64 SAL calling convention).
 */
# define SAL_CALL(result,args...) do {				\
	unsigned long __ia64_sc_flags;				\
	struct ia64_fpreg __ia64_sc_fr[6];			\
	ia64_save_scratch_fpregs(__ia64_sc_fr);			\
	spin_lock_irqsave(&sal_lock, __ia64_sc_flags);		\
	__SAL_CALL(result, args);				\
	spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);	\
	ia64_load_scratch_fpregs(__ia64_sc_fr);			\
} while (0)

/*
 * Lock-less variant: only disables local interrupts.  Used for calls
 * that must not take sal_lock, e.g. the MC rendezvous spin, which does
 * not return until another CPU wakes it (see ia64_sal_mc_rendez below).
 */
# define SAL_CALL_NOLOCK(result,args...) do {		\
	unsigned long __ia64_scn_flags;			\
	struct ia64_fpreg __ia64_scn_fr[6];		\
	ia64_save_scratch_fpregs(__ia64_scn_fr);	\
	local_irq_save(__ia64_scn_flags);		\
	__SAL_CALL(result, args);			\
	local_irq_restore(__ia64_scn_flags);		\
	ia64_load_scratch_fpregs(__ia64_scn_fr);	\
} while (0)

/*
 * Re-entrant variant: no locking and interrupts stay enabled; only
 * preemption is disabled.  For SAL procedures that are safe to enter
 * concurrently from multiple CPUs.
 */
# define SAL_CALL_REENTRANT(result,args...) do {	\
	struct ia64_fpreg __ia64_scs_fr[6];		\
	ia64_save_scratch_fpregs(__ia64_scs_fr);	\
	preempt_disable();				\
	__SAL_CALL(result, args);			\
	preempt_enable();				\
	ia64_load_scratch_fpregs(__ia64_scs_fr);	\
} while (0)
80
81#define SAL_SET_VECTORS 0x01000000
82#define SAL_GET_STATE_INFO 0x01000001
83#define SAL_GET_STATE_INFO_SIZE 0x01000002
84#define SAL_CLEAR_STATE_INFO 0x01000003
85#define SAL_MC_RENDEZ 0x01000004
86#define SAL_MC_SET_PARAMS 0x01000005
87#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006
88
89#define SAL_CACHE_FLUSH 0x01000008
90#define SAL_CACHE_INIT 0x01000009
91#define SAL_PCI_CONFIG_READ 0x01000010
92#define SAL_PCI_CONFIG_WRITE 0x01000011
93#define SAL_FREQ_BASE 0x01000012
94
95#define SAL_UPDATE_PAL 0x01000020
96
struct ia64_sal_retval {
	/*
	 * A zero status value indicates call completed without error.
	 * A negative status value indicates reason of call failure.
	 * A positive status value indicates success but an
	 * informational value should be printed (e.g., "reboot for
	 * change to take effect").
	 */
	s64 status;
	u64 v0;		/* first call-specific return value */
	u64 v1;		/* second call-specific return value */
	u64 v2;		/* third call-specific return value */
};
110
111typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
112
113enum {
114 SAL_FREQ_BASE_PLATFORM = 0,
115 SAL_FREQ_BASE_INTERVAL_TIMER = 1,
116 SAL_FREQ_BASE_REALTIME_CLOCK = 2
117};
118
119/*
120 * The SAL system table is followed by a variable number of variable
121 * length descriptors. The structure of these descriptors follows
122 * below.
 * The definition follows SAL specs from July 2000
124 */
/* Fixed-size header of the SAL system table (variable-length descriptors follow). */
struct ia64_sal_systab {
	u8 signature[4];	/* should be "SST_" */
	u32 size;		/* size of this table in bytes */
	u8 sal_rev_minor;	/* minor part of supported SAL spec revision */
	u8 sal_rev_major;	/* major part of supported SAL spec revision */
	u16 entry_count;	/* # of entries in variable portion */
	u8 checksum;		/* byte checksum of the entire table */
	u8 reserved1[7];
	u8 sal_a_rev_minor;	/* SAL_A (firmware component) revision */
	u8 sal_a_rev_major;
	u8 sal_b_rev_minor;	/* SAL_B (firmware component) revision */
	u8 sal_b_rev_major;
	/* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
	u8 oem_id[32];
	u8 product_id[32];	/* ASCII product id */
	u8 reserved2[8];
};
142
143enum sal_systab_entry_type {
144 SAL_DESC_ENTRY_POINT = 0,
145 SAL_DESC_MEMORY = 1,
146 SAL_DESC_PLATFORM_FEATURE = 2,
147 SAL_DESC_TR = 3,
148 SAL_DESC_PTC = 4,
149 SAL_DESC_AP_WAKEUP = 5
150};
151
/*
 * Entry type:	Size:
 *	0	48
 *	1	32
 *	2	16
 *	3	32
 *	4	16
 *	5	16
 *
 * The string literal below encodes the table above, one octal byte per
 * entry type: "\060" == 48, "\040" == 32, "\020" == 16.  Indexing the
 * literal by the (unsigned) entry type yields the descriptor size.
 */
#define SAL_DESC_SIZE(type)	"\060\040\020\040\020\020"[(unsigned) type]
162
163typedef struct ia64_sal_desc_entry_point {
164 u8 type;
165 u8 reserved1[7];
166 u64 pal_proc;
167 u64 sal_proc;
168 u64 gp;
169 u8 reserved2[16];
170}ia64_sal_desc_entry_point_t;
171
172typedef struct ia64_sal_desc_memory {
173 u8 type;
174 u8 used_by_sal; /* needs to be mapped for SAL? */
175 u8 mem_attr; /* current memory attribute setting */
176 u8 access_rights; /* access rights set up by SAL */
177 u8 mem_attr_mask; /* mask of supported memory attributes */
178 u8 reserved1;
179 u8 mem_type; /* memory type */
180 u8 mem_usage; /* memory usage */
181 u64 addr; /* physical address of memory */
182 u32 length; /* length (multiple of 4KB pages) */
183 u32 reserved2;
184 u8 oem_reserved[8];
185} ia64_sal_desc_memory_t;
186
187typedef struct ia64_sal_desc_platform_feature {
188 u8 type;
189 u8 feature_mask;
190 u8 reserved1[14];
191} ia64_sal_desc_platform_feature_t;
192
193typedef struct ia64_sal_desc_tr {
194 u8 type;
195 u8 tr_type; /* 0 == instruction, 1 == data */
196 u8 regnum; /* translation register number */
197 u8 reserved1[5];
198 u64 addr; /* virtual address of area covered */
199 u64 page_size; /* encoded page size */
200 u8 reserved2[8];
201} ia64_sal_desc_tr_t;
202
203typedef struct ia64_sal_desc_ptc {
204 u8 type;
205 u8 reserved1[3];
206 u32 num_domains; /* # of coherence domains */
207 u64 domain_info; /* physical address of domain info table */
208} ia64_sal_desc_ptc_t;
209
210typedef struct ia64_sal_ptc_domain_info {
211 u64 proc_count; /* number of processors in domain */
212 u64 proc_list; /* physical address of LID array */
213} ia64_sal_ptc_domain_info_t;
214
215typedef struct ia64_sal_ptc_domain_proc_entry {
216 u64 id : 8; /* id of processor */
217 u64 eid : 8; /* eid of processor */
218} ia64_sal_ptc_domain_proc_entry_t;
219
220
221#define IA64_SAL_AP_EXTERNAL_INT 0
222
223typedef struct ia64_sal_desc_ap_wakeup {
224 u8 type;
225 u8 mechanism; /* 0 == external interrupt */
226 u8 reserved1[6];
227 u64 vector; /* interrupt vector in range 0x10-0xff */
228} ia64_sal_desc_ap_wakeup_t ;
229
230extern ia64_sal_handler ia64_sal;
231extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
232
233extern unsigned short sal_revision; /* supported SAL spec revision */
234extern unsigned short sal_version; /* SAL version; OEM dependent */
235#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor))
236
237extern const char *ia64_sal_strerror (long status);
238extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
239
240/* SAL information type encodings */
241enum {
242 SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */
243 SAL_INFO_TYPE_INIT = 1, /* Init information */
244 SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */
245 SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */
246};
247
248/* Encodings for machine check parameter types */
249enum {
250 SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */
251 SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
252 SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */
253};
254
255/* Encodings for rendezvous mechanisms */
256enum {
257 SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
258 SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/
259};
260
261/* Encodings for vectors which can be registered by the OS with SAL */
262enum {
263 SAL_VECTOR_OS_MCA = 0,
264 SAL_VECTOR_OS_INIT = 1,
265 SAL_VECTOR_OS_BOOT_RENDEZ = 2
266};
267
268/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
269#define SAL_MC_PARAM_RZ_ALWAYS 0x1
270#define SAL_MC_PARAM_BINIT_ESCALATE 0x10
271
272/*
273 * Definition of the SAL Error Log from the SAL spec
274 */
275
276/* SAL Error Record Section GUID Definitions */
277#define SAL_PROC_DEV_ERR_SECT_GUID \
278 EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
279#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID \
280 EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
281#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID \
282 EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
283#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID \
284 EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
285#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID \
286 EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
287#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID \
288 EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
289#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID \
290 EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
291#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID \
292 EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
293#define SAL_PLAT_BUS_ERR_SECT_GUID \
294 EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
295
296#define MAX_CACHE_ERRORS 6
297#define MAX_TLB_ERRORS 6
298#define MAX_BUS_ERRORS 1
299
300/* Definition of version according to SAL spec for logging purposes */
301typedef struct sal_log_revision {
302 u8 minor; /* BCD (0..99) */
303 u8 major; /* BCD (0..99) */
304} sal_log_revision_t;
305
306/* Definition of timestamp according to SAL spec for logging purposes */
307typedef struct sal_log_timestamp {
308 u8 slh_second; /* Second (0..59) */
309 u8 slh_minute; /* Minute (0..59) */
310 u8 slh_hour; /* Hour (0..23) */
311 u8 slh_reserved;
312 u8 slh_day; /* Day (1..31) */
313 u8 slh_month; /* Month (1..12) */
314 u8 slh_year; /* Year (00..99) */
315 u8 slh_century; /* Century (19, 20, 21, ...) */
316} sal_log_timestamp_t;
317
318/* Definition of log record header structures */
319typedef struct sal_log_record_header {
320 u64 id; /* Unique monotonically increasing ID */
321 sal_log_revision_t revision; /* Major and Minor revision of header */
322 u16 severity; /* Error Severity */
323 u32 len; /* Length of this error log in bytes */
324 sal_log_timestamp_t timestamp; /* Timestamp */
325 efi_guid_t platform_guid; /* Unique OEM Platform ID */
326} sal_log_record_header_t;
327
328#define sal_log_severity_recoverable 0
329#define sal_log_severity_fatal 1
330#define sal_log_severity_corrected 2
331
332/* Definition of log section header structures */
333typedef struct sal_log_sec_header {
334 efi_guid_t guid; /* Unique Section ID */
335 sal_log_revision_t revision; /* Major and Minor revision of Section */
336 u16 reserved;
337 u32 len; /* Section length */
338} sal_log_section_hdr_t;
339
340typedef struct sal_log_mod_error_info {
341 struct {
342 u64 check_info : 1,
343 requestor_identifier : 1,
344 responder_identifier : 1,
345 target_identifier : 1,
346 precise_ip : 1,
347 reserved : 59;
348 } valid;
349 u64 check_info;
350 u64 requestor_identifier;
351 u64 responder_identifier;
352 u64 target_identifier;
353 u64 precise_ip;
354} sal_log_mod_error_info_t;
355
356typedef struct sal_processor_static_info {
357 struct {
358 u64 minstate : 1,
359 br : 1,
360 cr : 1,
361 ar : 1,
362 rr : 1,
363 fr : 1,
364 reserved : 58;
365 } valid;
366 pal_min_state_area_t min_state_area;
367 u64 br[8];
368 u64 cr[128];
369 u64 ar[128];
370 u64 rr[8];
371 struct ia64_fpreg __attribute__ ((packed)) fr[128];
372} sal_processor_static_info_t;
373
374struct sal_cpuid_info {
375 u64 regs[5];
376 u64 reserved;
377};
378
379typedef struct sal_log_processor_info {
380 sal_log_section_hdr_t header;
381 struct {
382 u64 proc_error_map : 1,
383 proc_state_param : 1,
384 proc_cr_lid : 1,
385 psi_static_struct : 1,
386 num_cache_check : 4,
387 num_tlb_check : 4,
388 num_bus_check : 4,
389 num_reg_file_check : 4,
390 num_ms_check : 4,
391 cpuid_info : 1,
392 reserved1 : 39;
393 } valid;
394 u64 proc_error_map;
395 u64 proc_state_parameter;
396 u64 proc_cr_lid;
397 /*
398 * The rest of this structure consists of variable-length arrays, which can't be
399 * expressed in C.
400 */
401 sal_log_mod_error_info_t info[0];
402 /*
403 * This is what the rest looked like if C supported variable-length arrays:
404 *
405 * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
406 * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
407 * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
408 * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
409 * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
410 * struct sal_cpuid_info cpuid_info;
411 * sal_processor_static_info_t processor_static_info;
412 */
413} sal_log_processor_info_t;
414
/*
 * Given a sal_log_processor_info_t pointer, return a pointer to the
 * processor_static_info.  Skips past the five variable-length
 * sal_log_mod_error_info_t arrays (cache/tlb/bus/reg-file/ms checks,
 * counts taken from the `valid` bitfields) plus the sal_cpuid_info
 * struct — matching the commented-out layout shown inside
 * sal_log_processor_info above.
 */
#define SAL_LPI_PSI_INFO(l)									\
({	sal_log_processor_info_t *_l = (l);							\
	((sal_processor_static_info_t *)							\
	 ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check		\
				+ _l->valid.num_bus_check + _l->valid.num_reg_file_check	\
				+ _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t)	\
			       + sizeof(struct sal_cpuid_info))));				\
})
424
425/* platform error log structures */
426
427typedef struct sal_log_mem_dev_err_info {
428 sal_log_section_hdr_t header;
429 struct {
430 u64 error_status : 1,
431 physical_addr : 1,
432 addr_mask : 1,
433 node : 1,
434 card : 1,
435 module : 1,
436 bank : 1,
437 device : 1,
438 row : 1,
439 column : 1,
440 bit_position : 1,
441 requestor_id : 1,
442 responder_id : 1,
443 target_id : 1,
444 bus_spec_data : 1,
445 oem_id : 1,
446 oem_data : 1,
447 reserved : 47;
448 } valid;
449 u64 error_status;
450 u64 physical_addr;
451 u64 addr_mask;
452 u16 node;
453 u16 card;
454 u16 module;
455 u16 bank;
456 u16 device;
457 u16 row;
458 u16 column;
459 u16 bit_position;
460 u64 requestor_id;
461 u64 responder_id;
462 u64 target_id;
463 u64 bus_spec_data;
464 u8 oem_id[16];
465 u8 oem_data[1]; /* Variable length data */
466} sal_log_mem_dev_err_info_t;
467
468typedef struct sal_log_sel_dev_err_info {
469 sal_log_section_hdr_t header;
470 struct {
471 u64 record_id : 1,
472 record_type : 1,
473 generator_id : 1,
474 evm_rev : 1,
475 sensor_type : 1,
476 sensor_num : 1,
477 event_dir : 1,
478 event_data1 : 1,
479 event_data2 : 1,
480 event_data3 : 1,
481 reserved : 54;
482 } valid;
483 u16 record_id;
484 u8 record_type;
485 u8 timestamp[4];
486 u16 generator_id;
487 u8 evm_rev;
488 u8 sensor_type;
489 u8 sensor_num;
490 u8 event_dir;
491 u8 event_data1;
492 u8 event_data2;
493 u8 event_data3;
494} sal_log_sel_dev_err_info_t;
495
496typedef struct sal_log_pci_bus_err_info {
497 sal_log_section_hdr_t header;
498 struct {
499 u64 err_status : 1,
500 err_type : 1,
501 bus_id : 1,
502 bus_address : 1,
503 bus_data : 1,
504 bus_cmd : 1,
505 requestor_id : 1,
506 responder_id : 1,
507 target_id : 1,
508 oem_data : 1,
509 reserved : 54;
510 } valid;
511 u64 err_status;
512 u16 err_type;
513 u16 bus_id;
514 u32 reserved;
515 u64 bus_address;
516 u64 bus_data;
517 u64 bus_cmd;
518 u64 requestor_id;
519 u64 responder_id;
520 u64 target_id;
521 u8 oem_data[1]; /* Variable length data */
522} sal_log_pci_bus_err_info_t;
523
524typedef struct sal_log_smbios_dev_err_info {
525 sal_log_section_hdr_t header;
526 struct {
527 u64 event_type : 1,
528 length : 1,
529 time_stamp : 1,
530 data : 1,
531 reserved1 : 60;
532 } valid;
533 u8 event_type;
534 u8 length;
535 u8 time_stamp[6];
536 u8 data[1]; /* data of variable length, length == slsmb_length */
537} sal_log_smbios_dev_err_info_t;
538
539typedef struct sal_log_pci_comp_err_info {
540 sal_log_section_hdr_t header;
541 struct {
542 u64 err_status : 1,
543 comp_info : 1,
544 num_mem_regs : 1,
545 num_io_regs : 1,
546 reg_data_pairs : 1,
547 oem_data : 1,
548 reserved : 58;
549 } valid;
550 u64 err_status;
551 struct {
552 u16 vendor_id;
553 u16 device_id;
554 u8 class_code[3];
555 u8 func_num;
556 u8 dev_num;
557 u8 bus_num;
558 u8 seg_num;
559 u8 reserved[5];
560 } comp_info;
561 u32 num_mem_regs;
562 u32 num_io_regs;
563 u64 reg_data_pairs[1];
564 /*
565 * array of address/data register pairs is num_mem_regs + num_io_regs elements
566 * long. Each array element consists of a u64 address followed by a u64 data
567 * value. The oem_data array immediately follows the reg_data_pairs array
568 */
569 u8 oem_data[1]; /* Variable length data */
570} sal_log_pci_comp_err_info_t;
571
572typedef struct sal_log_plat_specific_err_info {
573 sal_log_section_hdr_t header;
574 struct {
575 u64 err_status : 1,
576 guid : 1,
577 oem_data : 1,
578 reserved : 61;
579 } valid;
580 u64 err_status;
581 efi_guid_t guid;
582 u8 oem_data[1]; /* platform specific variable length data */
583} sal_log_plat_specific_err_info_t;
584
585typedef struct sal_log_host_ctlr_err_info {
586 sal_log_section_hdr_t header;
587 struct {
588 u64 err_status : 1,
589 requestor_id : 1,
590 responder_id : 1,
591 target_id : 1,
592 bus_spec_data : 1,
593 oem_data : 1,
594 reserved : 58;
595 } valid;
596 u64 err_status;
597 u64 requestor_id;
598 u64 responder_id;
599 u64 target_id;
600 u64 bus_spec_data;
601 u8 oem_data[1]; /* Variable length OEM data */
602} sal_log_host_ctlr_err_info_t;
603
604typedef struct sal_log_plat_bus_err_info {
605 sal_log_section_hdr_t header;
606 struct {
607 u64 err_status : 1,
608 requestor_id : 1,
609 responder_id : 1,
610 target_id : 1,
611 bus_spec_data : 1,
612 oem_data : 1,
613 reserved : 58;
614 } valid;
615 u64 err_status;
616 u64 requestor_id;
617 u64 responder_id;
618 u64 target_id;
619 u64 bus_spec_data;
620 u8 oem_data[1]; /* Variable length OEM data */
621} sal_log_plat_bus_err_info_t;
622
623/* Overall platform error section structure */
624typedef union sal_log_platform_err_info {
625 sal_log_mem_dev_err_info_t mem_dev_err;
626 sal_log_sel_dev_err_info_t sel_dev_err;
627 sal_log_pci_bus_err_info_t pci_bus_err;
628 sal_log_smbios_dev_err_info_t smbios_dev_err;
629 sal_log_pci_comp_err_info_t pci_comp_err;
630 sal_log_plat_specific_err_info_t plat_specific_err;
631 sal_log_host_ctlr_err_info_t host_ctlr_err;
632 sal_log_plat_bus_err_info_t plat_bus_err;
633} sal_log_platform_err_info_t;
634
635/* SAL log over-all, multi-section error record structure (processor+platform) */
636typedef struct err_rec {
637 sal_log_record_header_t sal_elog_header;
638 sal_log_processor_info_t proc_err;
639 sal_log_platform_err_info_t plat_err;
640 u8 oem_data_pad[1024];
641} ia64_err_rec_t;
642
643/*
644 * Now define a couple of inline functions for improved type checking
645 * and convenience.
646 */
/*
 * Query a base frequency from SAL.  @which is one of the
 * SAL_FREQ_BASE_* values defined above.  On return, *ticks_per_second
 * and *drift_info receive SAL's v0/v1 results.
 *
 * NOTE(review): both output pointers are written unconditionally, even
 * when the call fails (status != 0) — callers must check the return
 * status before trusting the outputs, and must pass valid pointers.
 */
static inline long
ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
		    unsigned long *drift_info)
{
	struct ia64_sal_retval isrv;

	SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
	*ticks_per_second = isrv.v0;
	*drift_info = isrv.v1;
	return isrv.status;
}
658
659/* Flush all the processor and platform level instruction and/or data caches */
660static inline s64
661ia64_sal_cache_flush (u64 cache_type)
662{
663 struct ia64_sal_retval isrv;
664 SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
665 return isrv.status;
666}
667
668
669/* Initialize all the processor and platform level instruction and data caches */
670static inline s64
671ia64_sal_cache_init (void)
672{
673 struct ia64_sal_retval isrv;
674 SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
675 return isrv.status;
676}
677
678/*
679 * Clear the processor and platform information logged by SAL with respect to the machine
680 * state at the time of MCA's, INITs, CMCs, or CPEs.
681 */
682static inline s64
683ia64_sal_clear_state_info (u64 sal_info_type)
684{
685 struct ia64_sal_retval isrv;
686 SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
687 0, 0, 0, 0, 0);
688 return isrv.status;
689}
690
691
692/* Get the processor and platform information logged by SAL with respect to the machine
693 * state at the time of the MCAs, INITs, CMCs, or CPEs.
694 */
/*
 * Fetch the SAL-logged state for @sal_info_type (SAL_INFO_TYPE_*) into
 * the buffer at @sal_info.  Returns isrv.v0 on success — presumably the
 * number of bytes copied; confirm against the SAL spec — and 0 on any
 * failure, so callers cannot distinguish "error" from a zero result.
 * The buffer pointer is passed through the call macro as a plain u64.
 */
static inline u64
ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
{
	struct ia64_sal_retval isrv;
	SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
	              sal_info, 0, 0, 0, 0);
	if (isrv.status)
		return 0;

	return isrv.v0;
}
706
707/*
708 * Get the maximum size of the information logged by SAL with respect to the machine state
709 * at the time of MCAs, INITs, CMCs, or CPEs.
710 */
711static inline u64
712ia64_sal_get_state_info_size (u64 sal_info_type)
713{
714 struct ia64_sal_retval isrv;
715 SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
716 0, 0, 0, 0, 0);
717 if (isrv.status)
718 return 0;
719 return isrv.v0;
720}
721
722/*
723 * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
724 * the monarch processor. Must not lock, because it will not return on any cpu until the
725 * monarch processor sends a wake up.
726 */
727static inline s64
728ia64_sal_mc_rendez (void)
729{
730 struct ia64_sal_retval isrv;
731 SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
732 return isrv.status;
733}
734
735/*
736 * Allow the OS to specify the interrupt number to be used by SAL to interrupt OS during
737 * the machine check rendezvous sequence as well as the mechanism to wake up the
738 * non-monarch processor at the end of machine check processing.
739 * Returns the complete ia64_sal_retval because some calls return more than just a status
740 * value.
741 */
742static inline struct ia64_sal_retval
743ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
744{
745 struct ia64_sal_retval isrv;
746 SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
747 timeout, rz_always, 0, 0);
748 return isrv;
749}
750
751/* Read from PCI configuration space */
/*
 * Read from PCI configuration space.  @size is the access width and
 * @type the address type; note that the SAL argument order here is
 * (addr, size, type) even though the C parameters read (addr, type,
 * size).  If @value is non-NULL it receives isrv.v0 — this store
 * happens even when the call failed, so check the returned status.
 */
static inline s64
ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
{
	struct ia64_sal_retval isrv;
	SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
	if (value)
		*value = isrv.v0;
	return isrv.status;
}
761
762/* Write to PCI configuration space */
763static inline s64
764ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
765{
766 struct ia64_sal_retval isrv;
767 SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
768 type, 0, 0, 0);
769 return isrv.status;
770}
771
772/*
773 * Register physical addresses of locations needed by SAL when SAL procedures are invoked
774 * in virtual mode.
775 */
776static inline s64
777ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
778{
779 struct ia64_sal_retval isrv;
780 SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
781 0, 0, 0, 0, 0);
782 return isrv.status;
783}
784
785/*
786 * Register software dependent code locations within SAL. These locations are handlers or
787 * entry points where SAL will pass control for the specified event. These event handlers
 * are for the boot rendezvous, MCAs and INIT scenarios.
789 */
790static inline s64
791ia64_sal_set_vectors (u64 vector_type,
792 u64 handler_addr1, u64 gp1, u64 handler_len1,
793 u64 handler_addr2, u64 gp2, u64 handler_len2)
794{
795 struct ia64_sal_retval isrv;
796 SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
797 handler_addr1, gp1, handler_len1,
798 handler_addr2, gp2, handler_len2);
799
800 return isrv.status;
801}
802
803/* Update the contents of PAL block in the non-volatile storage device */
/*
 * Update the contents of the PAL block in non-volatile storage.
 * @error_code and @scratch_buf_size_needed are optional outputs (each
 * may be NULL); when supplied they receive SAL's v0/v1 results.  Both
 * are copied regardless of the call's status, so callers should check
 * the returned status before interpreting them.
 */
static inline s64
ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
		     u64 *error_code, u64 *scratch_buf_size_needed)
{
	struct ia64_sal_retval isrv;
	SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
	         0, 0, 0, 0);
	if (error_code)
		*error_code = isrv.v0;
	if (scratch_buf_size_needed)
		*scratch_buf_size_needed = isrv.v1;
	return isrv.status;
}
817
818extern unsigned long sal_platform_features;
819
820extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
821
822struct sal_ret_values {
823 long r8; long r9; long r10; long r11;
824};
825
826#define IA64_SAL_OEMFUNC_MIN 0x02000000
827#define IA64_SAL_OEMFUNC_MAX 0x03ffffff
828
829extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
830 u64, u64, u64);
831extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
832 u64, u64, u64, u64, u64);
833extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
834 u64, u64, u64, u64, u64);
835
836extern void ia64_sal_handler_init(void *entry_point, void *gpval);
837
838#endif /* __ASSEMBLY__ */
839
840#endif /* _ASM_IA64_SAL_H */
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
new file mode 100644
index 000000000000..834a189ef189
--- /dev/null
+++ b/include/asm-ia64/scatterlist.h
@@ -0,0 +1,28 @@
1#ifndef _ASM_IA64_SCATTERLIST_H
2#define _ASM_IA64_SCATTERLIST_H
3
4/*
5 * Modified 1998-1999, 2001-2002, 2004
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
7 */
8
/* One entry of a scatter/gather list. */
struct scatterlist {
	struct page *page;	/* page containing the buffer */
	unsigned int offset;	/* byte offset of the buffer within the page */
	unsigned int length;	/* buffer length */

	dma_addr_t dma_address;	/* bus address; set when the list is DMA-mapped */
	unsigned int dma_length; /* mapped length; set when the list is DMA-mapped */
};
17
18/*
19 * It used to be that ISA_DMA_THRESHOLD had something to do with the
20 * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
21 * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
22 * tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical
23 * address of a page is that is allocated with GFP_DMA. On IA-64,
24 * that's 4GB - 1.
25 */
26#define ISA_DMA_THRESHOLD 0xffffffff
27
28#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/include/asm-ia64/sections.h b/include/asm-ia64/sections.h
new file mode 100644
index 000000000000..8e3dbde1b429
--- /dev/null
+++ b/include/asm-ia64/sections.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_IA64_SECTIONS_H
2#define _ASM_IA64_SECTIONS_H
3
4/*
5 * Copyright (C) 1998-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <asm-generic/sections.h>
10
11extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
12extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
13extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
14extern char __start_gate_section[];
15extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
16extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
17extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
18extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
19extern char __start_unwind[], __end_unwind[];
20
21#endif /* _ASM_IA64_SECTIONS_H */
22
diff --git a/include/asm-ia64/segment.h b/include/asm-ia64/segment.h
new file mode 100644
index 000000000000..b89e2b3d648f
--- /dev/null
+++ b/include/asm-ia64/segment.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_IA64_SEGMENT_H
2#define _ASM_IA64_SEGMENT_H
3
4/* Only here because we have some old header files that expect it.. */
5
6#endif /* _ASM_IA64_SEGMENT_H */
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
new file mode 100644
index 000000000000..3a2f0f3f78f3
--- /dev/null
+++ b/include/asm-ia64/semaphore.h
@@ -0,0 +1,102 @@
1#ifndef _ASM_IA64_SEMAPHORE_H
2#define _ASM_IA64_SEMAPHORE_H
3
4/*
5 * Copyright (C) 1998-2000 Hewlett-Packard Co
6 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12#include <asm/atomic.h>
13
/* Classic counting semaphore: count > 0 means free slots are available. */
struct semaphore {
	atomic_t count;		/* available slots; <= 0 means held/contended */
	int sleepers;		/* bookkeeping used by the out-of-line slow path */
	wait_queue_head_t wait;	/* tasks blocked in down()/down_interruptible() */
};
19
20#define __SEMAPHORE_INITIALIZER(name, n) \
21{ \
22 .count = ATOMIC_INIT(n), \
23 .sleepers = 0, \
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
25}
26
27#define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name,1)
28
29#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
31
32#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
33#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
34
/* (Re)initialize @sem with @val available slots via a whole-struct copy. */
static inline void
sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}
40
41static inline void
42init_MUTEX (struct semaphore *sem)
43{
44 sema_init(sem, 1);
45}
46
47static inline void
48init_MUTEX_LOCKED (struct semaphore *sem)
49{
50 sema_init(sem, 0);
51}
52
53extern void __down (struct semaphore * sem);
54extern int __down_interruptible (struct semaphore * sem);
55extern int __down_trylock (struct semaphore * sem);
56extern void __up (struct semaphore * sem);
57
58/*
59 * Atomically decrement the semaphore's count. If it goes negative,
60 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
61 */
static inline void
down (struct semaphore *sem)
{
	might_sleep();	/* must not be called from atomic context */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);	/* contended: sleep in the out-of-line slow path */
}
69
70/*
71 * Atomically decrement the semaphore's count. If it goes negative,
72 * block the calling thread in the TASK_INTERRUPTIBLE state.
73 */
74static inline int
75down_interruptible (struct semaphore * sem)
76{
77 int ret = 0;
78
79 might_sleep();
80 if (atomic_dec_return(&sem->count) < 0)
81 ret = __down_interruptible(sem);
82 return ret;
83}
84
85static inline int
86down_trylock (struct semaphore *sem)
87{
88 int ret = 0;
89
90 if (atomic_dec_return(&sem->count) < 0)
91 ret = __down_trylock(sem);
92 return ret;
93}
94
/*
 * Release the semaphore.  If the incremented count is still <= 0 there
 * are sleepers to hand the slot to; let the slow path wake one up.
 */
static inline void
up (struct semaphore * sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}
101
102#endif /* _ASM_IA64_SEMAPHORE_H */
diff --git a/include/asm-ia64/sembuf.h b/include/asm-ia64/sembuf.h
new file mode 100644
index 000000000000..1340fbc04d3e
--- /dev/null
+++ b/include/asm-ia64/sembuf.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_IA64_SEMBUF_H
2#define _ASM_IA64_SEMBUF_H
3
4/*
5 * The semid64_ds structure for IA-64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
/* Kernel/user ABI structure — layout must not change. */
struct semid64_ds {
	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
	__kernel_time_t	sem_otime;	/* last semop time */
	__kernel_time_t	sem_ctime;	/* last change time */
	unsigned long	sem_nsems;	/* no. of semaphores in array */
	unsigned long	__unused1;	/* reserved padding (see header comment) */
	unsigned long	__unused2;	/* reserved padding (see header comment) */
};
21
22#endif /* _ASM_IA64_SEMBUF_H */
diff --git a/include/asm-ia64/serial.h b/include/asm-ia64/serial.h
new file mode 100644
index 000000000000..0c7a2f3dcf13
--- /dev/null
+++ b/include/asm-ia64/serial.h
@@ -0,0 +1,19 @@
1/*
2 * include/asm-ia64/serial.h
3 *
4 * Derived from the i386 version.
5 */
6
7/*
8 * This assumes you have a 1.8432 MHz clock for your UART.
9 *
10 * It'd be nice if someone built a serial card with a 24.576 MHz
11 * clock, since the 16550A is capable of handling a top speed of 1.5
12 * megabits/second; but this requires the faster clock.
13 */
14#define BASE_BAUD ( 1843200 / 16 )
15
16/*
17 * All legacy serial ports should be enumerated via ACPI namespace, so
18 * we need not list them here.
19 */
diff --git a/include/asm-ia64/setup.h b/include/asm-ia64/setup.h
new file mode 100644
index 000000000000..ea29b57affcb
--- /dev/null
+++ b/include/asm-ia64/setup.h
@@ -0,0 +1,6 @@
1#ifndef __IA64_SETUP_H
2#define __IA64_SETUP_H
3
4#define COMMAND_LINE_SIZE 512
5
6#endif
diff --git a/include/asm-ia64/shmbuf.h b/include/asm-ia64/shmbuf.h
new file mode 100644
index 000000000000..585002a77acd
--- /dev/null
+++ b/include/asm-ia64/shmbuf.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_IA64_SHMBUF_H
2#define _ASM_IA64_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for IA-64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct shmid64_ds {
14 struct ipc64_perm shm_perm; /* operation perms */
15 size_t shm_segsz; /* size of segment (bytes) */
16 __kernel_time_t shm_atime; /* last attach time */
17 __kernel_time_t shm_dtime; /* last detach time */
18 __kernel_time_t shm_ctime; /* last change time */
19 __kernel_pid_t shm_cpid; /* pid of creator */
20 __kernel_pid_t shm_lpid; /* pid of last operator */
21 unsigned long shm_nattch; /* no. of current attaches */
22 unsigned long __unused1; /* reserved 64-bit pad (see header note above) */
23 unsigned long __unused2; /* reserved 64-bit pad (see header note above) */
24};
25
/*
 * System-wide SHM limits as exchanged with user space; padded with
 * four spare words for future expansion.
 */
26struct shminfo64 {
27 unsigned long shmmax; /* max segment size */
28 unsigned long shmmin; /* min segment size */
29 unsigned long shmmni; /* max number of segment ids */
30 unsigned long shmseg; /* max segments per process — TODO confirm against sysctl use */
31 unsigned long shmall; /* max total shared memory (pages) — TODO confirm units */
32 unsigned long __unused1;
33 unsigned long __unused2;
34 unsigned long __unused3;
35 unsigned long __unused4;
36};
37
38#endif /* _ASM_IA64_SHMBUF_H */
diff --git a/include/asm-ia64/shmparam.h b/include/asm-ia64/shmparam.h
new file mode 100644
index 000000000000..d07508dc54ae
--- /dev/null
+++ b/include/asm-ia64/shmparam.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_IA64_SHMPARAM_H
2#define _ASM_IA64_SHMPARAM_H
3
4/*
5 * SHMLBA controls minimum alignment at which shared memory segments
6 * get attached. The IA-64 architecture says that there may be a
7 * performance degradation when there are virtual aliases within 1MB.
8 * To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20
9 */
10#define SHMLBA (1024*1024)
11
12#endif /* _ASM_IA64_SHMPARAM_H */
diff --git a/include/asm-ia64/sigcontext.h b/include/asm-ia64/sigcontext.h
new file mode 100644
index 000000000000..57ff777bcc40
--- /dev/null
+++ b/include/asm-ia64/sigcontext.h
@@ -0,0 +1,70 @@
1#ifndef _ASM_IA64_SIGCONTEXT_H
2#define _ASM_IA64_SIGCONTEXT_H
3
4/*
5 * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
6 * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <asm/fpu.h>
10
11#define IA64_SC_FLAG_ONSTACK_BIT 0 /* is handler running on signal stack? */
12#define IA64_SC_FLAG_IN_SYSCALL_BIT 1 /* did signal interrupt a syscall? */
13#define IA64_SC_FLAG_FPH_VALID_BIT 2 /* is state in f[32]-f[127] valid? */
14
15#define IA64_SC_FLAG_ONSTACK (1 << IA64_SC_FLAG_ONSTACK_BIT)
16#define IA64_SC_FLAG_IN_SYSCALL (1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
17#define IA64_SC_FLAG_FPH_VALID (1 << IA64_SC_FLAG_FPH_VALID_BIT)
18
19# ifndef __ASSEMBLY__
20
21/*
22 * Note on handling of register backing store: sc_ar_bsp contains the address that would
23 * be found in ar.bsp after executing a "cover" instruction in the context in which the
24 * signal was raised. If signal delivery required switching to an alternate signal stack
25 * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
26 * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
27 * original one. In this case, sc_rbs_base contains the base address of the new register
28 * backing store. The number of registers in the dirty partition can be calculated as:
29 *
30 * ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
31 *
32 */
33
/*
 * Machine state saved at signal delivery and restored on return from
 * the handler.  Layout is user-visible ABI (see the note on sc_mask
 * below about preserving binary compatibility).
 */
34struct sigcontext {
35 unsigned long sc_flags; /* see manifest constants above */
36 unsigned long sc_nat; /* bit i == 1 iff scratch reg gr[i] is a NaT */
37 stack_t sc_stack; /* previously active stack */
38
39 unsigned long sc_ip; /* instruction pointer */
40 unsigned long sc_cfm; /* current frame marker */
41 unsigned long sc_um; /* user mask bits */
42 unsigned long sc_ar_rsc; /* register stack configuration register */
43 unsigned long sc_ar_bsp; /* backing store pointer (see note above) */
44 unsigned long sc_ar_rnat; /* RSE NaT collection register */
45 unsigned long sc_ar_ccv; /* compare and exchange compare value register */
46 unsigned long sc_ar_unat; /* ar.unat of interrupted context */
47 unsigned long sc_ar_fpsr; /* floating-point status register */
48 unsigned long sc_ar_pfs; /* previous function state */
49 unsigned long sc_ar_lc; /* loop count register */
50 unsigned long sc_pr; /* predicate registers */
51 unsigned long sc_br[8]; /* branch registers */
52 /* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
53 unsigned long sc_gr[32]; /* general registers (static partition) */
54 struct ia64_fpreg sc_fr[128]; /* floating-point registers; f32-f127 valid only if IA64_SC_FLAG_FPH_VALID is set */
55
56 unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */
57 unsigned long sc_loadrs; /* see description above */
58
59 unsigned long sc_ar25; /* cmp8xchg16 uses this */
60 unsigned long sc_ar26; /* rsvd for scratch use */
61 unsigned long sc_rsvd[12]; /* reserved for future use */
62 /*
63 * The mask must come last so we can increase _NSIG_WORDS
64 * without breaking binary compatibility.
65 */
66 sigset_t sc_mask; /* signal mask to restore after handler returns */
67};
68
69# endif /* __ASSEMBLY__ */
70#endif /* _ASM_IA64_SIGCONTEXT_H */
diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h
new file mode 100644
index 000000000000..d55f139cbcdc
--- /dev/null
+++ b/include/asm-ia64/siginfo.h
@@ -0,0 +1,141 @@
1#ifndef _ASM_IA64_SIGINFO_H
2#define _ASM_IA64_SIGINFO_H
3
4/*
5 * Based on <asm-i386/siginfo.h>.
6 *
7 * Modified 1998-2002
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */
10
11#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 4)
12
13#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4)
14
15#define HAVE_ARCH_SIGINFO_T
16#define HAVE_ARCH_COPY_SIGINFO
17#define HAVE_ARCH_COPY_SIGINFO_TO_USER
18
19#include <asm-generic/siginfo.h>
20
/*
 * IA-64 siginfo: like the generic version but with an explicit __pad0
 * and extra _sigfault members (_imm/_flags/_isr) for break immediates
 * and the interruption status register.
 */
21typedef struct siginfo {
22 int si_signo;
23 int si_errno;
24 int si_code;
25 int __pad0; /* pad before the union — presumably to 8-byte-align it; verify layout */
26
27 union {
28 int _pad[SI_PAD_SIZE];
29
30 /* kill() */
31 struct {
32 pid_t _pid; /* sender's pid */
33 uid_t _uid; /* sender's uid */
34 } _kill;
35
36 /* POSIX.1b timers */
37 struct {
38 timer_t _tid; /* timer id */
39 int _overrun; /* overrun count */
40 char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)];
41 sigval_t _sigval; /* must overlay ._rt._sigval! */
42 int _sys_private; /* not to be passed to user */
43 } _timer;
44
45 /* POSIX.1b signals */
46 struct {
47 pid_t _pid; /* sender's pid */
48 uid_t _uid; /* sender's uid */
49 sigval_t _sigval;
50 } _rt;
51
52 /* SIGCHLD */
53 struct {
54 pid_t _pid; /* which child */
55 uid_t _uid; /* sender's uid */
56 int _status; /* exit code */
57 clock_t _utime;
58 clock_t _stime;
59 } _sigchld;
60
61 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
62 struct {
63 void __user *_addr; /* faulting insn/memory ref. */
64 int _imm; /* immediate value for "break" */
65 unsigned int _flags; /* see below */
66 unsigned long _isr; /* isr */
67 } _sigfault;
68
69 /* SIGPOLL */
70 struct {
71 long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
72 int _fd;
73 } _sigpoll;
74 } _sifields;
75} siginfo_t;
76
77#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */
78#define si_flags _sifields._sigfault._flags
79/*
80 * si_isr is valid for SIGILL, SIGFPE, SIGSEGV, SIGBUS, and SIGTRAP provided that
81 * si_code is non-zero and __ISR_VALID is set in si_flags.
82 */
83#define si_isr _sifields._sigfault._isr
84
85/*
86 * Flag values for si_flags:
87 */
88#define __ISR_VALID_BIT 0
89#define __ISR_VALID (1 << __ISR_VALID_BIT)
90
91/*
92 * SIGILL si_codes
93 */
94#define ILL_BADIADDR (__SI_FAULT|9) /* unimplemented instruction address */
95#define __ILL_BREAK (__SI_FAULT|10) /* illegal break */
96#define __ILL_BNDMOD (__SI_FAULT|11) /* bundle-update (modification) in progress */
97#undef NSIGILL
98#define NSIGILL 11
99
100/*
101 * SIGFPE si_codes
102 */
103#define __FPE_DECOVF (__SI_FAULT|9) /* decimal overflow */
104#define __FPE_DECDIV (__SI_FAULT|10) /* decimal division by zero */
105#define __FPE_DECERR (__SI_FAULT|11) /* packed decimal error */
106#define __FPE_INVASC (__SI_FAULT|12) /* invalid ASCII digit */
107#define __FPE_INVDEC (__SI_FAULT|13) /* invalid decimal digit */
108#undef NSIGFPE
109#define NSIGFPE 13
110
111/*
112 * SIGSEGV si_codes
113 */
114#define __SEGV_PSTKOVF (__SI_FAULT|3) /* paragraph stack overflow */
115#undef NSIGSEGV
116#define NSIGSEGV 3
117
118/*
119 * SIGTRAP si_codes
120 */
121#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
122#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */
123#undef NSIGTRAP
124#define NSIGTRAP 4
125
126#ifdef __KERNEL__
127#include <linux/string.h>
128
/*
 * Copy a siginfo.  Kernel-generated siginfos (si_code >= 0) only use
 * the header plus the largest union member, so copy just that much;
 * user-originated ones (si_code < 0) are copied whole.
 */
129static inline void
130copy_siginfo (siginfo_t *to, siginfo_t *from)
131{
132 if (from->si_code < 0)
133 memcpy(to, from, sizeof(siginfo_t));
134 else
135 /* _sigchld is currently the largest known union member */
136 memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
137}
138
139#endif /* __KERNEL__ */
140
141#endif /* _ASM_IA64_SIGINFO_H */
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
new file mode 100644
index 000000000000..660a759744dd
--- /dev/null
+++ b/include/asm-ia64/signal.h
@@ -0,0 +1,185 @@
1#ifndef _ASM_IA64_SIGNAL_H
2#define _ASM_IA64_SIGNAL_H
3
4/*
5 * Modified 1998-2001, 2003
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
7 *
8 * Unfortunately, this file is being included by bits/signal.h in
9 * glibc-2.x. Hence the #ifdef __KERNEL__ ugliness.
10 */
11
12#define SIGHUP 1
13#define SIGINT 2
14#define SIGQUIT 3
15#define SIGILL 4
16#define SIGTRAP 5
17#define SIGABRT 6
18#define SIGIOT 6
19#define SIGBUS 7
20#define SIGFPE 8
21#define SIGKILL 9
22#define SIGUSR1 10
23#define SIGSEGV 11
24#define SIGUSR2 12
25#define SIGPIPE 13
26#define SIGALRM 14
27#define SIGTERM 15
28#define SIGSTKFLT 16
29#define SIGCHLD 17
30#define SIGCONT 18
31#define SIGSTOP 19
32#define SIGTSTP 20
33#define SIGTTIN 21
34#define SIGTTOU 22
35#define SIGURG 23
36#define SIGXCPU 24
37#define SIGXFSZ 25
38#define SIGVTALRM 26
39#define SIGPROF 27
40#define SIGWINCH 28
41#define SIGIO 29
42#define SIGPOLL SIGIO
43/*
44#define SIGLOST 29
45*/
46#define SIGPWR 30
47#define SIGSYS 31
48/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
49#define SIGUNUSED 31
50
51/* These should not be considered constants from userland. */
52#define SIGRTMIN 32
53#define SIGRTMAX _NSIG
54
55/*
56 * SA_FLAGS values:
57 *
58 * SA_ONSTACK indicates that a registered stack_t will be used.
59 * SA_INTERRUPT is a no-op, but left due to historical reasons.
60 * SA_RESTART flag to get restarting signals (which were the default long ago)
61 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
62 * SA_RESETHAND clears the handler when the signal is delivered.
63 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
64 * SA_NODEFER prevents the current signal from being masked in the handler.
65 *
66 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
67 * Unix names RESETHAND and NODEFER respectively.
68 */
69#define SA_NOCLDSTOP 0x00000001
70#define SA_NOCLDWAIT 0x00000002
71#define SA_SIGINFO 0x00000004
72#define SA_ONSTACK 0x08000000
73#define SA_RESTART 0x10000000
74#define SA_NODEFER 0x40000000
75#define SA_RESETHAND 0x80000000
76
77#define SA_NOMASK SA_NODEFER
78#define SA_ONESHOT SA_RESETHAND
79#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
80
81#define SA_RESTORER 0x04000000
82
83/*
84 * sigaltstack controls
85 */
86#define SS_ONSTACK 1
87#define SS_DISABLE 2
88
89/*
90 * The minimum stack size needs to be fairly large because we want to
91 * be sure that an app compiled for today's CPUs will continue to run
92 * on all future CPU models. The CPU model matters because the signal
93 * frame needs to have space for the complete machine state, including
94 * all physical stacked registers. The number of physical stacked
95 * registers is CPU model dependent, but given that the width of
96 * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
97 * more than 16KB of space.
98 */
99#if 1
100 /*
101 * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
102 * in wrong. ;-( To preserve backwards compatibility, we leave the kernel at the
103 * incorrect value and fix libc only.
104 */
105# define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */
106#else
107# define MINSIGSTKSZ 131072 /* min. stack size for sigaltstack() */
108#endif
109#define SIGSTKSZ 262144 /* default stack size for sigaltstack() */
110
111#ifdef __KERNEL__
112
113#define _NSIG 64
114#define _NSIG_BPW 64
115#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
116
117/*
118 * These values of sa_flags are used only by the kernel as part of the
119 * irq handling routines.
120 *
121 * SA_INTERRUPT is also used by the irq handling routines.
122 * SA_SHIRQ is for shared interrupt support on PCI and EISA.
123 */
124#define SA_PROBE SA_ONESHOT
125#define SA_SAMPLE_RANDOM SA_RESTART
126#define SA_SHIRQ 0x04000000
127#define SA_PERCPU_IRQ 0x02000000
128
129#endif /* __KERNEL__ */
130
131#define SIG_BLOCK 0 /* for blocking signals */
132#define SIG_UNBLOCK 1 /* for unblocking signals */
133#define SIG_SETMASK 2 /* for setting the signal mask */
134
135#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
136#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
137#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
138
139# ifndef __ASSEMBLY__
140
141# include <linux/types.h>
142
143/* Avoid too many header ordering problems. */
144struct siginfo;
145
146/* Type of a signal handler. */
147typedef void __user (*__sighandler_t)(int);
148
149typedef struct sigaltstack {
150 void __user *ss_sp; /* base of the alternate signal stack */
151 int ss_flags; /* SS_ONSTACK / SS_DISABLE (see above) */
152 size_t ss_size; /* stack size in bytes (>= MINSIGSTKSZ) */
153} stack_t;
154
155#ifdef __KERNEL__
156
157/* Most things should be clean enough to redefine this at will, if care
158 is taken to make libc match. */
159
160typedef unsigned long old_sigset_t;
161
162typedef struct {
163 unsigned long sig[_NSIG_WORDS];
164} sigset_t;
165
166struct sigaction {
167 __sighandler_t sa_handler;
168 unsigned long sa_flags;
169 sigset_t sa_mask; /* mask last for extensibility */
170};
171
172struct k_sigaction {
173 struct sigaction sa;
174};
175
176# include <asm/sigcontext.h>
177
178#define ptrace_signal_deliver(regs, cookie) do { } while (0)
179
180void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
181
182#endif /* __KERNEL__ */
183
184# endif /* !__ASSEMBLY__ */
185#endif /* _ASM_IA64_SIGNAL_H */
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
new file mode 100644
index 000000000000..c4a227acfeb0
--- /dev/null
+++ b/include/asm-ia64/smp.h
@@ -0,0 +1,134 @@
1/*
2 * SMP Support
3 *
4 * Copyright (C) 1999 VA Linux Systems
5 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
6 * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 * Bjorn Helgaas <bjorn.helgaas@hp.com>
9 */
10#ifndef _ASM_IA64_SMP_H
11#define _ASM_IA64_SMP_H
12
13#include <linux/config.h>
14#include <linux/init.h>
15#include <linux/threads.h>
16#include <linux/kernel.h>
17#include <linux/cpumask.h>
18
19#include <asm/bitops.h>
20#include <asm/io.h>
21#include <asm/param.h>
22#include <asm/processor.h>
23#include <asm/ptrace.h>
24
/*
 * Read this CPU's physical (SAPIC) id from cr.lid and pack it as
 * (id << 8) | eid — the 16-bit value used as hard_smp_processor_id().
 */
25static inline unsigned int
26ia64_get_lid (void)
27{
28 union {
29 struct {
30 unsigned long reserved : 16;
31 unsigned long eid : 8;
32 unsigned long id : 8;
33 unsigned long ignored : 32;
34 } f;
35 unsigned long bits;
36 } lid; /* overlays the cr.lid register layout for field extraction */
37
38 lid.bits = ia64_getreg(_IA64_REG_CR_LID);
39 return lid.f.id << 8 | lid.f.eid;
40}
41
42#ifdef CONFIG_SMP
43
44#define XTP_OFFSET 0x1e0008
45
46#define SMP_IRQ_REDIRECTION (1 << 0)
47#define SMP_IPI_REDIRECTION (1 << 1)
48
49#define smp_processor_id() (current_thread_info()->cpu)
50
51extern struct smp_boot_data {
52 int cpu_count; /* number of valid entries in cpu_phys_id[] — TODO confirm against setup code */
53 int cpu_phys_id[NR_CPUS]; /* physical (SAPIC) ids discovered at boot */
54} smp_boot_data __initdata;
55
56extern char no_int_routing __devinitdata;
57
58extern cpumask_t cpu_online_map;
59extern void __iomem *ipi_base_addr;
60extern unsigned char smp_int_redirect;
61
62extern volatile int ia64_cpu_to_sapicid[];
63#define cpu_physical_id(i) ia64_cpu_to_sapicid[i]
64
65extern unsigned long ap_wakeup_vector;
66
67/*
68 * Function to map hard smp processor id to logical id. Slow, so don't use this in
69 * performance-critical code.
70 */
71static inline int
72cpu_logical_id (int cpuid)
73{
74 int i;
75
76 /* linear search of the physical-id table (O(NR_CPUS), see note above) */
77 for (i = 0; i < NR_CPUS; ++i)
78 if (cpu_physical_id(i) == cpuid)
79 break;
80 return i; /* NR_CPUS if cpuid was not found — callers must tolerate this */
81}
81
82/*
83 * XTP control functions:
84 * min_xtp : route all interrupts to this CPU
85 * normal_xtp: nominal XTP value
86 * max_xtp : never deliver interrupts to this CPU.
87 */
88
/* Lower this CPU's external task priority so it attracts all interrupts. */
89static inline void
90min_xtp (void)
91{
92 if (smp_int_redirect & SMP_IRQ_REDIRECTION)
93 writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
94}
95
/* Restore this CPU's external task priority to its nominal value. */
96static inline void
97normal_xtp (void)
98{
99 if (smp_int_redirect & SMP_IRQ_REDIRECTION)
100 writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
101}
102
/* Raise this CPU's external task priority so interrupts go elsewhere. */
103static inline void
104max_xtp (void)
105{
106 if (smp_int_redirect & SMP_IRQ_REDIRECTION)
107 writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
108}
109
110#define hard_smp_processor_id() ia64_get_lid()
111
112/* Upping and downing of CPUs */
113extern int __cpu_disable (void);
114extern void __cpu_die (unsigned int cpu);
115extern void cpu_die (void) __attribute__ ((noreturn));
116extern int __cpu_up (unsigned int cpu);
117extern void __init smp_build_cpu_map(void);
118
119extern void __init init_smp_config (void);
120extern void smp_do_timer (struct pt_regs *regs);
121
122extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
123 int retry, int wait);
124extern void smp_send_reschedule (int cpu);
125extern void lock_ipi_calllock(void);
126extern void unlock_ipi_calllock(void);
127
128#else
129
130#define cpu_logical_id(i) 0
131#define cpu_physical_id(i) ia64_get_lid()
132
133#endif /* CONFIG_SMP */
134#endif /* _ASM_IA64_SMP_H */
diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
new file mode 100644
index 000000000000..c916bd22767a
--- /dev/null
+++ b/include/asm-ia64/sn/addrs.h
@@ -0,0 +1,238 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 1992-1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef _ASM_IA64_SN_ADDRS_H
10#define _ASM_IA64_SN_ADDRS_H
11
12#include <asm/percpu.h>
13#include <asm/sn/types.h>
14#include <asm/sn/arch.h>
15#include <asm/sn/pda.h>
16
17/*
18 * Memory/SHUB Address Format:
19 * +-+---------+--+--------------+
20 * |0| NASID |AS| NodeOffset |
21 * +-+---------+--+--------------+
22 *
23 * NASID: (low NASID bit is 0) Memory and SHUB MMRs
24 * AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
25 * 00: Local Resources and MMR space
26 * Top bit of NodeOffset
27 * 0: Local resources space
28 * node id:
29 * 0: IA64/NT compatibility space
30 * 2: Local MMR Space
31 * 4: Local memory, regardless of local node id
32 * 1: Global MMR space
33 * 01: GET space.
34 * 10: AMO space.
35 * 11: Cacheable memory space.
36 *
37 * NodeOffset: byte offset
38 *
39 *
40 * TIO address format:
41 * +-+----------+--+--------------+
42 * |0| NASID |AS| Nodeoffset |
43 * +-+----------+--+--------------+
44 *
45 * NASID: (low NASID bit is 1) TIO
46 * AS: 2-bit Chiplet Identifier
47 * 00: TIO LB (Indicates TIO MMR access.)
48 * 01: TIO ICE (indicates coretalk space access.)
49 *
50 * NodeOffset: top bit must be set.
51 *
52 *
53 * Note that in both of the above address formats, the low
54 * NASID bit indicates if the reference is to the SHUB or TIO MMRs.
55 */
56
57
58/*
59 * Define basic shift & mask constants for manipulating NASIDs and AS values.
60 */
61#define NASID_BITMASK (sn_hub_info->nasid_bitmask)
62#define NASID_SHIFT (sn_hub_info->nasid_shift)
63#define AS_SHIFT (sn_hub_info->as_shift)
64#define AS_BITMASK 0x3UL
65
66#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT)
67#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT)
68#define REGION_BITS 0xe000000000000000UL
69
70
71/*
72 * AS values. These are the same on both SHUB1 & SHUB2.
73 */
74#define AS_GET_VAL 1UL
75#define AS_AMO_VAL 2UL
76#define AS_CAC_VAL 3UL
77#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT)
78#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT)
79#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT)
80
81
82/*
83 * Base addresses for various address ranges.
84 */
85#define CACHED 0xe000000000000000UL
86#define UNCACHED 0xc000000000000000UL
87#define UNCACHED_PHYS 0x8000000000000000UL
88
89
90/*
91 * Virtual Mode Local & Global MMR space.
92 */
93#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL
94#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL
95#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
96#define LOCAL_MMR_SPACE (UNCACHED | LOCAL_MMR_OFFSET)
97#define LOCAL_PHYS_MMR_SPACE (UNCACHED_PHYS | LOCAL_MMR_OFFSET)
98
99#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL
100#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL
101#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
102#define GLOBAL_MMR_SPACE (UNCACHED | GLOBAL_MMR_OFFSET)
103
104/*
105 * Physical mode addresses
106 */
107#define GLOBAL_PHYS_MMR_SPACE (UNCACHED_PHYS | GLOBAL_MMR_OFFSET)
108
109
110/*
111 * Clear region & AS bits.
112 */
113#define TO_PHYS_MASK (~(REGION_BITS | AS_MASK))
114
115
116/*
117 * Misc NASID manipulation.
118 */
119#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT)
120#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a))
121#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1))
122#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT)
123#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
124#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a))
125#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
126#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
127#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a))
128#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
129
130
131/* non-II mmr's start at top of big window space (4G) */
132#define BWIN_TOP 0x0000000100000000UL
133
134/*
135 * general address defines
136 */
137#define CAC_BASE (CACHED | AS_CAC_SPACE)
138#define AMO_BASE (UNCACHED | AS_AMO_SPACE)
139#define GET_BASE (CACHED | AS_GET_SPACE)
140
141/*
142 * Convert Memory addresses between various addressing modes.
143 */
144#define TO_PHYS(x) (TO_PHYS_MASK & (x))
145#define TO_CAC(x) (CAC_BASE | TO_PHYS(x))
146#define TO_AMO(x) (AMO_BASE | TO_PHYS(x))
147#define TO_GET(x) (GET_BASE | TO_PHYS(x))
148
149
150/*
151 * Convert from processor physical address to II/TIO physical address:
152 * II - squeeze out the AS bits
153 * TIO- requires a chiplet id in bits 38-39. For DMA to memory,
154 * the chiplet id is zero. If we implement TIO-TIO dma, we might need
155 * to insert a chiplet id into this macro. However, it is our belief
156 * right now that this chiplet id will be ICE, which is also zero.
157 */
158#define PHYS_TO_TIODMA(x) ( (((u64)(x) & NASID_MASK) << 2) | NODE_OFFSET(x))
159#define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
160
161
162/*
163 * The following definitions pertain to the IO special address
164 * space. They define the location of the big and little windows
165 * of any given node.
166 */
167#define BWIN_SIZE_BITS 29 /* big window size: 512M */
168#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
169#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
170 : RAW_NODE_SWIN_BASE(n, w))
171#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
172#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
173#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
174#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
175#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
176#define BWIN_WIDGET_MASK 0x7
177#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
178
179#define TIO_BWIN_WINDOW_SELECT_MASK 0x7
180#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
181
182
183
184/*
185 * The following definitions pertain to the IO special address
186 * space. They define the location of the big and little windows
187 * of any given node.
188 */
189
190#define SWIN_SIZE_BITS 24
191#define SWIN_WIDGET_MASK 0xF
192
193#define TIO_SWIN_SIZE_BITS 28
194#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS)
195#define TIO_SWIN_WIDGET_MASK 0x3
196
197/*
198 * Convert smallwindow address to xtalk address.
199 *
200 * 'addr' can be physical or virtual address, but will be converted
201 * to Xtalk address in the range 0 -> SWINZ_SIZEMASK
202 */
203#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
204#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
205
206
207/*
208 * The following macros produce the correct base virtual address for
209 * the hub registers. The REMOTE_HUB_* macro produce
210 * the address for the specified hub's registers. The intent is
211 * that the appropriate PI, MD, NI, or II register would be substituted
212 * for x.
213 *
214 * WARNING:
215 * When certain Hub chip workaround are defined, it's not sufficient
216 * to dereference the *_HUB_ADDR() macros. You should instead use
217 * HUB_L() and HUB_S() if you must deal with pointers to hub registers.
218 * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
219 * They're always safe.
220 */
221#define REMOTE_HUB_ADDR(n,x) \
222 ((n & 1) ? \
223 /* TIO: */ \
224 ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x))) \
225 : /* SHUB: */ \
226 (((x) & BWIN_TOP) ? ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x)))\
227 : ((volatile u64 *)(NODE_SWIN_BASE(n,1) + 0x800000 + (x)))))
228
229
230
231#define HUB_L(x) (*((volatile typeof(*x) *)x))
232#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d))
233
234#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a)))
235#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
236
237
238#endif /* _ASM_IA64_SN_ADDRS_H */
diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h
new file mode 100644
index 000000000000..7c349f07916a
--- /dev/null
+++ b/include/asm-ia64/sn/arch.h
@@ -0,0 +1,52 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI specific setup.
7 *
8 * Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
10 */
11#ifndef _ASM_IA64_SN_ARCH_H
12#define _ASM_IA64_SN_ARCH_H
13
14#include <asm/types.h>
15#include <asm/percpu.h>
16#include <asm/sn/types.h>
17#include <asm/sn/sn_cpuid.h>
18
19/*
20 * The following defines attributes of the HUB chip. These attributes are
21 * frequently referenced. They are kept in the per-cpu data areas of each cpu.
22 * They are kept together in a struct to minimize cache misses.
23 */
24struct sn_hub_info_s {
25 u8 shub2; /* nonzero on SHUB2 chips (tested via is_shub2()/is_shub1()) */
26 u8 nasid_shift; /* bit position of the NASID field in an address (NASID_SHIFT) */
27 u8 as_shift; /* bit position of the 2-bit address-space field (AS_SHIFT) */
28 u8 shub_1_1_found; /* nonzero if a shub 1.1 exists; enables 1.1 workarounds */
29 u16 nasid_bitmask; /* mask applied after shifting to extract the NASID */
30};
31DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
32#define sn_hub_info (&__get_cpu_var(__sn_hub_info))
33#define is_shub2() (sn_hub_info->shub2)
34#define is_shub1() (sn_hub_info->shub2 == 0)
35
36/*
37 * Use this macro to test if shub 1.1 wars should be enabled
38 */
39#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
40
41
42/*
43 * This is the maximum number of nodes that can be part of a kernel.
44 * Effectively, it's the maximum number of compact node ids (cnodeid_t).
45 * This is not necessarily the same as MAX_NASIDS.
46 */
47#define MAX_COMPACT_NODES 2048
48#define CPUS_PER_NODE 4
49
50extern void sn_flush_all_caches(long addr, long bytes);
51
52#endif /* _ASM_IA64_SN_ARCH_H */
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h
new file mode 100644
index 000000000000..0ec27f99c181
--- /dev/null
+++ b/include/asm-ia64/sn/bte.h
@@ -0,0 +1,148 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10#ifndef _ASM_IA64_SN_BTE_H
11#define _ASM_IA64_SN_BTE_H
12
13#include <linux/timer.h>
14#include <linux/spinlock.h>
15#include <linux/cache.h>
16#include <asm/sn/types.h>
17
18
19/* #define BTE_DEBUG */
20/* #define BTE_DEBUG_VERBOSE */
21
22#ifdef BTE_DEBUG
23# define BTE_PRINTK(x) printk x /* Terse */
24# ifdef BTE_DEBUG_VERBOSE
25# define BTE_PRINTKV(x) printk x /* Verbose */
26# else
27# define BTE_PRINTKV(x)
28# endif /* BTE_DEBUG_VERBOSE */
29#else
30# define BTE_PRINTK(x)
31# define BTE_PRINTKV(x)
32#endif /* BTE_DEBUG */
33
34
35/* BTE status register only supports 16 bits for length field */
36#define BTE_LEN_BITS (16)
37#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
38#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)
39
40
41/* Define hardware */
42#define BTES_PER_NODE 2
43
44
45/* Define hardware modes */
46#define BTE_NOTIFY (IBCT_NOTIFY)
47#define BTE_NORMAL BTE_NOTIFY
48#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
49/* Use a reserved bit to let the caller specify a wait for any BTE */
50#define BTE_WACQUIRE (0x4000)
51/* Use the BTE on the node with the destination memory */
52#define BTE_USE_DEST (BTE_WACQUIRE << 1)
53/* Use any available BTE interface on any node for the transfer */
54#define BTE_USE_ANY (BTE_USE_DEST << 1)
55/* macro to force the IBCT0 value valid */
56#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
57
58#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR)
59#define BTE_WORD_AVAILABLE (IBLS_BUSY << 1)
60#define BTE_WORD_BUSY (~BTE_WORD_AVAILABLE)
61
62/*
63 * Some macros to simplify reading.
64 * Start with macros to locate the BTE control registers.
65 */
66#define BTE_LNSTAT_LOAD(_bte) \
67 HUB_L(_bte->bte_base_addr)
68#define BTE_LNSTAT_STORE(_bte, _x) \
69 HUB_S(_bte->bte_base_addr, (_x))
70#define BTE_SRC_STORE(_bte, _x) \
71 HUB_S(_bte->bte_base_addr + (BTEOFF_SRC/8), (_x))
72#define BTE_DEST_STORE(_bte, _x) \
73 HUB_S(_bte->bte_base_addr + (BTEOFF_DEST/8), (_x))
74#define BTE_CTRL_STORE(_bte, _x) \
75 HUB_S(_bte->bte_base_addr + (BTEOFF_CTRL/8), (_x))
76#define BTE_NOTIF_STORE(_bte, _x) \
77 HUB_S(_bte->bte_base_addr + (BTEOFF_NOTIFY/8), (_x))
78
79
80/* Possible results from bte_copy and bte_unaligned_copy */
81/* The following error codes map into the BTE hardware codes
82 * IIO_ICRB_ECODE_* (in shubio.h). The hardware uses
83 * an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero
84 * to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
85 * codes to give the following error codes.
86 */
87#define BTEFAIL_OFFSET 1
88
/* Order must track the hardware IIO_ICRB_ECODE_* codes + BTEFAIL_OFFSET (see note above). */
89typedef enum {
90 BTE_SUCCESS, /* 0 is success */
91 BTEFAIL_DIR, /* Directory error due to IIO access*/
92 BTEFAIL_POISON, /* poison error on IO access (write to poison page) */
93 BTEFAIL_WERR, /* Write error (ie WINV to a Read only line) */
94 BTEFAIL_ACCESS, /* access error (protection violation) */
95 BTEFAIL_PWERR, /* Partial Write Error */
96 BTEFAIL_PRERR, /* Partial Read Error */
97 BTEFAIL_TOUT, /* CRB Time out */
98 BTEFAIL_XTERR, /* Incoming xtalk pkt had error bit */
99 BTEFAIL_NOTAVAIL, /* BTE not available */
100} bte_result_t;
101
102
103/*
104 * Structure defining a bte. An instance of this
105 * structure is created in the nodepda for each
106 * bte on that node (as defined by BTES_PER_NODE)
107 * This structure contains everything necessary
108 * to work with a BTE.
109 */
110struct bteinfo_s {
111 volatile u64 notify ____cacheline_aligned; /* completion word — cacheline-aligned, presumably to isolate hw updates; confirm */
112 u64 *bte_base_addr ____cacheline_aligned; /* base of this BTE's control registers (see BTE_*_STORE macros) */
113 spinlock_t spinlock; /* serializes use of this interface */
114 cnodeid_t bte_cnode; /* cnode */
115 int bte_error_count; /* Number of errors encountered */
116 int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
117 int cleanup_active; /* Interface is locked for cleanup */
118 volatile bte_result_t bh_error; /* error while processing */
119 volatile u64 *most_rcnt_na; /* most recent notification address — TODO confirm against bte.c */
120};
121
122
123/*
124 * Function prototypes (functions defined in bte.c, used elsewhere)
125 */
126extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
127extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
128extern void bte_error_handler(unsigned long);
129
130#define bte_zero(dest, len, mode, notification) \
131 bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
132
133/*
134 * The following is the preferred way of calling bte_unaligned_copy
135 * If the copy is fully cache line aligned, then bte_copy is
136 * used instead. Since bte_copy is inlined, this saves a call
137 * stack. NOTE: bte_copy is called synchronously and does block
138 * until the transfer is complete. In order to get the asynch
139 * version of bte_copy, you must perform this check yourself.
140 */
141#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
142 (((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \
143 (dest & L1_CACHE_MASK)) ? \
144 bte_unaligned_copy(src, dest, len, mode) : \
145 bte_copy(src, dest, len, mode, NULL))
146
147
148#endif /* _ASM_IA64_SN_BTE_H */
diff --git a/include/asm-ia64/sn/clksupport.h b/include/asm-ia64/sn/clksupport.h
new file mode 100644
index 000000000000..d340c365a824
--- /dev/null
+++ b/include/asm-ia64/sn/clksupport.h
@@ -0,0 +1,28 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9/*
10 * This file contains definitions for accessing a platform supported high resolution
11 * clock. The clock is monitonically increasing and can be accessed from any node
12 * in the system. The clock is synchronized across nodes - all nodes see the
13 * same value.
14 *
15 * RTC_COUNTER_ADDR - contains the address of the counter
16 *
17 */
18
19#ifndef _ASM_IA64_SN_CLKSUPPORT_H
20#define _ASM_IA64_SN_CLKSUPPORT_H
21
extern unsigned long sn_rtc_cycles_per_second;	/* RTC tick rate (cycles per second) */

#define RTC_COUNTER_ADDR	((long *)LOCAL_MMR_ADDR(SH_RTC))

/* Read the current RTC counter value (see the header comment above:
 * monotonically increasing and synchronized across nodes). */
#define rtc_time()		(*RTC_COUNTER_ADDR)
27
28#endif /* _ASM_IA64_SN_CLKSUPPORT_H */
diff --git a/include/asm-ia64/sn/fetchop.h b/include/asm-ia64/sn/fetchop.h
new file mode 100644
index 000000000000..5f4ad8f4b5d2
--- /dev/null
+++ b/include/asm-ia64/sn/fetchop.h
@@ -0,0 +1,85 @@
1/*
2 *
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
8 */
9
10#ifndef _ASM_IA64_SN_FETCHOP_H
11#define _ASM_IA64_SN_FETCHOP_H
12
13#include <linux/config.h>
14
#define FETCHOP_BASENAME	"sgi_fetchop"
#define FETCHOP_FULLNAME	"/dev/sgi_fetchop"



#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */

/* Byte offsets into a fetchop (AMO) variable; accessing the variable at a
 * given offset selects the operation (see AMO note further below). */
#define FETCHOP_LOAD		0
#define FETCHOP_INCREMENT	8
#define FETCHOP_DECREMENT	16
#define FETCHOP_CLEAR		24

#define FETCHOP_STORE		0
#define FETCHOP_AND		24
#define FETCHOP_OR		32

#define FETCHOP_CLEAR_CACHE	56

/* Read the fetchop variable at 'addr', performing operation 'op' */
#define FETCHOP_LOAD_OP(addr, op) ( \
         *(volatile long *)((char*) (addr) + (op)))

/* Write 'x' to the fetchop variable at 'addr', performing operation 'op' */
#define FETCHOP_STORE_OP(addr, op, x) ( \
         *(volatile long *)((char*) (addr) + (op)) = (long) (x))
38
39#ifdef __KERNEL__
40
41/*
42 * Convert a region 6 (kaddr) address to the address of the fetchop variable
43 */
44#define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr)
45
46
47/*
48 * Each Atomic Memory Operation (AMO formerly known as fetchop)
49 * variable is 64 bytes long. The first 8 bytes are used. The
50 * remaining 56 bytes are unaddressable due to the operation taking
51 * that portion of the address.
52 *
53 * NOTE: The AMO_t _MUST_ be placed in either the first or second half
54 * of the cache line. The cache line _MUST NOT_ be used for anything
55 * other than additional AMO_t entries. This is because there are two
56 * addresses which reference the same physical cache line. One will
57 * be a cached entry with the memory type bits all set. This address
58 * may be loaded into processor cache. The AMO_t will be referenced
59 * uncached via the memory special memory type. If any portion of the
60 * cached cache-line is modified, when that line is flushed, it will
61 * overwrite the uncached value in physical memory and lead to
62 * inconsistency.
63 */
typedef struct {
        u64 variable;		/* the only addressable 8 bytes */
        u64 unused[7];		/* pad to FETCHOP_VAR_SIZE (64 bytes); see note above */
} AMO_t;
68
69
70/*
71 * The following APIs are externalized to the kernel to allocate/free pages of
72 * fetchop variables.
73 * fetchop_kalloc_page - Allocate/initialize 1 fetchop page on the
74 * specified cnode.
75 * fetchop_kfree_page - Free a previously allocated fetchop page
76 */
77
78unsigned long fetchop_kalloc_page(int nid);
79void fetchop_kfree_page(unsigned long maddr);
80
81
82#endif /* __KERNEL__ */
83
84#endif /* _ASM_IA64_SN_FETCHOP_H */
85
diff --git a/include/asm-ia64/sn/geo.h b/include/asm-ia64/sn/geo.h
new file mode 100644
index 000000000000..f566343d25f8
--- /dev/null
+++ b/include/asm-ia64/sn/geo.h
@@ -0,0 +1,124 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef _ASM_IA64_SN_GEO_H
10#define _ASM_IA64_SN_GEO_H
11
12/* The geoid_t implementation below is based loosely on the pcfg_t
13 implementation in sys/SN/promcfg.h. */
14
15/* Type declaractions */
16
17/* Size of a geoid_t structure (must be before decl. of geoid_u) */
18#define GEOID_SIZE 8 /* Would 16 be better? The size can
19 be different on different platforms. */
20
21#define MAX_SLABS 0xe /* slabs per module */
22
typedef unsigned char	geo_type_t;	/* one of the GEO_TYPE_* values below */

/* Fields common to all substructures */
typedef struct geo_any_s {
    moduleid_t	module;		/* The module (box) this h/w lives in */
    geo_type_t	type;		/* What type of h/w is named by this geoid_t */
    slabid_t	slab;		/* The logical assembly within the module */
} geo_any_t;

/* Additional fields for particular types of hardware */
typedef struct geo_node_s {
    geo_any_t	any;		/* No additional fields needed */
} geo_node_t;

typedef struct geo_rtr_s {
    geo_any_t	any;		/* No additional fields needed */
} geo_rtr_t;

typedef struct geo_iocntl_s {
    geo_any_t	any;		/* No additional fields needed */
} geo_iocntl_t;

typedef struct geo_pcicard_s {
    geo_iocntl_t	any;
    char		bus;	/* Bus/widget number */
    char		slot;	/* PCI slot number */
} geo_pcicard_t;

/* Subcomponents of a node */
typedef struct geo_cpu_s {
    geo_node_t	node;
    char	slice;		/* Which CPU on the node */
} geo_cpu_t;

typedef struct geo_mem_s {
    geo_node_t	node;
    char	membus;		/* The memory bus on the node */
    char	memslot;	/* The memory slot on the bus */
} geo_mem_t;


/* Discriminated by any.type (a GEO_TYPE_* value). */
typedef union geoid_u {
    geo_any_t	any;
    geo_node_t	node;
    geo_iocntl_t	iocntl;
    geo_pcicard_t	pcicard;
    geo_rtr_t	rtr;
    geo_cpu_t	cpu;
    geo_mem_t	mem;
    char		padsize[GEOID_SIZE];	/* pads the union to GEOID_SIZE */
} geoid_t;
74
75
76/* Preprocessor macros */
77
78#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
79 module/001c07/slab/5/node/memory/2/slot/4 */
80
81/* Values for geo_type_t */
82#define GEO_TYPE_INVALID 0
83#define GEO_TYPE_MODULE 1
84#define GEO_TYPE_NODE 2
85#define GEO_TYPE_RTR 3
86#define GEO_TYPE_IOCNTL 4
87#define GEO_TYPE_IOCARD 5
88#define GEO_TYPE_CPU 6
89#define GEO_TYPE_MEM 7
90#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
91
92/* Parameter for hwcfg_format_geoid_compt() */
93#define GEO_COMPT_MODULE 1
94#define GEO_COMPT_SLAB 2
95#define GEO_COMPT_IOBUS 3
96#define GEO_COMPT_IOSLOT 4
97#define GEO_COMPT_CPU 5
98#define GEO_COMPT_MEMBUS 6
99#define GEO_COMPT_MEMSLOT 7
100
101#define GEO_INVALID_STR "<invalid>"
102
/* Sentinel "invalid" values for the various id types.
 * Fix: INVALID_SLAB now carries the outer parentheses all of its siblings
 * have, so the cast cannot mis-bind in a surrounding expression. */
#define INVALID_NASID		((nasid_t)-1)
#define INVALID_CNODEID		((cnodeid_t)-1)
#define INVALID_PNODEID		((pnodeid_t)-1)
#define INVALID_SLAB		((slabid_t)-1)
#define INVALID_MODULE		((moduleid_t)-1)
#define INVALID_PARTID		((partid_t)-1)
109
110static inline slabid_t geo_slab(geoid_t g)
111{
112 return (g.any.type == GEO_TYPE_INVALID) ?
113 INVALID_SLAB : g.any.slab;
114}
115
116static inline moduleid_t geo_module(geoid_t g)
117{
118 return (g.any.type == GEO_TYPE_INVALID) ?
119 INVALID_MODULE : g.any.module;
120}
121
122extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
123
124#endif /* _ASM_IA64_SN_GEO_H */
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h
new file mode 100644
index 000000000000..e51471fb0867
--- /dev/null
+++ b/include/asm-ia64/sn/intr.h
@@ -0,0 +1,56 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef _ASM_IA64_SN_INTR_H
10#define _ASM_IA64_SN_INTR_H
11
#define SGI_UART_VECTOR		(0xe9)

/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
/* (fix: SGI_PCIBR_ERROR was previously also defined, identically, above
 *  this list; the redundant duplicate definition has been removed) */
#define SGI_XPC_ACTIVATE	(0x30)
#define SGI_II_ERROR		(0x31)
#define SGI_XBOW_ERROR		(0x32)
#define SGI_PCIBR_ERROR		(0x33)
#define SGI_ACPI_SCI_INT	(0x34)
#define SGI_TIOCA_ERROR		(0x35)
#define SGI_TIO_ERROR		(0x36)
#define SGI_TIOCX_ERROR		(0x37)
#define SGI_MMTIMER_VECTOR	(0x38)
#define SGI_XPC_NOTIFY		(0xe7)
26
27#define IA64_SN2_FIRST_DEVICE_VECTOR (0x3c)
28#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6)
29
30#define SN2_IRQ_RESERVED (0x1)
31#define SN2_IRQ_CONNECTED (0x2)
32#define SN2_IRQ_SHARED (0x4)
33
/* The SN PROM irq struct */
struct sn_irq_info {
	struct sn_irq_info *irq_next;	/* sharing irq list */
	short		irq_nasid;	/* Nasid IRQ is assigned to */
	int		irq_slice;	/* slice IRQ is assigned to */
	int		irq_cpuid;	/* kernel logical cpuid */
	int		irq_irq;	/* the IRQ number */
	int		irq_int_bit;	/* Bridge interrupt pin */
	uint64_t	irq_xtalkaddr;	/* xtalkaddr IRQ is sent to */
	int		irq_bridge_type;/* pciio asic type (pciio.h) */
	void	       *irq_bridge;	/* bridge generating irq */
	void	       *irq_pciioinfo;	/* associated pciio_info_t */
	int		irq_last_intr;	/* For Shub lb lost intr WAR */
	int		irq_cookie;	/* unique cookie */
	int		irq_flags;	/* flags (SN2_IRQ_* above) */
	int		irq_share_cnt;	/* num devices sharing IRQ */
};
51
52extern void sn_send_IPI_phys(int, long, int, int);
53
54#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
55
56#endif /* _ASM_IA64_SN_INTR_H */
diff --git a/include/asm-ia64/sn/io.h b/include/asm-ia64/sn/io.h
new file mode 100644
index 000000000000..42209733f6b1
--- /dev/null
+++ b/include/asm-ia64/sn/io.h
@@ -0,0 +1,265 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef _ASM_SN_IO_H
10#define _ASM_SN_IO_H
11#include <linux/compiler.h>
12#include <asm/intrinsics.h>
13
14extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
15extern void __sn_mmiowb(void); /* Forward definition */
16
17extern int numionodes;
18
/* ia64 "mf.a" barrier -- orders memory accesses including acceptance of
 * uncached references (see ia64_mfa in the intrinsics header). */
#define __sn_mf_a()   ia64_mfa()

extern void sn_dma_flush(unsigned long);

/* Route the generic ia64 PIO/MMIO accessor hooks to the SN-specific
 * implementations defined below in this file. */
#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#define __sn_inl ___sn_inl
#define __sn_outb ___sn_outb
#define __sn_outw ___sn_outw
#define __sn_outl ___sn_outl
#define __sn_readb ___sn_readb
#define __sn_readw ___sn_readw
#define __sn_readl ___sn_readl
#define __sn_readq ___sn_readq
#define __sn_readb_relaxed ___sn_readb_relaxed
#define __sn_readw_relaxed ___sn_readw_relaxed
#define __sn_readl_relaxed ___sn_readl_relaxed
#define __sn_readq_relaxed ___sn_readq_relaxed
37
38/*
39 * The following routines are SN Platform specific, called when
40 * a reference is made to inX/outX set macros. SN Platform
41 * inX set of macros ensures that Posted DMA writes on the
42 * Bridge is flushed.
43 *
44 * The routines should be self explainatory.
45 */
46
/*
 * Byte PIO read.  Translates the port to an address, reads it, then fences
 * and flushes posted DMA writes on the bridge.  Returns 0xff (all ones)
 * when the port does not map to an address.
 */
static inline unsigned int
___sn_inb (unsigned long port)
{
	volatile unsigned char *p = sn_io_addr(port);
	unsigned char val = -1;

	if (p) {
		val = *p;
		__sn_mf_a();
		sn_dma_flush((unsigned long)p);
	}
	return val;
}
60
/*
 * 16-bit PIO read; same contract as ___sn_inb (returns all ones when the
 * port does not decode to an address).
 */
static inline unsigned int
___sn_inw (unsigned long port)
{
	volatile unsigned short *p = sn_io_addr(port);
	unsigned short val = -1;

	if (p) {
		val = *p;
		__sn_mf_a();
		sn_dma_flush((unsigned long)p);
	}
	return val;
}
74
/*
 * 32-bit PIO read; same contract as ___sn_inb (returns all ones when the
 * port does not decode to an address).
 */
static inline unsigned int
___sn_inl (unsigned long port)
{
	volatile unsigned int *p = sn_io_addr(port);
	unsigned int val = -1;

	if (p) {
		val = *p;
		__sn_mf_a();
		sn_dma_flush((unsigned long)p);
	}
	return val;
}
88
/*
 * Byte PIO write followed by an mmiowb so the posted write is ordered.
 * Silently does nothing when the port does not decode to an address.
 */
static inline void
___sn_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *p = sn_io_addr(port);

	if (p) {
		*p = val;
		__sn_mmiowb();
	}
}
99
/*
 * 16-bit PIO write; same contract as ___sn_outb.
 */
static inline void
___sn_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *p = sn_io_addr(port);

	if (p) {
		*p = val;
		__sn_mmiowb();
	}
}
110
/*
 * 32-bit PIO write; same contract as ___sn_outb.
 */
static inline void
___sn_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *p = sn_io_addr(port);

	if (p) {
		*p = val;
		__sn_mmiowb();
	}
}
121
122/*
123 * The following routines are SN Platform specific, called when
124 * a reference is made to readX/writeX set macros. SN Platform
125 * readX set of macros ensures that Posted DMA writes on the
126 * Bridge is flushed.
127 *
128 * The routines should be self explainatory.
129 */
130
/* Byte MMIO read: read, fence, then flush posted DMA writes on the bridge
 * (see the block comment above -- this is what distinguishes the SN readX
 * from the _relaxed variants further below). */
static inline unsigned char
___sn_readb (const volatile void __iomem *addr)
{
	unsigned char val;

	val = *(volatile unsigned char __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
        return val;
}
141
/* 16-bit MMIO read with fence + posted-DMA flush (cf. ___sn_readb). */
static inline unsigned short
___sn_readw (const volatile void __iomem *addr)
{
	unsigned short val;

	val = *(volatile unsigned short __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
        return val;
}
152
/* 32-bit MMIO read with fence + posted-DMA flush (cf. ___sn_readb). */
static inline unsigned int
___sn_readl (const volatile void __iomem *addr)
{
	unsigned int val;

	val = *(volatile unsigned int __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
        return val;
}
163
/* 64-bit MMIO read with fence + posted-DMA flush (cf. ___sn_readb). */
static inline unsigned long
___sn_readq (const volatile void __iomem *addr)
{
	unsigned long val;

	val = *(volatile unsigned long __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
        return val;
}
174
175/*
176 * For generic and SN2 kernels, we have a set of fast access
177 * PIO macros. These macros are provided on SN Platform
178 * because the normal inX and readX macros perform an
179 * additional task of flushing Post DMA request on the Bridge.
180 *
181 * These routines should be self explainatory.
182 */
183
/*
 * Fast byte PIO read: no port translation and no posted-DMA flush, just the
 * load and a fence (see the block comment above).
 */
static inline unsigned int
sn_inb_fast (unsigned long port)
{
	volatile unsigned char *p = (unsigned char *)port;
	unsigned char val = *p;

	__sn_mf_a();
	return val;
}
194
/*
 * Fast 16-bit PIO read; same contract as sn_inb_fast.
 */
static inline unsigned int
sn_inw_fast (unsigned long port)
{
	volatile unsigned short *p = (unsigned short *)port;
	unsigned short val = *p;

	__sn_mf_a();
	return val;
}
205
/*
 * Fast 32-bit PIO read; same contract as sn_inb_fast.
 */
static inline unsigned int
sn_inl_fast (unsigned long port)
{
	volatile unsigned int *p = (unsigned int *)port;
	unsigned int val = *p;

	__sn_mf_a();
	return val;
}
216
/* Relaxed byte MMIO read: plain load, no fence, no posted-DMA flush. */
static inline unsigned char
___sn_readb_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}
222
/* Relaxed 16-bit MMIO read: plain load, no fence, no posted-DMA flush. */
static inline unsigned short
___sn_readw_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}
228
/* Relaxed 32-bit MMIO read: plain load, no fence, no posted-DMA flush. */
static inline unsigned int
___sn_readl_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}
234
/* Relaxed 64-bit MMIO read: plain load, no fence, no posted-DMA flush. */
static inline unsigned long
___sn_readq_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}
240
struct pci_dev;

/*
 * Encode the PCI virtual channel into bit 57 of a 64-bit bus address.
 * Returns -1 for a channel other than 0 or 1; returns 0 without touching
 * *addr when the address fits in 32 bits (nothing to encode).
 * The pci_dev argument is currently unused.
 */
static inline int
sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
{
	if (vchan > 1)
		return -1;	/* only virtual channels 0 and 1 exist */

	if (!(*addr >> 32))	/* Using a mask here would be cleaner */
		return 0;	/* but this generates better code */

	if (vchan == 1)
		*addr |= (1UL << 57);	/* Set Bit 57 */
	else
		*addr &= ~(1UL << 57);	/* Clear Bit 57 */

	return 0;
}
264
265#endif /* _ASM_SN_IO_H */
diff --git a/include/asm-ia64/sn/klconfig.h b/include/asm-ia64/sn/klconfig.h
new file mode 100644
index 000000000000..9f920c70a62a
--- /dev/null
+++ b/include/asm-ia64/sn/klconfig.h
@@ -0,0 +1,272 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Derived from IRIX <sys/SN/klconfig.h>.
7 *
8 * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
9 * Copyright (C) 1999 by Ralf Baechle
10 */
11#ifndef _ASM_IA64_SN_KLCONFIG_H
12#define _ASM_IA64_SN_KLCONFIG_H
13
14/*
15 * The KLCONFIG structures store info about the various BOARDs found
16 * during Hardware Discovery. In addition, it stores info about the
17 * components found on the BOARDs.
18 */
19
20typedef s32 klconf_off_t;
21
22
23/* Functions/macros needed to use this structure */
24
/* KLCONFIG area header; holds the offset of the board linked list.
 * NOTE(review): the pad sizes suggest a layout fixed by the PROM --
 * confirm before changing any field. */
typedef struct kl_config_hdr {
	char		pad[20];	/* unused leading portion of the header */
	klconf_off_t	ch_board_info;	/* the link list of boards */
	char		pad0[88];	/* unused trailing portion of the header */
} kl_config_hdr_t;
30
31
32#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
33
34/*
35 * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
36 * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
37 * the LOCAL/current NODE. REMOTE means it is attached to a different
38 * node.(TBD - Need a way to treat ROUTER boards.)
39 *
40 * There are 2 different structures to represent these boards -
41 * lboard - Local board, rboard - remote board. These 2 structures
42 * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
43 * Figure below). The first byte of the rboard or lboard structure
44 * is used to find out its type - no unions are used.
45 * If it is a lboard, then the config info of this board will be found
46 * on the local node. (LOCAL NODE BASE + offset value gives pointer to
47 * the structure.
48 * If it is a rboard, the local structure contains the node number
49 * and the offset of the beginning of the LINKED LIST on the remote node.
50 * The details of the hardware on a remote node can be built locally,
51 * if required, by reading the LINKED LIST on the remote node and
52 * ignoring all the rboards on that node.
53 *
54 * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
55 * First board info on the remote node. The remote node list is
56 * traversed as the local list, using the REMOTE BASE ADDRESS and not
57 * the local base address and ignoring all rboard values.
58 *
59 *
60 KLCONFIG
61
62 +------------+ +------------+ +------------+ +------------+
63 | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
64 +------------+ | +------------+ | +------------+ | +------------+
65 | board info | | | board info | | |errinfo,bptr| | | board info |
66 +------------+ | +------------+ | +------------+ | +------------+
67 | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
68 +------------+ +------------+ +------------+ +------------+
69
70
71 +------------+
72 | board info |
73 +------------+ +--------------------------------+
74 | compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
75 +------------+ +--------------------------------+
76 | compt 2 |--+
77 +------------+ | +--------------------------------+
78 | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
79 +------------+ +--------------------------------+
80 | errinfo |--+
81 +------------+ | +--------------------------------+
82 +--->|r/l brd errinfo,compt err flags |
83 +--------------------------------+
84
85 *
86 * Each BOARD consists of COMPONENTs and the BOARD structure has
87 * pointers (offsets) to its COMPONENT structure.
88 * The COMPONENT structure has version info, size and speed info, revision,
89 * error info and the NIC info. This structure can accommodate any
90 * BOARD with arbitrary COMPONENT composition.
91 *
92 * The ERRORINFO part of each BOARD has error information
93 * that describes errors about the BOARD itself. It also has flags to
94 * indicate the COMPONENT(s) on the board that have errors. The error
95 * information specific to the COMPONENT is present in the respective
96 * COMPONENT structure.
97 *
98 * The ERRORINFO structure is also treated like a COMPONENT, ie. the
99 * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
100 * structure also has a pointer to the ERRORINFO structure. This is
101 * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
102 * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
103 * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
104 * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
105 * which is present on the REMOTE NODE.(TBD)
106 * REMOTE ERRINFO can be stored on any of the nearest nodes
107 * or on all the nearest nodes.(TBD)
108 * Like BOARD structures, REMOTE ERRINFO structures can be built locally
109 * using the rboard errinfo pointer.
110 *
111 * In order to get useful information from this Data organization, a set of
112 * interface routines are provided (TBD). The important thing to remember while
113 * manipulating the structures, is that, the NODE number information should
114 * be used. If the NODE is non-zero (remote) then each offset should
115 * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
116 * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
117 *
118 * Note that these structures do not provide much info about connectivity.
119 * That info will be part of HWGRAPH, which is an extension of the cfg_t
120 * data structure. (ref IP27prom/cfg.h) It has to be extended to include
121 * the IO part of the Network(TBD).
122 *
123 * The data structures below define the above concepts.
124 */
125
126
127/*
128 * BOARD classes
129 */
130
131#define KLCLASS_MASK 0xf0
132#define KLCLASS_NONE 0x00
133#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
134#define KLCLASS_CPU KLCLASS_NODE
135#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
136 and the non-graphics widget boards */
137#define KLCLASS_ROUTER 0x30 /* Router board */
138#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
139 so that we can record error info */
140#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */
141#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */
142
143#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
144
145
146/*
147 * board types
148 */
149
150#define KLTYPE_MASK 0x0f
151#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
152
153#define KLTYPE_SNIA (KLCLASS_CPU | 0x1)
154#define KLTYPE_TIO (KLCLASS_CPU | 0x2)
155
156#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
157#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
158#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
159
160#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
161
162#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
163#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
164#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
165#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
166#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
167#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9)
168#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa)
169#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb)
170#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc)
171#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd)
172
173
174/*
175 * board structures
176 */
177
178#define MAX_COMPTS_PER_BRD 24
179
/* One (local) board in the KLCONFIG linked list -- see the diagram above.
 * NOTE(review): layout appears shared with the PROM hardware-discovery
 * data; confirm before reordering or resizing fields. */
typedef struct lboard_s {
	klconf_off_t 	brd_next_any;     /* Next BOARD */
	unsigned char 	struct_type;      /* type of structure, local or remote */
	unsigned char 	brd_type;         /* type+class */
	unsigned char 	brd_sversion;     /* version of this structure */
	unsigned char 	brd_brevision;    /* board revision */
	unsigned char 	brd_promver;      /* board prom version, if any */
	unsigned char 	brd_flags;        /* Enabled, Disabled etc */
	unsigned char 	brd_slot;         /* slot number */
	unsigned short	brd_debugsw;      /* Debug switches */
	geoid_t		brd_geoid;	  /* geo id */
	partid_t 	brd_partition;    /* Partition number */
	unsigned short 	brd_diagval;      /* diagnostic value */
	unsigned short 	brd_diagparm;     /* diagnostic parameter */
	unsigned char 	brd_inventory;    /* inventory history */
	unsigned char 	brd_numcompts;    /* Number of components */
	nic_t 		brd_nic;          /* Number in CAN */
	nasid_t		brd_nasid;        /* passed parameter */
	klconf_off_t 	brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
	klconf_off_t 	brd_errinfo;      /* Board's error information */
	struct lboard_s *brd_parent;	  /* Logical parent for this brd */
	char		pad0[4];
	unsigned char	brd_confidence;   /* confidence that the board is bad */
	nasid_t		brd_owner;        /* who owns this board */
	unsigned char 	brd_nic_flags;    /* To handle 8 more NICs */
	char		pad1[24];	  /* future expansion */
	char		brd_name[32];
	nasid_t		brd_next_same_host; /* host of next brd w/same nasid */
	klconf_off_t	brd_next_same;    /* Next BOARD with same nasid */
} lboard_t;
210
/* Accessors for walking the KLCONFIG board/component lists; all offsets
 * are node-relative (see the long comment above). */
#define KLCF_NUM_COMPS(_brd)	((_brd)->brd_numcompts)
#define NODE_OFFSET_TO_KLINFO(n,off)	((klinfo_t*) TO_NODE_CAC(n,off))
/* Next board with the same nasid, or NULL at list end */
#define KLCF_NEXT(_brd)			\
        ((_brd)->brd_next_same ?	\
	 (NODE_OFFSET_TO_LBOARD((_brd)->brd_next_same_host, (_brd)->brd_next_same)): NULL)
/* Next board on the node regardless of nasid, or NULL at list end */
#define KLCF_NEXT_ANY(_brd)		\
        ((_brd)->brd_next_any ?		\
	 (NODE_OFFSET_TO_LBOARD(NASID_GET(_brd), (_brd)->brd_next_any)): NULL)
/* Component number _ndx of board _brd, or 0 when the slot is empty */
#define KLCF_COMP(_brd, _ndx)		\
		((((_brd)->brd_compts[(_ndx)]) == 0) ? 0 :	\
		 (NODE_OFFSET_TO_KLINFO(NASID_GET(_brd), (_brd)->brd_compts[(_ndx)])))
222
223
224/*
225 * Generic info structure. This stores common info about a
226 * component.
227 */
228
/* Generic per-component header common to every component on a board. */
typedef struct klinfo_s {			/* Generic info */
	unsigned char	struct_type;	/* type of this structure */
	unsigned char	struct_version;	/* version of this structure */
	unsigned char	flags;		/* Enabled, disabled etc */
	unsigned char	revision;	/* component revision */
	unsigned short	diagval;	/* result of diagnostics */
	unsigned short	diagparm;	/* diagnostic parameter */
	unsigned char	inventory;	/* previous inventory status */
	unsigned short	partid;		/* widget part number */
	nic_t		nic;		/* Must be aligned properly */
	unsigned char	physid;		/* physical id of component */
	unsigned int	virtid;		/* virtual id as seen by system */
	unsigned char	widid;		/* Widget id - if applicable */
	nasid_t		nasid;		/* node number - from parent */
	char		pad1;		/* pad out structure. */
	char		pad2;		/* pad out structure. */
	void		*data;
	klconf_off_t	errinfo;	/* component specific errors */
	unsigned short	pad3;		/* pci fields have moved over to */
	unsigned short	pad4;		/* klbri_t */
} klinfo_t ;
250
251
252static inline lboard_t *find_lboard_any(lboard_t * start, unsigned char brd_type)
253{
254 /* Search all boards stored on this node. */
255
256 while (start) {
257 if (start->brd_type == brd_type)
258 return start;
259 start = KLCF_NEXT_ANY(start);
260 }
261 /* Didn't find it. */
262 return (lboard_t *) NULL;
263}
264
265
266/* external declarations of Linux kernel functions. */
267
268extern lboard_t *root_lboard[];
269extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
270extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
271
272#endif /* _ASM_IA64_SN_KLCONFIG_H */
diff --git a/include/asm-ia64/sn/l1.h b/include/asm-ia64/sn/l1.h
new file mode 100644
index 000000000000..d5dbd55e44b5
--- /dev/null
+++ b/include/asm-ia64/sn/l1.h
@@ -0,0 +1,36 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#ifndef _ASM_IA64_SN_L1_H
10#define _ASM_IA64_SN_L1_H
11
12/* brick type response codes */
13#define L1_BRICKTYPE_PX 0x23 /* # */
14#define L1_BRICKTYPE_PE 0x25 /* % */
15#define L1_BRICKTYPE_N_p0 0x26 /* & */
16#define L1_BRICKTYPE_IP45 0x34 /* 4 */
17#define L1_BRICKTYPE_IP41 0x35 /* 5 */
18#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */
19#define L1_BRICKTYPE_IX 0x3d /* = */
20#define L1_BRICKTYPE_IP34 0x61 /* a */
21#define L1_BRICKTYPE_GA 0x62 /* b */
22#define L1_BRICKTYPE_C 0x63 /* c */
23#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */
24#define L1_BRICKTYPE_I 0x69 /* i */
25#define L1_BRICKTYPE_N 0x6e /* n */
26#define L1_BRICKTYPE_OPUS 0x6f /* o */
27#define L1_BRICKTYPE_P 0x70 /* p */
28#define L1_BRICKTYPE_R 0x72 /* r */
29#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
30#define L1_BRICKTYPE_X 0x78 /* x */
31#define L1_BRICKTYPE_X2 0x79 /* y */
32#define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */
33#define L1_BRICKTYPE_PA 0x6a /* j */
34#define L1_BRICKTYPE_IA 0x6b /* k */
35
36#endif /* _ASM_IA64_SN_L1_H */
diff --git a/include/asm-ia64/sn/leds.h b/include/asm-ia64/sn/leds.h
new file mode 100644
index 000000000000..66cf8c4d92c9
--- /dev/null
+++ b/include/asm-ia64/sn/leds.h
@@ -0,0 +1,33 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
6 */
7#ifndef _ASM_IA64_SN_LEDS_H
8#define _ASM_IA64_SN_LEDS_H
9
10#include <asm/sn/addrs.h>
11#include <asm/sn/pda.h>
12#include <asm/sn/shub_mmr.h>
13
14#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
15#define LED_CPU_SHIFT 16
16
17#define LED_CPU_HEARTBEAT 0x01
18#define LED_CPU_ACTIVITY 0x02
19#define LED_ALWAYS_SET 0x00
20
21/*
22 * Basic macros for flashing the LEDS on an SGI SN.
23 */
24
/* Merge 'value' into the cached LED state under 'mask', then write the
 * whole state out to the LED register at pda->led_address. */
static __inline__ void
set_led_bits(u8 value, u8 mask)
{
	pda->led_state = (pda->led_state & ~mask) | (value & mask);
	*pda->led_address = (short) pda->led_state;
}
31
32#endif /* _ASM_IA64_SN_LEDS_H */
33
diff --git a/include/asm-ia64/sn/module.h b/include/asm-ia64/sn/module.h
new file mode 100644
index 000000000000..734e980ece2f
--- /dev/null
+++ b/include/asm-ia64/sn/module.h
@@ -0,0 +1,127 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_MODULE_H
9#define _ASM_IA64_SN_MODULE_H
10
11/* parameter for format_module_id() */
12#define MODULE_FORMAT_BRIEF 1
13#define MODULE_FORMAT_LONG 2
14#define MODULE_FORMAT_LCD 3
15
16/*
17 * Module id format
18 *
19 * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int)
20 * 15-8 Brick type (8-bit ascii character)
21 * 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int)
22 *
23 */
24
25/*
26 * Macros for getting the brick type
27 */
28#define MODULE_BTYPE_MASK 0xff00
29#define MODULE_BTYPE_SHFT 8
30#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
31#define MODULE_BT_TO_CHAR(_b) ((char)(_b))
32#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
33
34/*
35 * Macros for getting the rack ID.
36 */
37#define MODULE_RACK_MASK 0xffff0000
38#define MODULE_RACK_SHFT 16
39#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
40
41/*
42 * Macros for getting the brick position
43 */
44#define MODULE_BPOS_MASK 0x00ff
45#define MODULE_BPOS_SHFT 0
46#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
47
48/*
49 * Macros for encoding and decoding rack IDs
50 * A rack number consists of three parts:
51 * class (0==CPU/mixed, 1==I/O), group, number
52 *
53 * Rack number is stored just as it is displayed on the screen:
54 * a 3-decimal-digit number.
55 */
56#define RACK_CLASS_DVDR 100
57#define RACK_GROUP_DVDR 10
58#define RACK_NUM_DVDR 1
59
60#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \
61 (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
62
63#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR)
64#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \
65 RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
66#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \
67 RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \
68 RACK_GROUP_DVDR) / RACK_NUM_DVDR)
69
70/*
71 * Macros for encoding and decoding rack IDs
72 * A rack number consists of three parts:
73 * class 1 bit, 0==CPU/mixed, 1==I/O
74 * group 2 bits for CPU/mixed, 3 bits for I/O
75 * number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
76 */
77#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
78#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
79
80#define RACK_CLASS_MASK(_r) 0x20
81#define RACK_CLASS_SHFT(_r) 5
82#define RACK_ADD_CLASS(_r, _c) \
83 ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
84
85#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
86#define RACK_GROUP_MASK(_r) \
87 ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
88#define RACK_ADD_GROUP(_r, _g) \
89 ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
90
91#define RACK_NUM_SHFT(_r) 0
92#define RACK_NUM_MASK(_r) \
93 ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
94#define RACK_ADD_NUM(_r, _n) \
95 ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
96
97
98/*
99 * Brick type definitions
100 */
101#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */
102
103extern char brick_types[];
104
105#define MODULE_CBRICK 0
106#define MODULE_RBRICK 1
107#define MODULE_IBRICK 2
108#define MODULE_KBRICK 3
109#define MODULE_XBRICK 4
110#define MODULE_DBRICK 5
111#define MODULE_PBRICK 6
112#define MODULE_NBRICK 7
113#define MODULE_PEBRICK 8
114#define MODULE_PXBRICK 9
115#define MODULE_IXBRICK 10
116#define MODULE_CGBRICK 11
117#define MODULE_OPUSBRICK 12
118#define MODULE_SABRICK 13 /* TIO BringUp Brick */
119#define MODULE_IABRICK 14
120#define MODULE_PABRICK 15
121#define MODULE_GABRICK 16
122#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */
123
124extern char brick_types[];
125extern void format_module_id(char *, moduleid_t, int);
126
127#endif /* _ASM_IA64_SN_MODULE_H */
diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h
new file mode 100644
index 000000000000..2fbde33656e6
--- /dev/null
+++ b/include/asm-ia64/sn/nodepda.h
@@ -0,0 +1,86 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_NODEPDA_H
9#define _ASM_IA64_SN_NODEPDA_H
10
11
12#include <asm/semaphore.h>
13#include <asm/irq.h>
14#include <asm/sn/arch.h>
15#include <asm/sn/intr.h>
16#include <asm/sn/pda.h>
17#include <asm/sn/bte.h>
18
19/*
20 * NUMA Node-Specific Data structures are defined in this file.
21 * In particular, this is the location of the node PDA.
22 * A pointer to the right node PDA is saved in each CPU PDA.
23 */
24
25/*
26 * Node-specific data structure.
27 *
28 * One of these structures is allocated on each node of a NUMA system.
29 *
30 * This structure provides a convenient way of keeping together
31 * all per-node data structures.
32 */
/*
 * Physical location of one cpu within the system.
 * An array of these, indexed by cpuid, lives in struct nodepda_s below.
 */
struct phys_cpuid {
	short	nasid;		/* node id of the cpu's home node; -1/unused
				 * semantics not visible here — confirm at
				 * the initialization site */
	char	subnode;	/* subnode within that node */
	char	slice;		/* cpu slice within the subnode */
};
38
/*
 * Per-node private data area.  One of these is allocated on each node;
 * every cpu's PDA caches a pointer to its node's nodepda (p_nodepda in
 * struct pda_s), so node-local data is one dereference away.
 */
struct nodepda_s {
	void 		*pdinfo;	/* Platform-dependent per-node info */
	spinlock_t	bist_lock;	/* NOTE(review): BIST serialization;
					 * users are not visible in this
					 * header — confirm at call sites */

	/*
	 * The BTEs on this node are shared by the local cpus
	 */
	struct bteinfo_s	bte_if[BTES_PER_NODE];	/* Virtual Interface */
	struct timer_list	bte_recovery_timer;	/* timer used during BTE recovery */
	spinlock_t		bte_recovery_lock;	/* guards BTE recovery state */

	/*
	 * Array of pointers to the nodepdas for each node.
	 * Lets any cpu reach any node's PDA (see the NODEPDA() macro below).
	 */
	struct nodepda_s	*pernode_pdaindr[MAX_COMPACT_NODES];

	/*
	 * Array of physical cpu identifiers. Indexed by cpuid.
	 */
	struct phys_cpuid	phys_cpuid[NR_CPUS];
};
60
61typedef struct nodepda_s nodepda_t;
62
63/*
64 * Access Functions for node PDA.
65 * Since there is one nodepda for each node, we need a convenient mechanism
66 * to access these nodepdas without cluttering code with #ifdefs.
67 * The next set of definitions provides this.
68 * Routines are expected to use
69 *
70 * nodepda -> to access node PDA for the node on which code is running
71 * subnodepda -> to access subnode PDA for the subnode on which code is running
72 *
73 * NODEPDA(cnode) -> to access node PDA for cnodeid
74 * SUBNODEPDA(cnode,sn) -> to access subnode PDA for cnodeid/subnode
75 */
76
77#define nodepda pda->p_nodepda /* Ptr to this node's PDA */
78#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode])
79
80/*
81 * Check if given a compact node id the corresponding node has all the
82 * cpus disabled.
83 */
84#define is_headless_node(cnode) (nr_cpus_node(cnode) == 0)
85
86#endif /* _ASM_IA64_SN_NODEPDA_H */
diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h
new file mode 100644
index 000000000000..e940d3647c80
--- /dev/null
+++ b/include/asm-ia64/sn/pda.h
@@ -0,0 +1,80 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_PDA_H
9#define _ASM_IA64_SN_PDA_H
10
11#include <linux/cache.h>
12#include <asm/percpu.h>
13#include <asm/system.h>
14#include <asm/sn/bte.h>
15
16
17/*
18 * CPU-specific data structure.
19 *
20 * One of these structures is allocated for each cpu of a NUMA system.
21 *
22 * This structure provides a convenient way of keeping together
23 * all SN per-cpu data structures.
24 */
25
/*
 * SN per-cpu private data area; one instance per cpu, declared as a
 * per-cpu variable below (pda_percpu).
 */
typedef struct pda_s {

	/* Having a pointer in the beginning of PDA tends to increase
	 * the chance of having this pointer in cache. (Yes something
	 * else gets pushed out). Doing this reduces the number of memory
	 * access to all nodepda variables to be one
	 */
	struct nodepda_s *p_nodepda;		/* Pointer to Per node PDA */
	struct subnodepda_s *p_subnodepda;	/* Pointer to CPU subnode PDA */

	/*
	 * Support for SN LEDs
	 */
	volatile short	*led_address;	/* MMIO address used to drive this cpu's LEDs */
	u8		led_state;	/* last LED value written */
	u8		hb_state;	/* supports blinking heartbeat leds */
	unsigned int	hb_count;	/* heartbeat tick counter */

	unsigned int	idle_flag;	/* NOTE(review): presumably set while the
					 * cpu idles — confirm against users */

	volatile unsigned long *bedrock_rev_id;
	volatile unsigned long *pio_write_status_addr;	/* uncached address of this
							 * cpu's PIO write status MMR */
	unsigned long pio_write_status_val;
	volatile unsigned long *pio_shub_war_cam_addr;	/* CAM MMR address used by the
							 * SHUB PIO workaround */

	unsigned long	sn_soft_irr[4];			/* 4 x 64-bit soft interrupt state */
	unsigned long	sn_in_service_ivecs[4];		/* 4 x 64-bit in-service vector state */
	short		cnodeid_to_nasid_table[MAX_NUMNODES];	/* compact node id -> nasid */
	int		sn_lb_int_war_ticks;
	int		sn_last_irq;
	int		sn_first_irq;
} pda_t;
58
59
60#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
61
/*
 * PDA
 * Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
 * cpu but only a small amount of the page is actually used. We put the SNIA PDA
 * in the same page as the cpu_data area. Note that there is a check in the setup
 * code to verify that we don't overflow the page.
 *
 * Seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change cache layout. Should we align to 32, 64, 128
 * or 512 boundary. Each has merits. For now, pick 128 but should be revisited later.
 */
74DECLARE_PER_CPU(struct pda_s, pda_percpu);
75
76#define pda (&__ia64_per_cpu_var(pda_percpu))
77
78#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
79
80#endif /* _ASM_IA64_SN_PDA_H */
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
new file mode 100644
index 000000000000..f40fd1a5510d
--- /dev/null
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -0,0 +1,74 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8#ifndef _ASM_IA64_SN_RW_MMR_H
9#define _ASM_IA64_SN_RW_MMR_H
10
11
12/*
13 * This file contains macros used to access MMR registers via
14 * uncached physical addresses.
15 * pio_phys_read_mmr - read an MMR
16 * pio_phys_write_mmr - write an MMR
17 * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
18 * Second MMR will be skipped if address is NULL
19 *
20 * Addresses passed to these routines should be uncached physical addresses
21 * ie., 0x80000....
22 */
23
24
/*
 * Read an MMR through its uncached physical address.
 *
 * Interrupts (psr.i) and data translation (psr.dt) are masked around the
 * load so the ld8 is issued as a physical-mode reference.  The caller's
 * psr is saved in r2 and its low half restored afterwards via psr.l.
 *
 * NOTE(review): "extern inline" here relies on GNU89 inline semantics
 * (no out-of-line definition emitted from this header).
 */
extern inline long
pio_phys_read_mmr(volatile long *mmr)
{
	long val;
	asm volatile
	    ("mov r2=psr;;"		/* save current psr in r2 */
	     "rsm psr.i | psr.dt;;"	/* mask interrupts, disable data translation */
	     "srlz.i;;"			/* serialize the psr change */
	     "ld8.acq %0=[%1];;"	/* acquire-load the MMR (physical mode) */
	     "mov psr.l=r2;;"		/* restore the saved psr (low half) */
	     "srlz.i;;"			/* serialize the restore */
	     : "=r"(val)
	     : "r"(mmr)
	     : "r2");			/* r2 holds the saved psr */
	return val;
}
41
42
43
/*
 * Write an MMR through its uncached physical address.
 *
 * Mirrors pio_phys_read_mmr(): psr.i and psr.dt are masked around a
 * release-store issued in physical mode, with the caller's psr saved
 * in r2 and restored via psr.l.  The "memory" clobber keeps the
 * compiler from reordering memory accesses across the MMR write.
 */
extern inline void
pio_phys_write_mmr(volatile long *mmr, long val)
{
	asm volatile
	    ("mov r2=psr;;"		/* save current psr in r2 */
	     "rsm psr.i | psr.dt;;"	/* mask interrupts, disable data translation */
	     "srlz.i;;"			/* serialize the psr change */
	     "st8.rel [%0]=%1;;"	/* release-store val to the MMR */
	     "mov psr.l=r2;;"		/* restore the saved psr (low half) */
	     "srlz.i;;"			/* serialize the restore */
	     :: "r"(mmr), "r"(val)
	     : "r2", "memory");
}
57
/*
 * Atomically write one or two MMRs through uncached physical addresses.
 *
 * In addition to psr.i and psr.dt, psr.ic (interruption collection) is
 * cleared so both stores complete as one uninterruptible unit.  The
 * second store is predicated on p9, which is set only when mmr2 is
 * non-NULL (cmp.ne against r0) — so passing mmr2 == NULL skips it,
 * matching the header comment at the top of this file.
 */
extern inline void
pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
{
	asm volatile
	    ("mov r2=psr;;"			/* save current psr in r2 */
	     "rsm psr.i | psr.dt | psr.ic;;"	/* mask ints, disable translation and ic */
	     "cmp.ne p9,p0=%2,r0;"		/* p9 := (mmr2 != NULL) */
	     "srlz.i;;"				/* serialize the psr change */
	     "st8.rel [%0]=%1;"			/* release-store val1 to mmr1 */
	     "(p9) st8.rel [%2]=%3;;"		/* conditionally store val2 to mmr2 */
	     "mov psr.l=r2;;"			/* restore the saved psr (low half) */
	     "srlz.i;;"				/* serialize the restore */
	     :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
	     : "p9", "r2", "memory");		/* p9 predicate and r2 are clobbered */
}
73
74#endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/include/asm-ia64/sn/shub_mmr.h b/include/asm-ia64/sn/shub_mmr.h
new file mode 100644
index 000000000000..5c2fcf13d5ce
--- /dev/null
+++ b/include/asm-ia64/sn/shub_mmr.h
@@ -0,0 +1,441 @@
1/*
2 *
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
8 */
9
10#ifndef _ASM_IA64_SN_SHUB_MMR_H
11#define _ASM_IA64_SN_SHUB_MMR_H
12
13/* ==================================================================== */
14/* Register "SH_IPI_INT" */
15/* SHub Inter-Processor Interrupt Registers */
16/* ==================================================================== */
17#define SH1_IPI_INT 0x0000000110000380
18#define SH2_IPI_INT 0x0000000010000380
19
20/* SH_IPI_INT_TYPE */
21/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
22#define SH_IPI_INT_TYPE_SHFT 0
23#define SH_IPI_INT_TYPE_MASK 0x0000000000000007
24
25/* SH_IPI_INT_AGT */
26/* Description: Agent, must be 0 for SHub */
27#define SH_IPI_INT_AGT_SHFT 3
28#define SH_IPI_INT_AGT_MASK 0x0000000000000008
29
30/* SH_IPI_INT_PID */
31/* Description: Processor ID, same setting as on targeted McKinley */
32#define SH_IPI_INT_PID_SHFT 4
33#define SH_IPI_INT_PID_MASK 0x00000000000ffff0
34
35/* SH_IPI_INT_BASE */
36/* Description: Optional interrupt vector area, 2MB aligned */
37#define SH_IPI_INT_BASE_SHFT 21
38#define SH_IPI_INT_BASE_MASK 0x0003ffffffe00000
39
40/* SH_IPI_INT_IDX */
41/* Description: Targeted McKinley interrupt vector */
42#define SH_IPI_INT_IDX_SHFT 52
43#define SH_IPI_INT_IDX_MASK 0x0ff0000000000000
44
45/* SH_IPI_INT_SEND */
/* Description: Send Interrupt Message to PI. This generates a pulse. */
47#define SH_IPI_INT_SEND_SHFT 63
48#define SH_IPI_INT_SEND_MASK 0x8000000000000000
49
50/* ==================================================================== */
51/* Register "SH_EVENT_OCCURRED" */
52/* SHub Interrupt Event Occurred */
53/* ==================================================================== */
54#define SH1_EVENT_OCCURRED 0x0000000110010000
55#define SH1_EVENT_OCCURRED_ALIAS 0x0000000110010008
56#define SH2_EVENT_OCCURRED 0x0000000010010000
57#define SH2_EVENT_OCCURRED_ALIAS 0x0000000010010008
58
59/* ==================================================================== */
60/* Register "SH_PI_CAM_CONTROL" */
61/* CRB CAM MMR Access Control */
62/* ==================================================================== */
63#define SH1_PI_CAM_CONTROL 0x0000000120050300
64
65/* ==================================================================== */
66/* Register "SH_SHUB_ID" */
67/* SHub ID Number */
68/* ==================================================================== */
69#define SH1_SHUB_ID 0x0000000110060580
70#define SH1_SHUB_ID_REVISION_SHFT 28
71#define SH1_SHUB_ID_REVISION_MASK 0x00000000f0000000
72
73/* ==================================================================== */
74/* Register "SH_RTC" */
75/* Real-time Clock */
76/* ==================================================================== */
77#define SH1_RTC 0x00000001101c0000
78#define SH2_RTC 0x00000002101c0000
79#define SH_RTC_MASK 0x007fffffffffffff
80
81/* ==================================================================== */
82/* Register "SH_PIO_WRITE_STATUS_0|1" */
83/* PIO Write Status for CPU 0 & 1 */
84/* ==================================================================== */
85#define SH1_PIO_WRITE_STATUS_0 0x0000000120070200
86#define SH1_PIO_WRITE_STATUS_1 0x0000000120070280
87#define SH2_PIO_WRITE_STATUS_0 0x0000000020070200
88#define SH2_PIO_WRITE_STATUS_1 0x0000000020070280
89#define SH2_PIO_WRITE_STATUS_2 0x0000000020070300
90#define SH2_PIO_WRITE_STATUS_3 0x0000000020070380
91
92/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
93/* Description: Deadlock response detected */
94#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1
95#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK 0x0000000000000002
96
97/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
98/* Description: Count of currently pending PIO writes */
99#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56
100#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK 0x3f00000000000000
101
102/* ==================================================================== */
103/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */
104/* ==================================================================== */
105#define SH1_PIO_WRITE_STATUS_0_ALIAS 0x0000000120070208
106#define SH2_PIO_WRITE_STATUS_0_ALIAS 0x0000000020070208
107
108/* ==================================================================== */
109/* Register "SH_EVENT_OCCURRED" */
110/* SHub Interrupt Event Occurred */
111/* ==================================================================== */
112/* SH_EVENT_OCCURRED_UART_INT */
113/* Description: Pending Junk Bus UART Interrupt */
114#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
115#define SH_EVENT_OCCURRED_UART_INT_MASK 0x0000000000100000
116
117/* SH_EVENT_OCCURRED_IPI_INT */
118/* Description: Pending IPI Interrupt */
119#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
120#define SH_EVENT_OCCURRED_IPI_INT_MASK 0x0000000010000000
121
122/* SH_EVENT_OCCURRED_II_INT0 */
123/* Description: Pending II 0 Interrupt */
124#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
125#define SH_EVENT_OCCURRED_II_INT0_MASK 0x0000000020000000
126
127/* SH_EVENT_OCCURRED_II_INT1 */
128/* Description: Pending II 1 Interrupt */
129#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
130#define SH_EVENT_OCCURRED_II_INT1_MASK 0x0000000040000000
131
132/* ==================================================================== */
133/* LEDS */
134/* ==================================================================== */
135#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL
136#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL
137#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL
138#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL
139
140#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL
141#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL
142#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL
143#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL
144
145/* ==================================================================== */
146/* Register "SH1_PTC_0" */
/*                       Purge Translation Cache Message Configuration Information */
148/* ==================================================================== */
149#define SH1_PTC_0 0x00000001101a0000
150
151/* SH1_PTC_0_A */
152/* Description: Type */
153#define SH1_PTC_0_A_SHFT 0
154
155/* SH1_PTC_0_PS */
156/* Description: Page Size */
157#define SH1_PTC_0_PS_SHFT 2
158
159/* SH1_PTC_0_RID */
160/* Description: Region ID */
161#define SH1_PTC_0_RID_SHFT 8
162
163/* SH1_PTC_0_START */
164/* Description: Start */
165#define SH1_PTC_0_START_SHFT 63
166
167/* ==================================================================== */
168/* Register "SH1_PTC_1" */
/*                       Purge Translation Cache Message Configuration Information */
170/* ==================================================================== */
171#define SH1_PTC_1 0x00000001101a0080
172
173/* SH1_PTC_1_START */
174/* Description: PTC_1 Start */
175#define SH1_PTC_1_START_SHFT 63
176
177
178/* ==================================================================== */
179/* Register "SH2_PTC" */
/*                       Purge Translation Cache Message Configuration Information */
181/* ==================================================================== */
182#define SH2_PTC 0x0000000170000000
183
184/* SH2_PTC_A */
185/* Description: Type */
186#define SH2_PTC_A_SHFT 0
187
188/* SH2_PTC_PS */
189/* Description: Page Size */
190#define SH2_PTC_PS_SHFT 2
191
192/* SH2_PTC_RID */
193/* Description: Region ID */
194#define SH2_PTC_RID_SHFT 4
195
196/* SH2_PTC_START */
197/* Description: Start */
198#define SH2_PTC_START_SHFT 63
199
200/* SH2_PTC_ADDR_RID */
201/* Description: Region ID */
202#define SH2_PTC_ADDR_SHFT 4
203#define SH2_PTC_ADDR_MASK 0x1ffffffffffff000
204
205/* ==================================================================== */
206/* Register "SH_RTC1_INT_CONFIG" */
207/* SHub RTC 1 Interrupt Config Registers */
208/* ==================================================================== */
209
210#define SH1_RTC1_INT_CONFIG 0x0000000110001480
211#define SH2_RTC1_INT_CONFIG 0x0000000010001480
212#define SH_RTC1_INT_CONFIG_MASK 0x0ff3ffffffefffff
213#define SH_RTC1_INT_CONFIG_INIT 0x0000000000000000
214
215/* SH_RTC1_INT_CONFIG_TYPE */
216/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
217#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0
218#define SH_RTC1_INT_CONFIG_TYPE_MASK 0x0000000000000007
219
220/* SH_RTC1_INT_CONFIG_AGT */
221/* Description: Agent, must be 0 for SHub */
222#define SH_RTC1_INT_CONFIG_AGT_SHFT 3
223#define SH_RTC1_INT_CONFIG_AGT_MASK 0x0000000000000008
224
225/* SH_RTC1_INT_CONFIG_PID */
226/* Description: Processor ID, same setting as on targeted McKinley */
227#define SH_RTC1_INT_CONFIG_PID_SHFT 4
228#define SH_RTC1_INT_CONFIG_PID_MASK 0x00000000000ffff0
229
230/* SH_RTC1_INT_CONFIG_BASE */
231/* Description: Optional interrupt vector area, 2MB aligned */
232#define SH_RTC1_INT_CONFIG_BASE_SHFT 21
233#define SH_RTC1_INT_CONFIG_BASE_MASK 0x0003ffffffe00000
234
235/* SH_RTC1_INT_CONFIG_IDX */
236/* Description: Targeted McKinley interrupt vector */
237#define SH_RTC1_INT_CONFIG_IDX_SHFT 52
238#define SH_RTC1_INT_CONFIG_IDX_MASK 0x0ff0000000000000
239
240/* ==================================================================== */
241/* Register "SH_RTC1_INT_ENABLE" */
242/* SHub RTC 1 Interrupt Enable Registers */
243/* ==================================================================== */
244
245#define SH1_RTC1_INT_ENABLE 0x0000000110001500
246#define SH2_RTC1_INT_ENABLE 0x0000000010001500
247#define SH_RTC1_INT_ENABLE_MASK 0x0000000000000001
248#define SH_RTC1_INT_ENABLE_INIT 0x0000000000000000
249
250/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */
251/* Description: Enable RTC 1 Interrupt */
252#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0
253#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK 0x0000000000000001
254
255/* ==================================================================== */
256/* Register "SH_RTC2_INT_CONFIG" */
257/* SHub RTC 2 Interrupt Config Registers */
258/* ==================================================================== */
259
260#define SH1_RTC2_INT_CONFIG 0x0000000110001580
261#define SH2_RTC2_INT_CONFIG 0x0000000010001580
262#define SH_RTC2_INT_CONFIG_MASK 0x0ff3ffffffefffff
263#define SH_RTC2_INT_CONFIG_INIT 0x0000000000000000
264
265/* SH_RTC2_INT_CONFIG_TYPE */
266/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
267#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0
268#define SH_RTC2_INT_CONFIG_TYPE_MASK 0x0000000000000007
269
270/* SH_RTC2_INT_CONFIG_AGT */
271/* Description: Agent, must be 0 for SHub */
272#define SH_RTC2_INT_CONFIG_AGT_SHFT 3
273#define SH_RTC2_INT_CONFIG_AGT_MASK 0x0000000000000008
274
275/* SH_RTC2_INT_CONFIG_PID */
276/* Description: Processor ID, same setting as on targeted McKinley */
277#define SH_RTC2_INT_CONFIG_PID_SHFT 4
278#define SH_RTC2_INT_CONFIG_PID_MASK 0x00000000000ffff0
279
280/* SH_RTC2_INT_CONFIG_BASE */
281/* Description: Optional interrupt vector area, 2MB aligned */
282#define SH_RTC2_INT_CONFIG_BASE_SHFT 21
283#define SH_RTC2_INT_CONFIG_BASE_MASK 0x0003ffffffe00000
284
285/* SH_RTC2_INT_CONFIG_IDX */
286/* Description: Targeted McKinley interrupt vector */
287#define SH_RTC2_INT_CONFIG_IDX_SHFT 52
288#define SH_RTC2_INT_CONFIG_IDX_MASK 0x0ff0000000000000
289
290/* ==================================================================== */
291/* Register "SH_RTC2_INT_ENABLE" */
292/* SHub RTC 2 Interrupt Enable Registers */
293/* ==================================================================== */
294
295#define SH1_RTC2_INT_ENABLE 0x0000000110001600
296#define SH2_RTC2_INT_ENABLE 0x0000000010001600
297#define SH_RTC2_INT_ENABLE_MASK 0x0000000000000001
298#define SH_RTC2_INT_ENABLE_INIT 0x0000000000000000
299
300/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */
301/* Description: Enable RTC 2 Interrupt */
302#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0
303#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK 0x0000000000000001
304
305/* ==================================================================== */
306/* Register "SH_RTC3_INT_CONFIG" */
307/* SHub RTC 3 Interrupt Config Registers */
308/* ==================================================================== */
309
310#define SH1_RTC3_INT_CONFIG 0x0000000110001680
311#define SH2_RTC3_INT_CONFIG 0x0000000010001680
312#define SH_RTC3_INT_CONFIG_MASK 0x0ff3ffffffefffff
313#define SH_RTC3_INT_CONFIG_INIT 0x0000000000000000
314
315/* SH_RTC3_INT_CONFIG_TYPE */
316/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
317#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0
318#define SH_RTC3_INT_CONFIG_TYPE_MASK 0x0000000000000007
319
320/* SH_RTC3_INT_CONFIG_AGT */
321/* Description: Agent, must be 0 for SHub */
322#define SH_RTC3_INT_CONFIG_AGT_SHFT 3
323#define SH_RTC3_INT_CONFIG_AGT_MASK 0x0000000000000008
324
325/* SH_RTC3_INT_CONFIG_PID */
326/* Description: Processor ID, same setting as on targeted McKinley */
327#define SH_RTC3_INT_CONFIG_PID_SHFT 4
328#define SH_RTC3_INT_CONFIG_PID_MASK 0x00000000000ffff0
329
330/* SH_RTC3_INT_CONFIG_BASE */
331/* Description: Optional interrupt vector area, 2MB aligned */
332#define SH_RTC3_INT_CONFIG_BASE_SHFT 21
333#define SH_RTC3_INT_CONFIG_BASE_MASK 0x0003ffffffe00000
334
335/* SH_RTC3_INT_CONFIG_IDX */
336/* Description: Targeted McKinley interrupt vector */
337#define SH_RTC3_INT_CONFIG_IDX_SHFT 52
338#define SH_RTC3_INT_CONFIG_IDX_MASK 0x0ff0000000000000
339
340/* ==================================================================== */
341/* Register "SH_RTC3_INT_ENABLE" */
342/* SHub RTC 3 Interrupt Enable Registers */
343/* ==================================================================== */
344
345#define SH1_RTC3_INT_ENABLE 0x0000000110001700
346#define SH2_RTC3_INT_ENABLE 0x0000000010001700
347#define SH_RTC3_INT_ENABLE_MASK 0x0000000000000001
348#define SH_RTC3_INT_ENABLE_INIT 0x0000000000000000
349
350/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */
351/* Description: Enable RTC 3 Interrupt */
352#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0
353#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK 0x0000000000000001
354
355/* SH_EVENT_OCCURRED_RTC1_INT */
356/* Description: Pending RTC 1 Interrupt */
357#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24
358#define SH_EVENT_OCCURRED_RTC1_INT_MASK 0x0000000001000000
359
360/* SH_EVENT_OCCURRED_RTC2_INT */
361/* Description: Pending RTC 2 Interrupt */
362#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25
363#define SH_EVENT_OCCURRED_RTC2_INT_MASK 0x0000000002000000
364
365/* SH_EVENT_OCCURRED_RTC3_INT */
366/* Description: Pending RTC 3 Interrupt */
367#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
368#define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000
369
370/* ==================================================================== */
371/* Register "SH_INT_CMPB" */
372/* RTC Compare Value for Processor B */
373/* ==================================================================== */
374
375#define SH1_INT_CMPB 0x00000001101b0080
376#define SH2_INT_CMPB 0x00000000101b0080
377#define SH_INT_CMPB_MASK 0x007fffffffffffff
378#define SH_INT_CMPB_INIT 0x0000000000000000
379
380/* SH_INT_CMPB_REAL_TIME_CMPB */
381/* Description: Real Time Clock Compare */
382#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
383#define SH_INT_CMPB_REAL_TIME_CMPB_MASK 0x007fffffffffffff
384
385/* ==================================================================== */
386/* Register "SH_INT_CMPC" */
387/* RTC Compare Value for Processor C */
388/* ==================================================================== */
389
390#define SH1_INT_CMPC 0x00000001101b0100
391#define SH2_INT_CMPC 0x00000000101b0100
392#define SH_INT_CMPC_MASK 0x007fffffffffffff
393#define SH_INT_CMPC_INIT 0x0000000000000000
394
395/* SH_INT_CMPC_REAL_TIME_CMPC */
396/* Description: Real Time Clock Compare */
397#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
398#define SH_INT_CMPC_REAL_TIME_CMPC_MASK 0x007fffffffffffff
399
400/* ==================================================================== */
401/* Register "SH_INT_CMPD" */
402/* RTC Compare Value for Processor D */
403/* ==================================================================== */
404
405#define SH1_INT_CMPD 0x00000001101b0180
406#define SH2_INT_CMPD 0x00000000101b0180
407#define SH_INT_CMPD_MASK 0x007fffffffffffff
408#define SH_INT_CMPD_INIT 0x0000000000000000
409
410/* SH_INT_CMPD_REAL_TIME_CMPD */
411/* Description: Real Time Clock Compare */
412#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
413#define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff
414
415
/* ==================================================================== */
/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
/* and SHUB2 that it makes sense to define a generic name for the MMR.  */
/* It is acceptable to use (for example) SH_IPI_INT to reference the    */
/* IPI MMR. The value of SH_IPI_INT is determined at runtime based      */
/* on the type of the SHUB. Do not use these #defines in performance    */
/* critical code or loops - there is a small performance penalty.       */
/* ==================================================================== */
424#define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b)
425
426#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0)
427#define SH_IPI_INT shubmmr(SH, IPI_INT)
428#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED)
429#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS)
430#define SH_RTC shubmmr(SH, RTC)
431#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG)
432#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE)
433#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG)
434#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE)
435#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG)
436#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE)
437#define SH_INT_CMPB shubmmr(SH, INT_CMPB)
438#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
439#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
440
441#endif /* _ASM_IA64_SN_SHUB_MMR_H */
diff --git a/include/asm-ia64/sn/shubio.h b/include/asm-ia64/sn/shubio.h
new file mode 100644
index 000000000000..fbd880e6bb96
--- /dev/null
+++ b/include/asm-ia64/sn/shubio.h
@@ -0,0 +1,3476 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8
#ifndef _ASM_IA64_SN_SHUBIO_H
#define _ASM_IA64_SN_SHUBIO_H

#define HUB_WIDGET_ID_MAX	0xf		/* highest Crosstalk widget ID */
#define IIO_NUM_ITTES		7		/* translation table entries (ITTE1..ITTE7) */
#define HUB_NUM_BIG_WINDOW	(IIO_NUM_ITTES - 1)	/* big windows mappable via ITTEs */
15
/* II (SHub I/O interface) MMR address offsets. */
#define IIO_WID			0x00400000	/* Crosstalk Widget Identification */
						/* This register is also accessible from
						 * Crosstalk at address 0x0. */
#define IIO_WSTAT		0x00400008	/* Crosstalk Widget Status */
#define IIO_WCR			0x00400020	/* Crosstalk Widget Control Register */
#define IIO_ILAPR		0x00400100	/* IO Local Access Protection Register */
#define IIO_ILAPO		0x00400108	/* IO Local Access Protection Override */
#define IIO_IOWA		0x00400110	/* IO Outbound Widget Access */
#define IIO_IIWA		0x00400118	/* IO Inbound Widget Access */
#define IIO_IIDEM		0x00400120	/* IO Inbound Device Error Mask */
#define IIO_ILCSR		0x00400128	/* IO LLP Control and Status Register */
#define IIO_ILLR		0x00400130	/* IO LLP Log Register */
#define IIO_IIDSR		0x00400138	/* IO Interrupt Destination */

#define IIO_IGFX0		0x00400140	/* IO Graphics Node-Widget Map 0 */
#define IIO_IGFX1		0x00400148	/* IO Graphics Node-Widget Map 1 */

#define IIO_ISCR0		0x00400150	/* IO Scratch Register 0 */
#define IIO_ISCR1		0x00400158	/* IO Scratch Register 1 */

/* Big Window translation table entries (see ii_itte*_u below). */
#define IIO_ITTE1		0x00400160	/* IO Translation Table Entry 1 */
#define IIO_ITTE2		0x00400168	/* IO Translation Table Entry 2 */
#define IIO_ITTE3		0x00400170	/* IO Translation Table Entry 3 */
#define IIO_ITTE4		0x00400178	/* IO Translation Table Entry 4 */
#define IIO_ITTE5		0x00400180	/* IO Translation Table Entry 5 */
#define IIO_ITTE6		0x00400188	/* IO Translation Table Entry 6 */
#define IIO_ITTE7		0x00400190	/* IO Translation Table Entry 7 */

/* PIO Request Buffer entries, one per widget (0, 8..F). */
#define IIO_IPRB0		0x00400198	/* IO PRB Entry 0   */
#define IIO_IPRB8		0x004001A0	/* IO PRB Entry 8   */
#define IIO_IPRB9		0x004001A8	/* IO PRB Entry 9   */
#define IIO_IPRBA		0x004001B0	/* IO PRB Entry A   */
#define IIO_IPRBB		0x004001B8	/* IO PRB Entry B   */
#define IIO_IPRBC		0x004001C0	/* IO PRB Entry C   */
#define IIO_IPRBD		0x004001C8	/* IO PRB Entry D   */
#define IIO_IPRBE		0x004001D0	/* IO PRB Entry E   */
#define IIO_IPRBF		0x004001D8	/* IO PRB Entry F   */

#define IIO_IXCC		0x004001E0	/* IO Crosstalk Credit Count Timeout */
#define IIO_IMEM		0x004001E8	/* IO Miscellaneous Error Mask */
#define IIO_IXTT		0x004001F0	/* IO Crosstalk Timeout Threshold */
#define IIO_IECLR		0x004001F8	/* IO Error Clear Register */
#define IIO_IBCR		0x00400200	/* IO BTE Control Register */

#define IIO_IXSM		0x00400208	/* IO Crosstalk Spurious Message */
#define IIO_IXSS		0x00400210	/* IO Crosstalk Spurious Sideband */

#define IIO_ILCT		0x00400218	/* IO LLP Channel Test */

#define IIO_IIEPH1		0x00400220	/* IO Incoming Error Packet Header, Part 1 */
#define IIO_IIEPH2		0x00400228	/* IO Incoming Error Packet Header, Part 2 */


#define IIO_ISLAPR		0x00400230	/* IO SXB Local Access Protection Register */
#define IIO_ISLAPO		0x00400238	/* IO SXB Local Access Protection Override */

#define IIO_IWI			0x00400240	/* IO Wrapper Interrupt Register */
#define IIO_IWEL		0x00400248	/* IO Wrapper Error Log Register */
#define IIO_IWC			0x00400250	/* IO Wrapper Control Register */
#define IIO_IWS			0x00400258	/* IO Wrapper Status Register */
#define IIO_IWEIM		0x00400260	/* IO Wrapper Error Interrupt Masking Register */

#define IIO_IPCA		0x00400300	/* IO PRB Counter Adjust */

/* PIO Read address table entries: 8 entries x 2 parts (A/B). */
#define IIO_IPRTE0_A		0x00400308	/* IO PIO Read Address Table Entry 0, Part A */
#define IIO_IPRTE1_A		0x00400310	/* IO PIO Read Address Table Entry 1, Part A */
#define IIO_IPRTE2_A		0x00400318	/* IO PIO Read Address Table Entry 2, Part A */
#define IIO_IPRTE3_A		0x00400320	/* IO PIO Read Address Table Entry 3, Part A */
#define IIO_IPRTE4_A		0x00400328	/* IO PIO Read Address Table Entry 4, Part A */
#define IIO_IPRTE5_A		0x00400330	/* IO PIO Read Address Table Entry 5, Part A */
#define IIO_IPRTE6_A		0x00400338	/* IO PIO Read Address Table Entry 6, Part A */
#define IIO_IPRTE7_A		0x00400340	/* IO PIO Read Address Table Entry 7, Part A */

#define IIO_IPRTE0_B		0x00400348	/* IO PIO Read Address Table Entry 0, Part B */
#define IIO_IPRTE1_B		0x00400350	/* IO PIO Read Address Table Entry 1, Part B */
#define IIO_IPRTE2_B		0x00400358	/* IO PIO Read Address Table Entry 2, Part B */
#define IIO_IPRTE3_B		0x00400360	/* IO PIO Read Address Table Entry 3, Part B */
#define IIO_IPRTE4_B		0x00400368	/* IO PIO Read Address Table Entry 4, Part B */
#define IIO_IPRTE5_B		0x00400370	/* IO PIO Read Address Table Entry 5, Part B */
#define IIO_IPRTE6_B		0x00400378	/* IO PIO Read Address Table Entry 6, Part B */
#define IIO_IPRTE7_B		0x00400380	/* IO PIO Read Address Table Entry 7, Part B */

#define IIO_IPDR		0x00400388	/* IO PIO Deallocation Register */
#define IIO_ICDR		0x00400390	/* IO CRB Entry Deallocation Register */
#define IIO_IFDR		0x00400398	/* IO IOQ FIFO Depth Register */
#define IIO_IIAP		0x004003A0	/* IO IIQ Arbitration Parameters */
#define IIO_ICMR		0x004003A8	/* IO CRB Management Register */
#define IIO_ICCR		0x004003B0	/* IO CRB Control Register */
#define IIO_ICTO		0x004003B8	/* IO CRB Timeout */
#define IIO_ICTP		0x004003C0	/* IO CRB Timeout Prescalar */

/* CRB (Coherent Request Buffer) entries 0..E, each with parts A..E. */
#define IIO_ICRB0_A		0x00400400	/* IO CRB Entry 0_A */
#define IIO_ICRB0_B		0x00400408	/* IO CRB Entry 0_B */
#define IIO_ICRB0_C		0x00400410	/* IO CRB Entry 0_C */
#define IIO_ICRB0_D		0x00400418	/* IO CRB Entry 0_D */
#define IIO_ICRB0_E		0x00400420	/* IO CRB Entry 0_E */

#define IIO_ICRB1_A		0x00400430	/* IO CRB Entry 1_A */
#define IIO_ICRB1_B		0x00400438	/* IO CRB Entry 1_B */
#define IIO_ICRB1_C		0x00400440	/* IO CRB Entry 1_C */
#define IIO_ICRB1_D		0x00400448	/* IO CRB Entry 1_D */
#define IIO_ICRB1_E		0x00400450	/* IO CRB Entry 1_E */

#define IIO_ICRB2_A		0x00400460	/* IO CRB Entry 2_A */
#define IIO_ICRB2_B		0x00400468	/* IO CRB Entry 2_B */
#define IIO_ICRB2_C		0x00400470	/* IO CRB Entry 2_C */
#define IIO_ICRB2_D		0x00400478	/* IO CRB Entry 2_D */
#define IIO_ICRB2_E		0x00400480	/* IO CRB Entry 2_E */

#define IIO_ICRB3_A		0x00400490	/* IO CRB Entry 3_A */
#define IIO_ICRB3_B		0x00400498	/* IO CRB Entry 3_B */
#define IIO_ICRB3_C		0x004004a0	/* IO CRB Entry 3_C */
#define IIO_ICRB3_D		0x004004a8	/* IO CRB Entry 3_D */
#define IIO_ICRB3_E		0x004004b0	/* IO CRB Entry 3_E */

#define IIO_ICRB4_A		0x004004c0	/* IO CRB Entry 4_A */
#define IIO_ICRB4_B		0x004004c8	/* IO CRB Entry 4_B */
#define IIO_ICRB4_C		0x004004d0	/* IO CRB Entry 4_C */
#define IIO_ICRB4_D		0x004004d8	/* IO CRB Entry 4_D */
#define IIO_ICRB4_E		0x004004e0	/* IO CRB Entry 4_E */

#define IIO_ICRB5_A		0x004004f0	/* IO CRB Entry 5_A */
#define IIO_ICRB5_B		0x004004f8	/* IO CRB Entry 5_B */
#define IIO_ICRB5_C		0x00400500	/* IO CRB Entry 5_C */
#define IIO_ICRB5_D		0x00400508	/* IO CRB Entry 5_D */
#define IIO_ICRB5_E		0x00400510	/* IO CRB Entry 5_E */

#define IIO_ICRB6_A		0x00400520	/* IO CRB Entry 6_A */
#define IIO_ICRB6_B		0x00400528	/* IO CRB Entry 6_B */
#define IIO_ICRB6_C		0x00400530	/* IO CRB Entry 6_C */
#define IIO_ICRB6_D		0x00400538	/* IO CRB Entry 6_D */
#define IIO_ICRB6_E		0x00400540	/* IO CRB Entry 6_E */

#define IIO_ICRB7_A		0x00400550	/* IO CRB Entry 7_A */
#define IIO_ICRB7_B		0x00400558	/* IO CRB Entry 7_B */
#define IIO_ICRB7_C		0x00400560	/* IO CRB Entry 7_C */
#define IIO_ICRB7_D		0x00400568	/* IO CRB Entry 7_D */
#define IIO_ICRB7_E		0x00400570	/* IO CRB Entry 7_E */

#define IIO_ICRB8_A		0x00400580	/* IO CRB Entry 8_A */
#define IIO_ICRB8_B		0x00400588	/* IO CRB Entry 8_B */
#define IIO_ICRB8_C		0x00400590	/* IO CRB Entry 8_C */
#define IIO_ICRB8_D		0x00400598	/* IO CRB Entry 8_D */
#define IIO_ICRB8_E		0x004005a0	/* IO CRB Entry 8_E */

#define IIO_ICRB9_A		0x004005b0	/* IO CRB Entry 9_A */
#define IIO_ICRB9_B		0x004005b8	/* IO CRB Entry 9_B */
#define IIO_ICRB9_C		0x004005c0	/* IO CRB Entry 9_C */
#define IIO_ICRB9_D		0x004005c8	/* IO CRB Entry 9_D */
#define IIO_ICRB9_E		0x004005d0	/* IO CRB Entry 9_E */

#define IIO_ICRBA_A		0x004005e0	/* IO CRB Entry A_A */
#define IIO_ICRBA_B		0x004005e8	/* IO CRB Entry A_B */
#define IIO_ICRBA_C		0x004005f0	/* IO CRB Entry A_C */
#define IIO_ICRBA_D		0x004005f8	/* IO CRB Entry A_D */
#define IIO_ICRBA_E		0x00400600	/* IO CRB Entry A_E */

#define IIO_ICRBB_A		0x00400610	/* IO CRB Entry B_A */
#define IIO_ICRBB_B		0x00400618	/* IO CRB Entry B_B */
#define IIO_ICRBB_C		0x00400620	/* IO CRB Entry B_C */
#define IIO_ICRBB_D		0x00400628	/* IO CRB Entry B_D */
#define IIO_ICRBB_E		0x00400630	/* IO CRB Entry B_E */

#define IIO_ICRBC_A		0x00400640	/* IO CRB Entry C_A */
#define IIO_ICRBC_B		0x00400648	/* IO CRB Entry C_B */
#define IIO_ICRBC_C		0x00400650	/* IO CRB Entry C_C */
#define IIO_ICRBC_D		0x00400658	/* IO CRB Entry C_D */
#define IIO_ICRBC_E		0x00400660	/* IO CRB Entry C_E */

#define IIO_ICRBD_A		0x00400670	/* IO CRB Entry D_A */
#define IIO_ICRBD_B		0x00400678	/* IO CRB Entry D_B */
#define IIO_ICRBD_C		0x00400680	/* IO CRB Entry D_C */
#define IIO_ICRBD_D		0x00400688	/* IO CRB Entry D_D */
#define IIO_ICRBD_E		0x00400690	/* IO CRB Entry D_E */

#define IIO_ICRBE_A		0x004006a0	/* IO CRB Entry E_A */
#define IIO_ICRBE_B		0x004006a8	/* IO CRB Entry E_B */
#define IIO_ICRBE_C		0x004006b0	/* IO CRB Entry E_C */
#define IIO_ICRBE_D		0x004006b8	/* IO CRB Entry E_D */
#define IIO_ICRBE_E		0x004006c0	/* IO CRB Entry E_E */

#define IIO_ICSML		0x00400700	/* IO CRB Spurious Message Low */
#define IIO_ICSMM		0x00400708	/* IO CRB Spurious Message Middle */
#define IIO_ICSMH		0x00400710	/* IO CRB Spurious Message High */

#define IIO_IDBSS		0x00400718	/* IO Debug Submenu Select */

/* BTE (Block Transfer Engine) register sets, one per BTE (0 and 1). */
#define IIO_IBLS0		0x00410000	/* IO BTE Length Status 0 */
#define IIO_IBSA0		0x00410008	/* IO BTE Source Address 0 */
#define IIO_IBDA0		0x00410010	/* IO BTE Destination Address 0 */
#define IIO_IBCT0		0x00410018	/* IO BTE Control Terminate 0 */
#define IIO_IBNA0		0x00410020	/* IO BTE Notification Address 0 */
#define IIO_IBIA0		0x00410028	/* IO BTE Interrupt Address 0 */
#define IIO_IBLS1		0x00420000	/* IO BTE Length Status 1 */
#define IIO_IBSA1		0x00420008	/* IO BTE Source Address 1 */
#define IIO_IBDA1		0x00420010	/* IO BTE Destination Address 1 */
#define IIO_IBCT1		0x00420018	/* IO BTE Control Terminate 1 */
#define IIO_IBNA1		0x00420020	/* IO BTE Notification Address 1 */
#define IIO_IBIA1		0x00420028	/* IO BTE Interrupt Address 1 */

#define IIO_IPCR		0x00430000	/* IO Performance Control */
#define IIO_IPPR		0x00430008	/* IO Performance Profiling */

219
220/************************************************************************
221 * *
222 * Description: This register echoes some information from the *
223 * LB_REV_ID register. It is available through Crosstalk as described *
224 * above. The REV_NUM and MFG_NUM fields receive their values from *
225 * the REVISION and MANUFACTURER fields in the LB_REV_ID register. *
226 * The PART_NUM field's value is the Crosstalk device ID number that *
227 * Steve Miller assigned to the SHub chip. *
228 * *
229 ************************************************************************/
230
/* Bit layout of IIO_WID.  Per the description above, REV_NUM and
 * MFG_NUM echo the REVISION/MANUFACTURER fields of LB_REV_ID, and
 * PART_NUM is the Crosstalk device ID assigned to the SHub chip. */
typedef union ii_wid_u {
	uint64_t	ii_wid_regval;			/* whole 64-bit register */
	struct {
		uint64_t	w_rsvd_1	:	 1;	/* reserved */
		uint64_t	w_mfg_num	:	11;	/* manufacturer number (from LB_REV_ID) */
		uint64_t	w_part_num	:	16;	/* Crosstalk device ID of the SHub */
		uint64_t	w_rev_num	:	 4;	/* revision number (from LB_REV_ID) */
		uint64_t	w_rsvd		:	32;	/* reserved */
	} ii_wid_fld_s;
} ii_wid_u_t;
241
242
243/************************************************************************
244 * *
245 * The fields in this register are set upon detection of an error *
246 * and cleared by various mechanisms, as explained in the *
247 * description. *
248 * *
249 ************************************************************************/
250
/* Bit layout of IIO_WSTAT.  Per the description above, the fields are
 * set on error detection and cleared by various mechanisms. */
typedef union ii_wstat_u {
	uint64_t	ii_wstat_regval;		/* whole 64-bit register */
	struct {
		uint64_t	w_pending	:	 4;
		uint64_t	w_xt_crd_to	:	 1;
		uint64_t	w_xt_tail_to	:	 1;
		uint64_t	w_rsvd_3	:	 3;	/* reserved */
		uint64_t	w_tx_mx_rty	:	 1;
		uint64_t	w_rsvd_2	:	 6;	/* reserved */
		uint64_t	w_llp_tx_cnt	:	 8;
		uint64_t	w_rsvd_1	:	 8;	/* reserved */
		uint64_t	w_crazy		:	 1;
		uint64_t	w_rsvd		:	31;	/* reserved */
	} ii_wstat_fld_s;
} ii_wstat_u_t;
266
267
268/************************************************************************
269 * *
270 * Description: This is a read-write enabled register. It controls *
271 * various aspects of the Crosstalk flow control. *
272 * *
273 ************************************************************************/
274
/* Bit layout of IIO_WCR (read-write; controls Crosstalk flow control
 * per the description above). */
typedef union ii_wcr_u {
	uint64_t	ii_wcr_regval;			/* whole 64-bit register */
	struct {
		uint64_t	w_wid		:	 4;	/* widget ID */
		uint64_t	w_tag		:	 1;
		uint64_t	w_rsvd_1	:	 8;	/* reserved */
		uint64_t	w_dst_crd	:	 3;
		uint64_t	w_f_bad_pkt	:	 1;
		uint64_t	w_dir_con	:	 1;
		uint64_t	w_e_thresh	:	 5;
		uint64_t	w_rsvd		:	41;	/* reserved */
	} ii_wcr_fld_s;
} ii_wcr_u_t;
288
289
290/************************************************************************
291 * *
292 * Description: This register's value is a bit vector that guards *
293 * access to local registers within the II as well as to external *
294 * Crosstalk widgets. Each bit in the register corresponds to a *
295 * particular region in the system; a region consists of one, two or *
296 * four nodes (depending on the value of the REGION_SIZE field in the *
297 * LB_REV_ID register, which is documented in Section 8.3.1.1). The *
298 * protection provided by this register applies to PIO read *
299 * operations as well as PIO write operations. The II will perform a *
300 * PIO read or write request only if the bit for the requestor's *
301 * region is set; otherwise, the II will not perform the requested *
302 * operation and will return an error response. When a PIO read or *
303 * write request targets an external Crosstalk widget, then not only *
304 * must the bit for the requestor's region be set in the ILAPR, but *
305 * also the target widget's bit in the IOWA register must be set in *
306 * order for the II to perform the requested operation; otherwise, *
307 * the II will return an error response. Hence, the protection *
308 * provided by the IOWA register supplements the protection provided *
309 * by the ILAPR for requests that target external Crosstalk widgets. *
310 * This register itself can be accessed only by the nodes whose *
311 * region ID bits are enabled in this same register. It can also be *
312 * accessed through the IAlias space by the local processors. *
313 * The reset value of this register allows access by all nodes. *
314 * *
315 ************************************************************************/
316
/* Bit layout of IIO_ILAPR: one bit per system region; a PIO request is
 * honored only if the requestor's region bit is set (see description
 * above).  Reset value allows access by all nodes. */
typedef union ii_ilapr_u {
	uint64_t	ii_ilapr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_region	:	64;	/* region access-enable bit vector */
	} ii_ilapr_fld_s;
} ii_ilapr_u_t;
323
324
325
326
327/************************************************************************
328 * *
329 * Description: A write to this register of the 64-bit value *
330 * "SGIrules" in ASCII, will cause the bit in the ILAPR register *
331 * corresponding to the region of the requestor to be set (allow *
332 * access). A write of any other value will be ignored. Access *
333 * protection for this register is "SGIrules". *
334 * This register can also be accessed through the IAlias space. *
335 * However, this access will not change the access permissions in the *
336 * ILAPR. *
337 * *
338 ************************************************************************/
339
/* Bit layout of IIO_ILAPO: writing the 64-bit ASCII value "SGIrules"
 * sets the requestor's region bit in ILAPR; any other value is ignored
 * (see description above). */
typedef union ii_ilapo_u {
	uint64_t	ii_ilapo_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_io_ovrride	:	64;	/* override key ("SGIrules") */
	} ii_ilapo_fld_s;
} ii_ilapo_u_t;
346
347
348
349/************************************************************************
350 * *
351 * This register qualifies all the PIO and Graphics writes launched *
352 * from the SHUB towards a widget. *
353 * *
354 ************************************************************************/
355
/* Bit layout of IIO_IOWA: qualifies PIO/graphics writes launched from
 * the SHUB towards each widget (see description above). */
typedef union ii_iowa_u {
	uint64_t	ii_iowa_regval;			/* whole 64-bit register */
	struct {
		uint64_t	i_w0_oac	:	 1;	/* widget 0 outbound access */
		uint64_t	i_rsvd_1	:	 7;	/* reserved */
		uint64_t	i_wx_oac	:	 8;	/* widgets 8..F outbound access */
		uint64_t	i_rsvd		:	48;	/* reserved */
	} ii_iowa_fld_s;
} ii_iowa_u_t;
365
366
367/************************************************************************
368 * *
369 * Description: This register qualifies all the requests launched *
370 * from a widget towards the Shub. This register is intended to be *
371 * used by software in case of misbehaving widgets. *
372 * *
373 * *
374 ************************************************************************/
375
/* Bit layout of IIO_IIWA: qualifies requests launched from a widget
 * towards the Shub; intended for fencing off misbehaving widgets
 * (see description above). */
typedef union ii_iiwa_u {
	uint64_t	ii_iiwa_regval;			/* whole 64-bit register */
	struct {
		uint64_t	i_w0_iac	:	 1;	/* widget 0 inbound access */
		uint64_t	i_rsvd_1	:	 7;	/* reserved */
		uint64_t	i_wx_iac	:	 8;	/* widgets 8..F inbound access */
		uint64_t	i_rsvd		:	48;	/* reserved */
	} ii_iiwa_fld_s;
} ii_iiwa_u_t;
385
386
387
388/************************************************************************
389 * *
390 * Description: This register qualifies all the operations launched *
391 * from a widget towards the SHub. It allows individual access *
392 * control for up to 8 devices per widget. A device refers to *
393 * individual DMA master hosted by a widget. *
394 * The bits in each field of this register are cleared by the Shub *
395 * upon detection of an error which requires the device to be *
 * disabled. These fields assume that 0 <= TNUM <= 7 (i.e., Bridge-centric  *
397 * Crosstalk). Whether or not a device has access rights to this *
398 * Shub is determined by an AND of the device enable bit in the *
399 * appropriate field of this register and the corresponding bit in *
400 * the Wx_IAC field (for the widget which this device belongs to). *
401 * The bits in this field are set by writing a 1 to them. Incoming *
402 * replies from Crosstalk are not subject to this access control *
403 * mechanism. *
404 * *
405 ************************************************************************/
406
/* Bit layout of IIO_IIDEM: per-device access enables, 8 devices for
 * each of widgets 8..F.  Effective access is the AND of the device bit
 * here and the widget's bit in IIWA (see description above). */
typedef union ii_iidem_u {
	uint64_t	ii_iidem_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_w8_dxs	:	 8;	/* widget 8 device enables */
		uint64_t	i_w9_dxs	:	 8;	/* widget 9 device enables */
		uint64_t	i_wa_dxs	:	 8;	/* widget A device enables */
		uint64_t	i_wb_dxs	:	 8;	/* widget B device enables */
		uint64_t	i_wc_dxs	:	 8;	/* widget C device enables */
		uint64_t	i_wd_dxs	:	 8;	/* widget D device enables */
		uint64_t	i_we_dxs	:	 8;	/* widget E device enables */
		uint64_t	i_wf_dxs	:	 8;	/* widget F device enables */
	} ii_iidem_fld_s;
} ii_iidem_u_t;
420
421
422/************************************************************************
423 * *
424 * This register contains the various programmable fields necessary *
425 * for controlling and observing the LLP signals. *
426 * *
427 ************************************************************************/
428
/* Bit layout of IIO_ILCSR: programmable fields for controlling and
 * observing the LLP signals (see description above). */
typedef union ii_ilcsr_u {
	uint64_t	ii_ilcsr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_nullto	:	 6;
		uint64_t	i_rsvd_4	:	 2;	/* reserved */
		uint64_t	i_wrmrst	:	 1;
		uint64_t	i_rsvd_3	:	 1;	/* reserved */
		uint64_t	i_llp_en	:	 1;	/* LLP enable */
		uint64_t	i_bm8		:	 1;
		uint64_t	i_llp_stat	:	 2;	/* LLP status */
		uint64_t	i_remote_power	:	 1;
		uint64_t	i_rsvd_2	:	 1;	/* reserved */
		uint64_t	i_maxrtry	:	10;
		uint64_t	i_d_avail_sel	:	 2;
		uint64_t	i_rsvd_1	:	 4;	/* reserved */
		uint64_t	i_maxbrst	:	10;
		uint64_t	i_rsvd		:	22;	/* reserved */

	} ii_ilcsr_fld_s;
} ii_ilcsr_u_t;
449
450
451/************************************************************************
452 * *
 * This is simply a status register that monitors the LLP error             *
454 * rate. *
455 * *
456 ************************************************************************/
457
/* Bit layout of IIO_ILLR: status register that monitors the LLP error
 * rate (see description above). */
typedef union ii_illr_u {
	uint64_t	ii_illr_regval;			/* whole 64-bit register */
	struct {
		uint64_t	i_sn_cnt	:	16;	/* error counter */
		uint64_t	i_cb_cnt	:	16;	/* error counter */
		uint64_t	i_rsvd		:	32;	/* reserved */
	} ii_illr_fld_s;
} ii_illr_u_t;
466
467
468/************************************************************************
469 * *
470 * Description: All II-detected non-BTE error interrupts are *
471 * specified via this register. *
472 * NOTE: The PI interrupt register address is hardcoded in the II. If *
473 * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI *
474 * packet) to address offset 0x0180_0090 within the local register *
475 * address space of PI0 on the node specified by the NODE field. If *
476 * PI_ID==1, then the II sends the interrupt request to address *
477 * offset 0x01A0_0090 within the local register address space of PI1 *
478 * on the node specified by the NODE field. *
479 * *
480 ************************************************************************/
481
/* Bit layout of IIO_IIDSR: destination for II-detected non-BTE error
 * interrupts.  Per the description above, PI_ID selects PI0 vs PI1 on
 * the node given by NODE. */
typedef union ii_iidsr_u {
	uint64_t	ii_iidsr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_level		:	 8;	/* interrupt level */
		uint64_t	i_pi_id		:	 1;	/* 0 = PI0, 1 = PI1 */
		uint64_t	i_node		:	11;	/* destination node */
		uint64_t	i_rsvd_3	:	 4;	/* reserved */
		uint64_t	i_enable	:	 1;
		uint64_t	i_rsvd_2	:	 3;	/* reserved */
		uint64_t	i_int_sent	:	 2;
		uint64_t	i_rsvd_1	:	 2;	/* reserved */
		uint64_t	i_pi0_forward_int :	 1;
		uint64_t	i_pi1_forward_int :	 1;
		uint64_t	i_rsvd		:	30;	/* reserved */
	} ii_iidsr_fld_s;
} ii_iidsr_u_t;
498
499
500
501/************************************************************************
502 * *
503 * There are two instances of this register. This register is used *
504 * for matching up the incoming responses from the graphics widget to *
505 * the processor that initiated the graphics operation. The *
506 * write-responses are converted to graphics credits and returned to *
507 * the processor so that the processor interface can manage the flow *
508 * control. *
509 * *
510 ************************************************************************/
511
/* Bit layout of IIO_IGFX0: matches incoming graphics-widget responses
 * to the initiating processor (see description above). */
typedef union ii_igfx0_u {
	uint64_t	ii_igfx0_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_w_num		:	 4;	/* widget number */
		uint64_t	i_pi_id		:	 1;	/* processor interface select */
		uint64_t	i_n_num		:	12;	/* node number */
		uint64_t	i_p_num		:	 1;	/* processor number */
		uint64_t	i_rsvd		:	46;	/* reserved */
	} ii_igfx0_fld_s;
} ii_igfx0_u_t;
522
523
524/************************************************************************
525 * *
526 * There are two instances of this register. This register is used *
527 * for matching up the incoming responses from the graphics widget to *
528 * the processor that initiated the graphics operation. The *
529 * write-responses are converted to graphics credits and returned to *
530 * the processor so that the processor interface can manage the flow *
531 * control. *
532 * *
533 ************************************************************************/
534
/* Bit layout of IIO_IGFX1: second instance of the graphics response
 * matching register (same layout as ii_igfx0_u). */
typedef union ii_igfx1_u {
	uint64_t	ii_igfx1_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_w_num		:	 4;	/* widget number */
		uint64_t	i_pi_id		:	 1;	/* processor interface select */
		uint64_t	i_n_num		:	12;	/* node number */
		uint64_t	i_p_num		:	 1;	/* processor number */
		uint64_t	i_rsvd		:	46;	/* reserved */
	} ii_igfx1_fld_s;
} ii_igfx1_u_t;
545
546
547/************************************************************************
548 * *
549 * There are two instances of this registers. These registers are *
550 * used as scratch registers for software use. *
551 * *
552 ************************************************************************/
553
/* Bit layout of IIO_ISCR0: software scratch register. */
typedef union ii_iscr0_u {
	uint64_t	ii_iscr0_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_scratch	:	64;	/* free for software use */
	} ii_iscr0_fld_s;
} ii_iscr0_u_t;
560
561
562
563/************************************************************************
564 * *
565 * There are two instances of this registers. These registers are *
566 * used as scratch registers for software use. *
567 * *
568 ************************************************************************/
569
/* Bit layout of IIO_ISCR1: software scratch register. */
typedef union ii_iscr1_u {
	uint64_t	ii_iscr1_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_scratch	:	64;	/* free for software use */
	} ii_iscr1_fld_s;
} ii_iscr1_u_t;
576
577
578/************************************************************************
579 * *
580 * Description: There are seven instances of translation table entry *
581 * registers. Each register maps a Shub Big Window to a 48-bit *
582 * address on Crosstalk. *
583 * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
584 * number) are used to select one of these 7 registers. The Widget *
585 * number field is then derived from the W_NUM field for synthesizing *
586 * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
587 * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
588 * are padded with zeros. Although the maximum Crosstalk space *
589 * addressable by the SHub is thus the lower 16 GBytes per widget *
590 * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
591 * space can be accessed. *
592 * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
593 * Window number) are used to select one of these 7 registers. The *
594 * Widget number field is then derived from the W_NUM field for *
595 * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
596 * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
597 * field is used as Crosstalk[47], and remainder of the Crosstalk *
598 * address bits (Crosstalk[46:34]) are always zero. While the maximum *
599 * Crosstalk space addressable by the Shub is thus the lower *
600 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
601 * of this space can be accessed. *
602 * *
603 ************************************************************************/
604
/* Bit layout of IIO_ITTE1.  Per the description above: OFFSET supplies
 * the upper Crosstalk address bits, W_NUM the target widget, and IOSP
 * selects IO space (Crosstalk[47]) in N-mode. */
typedef union ii_itte1_u {
	uint64_t	ii_itte1_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_offset	:	 5;	/* Crosstalk address offset bits */
		uint64_t	i_rsvd_1	:	 3;	/* reserved */
		uint64_t	i_w_num		:	 4;	/* target widget number */
		uint64_t	i_iosp		:	 1;	/* IO space select (N-mode) */
		uint64_t	i_rsvd		:	51;	/* reserved */
	} ii_itte1_fld_s;
} ii_itte1_u_t;
615
616
617/************************************************************************
618 * *
619 * Description: There are seven instances of translation table entry *
620 * registers. Each register maps a Shub Big Window to a 48-bit *
621 * address on Crosstalk. *
622 * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
623 * number) are used to select one of these 7 registers. The Widget *
624 * number field is then derived from the W_NUM field for synthesizing *
625 * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
626 * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
627 * are padded with zeros. Although the maximum Crosstalk space *
628 * addressable by the Shub is thus the lower 16 GBytes per widget *
629 * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
630 * space can be accessed. *
631 * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
632 * Window number) are used to select one of these 7 registers. The *
633 * Widget number field is then derived from the W_NUM field for *
634 * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
635 * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
636 * field is used as Crosstalk[47], and remainder of the Crosstalk *
637 * address bits (Crosstalk[46:34]) are always zero. While the maximum *
638 * Crosstalk space addressable by the Shub is thus the lower *
639 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
640 * of this space can be accessed. *
641 * *
642 ************************************************************************/
643
/* Bit layout of IIO_ITTE2 (same layout and semantics as ii_itte1_u;
 * see the description above). */
typedef union ii_itte2_u {
	uint64_t	ii_itte2_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_offset	:	 5;	/* Crosstalk address offset bits */
		uint64_t	i_rsvd_1	:	 3;	/* reserved */
		uint64_t	i_w_num		:	 4;	/* target widget number */
		uint64_t	i_iosp		:	 1;	/* IO space select (N-mode) */
		uint64_t	i_rsvd		:	51;	/* reserved */
	} ii_itte2_fld_s;
} ii_itte2_u_t;
654
655
656/************************************************************************
657 * *
658 * Description: There are seven instances of translation table entry *
659 * registers. Each register maps a Shub Big Window to a 48-bit *
660 * address on Crosstalk. *
661 * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
662 * number) are used to select one of these 7 registers. The Widget *
663 * number field is then derived from the W_NUM field for synthesizing *
664 * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
665 * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
666 * are padded with zeros. Although the maximum Crosstalk space *
667 * addressable by the Shub is thus the lower 16 GBytes per widget *
668 * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
669 * space can be accessed. *
670 * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
671 * Window number) are used to select one of these 7 registers. The *
672 * Widget number field is then derived from the W_NUM field for *
673 * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
674 * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
675 * field is used as Crosstalk[47], and remainder of the Crosstalk *
676 * address bits (Crosstalk[46:34]) are always zero. While the maximum *
677 * Crosstalk space addressable by the SHub is thus the lower *
678 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
679 * of this space can be accessed. *
680 * *
681 ************************************************************************/
682
/* Bit layout of IIO_ITTE3 (same layout and semantics as ii_itte1_u;
 * see the description above). */
typedef union ii_itte3_u {
	uint64_t	ii_itte3_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_offset	:	 5;	/* Crosstalk address offset bits */
		uint64_t	i_rsvd_1	:	 3;	/* reserved */
		uint64_t	i_w_num		:	 4;	/* target widget number */
		uint64_t	i_iosp		:	 1;	/* IO space select (N-mode) */
		uint64_t	i_rsvd		:	51;	/* reserved */
	} ii_itte3_fld_s;
} ii_itte3_u_t;
693
694
695/************************************************************************
696 * *
697 * Description: There are seven instances of translation table entry *
698 * registers. Each register maps a SHub Big Window to a 48-bit *
699 * address on Crosstalk. *
700 * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
701 * number) are used to select one of these 7 registers. The Widget *
702 * number field is then derived from the W_NUM field for synthesizing *
703 * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
704 * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
705 * are padded with zeros. Although the maximum Crosstalk space *
706 * addressable by the SHub is thus the lower 16 GBytes per widget *
707 * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
708 * space can be accessed. *
709 * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
710 * Window number) are used to select one of these 7 registers. The *
711 * Widget number field is then derived from the W_NUM field for *
712 * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
713 * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
714 * field is used as Crosstalk[47], and remainder of the Crosstalk *
715 * address bits (Crosstalk[46:34]) are always zero. While the maximum *
716 * Crosstalk space addressable by the SHub is thus the lower *
717 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
718 * of this space can be accessed. *
719 * *
720 ************************************************************************/
721
/* Bit layout of IIO_ITTE4 (same layout and semantics as ii_itte1_u;
 * see the description above). */
typedef union ii_itte4_u {
	uint64_t	ii_itte4_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_offset	:	 5;	/* Crosstalk address offset bits */
		uint64_t	i_rsvd_1	:	 3;	/* reserved */
		uint64_t	i_w_num		:	 4;	/* target widget number */
		uint64_t	i_iosp		:	 1;	/* IO space select (N-mode) */
		uint64_t	i_rsvd		:	51;	/* reserved */
	} ii_itte4_fld_s;
} ii_itte4_u_t;
732
733
734/************************************************************************
735 * *
736 * Description: There are seven instances of translation table entry *
737 * registers. Each register maps a SHub Big Window to a 48-bit *
738 * address on Crosstalk. *
739 * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
740 * number) are used to select one of these 7 registers. The Widget *
741 * number field is then derived from the W_NUM field for synthesizing *
742 * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
743 * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
744 * are padded with zeros. Although the maximum Crosstalk space *
745 * addressable by the Shub is thus the lower 16 GBytes per widget *
746 * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
747 * space can be accessed. *
748 * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
749 * Window number) are used to select one of these 7 registers. The *
750 * Widget number field is then derived from the W_NUM field for *
751 * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
752 * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
753 * field is used as Crosstalk[47], and remainder of the Crosstalk *
754 * address bits (Crosstalk[46:34]) are always zero. While the maximum *
755 * Crosstalk space addressable by the Shub is thus the lower *
756 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
757 * of this space can be accessed. *
758 * *
759 ************************************************************************/
760
/* ITTE5: SHub Big Window translation table entry 5 (see description above). */
typedef union ii_itte5_u {
	uint64_t	ii_itte5_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_offset	: 5;	/* OFFSET: upper Crosstalk address bits (concatenated with SysAD, per description above) */
		uint64_t	i_rsvd_1	: 3;	/* reserved */
		uint64_t	i_w_num		: 4;	/* W_NUM: target Crosstalk widget number */
		uint64_t	i_iosp		: 1;	/* IOSP: used as Crosstalk[47] in N-mode */
		uint64_t	i_rsvd		: 51;	/* reserved */
	} ii_itte5_fld_s;	/* bit-field view; order encodes hardware layout -- do not reorder */
} ii_itte5_u_t;
771
772
773/************************************************************************
774 * *
775 * Description: There are seven instances of translation table entry *
776 * registers. Each register maps a Shub Big Window to a 48-bit *
777 * address on Crosstalk. *
778 * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
779 * number) are used to select one of these 7 registers. The Widget *
780 * number field is then derived from the W_NUM field for synthesizing *
781 * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
782 * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
783 * are padded with zeros. Although the maximum Crosstalk space *
784 * addressable by the Shub is thus the lower 16 GBytes per widget *
785 * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
786 * space can be accessed. *
787 * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
788 * Window number) are used to select one of these 7 registers. The *
789 * Widget number field is then derived from the W_NUM field for *
790 * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
791 * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
792 * field is used as Crosstalk[47], and remainder of the Crosstalk *
793 * address bits (Crosstalk[46:34]) are always zero. While the maximum *
794 * Crosstalk space addressable by the Shub is thus the lower *
795 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
796 * of this space can be accessed. *
797 * *
798 ************************************************************************/
799
/* ITTE6: SHub Big Window translation table entry 6 (see description above). */
typedef union ii_itte6_u {
	uint64_t	ii_itte6_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_offset	: 5;	/* OFFSET: upper Crosstalk address bits (concatenated with SysAD, per description above) */
		uint64_t	i_rsvd_1	: 3;	/* reserved */
		uint64_t	i_w_num		: 4;	/* W_NUM: target Crosstalk widget number */
		uint64_t	i_iosp		: 1;	/* IOSP: used as Crosstalk[47] in N-mode */
		uint64_t	i_rsvd		: 51;	/* reserved */
	} ii_itte6_fld_s;	/* bit-field view; order encodes hardware layout -- do not reorder */
} ii_itte6_u_t;
810
811
812/************************************************************************
813 * *
814 * Description: There are seven instances of translation table entry *
815 * registers. Each register maps a Shub Big Window to a 48-bit *
816 * address on Crosstalk. *
817 * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
818 * number) are used to select one of these 7 registers. The Widget *
819 * number field is then derived from the W_NUM field for synthesizing *
820 * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
821 * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
822 * are padded with zeros. Although the maximum Crosstalk space *
823 * addressable by the Shub is thus the lower 16 GBytes per widget *
824 * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
825 * space can be accessed. *
826 * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
827 * Window number) are used to select one of these 7 registers. The *
828 * Widget number field is then derived from the W_NUM field for *
829 * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
830 * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
831 * field is used as Crosstalk[47], and remainder of the Crosstalk *
832 * address bits (Crosstalk[46:34]) are always zero. While the maximum *
833 * Crosstalk space addressable by the SHub is thus the lower *
834 * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
835 * of this space can be accessed. *
836 * *
837 ************************************************************************/
838
/* ITTE7: SHub Big Window translation table entry 7 (see description above). */
typedef union ii_itte7_u {
	uint64_t	ii_itte7_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_offset	: 5;	/* OFFSET: upper Crosstalk address bits (concatenated with SysAD, per description above) */
		uint64_t	i_rsvd_1	: 3;	/* reserved */
		uint64_t	i_w_num		: 4;	/* W_NUM: target Crosstalk widget number */
		uint64_t	i_iosp		: 1;	/* IOSP: used as Crosstalk[47] in N-mode */
		uint64_t	i_rsvd		: 51;	/* reserved */
	} ii_itte7_fld_s;	/* bit-field view; order encodes hardware layout -- do not reorder */
} ii_itte7_u_t;
849
850
851/************************************************************************
852 * *
853 * Description: There are 9 instances of this register, one per *
854 * actual widget in this implementation of SHub and Crossbow. *
855 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
856 * refers to Crossbow's internal space. *
857 * This register contains the state elements per widget that are *
858 * necessary to manage the PIO flow control on Crosstalk and on the *
859 * Router Network. See the PIO Flow Control chapter for a complete *
860 * description of this register *
861 * The SPUR_WR bit requires some explanation. When this register is *
862 * written, the new value of the C field is captured in an internal *
863 * register so the hardware can remember what the programmer wrote *
864 * into the credit counter. The SPUR_WR bit sets whenever the C field *
865 * increments above this stored value, which indicates that there *
866 * have been more responses received than requests sent. The SPUR_WR *
867 * bit cannot be cleared until a value is written to the IPRBx *
868 * register; the write will correct the C field and capture its new *
869 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
870 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
871 * . *
872 * *
873 ************************************************************************/
874
/* IPRB0: PIO Request Buffer flow-control register for widget 0 (see description above). */
typedef union ii_iprb0_u {
	uint64_t	ii_iprb0_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_0] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprb0_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprb0_u_t;
894
895
896/************************************************************************
897 * *
898 * Description: There are 9 instances of this register, one per *
899 * actual widget in this implementation of SHub and Crossbow. *
900 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
901 * refers to Crossbow's internal space. *
902 * This register contains the state elements per widget that are *
903 * necessary to manage the PIO flow control on Crosstalk and on the *
904 * Router Network. See the PIO Flow Control chapter for a complete *
905 * description of this register *
906 * The SPUR_WR bit requires some explanation. When this register is *
907 * written, the new value of the C field is captured in an internal *
908 * register so the hardware can remember what the programmer wrote *
909 * into the credit counter. The SPUR_WR bit sets whenever the C field *
910 * increments above this stored value, which indicates that there *
911 * have been more responses received than requests sent. The SPUR_WR *
912 * bit cannot be cleared until a value is written to the IPRBx *
913 * register; the write will correct the C field and capture its new *
914 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
915 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
916 * . *
917 * *
918 ************************************************************************/
919
/* IPRB8: PIO Request Buffer flow-control register for widget 8 (see description above). */
typedef union ii_iprb8_u {
	uint64_t	ii_iprb8_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_8] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprb8_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprb8_u_t;
939
940
941/************************************************************************
942 * *
943 * Description: There are 9 instances of this register, one per *
944 * actual widget in this implementation of SHub and Crossbow. *
945 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
946 * refers to Crossbow's internal space. *
947 * This register contains the state elements per widget that are *
948 * necessary to manage the PIO flow control on Crosstalk and on the *
949 * Router Network. See the PIO Flow Control chapter for a complete *
950 * description of this register *
951 * The SPUR_WR bit requires some explanation. When this register is *
952 * written, the new value of the C field is captured in an internal *
953 * register so the hardware can remember what the programmer wrote *
954 * into the credit counter. The SPUR_WR bit sets whenever the C field *
955 * increments above this stored value, which indicates that there *
956 * have been more responses received than requests sent. The SPUR_WR *
957 * bit cannot be cleared until a value is written to the IPRBx *
958 * register; the write will correct the C field and capture its new *
959 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
960 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
961 * . *
962 * *
963 ************************************************************************/
964
/* IPRB9: PIO Request Buffer flow-control register for widget 9 (see description above). */
typedef union ii_iprb9_u {
	uint64_t	ii_iprb9_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_9] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprb9_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprb9_u_t;
984
985
986/************************************************************************
987 * *
988 * Description: There are 9 instances of this register, one per *
989 * actual widget in this implementation of SHub and Crossbow. *
990 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
991 * refers to Crossbow's internal space. *
992 * This register contains the state elements per widget that are *
993 * necessary to manage the PIO flow control on Crosstalk and on the *
994 * Router Network. See the PIO Flow Control chapter for a complete *
995 * description of this register *
996 * The SPUR_WR bit requires some explanation. When this register is *
997 * written, the new value of the C field is captured in an internal *
998 * register so the hardware can remember what the programmer wrote *
999 * into the credit counter. The SPUR_WR bit sets whenever the C field *
1000 * increments above this stored value, which indicates that there *
1001 * have been more responses received than requests sent. The SPUR_WR *
1002 * bit cannot be cleared until a value is written to the IPRBx *
1003 * register; the write will correct the C field and capture its new *
1004 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1005 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1006 * *
1007 * *
1008 ************************************************************************/
1009
/* IPRBA: PIO Request Buffer flow-control register for widget 0xA (see description above). */
typedef union ii_iprba_u {
	uint64_t	ii_iprba_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_a] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprba_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprba_u_t;
1029
1030
1031/************************************************************************
1032 * *
1033 * Description: There are 9 instances of this register, one per *
1034 * actual widget in this implementation of SHub and Crossbow. *
1035 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
1036 * refers to Crossbow's internal space. *
1037 * This register contains the state elements per widget that are *
1038 * necessary to manage the PIO flow control on Crosstalk and on the *
1039 * Router Network. See the PIO Flow Control chapter for a complete *
1040 * description of this register *
1041 * The SPUR_WR bit requires some explanation. When this register is *
1042 * written, the new value of the C field is captured in an internal *
1043 * register so the hardware can remember what the programmer wrote *
1044 * into the credit counter. The SPUR_WR bit sets whenever the C field *
1045 * increments above this stored value, which indicates that there *
1046 * have been more responses received than requests sent. The SPUR_WR *
1047 * bit cannot be cleared until a value is written to the IPRBx *
1048 * register; the write will correct the C field and capture its new *
1049 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1050 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1051 * . *
1052 * *
1053 ************************************************************************/
1054
/* IPRBB: PIO Request Buffer flow-control register for widget 0xB (see description above). */
typedef union ii_iprbb_u {
	uint64_t	ii_iprbb_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_b] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprbb_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprbb_u_t;
1074
1075
1076/************************************************************************
1077 * *
1078 * Description: There are 9 instances of this register, one per *
1079 * actual widget in this implementation of SHub and Crossbow. *
1080 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
1081 * refers to Crossbow's internal space. *
1082 * This register contains the state elements per widget that are *
1083 * necessary to manage the PIO flow control on Crosstalk and on the *
1084 * Router Network. See the PIO Flow Control chapter for a complete *
1085 * description of this register *
1086 * The SPUR_WR bit requires some explanation. When this register is *
1087 * written, the new value of the C field is captured in an internal *
1088 * register so the hardware can remember what the programmer wrote *
1089 * into the credit counter. The SPUR_WR bit sets whenever the C field *
1090 * increments above this stored value, which indicates that there *
1091 * have been more responses received than requests sent. The SPUR_WR *
1092 * bit cannot be cleared until a value is written to the IPRBx *
1093 * register; the write will correct the C field and capture its new *
1094 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1095 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1096 * . *
1097 * *
1098 ************************************************************************/
1099
/* IPRBC: PIO Request Buffer flow-control register for widget 0xC (see description above). */
typedef union ii_iprbc_u {
	uint64_t	ii_iprbc_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_c] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprbc_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprbc_u_t;
1119
1120
1121/************************************************************************
1122 * *
1123 * Description: There are 9 instances of this register, one per *
1124 * actual widget in this implementation of SHub and Crossbow. *
1125 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
1126 * refers to Crossbow's internal space. *
1127 * This register contains the state elements per widget that are *
1128 * necessary to manage the PIO flow control on Crosstalk and on the *
1129 * Router Network. See the PIO Flow Control chapter for a complete *
1130 * description of this register *
1131 * The SPUR_WR bit requires some explanation. When this register is *
1132 * written, the new value of the C field is captured in an internal *
1133 * register so the hardware can remember what the programmer wrote *
1134 * into the credit counter. The SPUR_WR bit sets whenever the C field *
1135 * increments above this stored value, which indicates that there *
1136 * have been more responses received than requests sent. The SPUR_WR *
1137 * bit cannot be cleared until a value is written to the IPRBx *
1138 * register; the write will correct the C field and capture its new *
1139 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1140 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1141 * . *
1142 * *
1143 ************************************************************************/
1144
/* IPRBD: PIO Request Buffer flow-control register for widget 0xD (see description above). */
typedef union ii_iprbd_u {
	uint64_t	ii_iprbd_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_d] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprbd_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprbd_u_t;
1164
1165
1166/************************************************************************
1167 * *
1168 * Description: There are 9 instances of this register, one per *
1169 * actual widget in this implementation of SHub and Crossbow. *
1170 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
1171 * refers to Crossbow's internal space. *
1172 * This register contains the state elements per widget that are *
1173 * necessary to manage the PIO flow control on Crosstalk and on the *
1174 * Router Network. See the PIO Flow Control chapter for a complete *
1175 * description of this register *
1176 * The SPUR_WR bit requires some explanation. When this register is *
1177 * written, the new value of the C field is captured in an internal *
1178 * register so the hardware can remember what the programmer wrote *
1179 * into the credit counter. The SPUR_WR bit sets whenever the C field *
1180 * increments above this stored value, which indicates that there *
1181 * have been more responses received than requests sent. The SPUR_WR *
1182 * bit cannot be cleared until a value is written to the IPRBx *
1183 * register; the write will correct the C field and capture its new *
1184 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1185 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1186 * . *
1187 * *
1188 ************************************************************************/
1189
/* IPRBE: PIO Request Buffer flow-control register for widget 0xE (see description above). */
typedef union ii_iprbe_u {
	uint64_t	ii_iprbe_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_e] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprbe_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_iprbe_u_t;
1209
1210
1211/************************************************************************
1212 * *
1213 * Description: There are 9 instances of this register, one per *
1214 * actual widget in this implementation of Shub and Crossbow. *
1215 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
1216 * refers to Crossbow's internal space. *
1217 * This register contains the state elements per widget that are *
1218 * necessary to manage the PIO flow control on Crosstalk and on the *
1219 * Router Network. See the PIO Flow Control chapter for a complete *
1220 * description of this register *
1221 * The SPUR_WR bit requires some explanation. When this register is *
1222 * written, the new value of the C field is captured in an internal *
1223 * register so the hardware can remember what the programmer wrote *
1224 * into the credit counter. The SPUR_WR bit sets whenever the C field *
1225 * increments above this stored value, which indicates that there *
1226 * have been more responses received than requests sent. The SPUR_WR *
1227 * bit cannot be cleared until a value is written to the IPRBx *
1228 * register; the write will correct the C field and capture its new *
1229 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1230 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1231 * . *
1232 * *
1233 ************************************************************************/
1234
/*
 * IPRBF: PIO Request Buffer flow-control register for widget 0xF
 * (see description above).
 *
 * Bug fix: the bit-field member of this union was misnamed
 * ii_iprbe_fld_s (copy-paste from the IPRBE definition).  The
 * correctly named ii_iprbf_fld_s member is provided; the misnamed
 * member is retained as a layout-identical union alias so any
 * existing callers using the old name still compile.
 */
typedef union ii_iprbf_u {
	uint64_t	ii_iprbf_regval;	/* whole 64-bit register */
	struct {
		/* i_na/i_nb/i_m/i_f/i_of_cnt/i_rd_to/i_mult_err: see PIO Flow Control chapter -- semantics not detailed here */
		uint64_t	i_c		: 8;	/* C: credit counter (see SPUR_WR note above) */
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;	/* reserved */
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;	/* reserved */
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;	/* error flag, cleared via IECLR[E_PRB_f] */
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;	/* SPUR_WR: C rose above last value written to it */
		uint64_t	i_spur_rd	: 1;	/* SPUR_RD: spurious read response seen (see IXSM/IXSS) */
		uint64_t	i_rsvd		: 11;	/* reserved */
		uint64_t	i_mult_err	: 1;
	} ii_iprbf_fld_s;	/* corrected field-struct name */
	struct {
		uint64_t	i_c		: 8;
		uint64_t	i_na		: 14;
		uint64_t	i_rsvd_2	: 2;
		uint64_t	i_nb		: 14;
		uint64_t	i_rsvd_1	: 2;
		uint64_t	i_m		: 2;
		uint64_t	i_f		: 1;
		uint64_t	i_of_cnt	: 5;
		uint64_t	i_error		: 1;
		uint64_t	i_rd_to		: 1;
		uint64_t	i_spur_wr	: 1;
		uint64_t	i_spur_rd	: 1;
		uint64_t	i_rsvd		: 11;
		uint64_t	i_mult_err	: 1;
	} ii_iprbe_fld_s;	/* legacy misnamed alias -- identical layout, kept for source compat */
} ii_iprbf_u_t;
1254
1255
1256/************************************************************************
1257 * *
1258 * This register specifies the timeout value to use for monitoring *
1259 * Crosstalk credits which are used outbound to Crosstalk. An *
1260 * internal counter called the Crosstalk Credit Timeout Counter *
1261 * increments every 128 II clocks. The counter starts counting *
1262 * anytime the credit count drops below a threshold, and resets to *
1263 * zero (stops counting) anytime the credit count is at or above the *
1264 * threshold. The threshold is 1 credit in direct connect mode and 2 *
1265 * in Crossbow connect mode. When the internal Crosstalk Credit *
1266 * Timeout Counter reaches the value programmed in this register, a *
1267 * Crosstalk Credit Timeout has occurred. The internal counter is not *
1268 * readable from software, and stops counting at its maximum value, *
1269 * so it cannot cause more than one interrupt. *
1270 * *
1271 ************************************************************************/
1272
/* IXCC: Crosstalk Credit Timeout register (see description above). */
typedef union ii_ixcc_u {
	uint64_t	ii_ixcc_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_time_out	: 26;	/* timeout threshold for the internal Crosstalk Credit Timeout Counter */
		uint64_t	i_rsvd		: 38;	/* reserved */
	} ii_ixcc_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_ixcc_u_t;
1280
1281
1282/************************************************************************
1283 * *
1284 * Description: This register qualifies all the PIO and DMA *
1285 * operations launched from widget 0 towards the SHub. In *
1286 * addition, it also qualifies accesses by the BTE streams. *
1287 * The bits in each field of this register are cleared by the SHub *
1288 * upon detection of an error which requires widget 0 or the BTE *
1289 * streams to be terminated. Whether or not widget x has access *
1290 * rights to this SHub is determined by an AND of the device *
1291 * enable bit in the appropriate field of this register and bit 0 in *
1292 * the Wx_IAC field. The bits in this field are set by writing a 1 to *
1293 * them. Incoming replies from Crosstalk are not subject to this *
1294 * access control mechanism. *
1295 * *
1296 ************************************************************************/
1297
/*
 * IMEM: qualifies PIO/DMA operations from widget 0 and the BTE streams
 * (see description above).  Bits are cleared by the SHub on errors that
 * require widget 0 / BTE termination, and set by writing a 1 to them.
 */
typedef union ii_imem_u {
	uint64_t	ii_imem_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_w0_esd	: 1;	/* widget-0 enable ("esd") -- presumed from name; verify against SHub spec */
		uint64_t	i_rsvd_3	: 3;	/* reserved */
		uint64_t	i_b0_esd	: 1;	/* BTE-0 enable -- presumed from name; verify */
		uint64_t	i_rsvd_2	: 3;	/* reserved */
		uint64_t	i_b1_esd	: 1;	/* BTE-1 enable -- presumed from name; verify */
		uint64_t	i_rsvd_1	: 3;	/* reserved */
		uint64_t	i_clr_precise	: 1;	/* NOTE(review): semantics not given in the description above -- confirm */
		uint64_t	i_rsvd		: 51;	/* reserved */
	} ii_imem_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_imem_u_t;
1311
1312
1313
1314/************************************************************************
1315 * *
1316 * Description: This register specifies the timeout value to use for *
1317 * monitoring Crosstalk tail flits coming into the Shub in the *
1318 * TAIL_TO field. An internal counter associated with this register *
1319 * is incremented every 128 II internal clocks (7 bits). The counter *
1320 * starts counting anytime a header micropacket is received and stops *
1321 * counting (and resets to zero) any time a micropacket with a Tail *
1322 * bit is received. Once the counter reaches the threshold value *
1323 * programmed in this register, it generates an interrupt to the *
1324 * processor that is programmed into the IIDSR. The counter saturates *
1325 * (does not roll over) at its maximum value, so it cannot cause *
1326 * another interrupt until after it is cleared. *
1327 * The register also contains the Read Response Timeout values. The *
1328 * Prescalar is 23 bits, and counts II clocks. An internal counter *
1329 * increments on every II clock and when it reaches the value in the *
1330 * Prescalar field, all IPRTE registers with their valid bits set *
1331 * have their Read Response timers bumped. Whenever any of them match *
1332 * the value in the RRSP_TO field, a Read Response Timeout has *
1333 * occurred, and error handling occurs as described in the Error *
1334 * Handling section of this document. *
1335 * *
1336 ************************************************************************/
1337
/* IXTT: Crosstalk tail-flit and Read Response timeout register (see description above). */
typedef union ii_ixtt_u {
	uint64_t	ii_ixtt_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_tail_to	: 26;	/* TAIL_TO: tail-flit timeout threshold (counter ticks every 128 II clocks) */
		uint64_t	i_rsvd_1	: 6;	/* reserved */
		uint64_t	i_rrsp_ps	: 23;	/* Read Response prescalar, counts II clocks */
		uint64_t	i_rrsp_to	: 5;	/* RRSP_TO: Read Response timeout threshold */
		uint64_t	i_rsvd		: 4;	/* reserved */
	} ii_ixtt_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_ixtt_u_t;
1348
1349
1350/************************************************************************
1351 * *
1352 * Writing a 1 to the fields of this register clears the appropriate *
1353 * error bits in other areas of SHub. Note that when the *
1354 * E_PRB_x bits are used to clear error bits in PRB registers, *
1355 * SPUR_RD and SPUR_WR may persist, because they require additional *
1356 * action to clear them. See the IPRBx and IXSS Register *
1357 * specifications. *
1358 * *
1359 ************************************************************************/
1360
/*
 * IECLR: write-1-to-clear error register (see description above).
 * Each bit clears the corresponding error condition elsewhere in the
 * SHub; SPUR_RD/SPUR_WR in the PRBs may persist and need extra action
 * (see the IPRBx and IXSS descriptions).
 */
typedef union ii_ieclr_u {
	uint64_t	ii_ieclr_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_e_prb_0	: 1;	/* clear error bits in PRB 0 */
		uint64_t	i_rsvd		: 7;	/* reserved */
		uint64_t	i_e_prb_8	: 1;	/* clear error bits in PRB 8 */
		uint64_t	i_e_prb_9	: 1;	/* clear error bits in PRB 9 */
		uint64_t	i_e_prb_a	: 1;	/* clear error bits in PRB A */
		uint64_t	i_e_prb_b	: 1;	/* clear error bits in PRB B */
		uint64_t	i_e_prb_c	: 1;	/* clear error bits in PRB C */
		uint64_t	i_e_prb_d	: 1;	/* clear error bits in PRB D */
		uint64_t	i_e_prb_e	: 1;	/* clear error bits in PRB E */
		uint64_t	i_e_prb_f	: 1;	/* clear error bits in PRB F */
		uint64_t	i_e_crazy	: 1;
		uint64_t	i_e_bte_0	: 1;	/* clear BTE 0 error */
		uint64_t	i_e_bte_1	: 1;	/* clear BTE 1 error */
		uint64_t	i_reserved_1	: 10;	/* reserved */
		uint64_t	i_spur_rd_hdr	: 1;	/* clear captured spurious read header (IXSS[VALID]) -- see IXSM description */
		uint64_t	i_cam_intr_to	: 1;
		uint64_t	i_cam_overflow	: 1;
		uint64_t	i_cam_read_miss	: 1;
		uint64_t	i_ioq_rep_underflow	: 1;
		uint64_t	i_ioq_req_underflow	: 1;
		uint64_t	i_ioq_rep_overflow	: 1;
		uint64_t	i_ioq_req_overflow	: 1;
		uint64_t	i_iiq_rep_overflow	: 1;
		uint64_t	i_iiq_req_overflow	: 1;
		uint64_t	i_ii_xn_rep_cred_overflow	: 1;
		uint64_t	i_ii_xn_req_cred_overflow	: 1;
		uint64_t	i_ii_xn_invalid_cmd	: 1;
		uint64_t	i_xn_ii_invalid_cmd	: 1;
		uint64_t	i_reserved_2	: 21;	/* reserved */
	} ii_ieclr_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_ieclr_u_t;
1395
1396
1397/************************************************************************
1398 * *
1399 * This register controls both BTEs. SOFT_RESET is intended for *
1400 * recovery after an error. COUNT controls the total number of CRBs *
1401 * that both BTEs (combined) can use, which affects total BTE *
1402 * bandwidth. *
1403 * *
1404 ************************************************************************/
1405
/* IBCR: BTE control register -- controls both BTEs (see description above). */
typedef union ii_ibcr_u {
	uint64_t	ii_ibcr_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_count		: 4;	/* COUNT: total CRBs both BTEs combined may use */
		uint64_t	i_rsvd_1	: 4;	/* reserved */
		uint64_t	i_soft_reset	: 1;	/* SOFT_RESET: for recovery after an error */
		uint64_t	i_rsvd		: 55;	/* reserved */
	} ii_ibcr_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_ibcr_u_t;
1415
1416
1417/************************************************************************
1418 * *
1419 * This register contains the header of a spurious read response *
1420 * received from Crosstalk. A spurious read response is defined as a *
1421 * read response received by II from a widget for which (1) the SIDN *
1422 * has a value between 1 and 7, inclusive (II never sends requests to *
 * these widgets), (2) there is no valid IPRTE register which          *
1424 * corresponds to the TNUM, or (3) the widget indicated in SIDN is *
1425 * not the same as the widget recorded in the IPRTE register *
1426 * referenced by the TNUM. If this condition is true, and if the *
1427 * IXSS[VALID] bit is clear, then the header of the spurious read *
 * response is captured in IXSM and IXSS, and IXSS[VALID] is set. The  *
1429 * errant header is thereby captured, and no further spurious read *
 * responses are captured until IXSS[VALID] is cleared by setting the  *
 * appropriate bit in IECLR. Every time a spurious read response is    *
1432 * detected, the SPUR_RD bit of the PRB corresponding to the incoming *
 * message's SIDN field is set. This always happens, regardless of     *
1434 * whether a header is captured. The programmer should check *
1435 * IXSM[SIDN] to determine which widget sent the spurious response, *
1436 * because there may be more than one SPUR_RD bit set in the PRB *
1437 * registers. The widget indicated by IXSM[SIDN] was the first *
1438 * spurious read response to be received since the last time *
1439 * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB *
1440 * will be set. Any SPUR_RD bits in any other PRB registers indicate *
 * spurious messages from other widgets which were detected after the  *
 * header was captured.                                                *
1443 * *
1444 ************************************************************************/
1445
/*
 * IXSM: captured header of a spurious read response from Crosstalk
 * (see description above).  Valid only while IXSS[VALID] is set.
 */
typedef union ii_ixsm_u {
	uint64_t	ii_ixsm_regval;	/* whole 64-bit register */
	struct {
		/* fields mirror the Crosstalk header flit; see Crosstalk spec for each */
		uint64_t	i_byte_en	: 32;
		uint64_t	i_reserved	: 1;	/* reserved */
		uint64_t	i_tag		: 3;
		uint64_t	i_alt_pactyp	: 4;
		uint64_t	i_bo		: 1;
		uint64_t	i_error		: 1;
		uint64_t	i_vbpm		: 1;
		uint64_t	i_gbr		: 1;
		uint64_t	i_ds		: 2;
		uint64_t	i_ct		: 1;
		uint64_t	i_tnum		: 5;	/* TNUM: transaction number checked against the IPRTE registers */
		uint64_t	i_pactyp	: 4;
		uint64_t	i_sidn		: 4;	/* SIDN: source widget of the spurious response (see description above) */
		uint64_t	i_didn		: 4;	/* destination widget number */
	} ii_ixsm_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_ixsm_u_t;
1465
1466
1467/************************************************************************
1468 * *
1469 * This register contains the sideband bits of a spurious read *
1470 * response received from Crosstalk. *
1471 * *
1472 ************************************************************************/
1473
/* IXSS: sideband bits of a captured spurious read response (see description above). */
typedef union ii_ixss_u {
	uint64_t	ii_ixss_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_sideband	: 8;	/* captured sideband bits */
		uint64_t	i_rsvd		: 55;	/* reserved */
		uint64_t	i_valid		: 1;	/* VALID: capture is live; cleared via IECLR (see IXSM description) */
	} ii_ixss_fld_s;	/* field order encodes hardware bit layout -- do not reorder */
} ii_ixss_u_t;
1482
1483
1484/************************************************************************
1485 * *
1486 * This register enables software to access the II LLP's test port. *
1487 * Refer to the LLP 2.5 documentation for an explanation of the test *
1488 * port. Software can write to this register to program the values *
1489 * for the control fields (TestErrCapture, TestClear, TestFlit, *
1490 * TestMask and TestSeed). Similarly, software can read from this *
1491 * register to obtain the values of the test port's status outputs *
1492 * (TestCBerr, TestValid and TestData). *
1493 * *
1494 ************************************************************************/
1495
/*
 * ILCT: software access to the II LLP test port (see LLP 2.5 docs).
 * Writes program the Test* control fields; reads return test status.
 * Fields listed LSB-first.
 */
typedef union ii_ilct_u {
	uint64_t	ii_ilct_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_test_seed		  :	20;	/* 19:0  TestSeed (write) */
		uint64_t	i_test_mask		  :	 8;	/* 27:20 TestMask (write) */
		uint64_t	i_test_data		  :	20;	/* 47:28 TestData (read status) */
		uint64_t	i_test_valid		  :	 1;	/* 48    TestValid (read status) */
		uint64_t	i_test_cberr		  :	 1;	/* 49    TestCBerr (read status) */
		uint64_t	i_test_flit		  :	 3;	/* 52:50 TestFlit (write) */
		uint64_t	i_test_clear		  :	 1;	/* 53    TestClear (write) */
		uint64_t	i_test_err_capture	  :	 1;	/* 54    TestErrCapture (write) */
		uint64_t	i_rsvd			  :	 9;	/* 63:55 reserved */
	} ii_ilct_fld_s;
} ii_ilct_u_t;
1510
1511
1512/************************************************************************
1513 * *
1514 * If the II detects an illegal incoming Duplonet packet (request or *
1515 * reply) when VALID==0 in the IIEPH1 register, then it saves the *
1516 * contents of the packet's header flit in the IIEPH1 and IIEPH2 *
1517 * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, *
1518 * and assigns a value to the ERR_TYPE field which indicates the *
1519 * specific nature of the error. The II recognizes four different *
1520 * types of errors: short request packets (ERR_TYPE==2), short reply *
1521 * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long *
1522 * reply packets (ERR_TYPE==5). The encodings for these types of *
1523 * errors were chosen to be consistent with the same types of errors *
1524 * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in *
1525 * the LB unit). If the II detects an illegal incoming Duplonet *
1526 * packet when VALID==1 in the IIEPH1 register, then it merely sets *
1527 * the OVERRUN bit to indicate that a subsequent error has happened, *
1528 * and does nothing further. *
1529 * *
1530 ************************************************************************/
1531
/*
 * IIEPH1: first half of the header flit of an illegal incoming Duplonet
 * packet.  ERR_TYPE encodes the error class (2=short req, 3=short reply,
 * 4=long req, 5=long reply, per the description above); OVERRUN is set if
 * a further error arrives while VALID is still set.  Fields LSB-first.
 */
typedef union ii_iieph1_u {
	uint64_t	ii_iieph1_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_command	  :	 7;	/* 6:0   packet command */
		uint64_t	i_rsvd_5	  :	 1;	/* 7     reserved */
		uint64_t	i_suppl		  :	14;	/* 21:8  supplemental field */
		uint64_t	i_rsvd_4	  :	 1;	/* 22    reserved */
		uint64_t	i_source	  :	14;	/* 36:23 source of the packet */
		uint64_t	i_rsvd_3	  :	 1;	/* 37    reserved */
		uint64_t	i_err_type	  :	 4;	/* 41:38 error type (2..5, see above) */
		uint64_t	i_rsvd_2	  :	 4;	/* 45:42 reserved */
		uint64_t	i_overrun	  :	 1;	/* 46    subsequent error occurred */
		uint64_t	i_rsvd_1	  :	 3;	/* 49:47 reserved */
		uint64_t	i_valid		  :	 1;	/* 50    captured contents valid */
		uint64_t	i_rsvd		  :	13;	/* 63:51 reserved */
	} ii_iieph1_fld_s;
} ii_iieph1_u_t;
1549
1550
1551/************************************************************************
1552 * *
1553 * This register holds the Address field from the header flit of an *
1554 * incoming erroneous Duplonet packet, along with the tail bit which *
1555 * accompanied this header flit. This register is essentially an *
1556 * extension of IIEPH1. Two registers were necessary because the 64 *
1557 * bits available in only a single register were insufficient to *
1558 * capture the entire header flit of an erroneous packet. *
1559 * *
1560 ************************************************************************/
1561
/*
 * IIEPH2: extension of IIEPH1 -- the Address field and tail bit of the
 * erroneous Duplonet packet's header flit.  Fields LSB-first.
 */
typedef union ii_iieph2_u {
	uint64_t	ii_iieph2_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_rsvd_0	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  captured address field */
		uint64_t	i_rsvd_1	  :	10;	/* 59:50 reserved */
		uint64_t	i_tail		  :	 1;	/* 60    tail bit of the header flit */
		uint64_t	i_rsvd		  :	 3;	/* 63:61 reserved */
	} ii_iieph2_fld_s;
} ii_iieph2_u_t;
1572
1573
1574/******************************/
1575
1576
1577
1578/************************************************************************
1579 * *
1580 * This register's value is a bit vector that guards access from SXBs *
1581 * to local registers within the II as well as to external Crosstalk *
1582 * widgets *
1583 * *
1584 ************************************************************************/
1585
/*
 * ISLAPR: bit vector guarding SXB access to local II registers and to
 * external Crosstalk widgets -- one bit per region.
 */
typedef union ii_islapr_u {
	uint64_t	ii_islapr_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_region	  :	64;	/* 63:0 per-region access-allowed bits */
	} ii_islapr_fld_s;
} ii_islapr_u_t;
1592
1593
1594/************************************************************************
1595 * *
1596 * A write to this register of the 56-bit value "Pup+Bun" will cause *
1597 * the bit in the ISLAPR register corresponding to the region of the *
1598 * requestor to be set (access allowed).                                *
1599 * *
1600 ************************************************************************/
1601
/*
 * ISLAPO: writing the 56-bit "Pup+Bun" value here sets the requestor's
 * region bit in ISLAPR (grants access).  Fields LSB-first.
 */
typedef union ii_islapo_u {
	uint64_t	ii_islapo_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_io_sbx_ovrride  :	56;	/* 55:0  override key ("Pup+Bun") */
		uint64_t	i_rsvd		  :	 8;	/* 63:56 reserved */
	} ii_islapo_fld_s;
} ii_islapo_u_t;
1609
1610/************************************************************************
1611 * *
1612 * Determines how long the wrapper will wait after an interrupt is     *
1613 * initially issued from the II before it times out the outstanding *
1614 * interrupt and drops it from the interrupt queue. *
1615 * *
1616 ************************************************************************/
1617
/*
 * IWI: interrupt-wrapper timing -- how long an outstanding interrupt may
 * wait before being timed out and dropped from the queue.  Fields LSB-first.
 */
typedef union ii_iwi_u {
	uint64_t	ii_iwi_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_prescale		  :	24;	/* 23:0  timeout prescale */
		uint64_t	i_rsvd			  :	 8;	/* 31:24 reserved */
		uint64_t	i_timeout		  :	 8;	/* 39:32 interrupt timeout */
		uint64_t	i_rsvd1			  :	 8;	/* 47:40 reserved */
		uint64_t	i_intrpt_retry_period	  :	 8;	/* 55:48 interrupt retry period */
		uint64_t	i_rsvd2			  :	 8;	/* 63:56 reserved */
	} ii_iwi_fld_s;
} ii_iwi_u_t;
1629
1630/************************************************************************
1631 * *
1632 * Log errors which have occurred in the II wrapper. The errors are *
1633 * cleared by writing to the IECLR register. *
1634 * *
1635 ************************************************************************/
1636
/*
 * IWEL: II wrapper error log.  Error bits are cleared via the IECLR
 * register (see description above).  Layout matches IWEIM (the mask
 * register) bit for bit.  Fields LSB-first.
 */
typedef union ii_iwel_u {
	uint64_t	ii_iwel_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_intr_timed_out		  :	 1;	/* 0     interrupt timed out */
		uint64_t	i_rsvd				  :	 7;	/* 7:1   reserved */
		uint64_t	i_cam_overflow			  :	 1;	/* 8     CAM overflow */
		uint64_t	i_cam_read_miss			  :	 1;	/* 9     CAM read miss */
		uint64_t	i_rsvd1				  :	 2;	/* 11:10 reserved */
		uint64_t	i_ioq_rep_underflow		  :	 1;	/* 12    IOQ reply underflow */
		uint64_t	i_ioq_req_underflow		  :	 1;	/* 13    IOQ request underflow */
		uint64_t	i_ioq_rep_overflow		  :	 1;	/* 14    IOQ reply overflow */
		uint64_t	i_ioq_req_overflow		  :	 1;	/* 15    IOQ request overflow */
		uint64_t	i_iiq_rep_overflow		  :	 1;	/* 16    IIQ reply overflow */
		uint64_t	i_iiq_req_overflow		  :	 1;	/* 17    IIQ request overflow */
		uint64_t	i_rsvd2				  :	 6;	/* 23:18 reserved */
		uint64_t	i_ii_xn_rep_cred_over_under	  :	 1;	/* 24    II->XN reply credit over/underflow */
		uint64_t	i_ii_xn_req_cred_over_under	  :	 1;	/* 25    II->XN request credit over/underflow */
		uint64_t	i_rsvd3				  :	 6;	/* 31:26 reserved */
		uint64_t	i_ii_xn_invalid_cmd		  :	 1;	/* 32    invalid command II->XN */
		uint64_t	i_xn_ii_invalid_cmd		  :	 1;	/* 33    invalid command XN->II */
		uint64_t	i_rsvd4				  :	30;	/* 63:34 reserved */
	} ii_iwel_fld_s;
} ii_iwel_u_t;
1660
1661/************************************************************************
1662 * *
1663 * Controls the II wrapper. *
1664 * *
1665 ************************************************************************/
1666
/*
 * IWC: II wrapper control -- byte swapping, CAM reset, credit logging,
 * and queue depths.  Fields LSB-first.
 */
typedef union ii_iwc_u {
	uint64_t	ii_iwc_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_dma_byte_swap			  :	 1;	/* 0     swap bytes on DMA */
		uint64_t	i_rsvd				  :	 3;	/* 3:1   reserved */
		uint64_t	i_cam_read_lines_reset		  :	 1;	/* 4     reset CAM read lines */
		uint64_t	i_rsvd1				  :	 3;	/* 7:5   reserved */
		uint64_t	i_ii_xn_cred_over_under_log	  :	 1;	/* 8     log II<->XN credit over/underflow */
		uint64_t	i_rsvd2				  :	19;	/* 27:9  reserved */
		uint64_t	i_xn_rep_iq_depth		  :	 5;	/* 32:28 XN reply input-queue depth */
		uint64_t	i_rsvd3				  :	 3;	/* 35:33 reserved */
		uint64_t	i_xn_req_iq_depth		  :	 5;	/* 40:36 XN request input-queue depth */
		uint64_t	i_rsvd4				  :	 3;	/* 43:41 reserved */
		uint64_t	i_iiq_depth			  :	 6;	/* 49:44 IIQ depth */
		uint64_t	i_rsvd5				  :	12;	/* 61:50 reserved */
		uint64_t	i_force_rep_cred		  :	 1;	/* 62    force reply credit */
		uint64_t	i_force_req_cred		  :	 1;	/* 63    force request credit */
	} ii_iwc_fld_s;
} ii_iwc_u_t;
1686
1687/************************************************************************
1688 * *
1689 * Status in the II wrapper. *
1690 * *
1691 ************************************************************************/
1692
/*
 * IWS: II wrapper status -- current XN input-queue credit counts.
 * Fields LSB-first.
 */
typedef union ii_iws_u {
	uint64_t	ii_iws_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_xn_rep_iq_credits	  :	 5;	/* 4:0   XN reply input-queue credits */
		uint64_t	i_rsvd			  :	 3;	/* 7:5   reserved */
		uint64_t	i_xn_req_iq_credits	  :	 5;	/* 12:8  XN request input-queue credits */
		uint64_t	i_rsvd1			  :	51;	/* 63:13 reserved */
	} ii_iws_fld_s;
} ii_iws_u_t;
1702
1703/************************************************************************
1704 * *
1705 * Masks errors in the IWEL register. *
1706 * *
1707 ************************************************************************/
1708
/*
 * IWEIM: interrupt mask for the IWEL error-log register; bit positions
 * mirror IWEL (only the two credit-bit names differ slightly).
 * Fields LSB-first.
 */
typedef union ii_iweim_u {
	uint64_t	ii_iweim_regval;	/* whole 64-bit register */
	struct {
		uint64_t	i_intr_timed_out		  :	 1;	/* 0     mask: interrupt timed out */
		uint64_t	i_rsvd				  :	 7;	/* 7:1   reserved */
		uint64_t	i_cam_overflow			  :	 1;	/* 8     mask: CAM overflow */
		uint64_t	i_cam_read_miss			  :	 1;	/* 9     mask: CAM read miss */
		uint64_t	i_rsvd1				  :	 2;	/* 11:10 reserved */
		uint64_t	i_ioq_rep_underflow		  :	 1;	/* 12    mask: IOQ reply underflow */
		uint64_t	i_ioq_req_underflow		  :	 1;	/* 13    mask: IOQ request underflow */
		uint64_t	i_ioq_rep_overflow		  :	 1;	/* 14    mask: IOQ reply overflow */
		uint64_t	i_ioq_req_overflow		  :	 1;	/* 15    mask: IOQ request overflow */
		uint64_t	i_iiq_rep_overflow		  :	 1;	/* 16    mask: IIQ reply overflow */
		uint64_t	i_iiq_req_overflow		  :	 1;	/* 17    mask: IIQ request overflow */
		uint64_t	i_rsvd2				  :	 6;	/* 23:18 reserved */
		uint64_t	i_ii_xn_rep_cred_overflow	  :	 1;	/* 24    mask: II->XN reply credit overflow */
		uint64_t	i_ii_xn_req_cred_overflow	  :	 1;	/* 25    mask: II->XN request credit overflow */
		uint64_t	i_rsvd3				  :	 6;	/* 31:26 reserved */
		uint64_t	i_ii_xn_invalid_cmd		  :	 1;	/* 32    mask: invalid command II->XN */
		uint64_t	i_xn_ii_invalid_cmd		  :	 1;	/* 33    mask: invalid command XN->II */
		uint64_t	i_rsvd4				  :	30;	/* 63:34 reserved */
	} ii_iweim_fld_s;
} ii_iweim_u_t;
1732
1733
1734/************************************************************************
1735 * *
1736 * A write to this register causes a particular field in the *
1737 * corresponding widget's PRB entry to be adjusted up or down by 1. *
1738 * This counter should be used when recovering from error and reset *
1739 * conditions. Note that software would be capable of causing *
1740 * inadvertent overflow or underflow of these counters. *
1741 * *
1742 ************************************************************************/
1743
/*
 * IPCA: write-to-adjust register -- bumps a field of the selected
 * widget's PRB entry up or down by 1 (error/reset recovery; software
 * can over/underflow the counters).  Fields LSB-first.
 */
typedef union ii_ipca_u {
	uint64_t	ii_ipca_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_wid		  :	 4;	/* 3:0   widget whose PRB is adjusted */
		uint64_t	i_adjust	  :	 1;	/* 4     direction of adjustment */
		uint64_t	i_rsvd_1	  :	 3;	/* 7:5   reserved */
		uint64_t	i_field		  :	 2;	/* 9:8   which PRB field to adjust */
		uint64_t	i_rsvd		  :	54;	/* 63:10 reserved */
	} ii_ipca_fld_s;
} ii_ipca_u_t;
1754
1755
1756/************************************************************************
1757 * *
1758 * There are 8 instances of this register. This register contains *
1759 * the information that the II has to remember once it has launched a *
1760 * PIO Read operation. The contents are used to form the correct *
1761 * Router Network packet and direct the Crosstalk reply to the *
1762 * appropriate processor. *
1763 * *
1764 ************************************************************************/
1765
1766
/* IPRTE0A: PIO Read Table Entry 0, part A (widget / timeout / valid). */
typedef union ii_iprte0a_u {
	uint64_t	ii_iprte0a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprte0a_fld_s;
} ii_iprte0a_u_t;
1776
1777
1778/************************************************************************
1779 * *
1780 * There are 8 instances of this register. This register contains *
1781 * the information that the II has to remember once it has launched a *
1782 * PIO Read operation. The contents are used to form the correct *
1783 * Router Network packet and direct the Crosstalk reply to the *
1784 * appropriate processor. *
1785 * *
1786 ************************************************************************/
1787
/* IPRTE1A: PIO Read Table Entry 1, part A (widget / timeout / valid). */
typedef union ii_iprte1a_u {
	uint64_t	ii_iprte1a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprte1a_fld_s;
} ii_iprte1a_u_t;
1797
1798
1799/************************************************************************
1800 * *
1801 * There are 8 instances of this register. This register contains *
1802 * the information that the II has to remember once it has launched a *
1803 * PIO Read operation. The contents are used to form the correct *
1804 * Router Network packet and direct the Crosstalk reply to the *
1805 * appropriate processor. *
1806 * *
1807 ************************************************************************/
1808
/* IPRTE2A: PIO Read Table Entry 2, part A (widget / timeout / valid). */
typedef union ii_iprte2a_u {
	uint64_t	ii_iprte2a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprte2a_fld_s;
} ii_iprte2a_u_t;
1818
1819
1820/************************************************************************
1821 * *
1822 * There are 8 instances of this register. This register contains *
1823 * the information that the II has to remember once it has launched a *
1824 * PIO Read operation. The contents are used to form the correct *
1825 * Router Network packet and direct the Crosstalk reply to the *
1826 * appropriate processor. *
1827 * *
1828 ************************************************************************/
1829
/* IPRTE3A: PIO Read Table Entry 3, part A (widget / timeout / valid). */
typedef union ii_iprte3a_u {
	uint64_t	ii_iprte3a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprte3a_fld_s;
} ii_iprte3a_u_t;
1839
1840
1841/************************************************************************
1842 * *
1843 * There are 8 instances of this register. This register contains *
1844 * the information that the II has to remember once it has launched a *
1845 * PIO Read operation. The contents are used to form the correct *
1846 * Router Network packet and direct the Crosstalk reply to the *
1847 * appropriate processor. *
1848 * *
1849 ************************************************************************/
1850
/* IPRTE4A: PIO Read Table Entry 4, part A (widget / timeout / valid). */
typedef union ii_iprte4a_u {
	uint64_t	ii_iprte4a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprte4a_fld_s;
} ii_iprte4a_u_t;
1860
1861
1862/************************************************************************
1863 * *
1864 * There are 8 instances of this register. This register contains *
1865 * the information that the II has to remember once it has launched a *
1866 * PIO Read operation. The contents are used to form the correct *
1867 * Router Network packet and direct the Crosstalk reply to the *
1868 * appropriate processor. *
1869 * *
1870 ************************************************************************/
1871
/* IPRTE5A: PIO Read Table Entry 5, part A (widget / timeout / valid). */
typedef union ii_iprte5a_u {
	uint64_t	ii_iprte5a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprte5a_fld_s;
} ii_iprte5a_u_t;
1881
1882
1883/************************************************************************
1884 * *
1885 * There are 8 instances of this register. This register contains *
1886 * the information that the II has to remember once it has launched a *
1887 * PIO Read operation. The contents are used to form the correct *
1888 * Router Network packet and direct the Crosstalk reply to the *
1889 * appropriate processor. *
1890 * *
1891 ************************************************************************/
1892
/* IPRTE6A: PIO Read Table Entry 6, part A (widget / timeout / valid). */
typedef union ii_iprte6a_u {
	uint64_t	ii_iprte6a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprte6a_fld_s;
} ii_iprte6a_u_t;
1902
1903
1904/************************************************************************
1905 * *
1906 * There are 8 instances of this register. This register contains *
1907 * the information that the II has to remember once it has launched a *
1908 * PIO Read operation. The contents are used to form the correct *
1909 * Router Network packet and direct the Crosstalk reply to the *
1910 * appropriate processor. *
1911 * *
1912 ************************************************************************/
1913
/* IPRTE7A: PIO Read Table Entry 7, part A (widget / timeout / valid). */
typedef union ii_iprte7a_u {
	uint64_t	ii_iprte7a_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	54;	/* 53:0  reserved */
		uint64_t	i_widget	  :	 4;	/* 57:54 widget number */
		uint64_t	i_to_cnt	  :	 5;	/* 62:58 timeout count */
		uint64_t	i_vld		  :	 1;	/* 63    entry valid */
	} ii_iprtea7_fld_s;	/* NOTE(review): tag breaks the ii_iprteNa_fld_s pattern
				 * ("a7" vs "7a") -- apparent typo, but it is the member
				 * name callers use, so it is left unchanged. */
} ii_iprte7a_u_t;
1923
1924
1925
1926/************************************************************************
1927 * *
1928 * There are 8 instances of this register. This register contains *
1929 * the information that the II has to remember once it has launched a *
1930 * PIO Read operation. The contents are used to form the correct *
1931 * Router Network packet and direct the Crosstalk reply to the *
1932 * appropriate processor. *
1933 * *
1934 ************************************************************************/
1935
1936
/* IPRTE0B: PIO Read Table Entry 0, part B (address / initiator / source). */
typedef union ii_iprte0b_u {
	uint64_t	ii_iprte0b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */
	} ii_iprte0b_fld_s;
} ii_iprte0b_u_t;
1946
1947
1948/************************************************************************
1949 * *
1950 * There are 8 instances of this register. This register contains *
1951 * the information that the II has to remember once it has launched a *
1952 * PIO Read operation. The contents are used to form the correct *
1953 * Router Network packet and direct the Crosstalk reply to the *
1954 * appropriate processor. *
1955 * *
1956 ************************************************************************/
1957
/* IPRTE1B: PIO Read Table Entry 1, part B (address / initiator / source). */
typedef union ii_iprte1b_u {
	uint64_t	ii_iprte1b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */
	} ii_iprte1b_fld_s;
} ii_iprte1b_u_t;
1967
1968
1969/************************************************************************
1970 * *
1971 * There are 8 instances of this register. This register contains *
1972 * the information that the II has to remember once it has launched a *
1973 * PIO Read operation. The contents are used to form the correct *
1974 * Router Network packet and direct the Crosstalk reply to the *
1975 * appropriate processor. *
1976 * *
1977 ************************************************************************/
1978
/* IPRTE2B: PIO Read Table Entry 2, part B (address / initiator / source). */
typedef union ii_iprte2b_u {
	uint64_t	ii_iprte2b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */
	} ii_iprte2b_fld_s;
} ii_iprte2b_u_t;
1988
1989
1990/************************************************************************
1991 * *
1992 * There are 8 instances of this register. This register contains *
1993 * the information that the II has to remember once it has launched a *
1994 * PIO Read operation. The contents are used to form the correct *
1995 * Router Network packet and direct the Crosstalk reply to the *
1996 * appropriate processor. *
1997 * *
1998 ************************************************************************/
1999
/* IPRTE3B: PIO Read Table Entry 3, part B (address / initiator / source). */
typedef union ii_iprte3b_u {
	uint64_t	ii_iprte3b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */
	} ii_iprte3b_fld_s;
} ii_iprte3b_u_t;
2009
2010
2011/************************************************************************
2012 * *
2013 * There are 8 instances of this register. This register contains *
2014 * the information that the II has to remember once it has launched a *
2015 * PIO Read operation. The contents are used to form the correct *
2016 * Router Network packet and direct the Crosstalk reply to the *
2017 * appropriate processor. *
2018 * *
2019 ************************************************************************/
2020
/* IPRTE4B: PIO Read Table Entry 4, part B (address / initiator / source). */
typedef union ii_iprte4b_u {
	uint64_t	ii_iprte4b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */
	} ii_iprte4b_fld_s;
} ii_iprte4b_u_t;
2030
2031
2032/************************************************************************
2033 * *
2034 * There are 8 instances of this register. This register contains *
2035 * the information that the II has to remember once it has launched a *
2036 * PIO Read operation. The contents are used to form the correct *
2037 * Router Network packet and direct the Crosstalk reply to the *
2038 * appropriate processor. *
2039 * *
2040 ************************************************************************/
2041
/* IPRTE5B: PIO Read Table Entry 5, part B (address / initiator / source). */
typedef union ii_iprte5b_u {
	uint64_t	ii_iprte5b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */
	} ii_iprte5b_fld_s;
} ii_iprte5b_u_t;
2051
2052
2053/************************************************************************
2054 * *
2055 * There are 8 instances of this register. This register contains *
2056 * the information that the II has to remember once it has launched a *
2057 * PIO Read operation. The contents are used to form the correct *
2058 * Router Network packet and direct the Crosstalk reply to the *
2059 * appropriate processor. *
2060 * *
2061 ************************************************************************/
2062
/* IPRTE6B: PIO Read Table Entry 6, part B (address / initiator / source). */
typedef union ii_iprte6b_u {
	uint64_t	ii_iprte6b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */

	} ii_iprte6b_fld_s;
} ii_iprte6b_u_t;
2073
2074
2075/************************************************************************
2076 * *
2077 * There are 8 instances of this register. This register contains *
2078 * the information that the II has to remember once it has launched a *
2079 * PIO Read operation. The contents are used to form the correct *
2080 * Router Network packet and direct the Crosstalk reply to the *
2081 * appropriate processor. *
2082 * *
2083 ************************************************************************/
2084
/* IPRTE7B: PIO Read Table Entry 7, part B (address / initiator / source). */
typedef union ii_iprte7b_u {
	uint64_t	ii_iprte7b_regval;	/* whole 64-bit register */
	struct {				/* fields LSB-first */
		uint64_t	i_rsvd_1	  :	 3;	/* 2:0   reserved */
		uint64_t	i_address	  :	47;	/* 49:3  address of the PIO read */
		uint64_t	i_init		  :	 3;	/* 52:50 initiator */
		uint64_t	i_source	  :	11;	/* 63:53 source (for directing the reply) */
	} ii_iprte7b_fld_s;
} ii_iprte7b_u_t;
2094
2095
2096/************************************************************************
2097 * *
2098 * Description: SHub II contains a feature which did not exist in *
2099 * the Hub which automatically cleans up after a Read Response *
2100 * timeout, including deallocation of the IPRTE and recovery of IBuf *
2101 * space. The inclusion of this register in SHub is for backward *
2102 * compatibility *
2103 * A write to this register causes an entry from the table of *
2104 * outstanding PIO Read Requests to be freed and returned to the *
2105 * stack of free entries. This register is used in handling the *
2106 * timeout errors that result in a PIO Reply never returning from *
2107 * Crosstalk. *
2108 * Note that this register does not affect the contents of the IPRTE *
2109 * registers. The Valid bits in those registers have to be *
2110 * specifically turned off by software. *
2111 * *
2112 ************************************************************************/
2113
/*
 * IPDR: writing here frees an entry of the outstanding-PIO-Read table
 * (timeout recovery).  Does NOT clear the IPRTE Valid bits -- software
 * must do that separately (see description above).  Fields LSB-first.
 */
typedef union ii_ipdr_u {
	uint64_t	ii_ipdr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_te		  :	 3;	/* 2:0   table entry to deallocate */
		uint64_t	i_rsvd_1	  :	 1;	/* 3     reserved */
		uint64_t	i_pnd		  :	 1;	/* 4     operation pending */
		uint64_t	i_init_rpcnt	  :	 1;	/* 5     initialize reply count -- TODO confirm */
		uint64_t	i_rsvd		  :	58;	/* 63:6  reserved */
	} ii_ipdr_fld_s;
} ii_ipdr_u_t;
2124
2125
2126/************************************************************************
2127 * *
2128 * A write to this register causes a CRB entry to be returned to the *
2129 * queue of free CRBs. The entry should have previously been cleared *
2130 * (mark bit) via backdoor access to the pertinent CRB entry. This *
2131 * register is used in the last step of handling the errors that are *
2132 * captured and marked in CRB entries. Briefly: 1) first error for *
2133 * DMA write from a particular device, and first error for a *
2134 * particular BTE stream, lead to a marked CRB entry, and processor *
2135 * interrupt, 2) software reads the error information captured in the *
2136 * CRB entry, and presumably takes some corrective action, 3) *
2137 * software clears the mark bit, and finally 4) software writes to *
2138 * the ICDR register to return the CRB entry to the list of free CRB *
2139 * entries. *
2140 * *
2141 ************************************************************************/
2142
/*
 * ICDR: writing here returns a (previously cleared) CRB entry to the
 * free queue -- the last step of CRB error handling (see description
 * above).  Fields LSB-first.
 */
typedef union ii_icdr_u {
	uint64_t	ii_icdr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_crb_num	  :	 4;	/* 3:0  CRB entry to free */
		uint64_t	i_pnd		  :	 1;	/* 4    operation pending */
		uint64_t	i_rsvd		  :	59;	/* 63:5 reserved */
	} ii_icdr_fld_s;
} ii_icdr_u_t;
2151
2152
2153/************************************************************************
2154 * *
2155 * This register provides debug access to two FIFOs inside of II. *
2156 * Both IOQ_MAX* fields of this register contain the instantaneous *
2157 * depth (in units of the number of available entries) of the *
2158 * associated IOQ FIFO. A read of this register will return the *
2159 * number of free entries on each FIFO at the time of the read. So *
2160 * when a FIFO is idle, the associated field contains the maximum *
2161 * depth of the FIFO. This register is writable for debug reasons *
2162 * and is intended to be written with the maximum desired FIFO depth *
2163 * while the FIFO is idle. Software must assure that II is idle when *
2164 * this register is written. If there are any active entries in any *
2165 * of these FIFOs when this register is written, the results are *
2166 * undefined. *
2167 * *
2168 ************************************************************************/
2169
/*
 * IFDR: debug access to the two IOQ FIFOs -- reads give free-entry
 * counts; writes set max depths (only while II is idle, else results
 * are undefined -- see description above).  Fields LSB-first.
 */
typedef union ii_ifdr_u {
	uint64_t	ii_ifdr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_ioq_max_rq	  :	 7;	/* 6:0   request IOQ free entries / max depth */
		uint64_t	i_set_ioq_rq	  :	 1;	/* 7     set request IOQ depth */
		uint64_t	i_ioq_max_rp	  :	 7;	/* 14:8  reply IOQ free entries / max depth */
		uint64_t	i_set_ioq_rp	  :	 1;	/* 15    set reply IOQ depth */
		uint64_t	i_rsvd		  :	48;	/* 63:16 reserved */
	} ii_ifdr_fld_s;
} ii_ifdr_u_t;
2180
2181
2182/************************************************************************
2183 * *
2184 * This register allows the II to become sluggish in removing *
2185 * messages from its inbound queue (IIQ). This will cause messages to *
2186 * back up in either virtual channel. Disabling the "molasses" mode *
2187 * subsequently allows the II to be tested under stress. In the *
2188 * sluggish ("Molasses") mode, the localized effects of congestion *
2189 * can be observed. *
2190 * *
2191 ************************************************************************/
2192
/*
 * IIAP: "molasses" mode -- makes the II sluggish about draining its
 * inbound queue (IIQ) so congestion effects can be observed under test.
 * Fields LSB-first.
 */
typedef union ii_iiap_u {
	uint64_t	ii_iiap_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_rq_mls	  :	 6;	/* 5:0   request-channel molasses setting */
		uint64_t	i_rsvd_1	  :	 2;	/* 7:6   reserved */
		uint64_t	i_rp_mls	  :	 6;	/* 13:8  reply-channel molasses setting */
		uint64_t	i_rsvd		  :	50;	/* 63:14 reserved */
	} ii_iiap_fld_s;
} ii_iiap_u_t;
2202
2203
2204/************************************************************************
2205 * *
2206 * This register allows several parameters of CRB operation to be *
2207 * set. Note that writing to this register can have catastrophic side *
2208 * effects, if the CRB is not quiescent, i.e. if the CRB is *
2209 * processing protocol messages when the write occurs. *
2210 * *
2211 ************************************************************************/
2212
/*
 * ICMR: CRB mode/management -- writable only while the CRB is quiescent
 * (catastrophic side effects otherwise, per description above).
 * Fields LSB-first.
 */
typedef union ii_icmr_u {
	uint64_t	ii_icmr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_sp_msg	  :	 1;	/* 0     spurious message captured */
		uint64_t	i_rd_hdr	  :	 1;	/* 1     read header mode -- TODO confirm */
		uint64_t	i_rsvd_4	  :	 2;	/* 3:2   reserved */
		uint64_t	i_c_cnt		  :	 4;	/* 7:4   count field -- TODO confirm semantics */
		uint64_t	i_rsvd_3	  :	 4;	/* 11:8  reserved */
		uint64_t	i_clr_rqpd	  :	 1;	/* 12    clear request pending */
		uint64_t	i_clr_rppd	  :	 1;	/* 13    clear reply pending */
		uint64_t	i_rsvd_2	  :	 2;	/* 15:14 reserved */
		uint64_t	i_fc_cnt	  :	 4;	/* 19:16 flow-control count -- TODO confirm */
		uint64_t	i_crb_vld	  :	15;	/* 34:20 per-entry CRB valid bits (15 CRBs) */
		uint64_t	i_crb_mark	  :	15;	/* 49:35 per-entry CRB mark bits (15 CRBs) */
		uint64_t	i_rsvd_1	  :	 2;	/* 51:50 reserved */
		uint64_t	i_precise	  :	 1;	/* 52    precise mode */
		uint64_t	i_rsvd		  :	11;	/* 63:53 reserved */
	} ii_icmr_fld_s;
} ii_icmr_u_t;
2232
2233
2234/************************************************************************
2235 * *
2236 * This register allows control of the table portion of the CRB *
2237 * logic via software. Control operations from this register have *
2238 * priority over all incoming Crosstalk or BTE requests. *
2239 * *
2240 ************************************************************************/
2241
/*
 * ICCR: software control of the CRB table; operations here have
 * priority over incoming Crosstalk/BTE requests.  Fields LSB-first.
 */
typedef union ii_iccr_u {
	uint64_t	ii_iccr_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_crb_num	  :	 4;	/* 3:0   CRB entry the command targets */
		uint64_t	i_rsvd_1	  :	 4;	/* 7:4   reserved */
		uint64_t	i_cmd		  :	 8;	/* 15:8  control command */
		uint64_t	i_pending	  :	 1;	/* 16    command still in progress */
		uint64_t	i_rsvd		  :	47;	/* 63:17 reserved */
	} ii_iccr_fld_s;
} ii_iccr_u_t;
2252
2253
2254/************************************************************************
2255 * *
2256 * This register allows the maximum timeout value to be programmed. *
2257 * *
2258 ************************************************************************/
2259
/* ICTO: maximum CRB timeout value.  Fields LSB-first. */
typedef union ii_icto_u {
	uint64_t	ii_icto_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_timeout	  :	 8;	/* 7:0  max timeout (compared against CRBx_D[TIMEOUT]) */
		uint64_t	i_rsvd		  :	56;	/* 63:8 reserved */
	} ii_icto_fld_s;
} ii_icto_u_t;
2267
2268
2269/************************************************************************
2270 * *
2271 * This register allows the timeout prescaler to be programmed. An     *
2272 * internal counter is associated with this register. When the *
2273 * internal counter reaches the value of the PRESCALE field, the *
2274 * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] *
2275 * field). The internal counter resets to zero, and then continues *
2276 * counting. *
2277 * *
2278 ************************************************************************/
2279
/*
 * ICTP: CRB timeout prescale -- when the internal counter reaches
 * PRESCALE, every valid CRB's CRBx_D[TIMEOUT] is incremented and the
 * counter restarts from zero.  Fields LSB-first.
 */
typedef union ii_ictp_u {
	uint64_t	ii_ictp_regval;		/* whole 64-bit register */
	struct {
		uint64_t	i_prescale	  :	24;	/* 23:0  prescale threshold */
		uint64_t	i_rsvd		  :	40;	/* 63:24 reserved */
	} ii_ictp_fld_s;
} ii_ictp_u_t;
2287
2288
2289/************************************************************************
2290 * *
2291 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2292 * used for Crosstalk operations (both cacheline and partial *
2293 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2294 * registers (_A to _E) are required to read and write each entry. *
2295 * The CRB Entry registers can be conceptualized as rows and columns *
2296 * (illustrated in the table above). Each row contains the 5           *
2297 * registers required for a single CRB Entry. The first doubleword     *
2298 * (column) for each entry is labeled A, and the second doubleword *
2299 * (higher address) is labeled B, the third doubleword is labeled C, *
2300 * the fourth doubleword is labeled D and the fifth doubleword is *
2301 * labeled E. All CRB entries have their addresses on a quarter *
2302 * cacheline aligned boundary. *
2303 * Upon reset, only the following fields are initialized: valid *
2304 * (VLD), priority count, timeout, timeout valid, and context valid. *
2305 * All other bits should be cleared by software before use (after *
2306 * recovering any potential error state from before the reset). *
2307 * The following five tables summarize the format for the five         *
2308 * registers that are used for each ICRB# Entry.                       *
2309 * *
2310 ************************************************************************/
2311
/*
 * ICRB0_A: CRB entry doubleword A -- request type, address and
 * transaction identity.  Fields LSB-first.
 */
typedef union ii_icrb0_a_u {
	uint64_t	ii_icrb0_a_regval;	/* whole 64-bit register */
	struct {
		uint64_t	ia_iow		  :	 1;	/* 0     I/O write (vs read) */
		uint64_t	ia_vld		  :	 1;	/* 1     entry valid */
		uint64_t	ia_addr		  :	47;	/* 48:2  address */
		uint64_t	ia_tnum		  :	 5;	/* 53:49 transaction number */
		uint64_t	ia_sidn		  :	 4;	/* 57:54 source widget id */
		uint64_t	ia_rsvd		  :	 6;	/* 63:58 reserved */
	} ii_icrb0_a_fld_s;
} ii_icrb0_a_u_t;
2323
2324
2325/************************************************************************
2326 * *
2327 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2328 * used for Crosstalk operations (both cacheline and partial *
2329 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2330 * registers (_A to _E) are required to read and write each entry. *
2331 * *
2332 ************************************************************************/
2333
/*
 * ICRB0_B: CRB entry doubleword B -- error/mark state, stall bits and
 * protocol-message bookkeeping.  Fields LSB-first.
 */
typedef union ii_icrb0_b_u {
	uint64_t	ii_icrb0_b_regval;	/* whole 64-bit register */
	struct {
		uint64_t	ib_xt_err	  :	 1;	/* 0     Crosstalk error */
		uint64_t	ib_mark		  :	 1;	/* 1     entry marked (error captured) */
		uint64_t	ib_ln_uce	  :	 1;	/* 2     uncorrectable error -- TODO confirm */
		uint64_t	ib_errcode	  :	 3;	/* 5:3   error code */
		uint64_t	ib_error	  :	 1;	/* 6     error occurred */
		uint64_t	ib_stall__bte_1	  :	 1;	/* 7     stall BTE 1 */
		uint64_t	ib_stall__bte_0	  :	 1;	/* 8     stall BTE 0 */
		uint64_t	ib_stall__intr	  :	 1;	/* 9     stall interrupt */
		uint64_t	ib_stall_ib	  :	 1;	/* 10    stall IB */
		uint64_t	ib_intvn	  :	 1;	/* 11    intervention */
		uint64_t	ib_wb		  :	 1;	/* 12    writeback */
		uint64_t	ib_hold		  :	 1;	/* 13    hold */
		uint64_t	ib_ack		  :	 1;	/* 14    acknowledge */
		uint64_t	ib_resp		  :	 1;	/* 15    response */
		uint64_t	ib_ack_cnt	  :	11;	/* 26:16 acknowledge count */
		uint64_t	ib_rsvd		  :	 7;	/* 33:27 reserved */
		uint64_t	ib_exc		  :	 5;	/* 38:34 exception field -- TODO confirm */
		uint64_t	ib_init		  :	 3;	/* 41:39 initiator */
		uint64_t	ib_imsg		  :	 8;	/* 49:42 incoming message */
		uint64_t	ib_imsgtype	  :	 2;	/* 51:50 incoming message type */
		uint64_t	ib_use_old	  :	 1;	/* 52    use old -- TODO confirm semantics */
		uint64_t	ib_rsvd_1	  :	11;	/* 63:53 reserved */
	} ii_icrb0_b_fld_s;
} ii_icrb0_b_u_t;
2361
2362
2363/************************************************************************
2364 * *
2365 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2366 * used for Crosstalk operations (both cacheline and partial *
2367 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2368 * registers (_A to _E) are required to read and write each entry. *
2369 * *
2370 ************************************************************************/
2371
/*
 * ICRB0_C: CRB entry doubleword C -- source, size and transaction
 * attribute bits.  Fields LSB-first.
 */
typedef union ii_icrb0_c_u {
	uint64_t	ii_icrb0_c_regval;	/* whole 64-bit register */
	struct {
		uint64_t	ic_source	  :	15;	/* 14:0  request source */
		uint64_t	ic_size		  :	 2;	/* 16:15 transfer size */
		uint64_t	ic_ct		  :	 1;	/* 17    coherent transaction */
		uint64_t	ic_bte_num	  :	 1;	/* 18    which BTE (0/1) */
		uint64_t	ic_gbr		  :	 1;	/* 19    guaranteed bandwidth ring -- TODO confirm */
		uint64_t	ic_resprqd	  :	 1;	/* 20    response required */
		uint64_t	ic_bo		  :	 1;	/* 21    barrier op */
		uint64_t	ic_suppl	  :	15;	/* 36:22 supplemental field */
		uint64_t	ic_rsvd		  :	27;	/* 63:37 reserved */
	} ii_icrb0_c_fld_s;
} ii_icrb0_c_u_t;
2386
2387
2388/************************************************************************
2389 * *
2390 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2391 * used for Crosstalk operations (both cacheline and partial *
2392 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2393 * registers (_A to _E) are required to read and write each entry. *
2394 * *
2395 ************************************************************************/
2396
/*
 * ICRB0_D: register D of the five 64-bit views (_A.._E) onto CRB entry 0
 * (see the description block above).  Fields fill all 64 bits.
 */
typedef union ii_icrb0_d_u {
	uint64_t ii_icrb0_d_regval;	/* raw 64-bit register value */
	struct {
		uint64_t id_pa_be : 43;
		uint64_t id_bte_op : 1;
		uint64_t id_pr_psc : 4;
		uint64_t id_pr_cnt : 4;
		uint64_t id_sleep : 1;
		uint64_t id_rsvd : 11;
	} ii_icrb0_d_fld_s;
} ii_icrb0_d_u_t;
2408
2409
2410/************************************************************************
2411 * *
2412 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2413 * used for Crosstalk operations (both cacheline and partial *
2414 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2415 * registers (_A to _E) are required to read and write each entry. *
2416 * *
2417 ************************************************************************/
2418
/*
 * ICRB0_E: register E, the last of the five 64-bit views (_A.._E) onto
 * CRB entry 0 (see the description block above).  Fields fill all 64 bits.
 */
typedef union ii_icrb0_e_u {
	uint64_t ii_icrb0_e_regval;	/* raw 64-bit register value */
	struct {
		uint64_t ie_timeout : 8;
		uint64_t ie_context : 15;
		uint64_t ie_rsvd : 1;
		uint64_t ie_tvld : 1;
		uint64_t ie_cvld : 1;
		uint64_t ie_rsvd_0 : 38;
	} ii_icrb0_e_fld_s;
} ii_icrb0_e_u_t;
2430
2431
2432/************************************************************************
2433 * *
2434 * This register contains the lower 64 bits of the header of the *
2435 * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
2436 * register is set. *
2437 * *
2438 ************************************************************************/
2439
/*
 * ICSML: lower 64 bits of the spurious-message header captured by II;
 * valid only when the SP_MSG bit in ICMR is set (see description above).
 */
typedef union ii_icsml_u {
	uint64_t ii_icsml_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_tt_addr : 47;
		uint64_t i_newsuppl_ex : 14;
		uint64_t i_reserved : 2;
		uint64_t i_overflow : 1;
	} ii_icsml_fld_s;
} ii_icsml_u_t;
2449
2450
2451/************************************************************************
2452 * *
2453 * This register contains the middle 64 bits of the header of the *
2454 * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
2455 * register is set. *
2456 * *
2457 ************************************************************************/
2458
/*
 * ICSMM: middle 64 bits of the spurious-message header captured by II;
 * valid only when the SP_MSG bit in ICMR is set (see description above).
 */
typedef union ii_icsmm_u {
	uint64_t ii_icsmm_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_tt_ack_cnt : 11;
		uint64_t i_reserved : 53;
	} ii_icsmm_fld_s;
} ii_icsmm_u_t;
2466
2467
2468/************************************************************************
2469 * *
2470 * This register contains the microscopic state, all the inputs to *
2471 * the protocol table, captured with the spurious message. Valid when *
2472 * the SP_MSG bit in the ICMR register is set. *
2473 * *
2474 ************************************************************************/
2475
/*
 * ICSMH: microscopic state (all protocol-table inputs) captured with the
 * spurious message; valid only when the SP_MSG bit in ICMR is set (see
 * description above).  Fields fill all 64 bits.
 */
typedef union ii_icsmh_u {
	uint64_t ii_icsmh_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_tt_vld : 1;
		uint64_t i_xerr : 1;
		uint64_t i_ft_cwact_o : 1;
		uint64_t i_ft_wact_o : 1;
		uint64_t i_ft_active_o : 1;
		uint64_t i_sync : 1;
		uint64_t i_mnusg : 1;
		uint64_t i_mnusz : 1;
		uint64_t i_plusz : 1;
		uint64_t i_plusg : 1;
		uint64_t i_tt_exc : 5;
		uint64_t i_tt_wb : 1;
		uint64_t i_tt_hold : 1;
		uint64_t i_tt_ack : 1;
		uint64_t i_tt_resp : 1;
		uint64_t i_tt_intvn : 1;
		uint64_t i_g_stall_bte1 : 1;
		uint64_t i_g_stall_bte0 : 1;
		uint64_t i_g_stall_il : 1;
		uint64_t i_g_stall_ib : 1;
		uint64_t i_tt_imsg : 8;
		uint64_t i_tt_imsgtype : 2;	/* see IIO_ICRB_IMSGT_* below */
		uint64_t i_tt_use_old : 1;
		uint64_t i_tt_respreqd : 1;
		uint64_t i_tt_bte_num : 1;
		uint64_t i_cbn : 1;
		uint64_t i_match : 1;
		/* reply/request count comparison outcomes */
		uint64_t i_rpcnt_lt_34 : 1;
		uint64_t i_rpcnt_ge_34 : 1;
		uint64_t i_rpcnt_lt_18 : 1;
		uint64_t i_rpcnt_ge_18 : 1;
		uint64_t i_rpcnt_lt_2 : 1;
		uint64_t i_rpcnt_ge_2 : 1;
		uint64_t i_rqcnt_lt_18 : 1;
		uint64_t i_rqcnt_ge_18 : 1;
		uint64_t i_rqcnt_lt_2 : 1;
		uint64_t i_rqcnt_ge_2 : 1;
		uint64_t i_tt_device : 7;
		uint64_t i_tt_init : 3;		/* initiator; see IIO_ICRB_INIT_* below */
		uint64_t i_reserved : 5;
	} ii_icsmh_fld_s;
} ii_icsmh_u_t;
2521
2522
2523/************************************************************************
2524 * *
2525 * The Shub DEBUG unit provides a 3-bit selection signal to the *
2526 * II core and a 3-bit selection signal to the fsbclk domain in the II *
2527 * wrapper. *
2528 * *
2529 ************************************************************************/
2530
/*
 * IDBSS: DEBUG-unit selection signals — a 3-bit select for the II core
 * and a 3-bit select for the fsbclk domain (see description above).
 */
typedef union ii_idbss_u {
	uint64_t ii_idbss_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_iioclk_core_submenu : 3;
		uint64_t i_rsvd : 5;
		uint64_t i_fsbclk_wrapper_submenu : 3;
		uint64_t i_rsvd_1 : 5;
		uint64_t i_iioclk_menu : 5;
		uint64_t i_rsvd_2 : 43;
	} ii_idbss_fld_s;
} ii_idbss_u_t;
2542
2543
2544/************************************************************************
2545 * *
2546 * Description: This register is used to set up the length for a *
2547 * transfer and then to monitor the progress of that transfer. This *
2548 * register needs to be initialized before a transfer is started. A *
2549 * legitimate write to this register will set the Busy bit, clear the *
2550 * Error bit, and initialize the length to the value desired. *
2551 * While the transfer is in progress, hardware will decrement the *
2552 * length field with each successful block that is copied. Once the *
2553 * transfer completes, hardware will clear the Busy bit. The length *
2554 * field will also contain the number of cache lines left to be *
2555 * transferred. *
2556 * *
2557 ************************************************************************/
2558
/*
 * IBLS0: BTE 0 length/status.  Written to set the transfer length (which
 * sets Busy and clears Error); hardware decrements i_length per block and
 * clears i_busy on completion (see description above).
 */
typedef union ii_ibls0_u {
	uint64_t ii_ibls0_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_length : 16;	/* cache lines remaining; see IBLS_LENGTH_MASK */
		uint64_t i_error : 1;	/* see IBLS_ERROR below */
		uint64_t i_rsvd_1 : 3;
		uint64_t i_busy : 1;	/* see IBLS_BUSY below */
		uint64_t i_rsvd : 43;
	} ii_ibls0_fld_s;
} ii_ibls0_u_t;
2569
2570
2571/************************************************************************
2572 * *
2573 * This register should be loaded before a transfer is started. The *
2574 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2575 * address as described in Section 1.3, Figure2 and Figure3. Since *
2576 * the bottom 7 bits of the address are always taken to be zero, BTE *
2577 * transfers are always cacheline-aligned. *
2578 * *
2579 ************************************************************************/
2580
/*
 * IBSA0: BTE 0 source address.  Low 7 bits are forced to zero, so BTE
 * transfers are cacheline-aligned (see description above).
 */
typedef union ii_ibsa0_u {
	uint64_t ii_ibsa0_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_rsvd_1 : 7;	/* cacheline offset, always zero */
		uint64_t i_addr : 42;
		uint64_t i_rsvd : 15;
	} ii_ibsa0_fld_s;
} ii_ibsa0_u_t;
2589
2590
2591/************************************************************************
2592 * *
2593 * This register should be loaded before a transfer is started. The *
2594 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2595 * address as described in Section 1.3, Figure2 and Figure3. Since *
2596 * the bottom 7 bits of the address are always taken to be zero, BTE *
2597 * transfers are always cacheline-aligned. *
2598 * *
2599 ************************************************************************/
2600
/*
 * IBDA0: BTE 0 destination address.  Same layout and alignment rules as
 * IBSA0 (see description above).
 */
typedef union ii_ibda0_u {
	uint64_t ii_ibda0_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_rsvd_1 : 7;	/* cacheline offset, always zero */
		uint64_t i_addr : 42;
		uint64_t i_rsvd : 15;
	} ii_ibda0_fld_s;
} ii_ibda0_u_t;
2609
2610
2611/************************************************************************
2612 * *
2613 * Writing to this register sets up the attributes of the transfer *
2614 * and initiates the transfer operation. Reading this register has *
2615 * the side effect of terminating any transfer in progress. Note: *
2616 * stopping a transfer midstream could have an adverse impact on the *
2617 * other BTE. If a BTE stream has to be stopped (due to error *
2618 * handling for example), both BTE streams should be stopped and *
2619 * their transfers discarded. *
2620 * *
2621 ************************************************************************/
2622
/*
 * IBCT0: BTE 0 control/terminate.  Writing starts a transfer; reading
 * terminates any transfer in progress (see description above).
 * Bit positions match the IBCT_* masks defined below.
 */
typedef union ii_ibct0_u {
	uint64_t ii_ibct0_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_zerofill : 1;	/* IBCT_ZFIL_MODE */
		uint64_t i_rsvd_2 : 3;
		uint64_t i_notify : 1;		/* IBCT_NOTIFY */
		uint64_t i_rsvd_1 : 3;
		uint64_t i_poison : 1;		/* IBCT_POISON */
		uint64_t i_rsvd : 55;
	} ii_ibct0_fld_s;
} ii_ibct0_u_t;
2634
2635
2636/************************************************************************
2637 * *
2638 * This register contains the address to which the WINV is sent. *
2639 * This address has to be cache line aligned. *
2640 * *
2641 ************************************************************************/
2642
/*
 * IBNA0: BTE 0 notification address — where the WINV is sent; must be
 * cacheline-aligned (see description above).
 */
typedef union ii_ibna0_u {
	uint64_t ii_ibna0_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_rsvd_1 : 7;	/* cacheline offset, always zero */
		uint64_t i_addr : 42;
		uint64_t i_rsvd : 15;
	} ii_ibna0_fld_s;
} ii_ibna0_u_t;
2651
2652
2653/************************************************************************
2654 * *
2655 * This register contains the programmable level as well as the node *
2656 * ID and PI unit of the processor to which the interrupt will be *
2657 * sent. *
2658 * *
2659 ************************************************************************/
2660
/*
 * IBIA0: BTE 0 interrupt address — programmable level plus node ID of the
 * target processor (see description above).
 */
typedef union ii_ibia0_u {
	uint64_t ii_ibia0_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_rsvd_2 : 1;
		uint64_t i_node_id : 11;
		uint64_t i_rsvd_1 : 4;
		uint64_t i_level : 7;
		uint64_t i_rsvd : 41;
	} ii_ibia0_fld_s;
} ii_ibia0_u_t;
2671
2672
2673/************************************************************************
2674 * *
2675 * Description: This register is used to set up the length for a *
2676 * transfer and then to monitor the progress of that transfer. This *
2677 * register needs to be initialized before a transfer is started. A *
2678 * legitimate write to this register will set the Busy bit, clear the *
2679 * Error bit, and initialize the length to the value desired. *
2680 * While the transfer is in progress, hardware will decrement the *
2681 * length field with each successful block that is copied. Once the *
2682 * transfer completes, hardware will clear the Busy bit. The length *
2683 * field will also contain the number of cache lines left to be *
2684 * transferred. *
2685 * *
2686 ************************************************************************/
2687
/*
 * IBLS1: BTE 1 length/status.  Identical layout and semantics to IBLS0
 * (see description above).
 */
typedef union ii_ibls1_u {
	uint64_t ii_ibls1_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_length : 16;	/* cache lines remaining; see IBLS_LENGTH_MASK */
		uint64_t i_error : 1;	/* see IBLS_ERROR below */
		uint64_t i_rsvd_1 : 3;
		uint64_t i_busy : 1;	/* see IBLS_BUSY below */
		uint64_t i_rsvd : 43;
	} ii_ibls1_fld_s;
} ii_ibls1_u_t;
2698
2699
2700/************************************************************************
2701 * *
2702 * This register should be loaded before a transfer is started. The *
2703 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2704 * address as described in Section 1.3, Figure2 and Figure3. Since *
2705 * the bottom 7 bits of the address are always taken to be zero, BTE *
2706 * transfers are always cacheline-aligned. *
2707 * *
2708 ************************************************************************/
2709
/*
 * IBSA1: BTE 1 source address (cacheline-aligned, see description above).
 * NOTE(review): i_addr is 33 bits here vs. 42 bits in IBSA0 — confirm
 * against the hardware spec whether this asymmetry is intentional.
 */
typedef union ii_ibsa1_u {
	uint64_t ii_ibsa1_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_rsvd_1 : 7;	/* cacheline offset, always zero */
		uint64_t i_addr : 33;
		uint64_t i_rsvd : 24;
	} ii_ibsa1_fld_s;
} ii_ibsa1_u_t;
2718
2719
2720/************************************************************************
2721 * *
2722 * This register should be loaded before a transfer is started. The *
2723 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2724 * address as described in Section 1.3, Figure2 and Figure3. Since *
2725 * the bottom 7 bits of the address are always taken to be zero, BTE *
2726 * transfers are always cacheline-aligned. *
2727 * *
2728 ************************************************************************/
2729
/*
 * IBDA1: BTE 1 destination address (cacheline-aligned, see description
 * above).  NOTE(review): i_addr is 33 bits vs. 42 in IBDA0 — verify.
 */
typedef union ii_ibda1_u {
	uint64_t ii_ibda1_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_rsvd_1 : 7;	/* cacheline offset, always zero */
		uint64_t i_addr : 33;
		uint64_t i_rsvd : 24;
	} ii_ibda1_fld_s;
} ii_ibda1_u_t;
2738
2739
2740/************************************************************************
2741 * *
2742 * Writing to this register sets up the attributes of the transfer *
2743 * and initiates the transfer operation. Reading this register has *
2744 * the side effect of terminating any transfer in progress. Note: *
2745 * stopping a transfer midstream could have an adverse impact on the *
2746 * other BTE. If a BTE stream has to be stopped (due to error *
2747 * handling for example), both BTE streams should be stopped and *
2748 * their transfers discarded. *
2749 * *
2750 ************************************************************************/
2751
/*
 * IBCT1: BTE 1 control/terminate.  Identical layout and semantics to
 * IBCT0 (see description above); bit positions match the IBCT_* masks.
 */
typedef union ii_ibct1_u {
	uint64_t ii_ibct1_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_zerofill : 1;	/* IBCT_ZFIL_MODE */
		uint64_t i_rsvd_2 : 3;
		uint64_t i_notify : 1;		/* IBCT_NOTIFY */
		uint64_t i_rsvd_1 : 3;
		uint64_t i_poison : 1;		/* IBCT_POISON */
		uint64_t i_rsvd : 55;
	} ii_ibct1_fld_s;
} ii_ibct1_u_t;
2763
2764
2765/************************************************************************
2766 * *
2767 * This register contains the address to which the WINV is sent. *
2768 * This address has to be cache line aligned. *
2769 * *
2770 ************************************************************************/
2771
/*
 * IBNA1: BTE 1 notification (WINV) address; must be cacheline-aligned
 * (see description above).  NOTE(review): i_addr is 33 bits vs. 42 in
 * IBNA0 — verify against the hardware spec.
 */
typedef union ii_ibna1_u {
	uint64_t ii_ibna1_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_rsvd_1 : 7;	/* cacheline offset, always zero */
		uint64_t i_addr : 33;
		uint64_t i_rsvd : 24;
	} ii_ibna1_fld_s;
} ii_ibna1_u_t;
2780
2781
2782/************************************************************************
2783 * *
2784 * This register contains the programmable level as well as the node *
2785 * ID and PI unit of the processor to which the interrupt will be *
2786 * sent. *
2787 * *
2788 ************************************************************************/
2789
/*
 * IBIA1: BTE 1 interrupt address — level plus node ID/PI unit of the
 * target processor (see description above).
 * NOTE(review): layout differs from IBIA0 (explicit i_pi_id bit and an
 * 8-bit node ID instead of 11) — confirm against the hardware spec.
 */
typedef union ii_ibia1_u {
	uint64_t ii_ibia1_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_pi_id : 1;
		uint64_t i_node_id : 8;
		uint64_t i_rsvd_1 : 7;
		uint64_t i_level : 7;
		uint64_t i_rsvd : 41;
	} ii_ibia1_fld_s;
} ii_ibia1_u_t;
2800
2801
2802/************************************************************************
2803 * *
2804 * This register defines the resources that feed information into *
2805 * the two performance counters located in the IO Performance *
2806 * Profiling Register. There are 17 different quantities that can be *
2807 * measured. Given these 17 different options, the two performance *
2808 * counters have 15 of them in common; menu selections 0 through 0xE *
2809 * are identical for each performance counter. As for the other two *
2810 * options, one is available from one performance counter and the *
2811 * other is available from the other performance counter. Hence, the *
2812 * II supports all 17*16=272 possible combinations of quantities to *
2813 * measure. *
2814 * *
2815 ************************************************************************/
2816
/*
 * IPCR: IO performance-counter control — selects which of the measurable
 * quantities feed the two counters in the IPPR register (see description
 * above).
 */
typedef union ii_ipcr_u {
	uint64_t ii_ipcr_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_ippr0_c : 4;	/* menu selection for counter 0 */
		uint64_t i_ippr1_c : 4;	/* menu selection for counter 1 */
		uint64_t i_icct : 8;
		uint64_t i_rsvd : 48;
	} ii_ipcr_fld_s;
} ii_ipcr_u_t;
2826
2827
2828/************************************************************************
2829 * *
2830 * *
2831 * *
2832 ************************************************************************/
2833
/*
 * IPPR: IO Performance Profiling Register — the two 32-bit performance
 * counters whose inputs are selected by IPCR (see IPCR description above).
 */
typedef union ii_ippr_u {
	uint64_t ii_ippr_regval;	/* raw 64-bit register value */
	struct {
		uint64_t i_ippr0 : 32;	/* performance counter 0 */
		uint64_t i_ippr1 : 32;	/* performance counter 1 */
	} ii_ippr_fld_s;
} ii_ippr_u_t;
2841
2842
2843
2844/**************************************************************************
2845 * *
2846 * The following defines which were not formed into structures are *
 * probably identical to another register, and the name of the *
2848 * register is provided against each of these registers. This *
2849 * information needs to be checked carefully *
2850 * *
2851 * IIO_ICRB1_A IIO_ICRB0_A *
2852 * IIO_ICRB1_B IIO_ICRB0_B *
2853 * IIO_ICRB1_C IIO_ICRB0_C *
2854 * IIO_ICRB1_D IIO_ICRB0_D *
2855 * IIO_ICRB1_E IIO_ICRB0_E *
2856 * IIO_ICRB2_A IIO_ICRB0_A *
2857 * IIO_ICRB2_B IIO_ICRB0_B *
2858 * IIO_ICRB2_C IIO_ICRB0_C *
2859 * IIO_ICRB2_D IIO_ICRB0_D *
2860 * IIO_ICRB2_E IIO_ICRB0_E *
2861 * IIO_ICRB3_A IIO_ICRB0_A *
2862 * IIO_ICRB3_B IIO_ICRB0_B *
2863 * IIO_ICRB3_C IIO_ICRB0_C *
2864 * IIO_ICRB3_D IIO_ICRB0_D *
2865 * IIO_ICRB3_E IIO_ICRB0_E *
2866 * IIO_ICRB4_A IIO_ICRB0_A *
2867 * IIO_ICRB4_B IIO_ICRB0_B *
2868 * IIO_ICRB4_C IIO_ICRB0_C *
2869 * IIO_ICRB4_D IIO_ICRB0_D *
2870 * IIO_ICRB4_E IIO_ICRB0_E *
2871 * IIO_ICRB5_A IIO_ICRB0_A *
2872 * IIO_ICRB5_B IIO_ICRB0_B *
2873 * IIO_ICRB5_C IIO_ICRB0_C *
2874 * IIO_ICRB5_D IIO_ICRB0_D *
2875 * IIO_ICRB5_E IIO_ICRB0_E *
2876 * IIO_ICRB6_A IIO_ICRB0_A *
2877 * IIO_ICRB6_B IIO_ICRB0_B *
2878 * IIO_ICRB6_C IIO_ICRB0_C *
2879 * IIO_ICRB6_D IIO_ICRB0_D *
2880 * IIO_ICRB6_E IIO_ICRB0_E *
2881 * IIO_ICRB7_A IIO_ICRB0_A *
2882 * IIO_ICRB7_B IIO_ICRB0_B *
2883 * IIO_ICRB7_C IIO_ICRB0_C *
2884 * IIO_ICRB7_D IIO_ICRB0_D *
2885 * IIO_ICRB7_E IIO_ICRB0_E *
2886 * IIO_ICRB8_A IIO_ICRB0_A *
2887 * IIO_ICRB8_B IIO_ICRB0_B *
2888 * IIO_ICRB8_C IIO_ICRB0_C *
2889 * IIO_ICRB8_D IIO_ICRB0_D *
2890 * IIO_ICRB8_E IIO_ICRB0_E *
2891 * IIO_ICRB9_A IIO_ICRB0_A *
2892 * IIO_ICRB9_B IIO_ICRB0_B *
2893 * IIO_ICRB9_C IIO_ICRB0_C *
2894 * IIO_ICRB9_D IIO_ICRB0_D *
2895 * IIO_ICRB9_E IIO_ICRB0_E *
2896 * IIO_ICRBA_A IIO_ICRB0_A *
2897 * IIO_ICRBA_B IIO_ICRB0_B *
2898 * IIO_ICRBA_C IIO_ICRB0_C *
2899 * IIO_ICRBA_D IIO_ICRB0_D *
2900 * IIO_ICRBA_E IIO_ICRB0_E *
2901 * IIO_ICRBB_A IIO_ICRB0_A *
2902 * IIO_ICRBB_B IIO_ICRB0_B *
2903 * IIO_ICRBB_C IIO_ICRB0_C *
2904 * IIO_ICRBB_D IIO_ICRB0_D *
2905 * IIO_ICRBB_E IIO_ICRB0_E *
2906 * IIO_ICRBC_A IIO_ICRB0_A *
2907 * IIO_ICRBC_B IIO_ICRB0_B *
2908 * IIO_ICRBC_C IIO_ICRB0_C *
2909 * IIO_ICRBC_D IIO_ICRB0_D *
2910 * IIO_ICRBC_E IIO_ICRB0_E *
2911 * IIO_ICRBD_A IIO_ICRB0_A *
2912 * IIO_ICRBD_B IIO_ICRB0_B *
2913 * IIO_ICRBD_C IIO_ICRB0_C *
2914 * IIO_ICRBD_D IIO_ICRB0_D *
2915 * IIO_ICRBD_E IIO_ICRB0_E *
2916 * IIO_ICRBE_A IIO_ICRB0_A *
2917 * IIO_ICRBE_B IIO_ICRB0_B *
2918 * IIO_ICRBE_C IIO_ICRB0_C *
2919 * IIO_ICRBE_D IIO_ICRB0_D *
2920 * IIO_ICRBE_E IIO_ICRB0_E *
2921 * *
2922 **************************************************************************/
2923
2924
/*
 * Slightly friendlier names for some common registers.
 */
#define IIO_WIDGET		IIO_WID		/* Widget identification */
#define IIO_WIDGET_STAT		IIO_WSTAT	/* Widget status register */
#define IIO_WIDGET_CTRL		IIO_WCR		/* Widget control register */
#define IIO_PROTECT		IIO_ILAPR	/* IO interface protection */
#define IIO_PROTECT_OVRRD	IIO_ILAPO	/* IO protect override */
#define IIO_OUTWIDGET_ACCESS	IIO_IOWA	/* Outbound widget access */
#define IIO_INWIDGET_ACCESS	IIO_IIWA	/* Inbound widget access */
#define IIO_INDEV_ERR_MASK	IIO_IIDEM	/* Inbound device error mask */
#define IIO_LLP_CSR		IIO_ILCSR	/* LLP control and status */
#define IIO_LLP_LOG		IIO_ILLR	/* LLP log */
#define IIO_XTALKCC_TOUT	IIO_IXCC	/* Xtalk credit count timeout */
#define IIO_XTALKTT_TOUT	IIO_IXTT	/* Xtalk tail timeout */
#define IIO_IO_ERR_CLR		IIO_IECLR	/* IO error clear */
/* Paired per-engine registers: _0/_1 suffix selects GFX/BTE instance. */
#define IIO_IGFX_0		IIO_IGFX0
#define IIO_IGFX_1		IIO_IGFX1
#define IIO_IBCT_0		IIO_IBCT0
#define IIO_IBCT_1		IIO_IBCT1
#define IIO_IBLS_0		IIO_IBLS0
#define IIO_IBLS_1		IIO_IBLS1
#define IIO_IBSA_0		IIO_IBSA0
#define IIO_IBSA_1		IIO_IBSA1
#define IIO_IBDA_0		IIO_IBDA0
#define IIO_IBDA_1		IIO_IBDA1
#define IIO_IBNA_0		IIO_IBNA0
#define IIO_IBNA_1		IIO_IBNA1
#define IIO_IBIA_0		IIO_IBIA0
#define IIO_IBIA_1		IIO_IBIA1
#define IIO_IOPRB_0		IIO_IPRB0
2956
/* PRTE (PIO Read Table Entry) register addresses; entries are 8 bytes apart. */
#define IIO_PRTE_A(_x)		(IIO_IPRTE0_A + (8 * (_x)))
#define IIO_PRTE_B(_x)		(IIO_IPRTE0_B + (8 * (_x)))
#define IIO_NUM_PRTES		8	/* Total number of PRB table entries */
#define IIO_WIDPRTE_A(x)	IIO_PRTE_A(((x) - 8))	/* widget ID to its PRTE num */
#define IIO_WIDPRTE_B(x)	IIO_PRTE_B(((x) - 8))	/* widget ID to its PRTE num */

#define IIO_NUM_IPRBS		(9)
2964
/* LLP (link-level protocol) status bits within the ILCSR register. */
#define IIO_LLP_CSR_IS_UP		0x00002000
#define IIO_LLP_CSR_LLP_STAT_MASK	0x00003000
#define IIO_LLP_CSR_LLP_STAT_SHFT	12

#define IIO_LLP_CB_MAX	0xffff	/* in ILLR CB_CNT, Max Check Bit errors */
#define IIO_LLP_SN_MAX	0xffff	/* in ILLR SN_CNT, Max Sequence Number errors */

/* key to IIO_PROTECT_OVRRD */
#define IIO_PROTECT_OVRRD_KEY	0x53474972756c6573ull	/* ASCII "SGIrules" */
2974
/* BTE register names (aliases onto the IBLS/IBSA/IBDA/IBCT/IBNA/IBIA regs) */
#define IIO_BTE_STAT_0		IIO_IBLS_0	/* Also BTE length/status 0 */
#define IIO_BTE_SRC_0		IIO_IBSA_0	/* Also BTE source address 0 */
#define IIO_BTE_DEST_0		IIO_IBDA_0	/* Also BTE dest. address 0 */
#define IIO_BTE_CTRL_0		IIO_IBCT_0	/* Also BTE control/terminate 0 */
#define IIO_BTE_NOTIFY_0	IIO_IBNA_0	/* Also BTE notification 0 */
#define IIO_BTE_INT_0		IIO_IBIA_0	/* Also BTE interrupt 0 */
#define IIO_BTE_OFF_0		0		/* Base offset from BTE 0 regs. */
#define IIO_BTE_OFF_1	(IIO_IBLS_1 - IIO_IBLS_0)	/* Offset from base to BTE 1 */

/* BTE register offsets from base (add to IIO_BTE_STAT_0 + IIO_BTE_OFF_n) */
#define BTEOFF_STAT	0
#define BTEOFF_SRC	(IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
#define BTEOFF_DEST	(IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
#define BTEOFF_CTRL	(IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
#define BTEOFF_NOTIFY	(IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
#define BTEOFF_INT	(IIO_BTE_INT_0 - IIO_BTE_STAT_0)


/* names used in shub diags */
#define IIO_BASE_BTE0	IIO_IBLS_0
#define IIO_BASE_BTE1	IIO_IBLS_1
2997
/*
 * Macro which takes the widget number, and returns the
 * IO PRB address of that widget.
 * value _x is expected to be a widget number in the range
 * 0, 8 - 0xF.  Widgets >= HUB_WIDGET_ID_MIN are compacted down so the
 * PRB table has no hole; each entry is 8 bytes (the << 3).
 */
#define IIO_IOPRB(_x)	(IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
			(_x) : \
			(_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
3007
3008
/* GFX Flow Control Node/Widget Register field sizes, masks, and shifts */
#define IIO_IGFX_W_NUM_BITS	4	/* size of widget num field */
#define IIO_IGFX_W_NUM_MASK	((1<<IIO_IGFX_W_NUM_BITS)-1)
#define IIO_IGFX_W_NUM_SHIFT	0
#define IIO_IGFX_PI_NUM_BITS	1	/* size of PI num field */
#define IIO_IGFX_PI_NUM_MASK	((1<<IIO_IGFX_PI_NUM_BITS)-1)
#define IIO_IGFX_PI_NUM_SHIFT	4
#define IIO_IGFX_N_NUM_BITS	8	/* size of node num field */
#define IIO_IGFX_N_NUM_MASK	((1<<IIO_IGFX_N_NUM_BITS)-1)
#define IIO_IGFX_N_NUM_SHIFT	5
#define IIO_IGFX_P_NUM_BITS	1	/* size of processor num field */
#define IIO_IGFX_P_NUM_MASK	((1<<IIO_IGFX_P_NUM_BITS)-1)
#define IIO_IGFX_P_NUM_SHIFT	16
/* Compose a full IGFX register value from its four fields. */
#define IIO_IGFX_INIT(widget, pi, node, cpu)	(\
	(((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) |	\
	(((pi) & IIO_IGFX_PI_NUM_MASK)	<< IIO_IGFX_PI_NUM_SHIFT)|	\
	(((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) |	\
	(((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
3027
3028
/* Scratch registers (all bits available) */
#define IIO_SCRATCH_REG0	IIO_ISCR0
#define IIO_SCRATCH_REG1	IIO_ISCR1
#define IIO_SCRATCH_MASK	0xffffffffffffffffUL

/* Individual bit names used within scratch register 0 (bits 0..0xA). */
#define IIO_SCRATCH_BIT0_0	0x0000000000000001UL
#define IIO_SCRATCH_BIT0_1	0x0000000000000002UL
#define IIO_SCRATCH_BIT0_2	0x0000000000000004UL
#define IIO_SCRATCH_BIT0_3	0x0000000000000008UL
#define IIO_SCRATCH_BIT0_4	0x0000000000000010UL
#define IIO_SCRATCH_BIT0_5	0x0000000000000020UL
#define IIO_SCRATCH_BIT0_6	0x0000000000000040UL
#define IIO_SCRATCH_BIT0_7	0x0000000000000080UL
#define IIO_SCRATCH_BIT0_8	0x0000000000000100UL
#define IIO_SCRATCH_BIT0_9	0x0000000000000200UL
#define IIO_SCRATCH_BIT0_A	0x0000000000000400UL

/* Bit names used within scratch register 1. */
#define IIO_SCRATCH_BIT1_0	0x0000000000000001UL
#define IIO_SCRATCH_BIT1_1	0x0000000000000002UL
/* IO Translation Table Entries */
#define IIO_NUM_ITTES	7		/* ITTEs numbered 0..6 */
					/* Hw manuals number them 1..7! */
/*
 * IIO_IMEM Register fields.
 */
#define IIO_IMEM_W0ESD	0x1UL		/* Widget 0 shut down due to error */
#define IIO_IMEM_B0ESD	(1UL << 4)	/* BTE 0 shut down due to error */
#define IIO_IMEM_B1ESD	(1UL << 8)	/* BTE 1 Shut down due to error */
3057
/*
 * As a permanent workaround for a bug in the PI side of the shub, we've
 * redefined big window 7 as small window 0.
 * XXX does this still apply for SN1??
 */
#define HUB_NUM_BIG_WINDOW	(IIO_NUM_ITTES - 1)

/*
 * Use the top big window as a surrogate for the first small window
 */
#define SWIN0_BIGWIN		HUB_NUM_BIG_WINDOW

#define ILCSR_WARM_RESET	0x100	/* warm-reset bit in the ILCSR register */
3071
/*
 * CRB manipulation macros
 * The CRB macros are slightly complicated, since there are up to
 * five registers (A..E, see the ii_icrb0_*_u unions above) associated
 * with each CRB entry.
 */
#define IIO_NUM_CRBS		15	/* Number of CRBs */
#define IIO_NUM_PC_CRBS		4	/* Number of partial cache CRBs */
#define IIO_ICRB_OFFSET		8	/* bytes between the A..E registers */
#define IIO_ICRB_0		IIO_ICRB0_A
#define IIO_ICRB_ADDR_SHFT	2	/* Shift to get proper address */
/* XXX - This is now tuneable:
	#define IIO_FIRST_PC_ENTRY 12
 */

/* Address of register A..E for CRB entry _x.  Entries are spaced
 * 6 * IIO_ICRB_OFFSET apart even though only five registers exist
 * (NOTE(review): the 6th slot appears to be padding -- verify vs. spec). */
#define IIO_ICRB_A(_x)	((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x))))
#define IIO_ICRB_B(_x)	((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET))
#define IIO_ICRB_C(_x)	((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET))
#define IIO_ICRB_D(_x)	((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET))
#define IIO_ICRB_E(_x)	((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET))

/* Extract the widget device number from a transaction number. */
#define TNUM_TO_WIDGET_DEV(_tnum)	(_tnum & 0x7)
3093
/*
 * values for the CRB "ecode" field (ib_errcode in ii_icrb0_b_u above)
 */
#define IIO_ICRB_ECODE_DERR	0	/* Directory error due to IIO access */
#define IIO_ICRB_ECODE_PERR	1	/* Poison error on IO access */
#define IIO_ICRB_ECODE_WERR	2	/* Write error by IIO access
					 * e.g. WINV to a Read only line. */
#define IIO_ICRB_ECODE_AERR	3	/* Access error caused by IIO access */
#define IIO_ICRB_ECODE_PWERR	4	/* Error on partial write */
#define IIO_ICRB_ECODE_PRERR	5	/* Error on partial read */
#define IIO_ICRB_ECODE_TOUT	6	/* CRB timeout before deallocating */
#define IIO_ICRB_ECODE_XTERR	7	/* Incoming xtalk pkt had error bit */
3106
/*
 * Values for field imsgtype (ib_imsgtype in ii_icrb0_b_u above)
 */
#define IIO_ICRB_IMSGT_XTALK	0	/* Incoming message from Xtalk */
#define IIO_ICRB_IMSGT_BTE	1	/* Incoming message from BTE */
#define IIO_ICRB_IMSGT_SN1NET	2	/* Incoming message from SN1 net */
#define IIO_ICRB_IMSGT_CRB	3	/* Incoming message from CRB ??? */

/*
 * values for field initiator (ib_init in ii_icrb0_b_u above).
 */
#define IIO_ICRB_INIT_XTALK	0	/* Message originated in xtalk */
#define IIO_ICRB_INIT_BTE0	0x1	/* Message originated in BTE 0 */
#define IIO_ICRB_INIT_SN1NET	0x2	/* Message originated in SN1net */
#define IIO_ICRB_INIT_CRB	0x3	/* Message originated in CRB ? */
#define IIO_ICRB_INIT_BTE1	0x5	/* Message originated in BTE 1 */
3123
/*
 * Number of credits Hub widget has while sending req/response to
 * xbow.
 * Value of 3 is required by Xbow 1.1
 * We may be able to increase this to 4 with Xbow 1.2.
 */
#define HUBII_XBOW_CREDIT	3
#define HUBII_XBOW_REV2_CREDIT	4

/*
 * Number of credits that xtalk devices should use when communicating
 * with a SHub (depth of SHub's queue).
 */
#define HUB_CREDIT	4
3138
/*
 * Some IIO_PRB fields (error-status bits in the per-widget PRB registers)
 */
#define IIO_PRB_MULTI_ERR	(1LL << 63)
#define IIO_PRB_SPUR_RD		(1LL << 51)
#define IIO_PRB_SPUR_WR		(1LL << 50)
#define IIO_PRB_RD_TO		(1LL << 49)
#define IIO_PRB_ERROR		(1LL << 48)
3147
/*************************************************************************

 Some of the IIO field masks and shifts are defined here.
 This is in order to maintain compatibility in SN0 and SN1 code

**************************************************************************/

/*
 * ICMR register fields
 * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
 * present in SHub)
 */

#define IIO_ICMR_CRB_VLD_SHFT	20
#define IIO_ICMR_CRB_VLD_MASK	(0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)	/* one valid bit per CRB (15) */

#define IIO_ICMR_FC_CNT_SHFT	16
#define IIO_ICMR_FC_CNT_MASK	(0xf << IIO_ICMR_FC_CNT_SHFT)

#define IIO_ICMR_C_CNT_SHFT	4
#define IIO_ICMR_C_CNT_MASK	(0xf << IIO_ICMR_C_CNT_SHFT)

#define IIO_ICMR_PRECISE	(1UL << 52)
#define IIO_ICMR_CLR_RPPD	(1UL << 13)
#define IIO_ICMR_CLR_RQPD	(1UL << 12)
3173
/*
 * IIO PIO Deallocation register field masks : (IIO_IPDR)
 * XXX present but not needed in bedrock? See the manual.
 */
#define IIO_IPDR_PND	(1 << 4)

/*
 * IIO CRB deallocation register field masks: (IIO_ICDR)
 */
#define IIO_ICDR_PND	(1 << 4)
3184
/*
 * IO BTE Length/Status (IIO_IBLS) register bit field definitions
 * (field positions match ii_ibls0_u/ii_ibls1_u above)
 */
#define IBLS_BUSY		(0x1UL << 20)
#define IBLS_ERROR_SHFT		16
#define IBLS_ERROR		(0x1UL << IBLS_ERROR_SHFT)
#define IBLS_LENGTH_MASK	0xffff

/*
 * IO BTE Control/Terminate register (IBCT) register bit field definitions
 * (field positions match ii_ibct0_u/ii_ibct1_u above)
 */
#define IBCT_POISON		(0x1UL << 8)
#define IBCT_NOTIFY		(0x1UL << 4)
#define IBCT_ZFIL_MODE		(0x1UL << 0)
3199
/*
 * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2)
 */
#define IIEPH1_VALID		(1UL << 44)
#define IIEPH1_OVERRUN		(1UL << 40)
#define IIEPH1_ERR_TYPE_SHFT	32
#define IIEPH1_ERR_TYPE_MASK	0xf
/* NOTE(review): the SOURCE/SUPPL/ADDRESS "_MASK" values below (11, 11, 38)
 * look like field widths rather than bit masks -- confirm against the
 * SHub register spec before using them as masks. */
#define IIEPH1_SOURCE_SHFT	20
#define IIEPH1_SOURCE_MASK	11
#define IIEPH1_SUPPL_SHFT	8
#define IIEPH1_SUPPL_MASK	11
#define IIEPH1_CMD_SHFT		0
#define IIEPH1_CMD_MASK		7

#define IIEPH2_TAIL		(1UL << 40)
#define IIEPH2_ADDRESS_SHFT	0
#define IIEPH2_ADDRESS_MASK	38

/* values for the IIEPH1 error-type field */
#define IIEPH1_ERR_SHORT_REQ	2
#define IIEPH1_ERR_SHORT_REPLY	3
#define IIEPH1_ERR_LONG_REQ	4
#define IIEPH1_ERR_LONG_REPLY	5
3222
/*
 * IO Error Clear register bit field definitions (write-1-to-clear bits)
 */
#define IECLR_PI1_FWD_INT	(1UL << 31)	/* clear PI1_FORWARD_INT in iidsr */
#define IECLR_PI0_FWD_INT	(1UL << 30)	/* clear PI0_FORWARD_INT in iidsr */
#define IECLR_SPUR_RD_HDR	(1UL << 29)	/* clear valid bit in ixss reg */
#define IECLR_BTE1		(1UL << 18)	/* clear bte error 1 */
#define IECLR_BTE0		(1UL << 17)	/* clear bte error 0 */
#define IECLR_CRAZY		(1UL << 16)	/* clear crazy bit in wstat reg */
#define IECLR_PRB_F		(1UL << 15)	/* clear err bit in PRB_F reg */
#define IECLR_PRB_E		(1UL << 14)	/* clear err bit in PRB_E reg */
#define IECLR_PRB_D		(1UL << 13)	/* clear err bit in PRB_D reg */
#define IECLR_PRB_C		(1UL << 12)	/* clear err bit in PRB_C reg */
#define IECLR_PRB_B		(1UL << 11)	/* clear err bit in PRB_B reg */
#define IECLR_PRB_A		(1UL << 10)	/* clear err bit in PRB_A reg */
#define IECLR_PRB_9		(1UL << 9)	/* clear err bit in PRB_9 reg */
#define IECLR_PRB_8		(1UL << 8)	/* clear err bit in PRB_8 reg */
#define IECLR_PRB_0		(1UL << 0)	/* clear err bit in PRB_0 reg */
3241
/*
 * IIO CRB control register Fields: IIO_ICCR
 * NOTE(review): IIO_ICCR_CMD_SHFT (7) and IIO_ICCR_CMD_MASK (0xFF) do not
 * obviously line up with the command values (0x100..0x800) -- confirm
 * against the SHub spec before shifting/masking commands with them.
 */
#define IIO_ICCR_PENDING	(0x10000)
#define IIO_ICCR_CMD_MASK	(0xFF)
#define IIO_ICCR_CMD_SHFT	(7)
#define IIO_ICCR_CMD_NOP	(0x0)	/* No Op */
#define IIO_ICCR_CMD_WAKE	(0x100)	/* Reactivate CRB entry and process */
#define IIO_ICCR_CMD_TIMEOUT	(0x200)	/* Make CRB timeout & mark invalid */
#define IIO_ICCR_CMD_EJECT	(0x400)	/* Contents of entry written to memory
					 * via a WB
					 */
#define IIO_ICCR_CMD_FLUSH	(0x800)
3255
3256/*
3257 *
3258 * CRB Register description.
3259 *
3260 * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
3261 * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
3262 * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
3263 * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
3264 * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
3265 *
3266 * Many of the fields in CRB are status bits used by hardware
3267 * for implementation of the protocol. It's very dangerous to
3268 * mess around with the CRB registers.
3269 *
3270 * It's OK to read the CRB registers and try to make sense out of the
3271 * fields in CRB.
3272 *
3273 * Updating CRB requires all activities in Hub IIO to be quiesced.
3274 * otherwise, a write to CRB could corrupt other CRB entries.
3275 * CRBs are here only as a back door peek to shub IIO's status.
3276 * Quiescing implies no dmas no PIOs
3277 * either directly from the cpu or from sn0net.
3278 * this is not something that can be done easily. So, AVOID updating
3279 * CRBs.
3280 */
3281
3282/*
3283 * Easy access macros for CRBs, all 5 registers (A-E)
3284 */
3285typedef ii_icrb0_a_u_t icrba_t;
3286#define a_sidn ii_icrb0_a_fld_s.ia_sidn
3287#define a_tnum ii_icrb0_a_fld_s.ia_tnum
3288#define a_addr ii_icrb0_a_fld_s.ia_addr
3289#define a_valid ii_icrb0_a_fld_s.ia_vld
3290#define a_iow ii_icrb0_a_fld_s.ia_iow
3291#define a_regvalue ii_icrb0_a_regval
3292
3293typedef ii_icrb0_b_u_t icrbb_t;
3294#define b_use_old ii_icrb0_b_fld_s.ib_use_old
3295#define b_imsgtype ii_icrb0_b_fld_s.ib_imsgtype
3296#define b_imsg ii_icrb0_b_fld_s.ib_imsg
3297#define b_initiator ii_icrb0_b_fld_s.ib_init
3298#define b_exc ii_icrb0_b_fld_s.ib_exc
3299#define b_ackcnt ii_icrb0_b_fld_s.ib_ack_cnt
3300#define b_resp ii_icrb0_b_fld_s.ib_resp
3301#define b_ack ii_icrb0_b_fld_s.ib_ack
3302#define b_hold ii_icrb0_b_fld_s.ib_hold
3303#define b_wb ii_icrb0_b_fld_s.ib_wb
3304#define b_intvn ii_icrb0_b_fld_s.ib_intvn
3305#define b_stall_ib ii_icrb0_b_fld_s.ib_stall_ib
3306#define b_stall_int ii_icrb0_b_fld_s.ib_stall__intr
3307#define b_stall_bte_0 ii_icrb0_b_fld_s.ib_stall__bte_0
3308#define b_stall_bte_1 ii_icrb0_b_fld_s.ib_stall__bte_1
3309#define b_error ii_icrb0_b_fld_s.ib_error
3310#define b_ecode ii_icrb0_b_fld_s.ib_errcode
3311#define b_lnetuce ii_icrb0_b_fld_s.ib_ln_uce
3312#define b_mark ii_icrb0_b_fld_s.ib_mark
3313#define b_xerr ii_icrb0_b_fld_s.ib_xt_err
3314#define b_regvalue ii_icrb0_b_regval
3315
3316typedef ii_icrb0_c_u_t icrbc_t;
3317#define c_suppl ii_icrb0_c_fld_s.ic_suppl
3318#define c_barrop ii_icrb0_c_fld_s.ic_bo
3319#define c_doresp ii_icrb0_c_fld_s.ic_resprqd
3320#define c_gbr ii_icrb0_c_fld_s.ic_gbr
3321#define c_btenum ii_icrb0_c_fld_s.ic_bte_num
3322#define c_cohtrans ii_icrb0_c_fld_s.ic_ct
3323#define c_xtsize ii_icrb0_c_fld_s.ic_size
3324#define c_source ii_icrb0_c_fld_s.ic_source
3325#define c_regvalue ii_icrb0_c_regval
3326
3327
3328typedef ii_icrb0_d_u_t icrbd_t;
3329#define d_sleep ii_icrb0_d_fld_s.id_sleep
3330#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt
3331#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc
3332#define d_bteop ii_icrb0_d_fld_s.id_bte_op
3333#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/
3334#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/
3335#define d_regvalue ii_icrb0_d_regval
3336
3337typedef ii_icrb0_e_u_t icrbe_t;
3338#define icrbe_ctxtvld ii_icrb0_e_fld_s.ie_cvld
3339#define icrbe_toutvld ii_icrb0_e_fld_s.ie_tvld
3340#define icrbe_context ii_icrb0_e_fld_s.ie_context
3341#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout
3342#define e_regvalue ii_icrb0_e_regval
3343
3344
3345/* Number of widgets supported by shub */
3346#define HUB_NUM_WIDGET 9
3347#define HUB_WIDGET_ID_MIN 0x8
3348#define HUB_WIDGET_ID_MAX 0xf
3349
3350#define HUB_WIDGET_PART_NUM 0xc120
3351#define MAX_HUBS_PER_XBOW 2
3352
3353/* A few more #defines for backwards compatibility */
3354#define iprb_t ii_iprb0_u_t
3355#define iprb_regval ii_iprb0_regval
3356#define iprb_mult_err ii_iprb0_fld_s.i_mult_err
3357#define iprb_spur_rd ii_iprb0_fld_s.i_spur_rd
3358#define iprb_spur_wr ii_iprb0_fld_s.i_spur_wr
3359#define iprb_rd_to ii_iprb0_fld_s.i_rd_to
3360#define iprb_ovflow ii_iprb0_fld_s.i_of_cnt
3361#define iprb_error ii_iprb0_fld_s.i_error
3362#define iprb_ff ii_iprb0_fld_s.i_f
3363#define iprb_mode ii_iprb0_fld_s.i_m
3364#define iprb_bnakctr ii_iprb0_fld_s.i_nb
3365#define iprb_anakctr ii_iprb0_fld_s.i_na
3366#define iprb_xtalkctr ii_iprb0_fld_s.i_c
3367
3368#define LNK_STAT_WORKING 0x2 /* LLP is working */
3369
3370#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
3371#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
3372#define IIO_WSTAT_TXRETRY_MASK (0x7F) /* should be 0xFF?? */
3373#define IIO_WSTAT_TXRETRY_SHFT (16)
3374#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
3375 IIO_WSTAT_TXRETRY_MASK)
3376
3377/* Number of II perf. counters we can multiplex at once */
3378
3379#define IO_PERF_SETS 32
3380
3381/* Bit for the widget in inbound access register */
3382#define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
3383/* Bit for the widget in outbound access register */
3384#define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
3385
3386/* NOTE: The following define assumes that we are going to get
3387 * widget numbers from 8 thru F and the device numbers within
3388 * widget from 0 thru 7.
3389 */
3390#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d))))
3391
3392/* IO Interrupt Destination Register */
3393#define IIO_IIDSR_SENT_SHIFT 28
3394#define IIO_IIDSR_SENT_MASK 0x30000000
3395#define IIO_IIDSR_ENB_SHIFT 24
3396#define IIO_IIDSR_ENB_MASK 0x01000000
3397#define IIO_IIDSR_NODE_SHIFT 9
3398#define IIO_IIDSR_NODE_MASK 0x000ff700
3399#define IIO_IIDSR_PI_ID_SHIFT 8
3400#define IIO_IIDSR_PI_ID_MASK 0x00000100
3401#define IIO_IIDSR_LVL_SHIFT 0
3402#define IIO_IIDSR_LVL_MASK 0x000000ff
3403
3404/* Xtalk timeout threshold register (IIO_IXTT) */
3405#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
3406#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
3407#define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */
3408#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
3409#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */
3410#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
3411
3412/*
3413 * The IO LLP control status register and widget control register
3414 */
3415
/*
 * hubii_wcr_t - Hub II widget control register (WCR).
 * Accessible either as the raw 64-bit value (wcr_reg_value) or via
 * the bit fields in wcr_fields_s.
 */
3416typedef union hubii_wcr_u {
3417 uint64_t wcr_reg_value;
3418 struct {
3419 uint64_t wcr_widget_id: 4, /* Widget ID */
3420 wcr_tag_mode: 1, /* Tag mode */
3421 wcr_rsvd1: 8, /* Reserved */
3422 wcr_xbar_crd: 3, /* LLP crossbar credit */
3423 wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
3424 wcr_dir_con: 1, /* widget direct connect */
3425 wcr_e_thresh: 5, /* elasticity threshold */
3426 wcr_rsvd: 41; /* unused */
3427 } wcr_fields_s;
3428} hubii_wcr_t;
3429
3430#define iwcr_dir_con wcr_fields_s.wcr_dir_con
3431
3432/* The structures below are defined to extract and modify the ii
3433performance registers */
3434
3435/* io_perf_sel allows the caller to specify what tests will be
3436 performed */
3437
/*
 * io_perf_sel_t - II performance-monitor select register.
 * Lets the caller specify which tests the performance hardware runs
 * (see comment above).  Raw access via perf_sel_reg.
 */
3438typedef union io_perf_sel {
3439 uint64_t perf_sel_reg;
3440 struct {
3441 uint64_t perf_ippr0 : 4, /* test select, counter 0 */
3442 perf_ippr1 : 4, /* test select, counter 1 */
3443 perf_icct : 8,
3444 perf_rsvd : 48; /* unused */
3445 } perf_sel_bits;
3446} io_perf_sel_t;
3447
3448/* io_perf_cnt is to extract the count from the shub registers. Due to
3449 hardware problems there is only one counter, not two. */
3450
/*
 * io_perf_cnt_t - II performance counter register.
 * Only the low 20 bits hold the count; due to hardware problems there
 * is a single counter rather than two, so the rest is reserved.
 */
3451typedef union io_perf_cnt {
3452 uint64_t perf_cnt;
3453 struct {
3454 uint64_t perf_cnt : 20, /* the one usable count field */
3455 perf_rsvd2 : 12,
3456 perf_rsvd1 : 32;
3457 } perf_cnt_bits;
3458
3459} io_perf_cnt_t;
3460
/*
 * iprte_a_t - bit-field layout of an IPRTE register.
 * NOTE(review): the expansion/semantics of "IPRTE" are not visible in
 * this header; field meanings below follow the field names -- confirm
 * against the SHub register specification.
 */
3461typedef union iprte_a {
3462 uint64_t entry; /* raw 64-bit value */
3463 struct {
3464 uint64_t i_rsvd_1 : 3; /* reserved */
3465 uint64_t i_addr : 38; /* address */
3466 uint64_t i_init : 3;
3467 uint64_t i_source : 8;
3468 uint64_t i_rsvd : 2; /* reserved */
3469 uint64_t i_widget : 4;
3470 uint64_t i_to_cnt : 5; /* timeout counter */
3471 uint64_t i_vld : 1; /* entry valid */
3472 } iprte_fields;
3473} iprte_a_t;
3474
3475#endif /* _ASM_IA64_SN_SHUBIO_H */
3476
diff --git a/include/asm-ia64/sn/simulator.h b/include/asm-ia64/sn/simulator.h
new file mode 100644
index 000000000000..78eb4f869c8b
--- /dev/null
+++ b/include/asm-ia64/sn/simulator.h
@@ -0,0 +1,27 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
6 */
7
8#ifndef _ASM_IA64_SN_SIMULATOR_H
9#define _ASM_IA64_SN_SIMULATOR_H
10
11#include <linux/config.h>
12
13#ifdef CONFIG_IA64_SGI_SN_SIM
14
15#define SNMAGIC 0xaeeeeeee8badbeefL
16#define IS_RUNNING_ON_SIMULATOR() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
17
18#define SIMULATOR_SLEEP() asm("nop.i 0x8beef")
19
20#else
21
22#define IS_RUNNING_ON_SIMULATOR() (0)
23#define SIMULATOR_SLEEP()
24
25#endif
26
27#endif /* _ASM_IA64_SN_SIMULATOR_H */
diff --git a/include/asm-ia64/sn/sn2/sn_hwperf.h b/include/asm-ia64/sn/sn2/sn_hwperf.h
new file mode 100644
index 000000000000..b0c4d6dd77ba
--- /dev/null
+++ b/include/asm-ia64/sn/sn2/sn_hwperf.h
@@ -0,0 +1,226 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
7 *
8 * Data types used by the SN_SAL_HWPERF_OP SAL call for monitoring
9 * SGI Altix node and router hardware
10 *
11 * Mark Goodwin <markgw@sgi.com> Mon Aug 30 12:23:46 EST 2004
12 */
13
14#ifndef SN_HWPERF_H
15#define SN_HWPERF_H
16
17/*
18 * object structure. SN_HWPERF_ENUM_OBJECTS and SN_HWPERF_GET_CPU_INFO
19 * return an array of these. Do not change this without also
20 * changing the corresponding SAL code.
21 */
22#define SN_HWPERF_MAXSTRING 128
23struct sn_hwperf_object_info {
24 u32 id; /* object id */
25 union {
26 struct {
27 u64 this_part:1; /* object belongs to the local partition */
28 u64 is_shared:1; /* object is shared between partitions */
29 } fields;
30 struct {
31 u64 flags; /* raw access to the flag bits above */
32 u64 reserved;
33 } b;
34 } f;
35 char name[SN_HWPERF_MAXSTRING]; /* e.g. contains "SHub", "Router", "TIO" */
36 char location[SN_HWPERF_MAXSTRING]; /* geographic location (geoid) */
37 u32 ports; /* number of NumaLink ports */
38};
39
40#define sn_hwp_this_part f.fields.this_part
41#define sn_hwp_is_shared f.fields.is_shared
42#define sn_hwp_flags f.b.flags
43
44/* macros for object classification */
45#define SN_HWPERF_IS_NODE(x) ((x) && strstr((x)->name, "SHub"))
46#define SN_HWPERF_IS_IONODE(x) ((x) && strstr((x)->name, "TIO"))
47#define SN_HWPERF_IS_ROUTER(x) ((x) && strstr((x)->name, "Router"))
48#define SN_HWPERF_IS_NL3ROUTER(x) ((x) && strstr((x)->name, "NL3Router"))
49#define SN_HWPERF_FOREIGN(x) ((x) && !(x)->sn_hwp_this_part && !(x)->sn_hwp_is_shared)
50#define SN_HWPERF_SAME_OBJTYPE(x,y) ((SN_HWPERF_IS_NODE(x) && SN_HWPERF_IS_NODE(y)) ||\
51 (SN_HWPERF_IS_IONODE(x) && SN_HWPERF_IS_IONODE(y)) ||\
52 (SN_HWPERF_IS_ROUTER(x) && SN_HWPERF_IS_ROUTER(y)))
53
54/* numa port structure, SN_HWPERF_ENUM_PORTS returns an array of these */
55struct sn_hwperf_port_info {
56 u32 port; /* port number on this object */
57 u32 conn_id; /* object id of the object connected to this port */
58 u32 conn_port; /* port number on the connected object */
59};
60
61/* for HWPERF_{GET,SET}_MMRS */
62struct sn_hwperf_data {
63 u64 addr; /* MMR address */
64 u64 data; /* value read (GET) or to be written (SET) */
65};
66
67/* user ioctl() argument, see below */
/*
 * Argument block passed by the user to ioctl() on the "sn_hwperf"
 * misc device.  The per-request meaning of arg/sz/ptr is documented
 * with each SN_HWPERF_* request code below.
 */
68struct sn_hwperf_ioctl_args {
69 u64 arg; /* argument, usually an object id */
70 u64 sz; /* size of transfer */
71 void *ptr; /* pointer to source/target */
72 u32 v0; /* second return value */
73};
74
75/*
76 * For SN_HWPERF_{GET,SET}_MMRS and SN_HWPERF_OBJECT_DISTANCE,
77 * sn_hwperf_ioctl_args.arg can be used to specify a CPU on which
78 * to call SAL, and whether to use an interprocessor interrupt
79 * or task migration in order to do so. If the CPU specified is
80 * SN_HWPERF_ARG_ANY_CPU, then the current CPU will be used.
81 */
82#define SN_HWPERF_ARG_ANY_CPU 0x7fffffffUL
83#define SN_HWPERF_ARG_CPU_MASK 0x7fffffff00000000ULL
84#define SN_HWPERF_ARG_USE_IPI_MASK 0x8000000000000000ULL
85#define SN_HWPERF_ARG_OBJID_MASK 0x00000000ffffffffULL
86
87/*
88 * ioctl requests on the "sn_hwperf" misc device that call SAL.
89 */
90#define SN_HWPERF_OP_MEM_COPYIN 0x1000
91#define SN_HWPERF_OP_MEM_COPYOUT 0x2000
92#define SN_HWPERF_OP_MASK 0x0fff
93
94/*
95 * Determine mem requirement.
96 * arg don't care
97 * sz 8
98 * p pointer to u64 integer
99 */
100#define SN_HWPERF_GET_HEAPSIZE 1
101
102/*
103 * Install mem for SAL drvr
104 * arg don't care
105 * sz sizeof buffer pointed to by p
106 * p pointer to buffer for scratch area
107 */
108#define SN_HWPERF_INSTALL_HEAP 2
109
110/*
111 * Determine number of objects
112 * arg don't care
113 * sz 8
114 * p pointer to u64 integer
115 */
116#define SN_HWPERF_OBJECT_COUNT (10|SN_HWPERF_OP_MEM_COPYOUT)
117
118/*
119 * Determine object "distance", relative to a cpu. This operation can
120 * execute on a designated logical cpu number, using either an IPI or
121 * via task migration. If the cpu number is SN_HWPERF_ANY_CPU, then
122 * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
123 *
124 * arg bitmap of IPI flag, cpu number and object id
125 * sz 8
126 * p pointer to u64 integer
127 */
128#define SN_HWPERF_OBJECT_DISTANCE (11|SN_HWPERF_OP_MEM_COPYOUT)
129
130/*
131 * Enumerate objects. Special case if sz == 8, returns the required
132 * buffer size.
133 * arg don't care
134 * sz sizeof buffer pointed to by p
135 * p pointer to array of struct sn_hwperf_object_info
136 */
137#define SN_HWPERF_ENUM_OBJECTS (12|SN_HWPERF_OP_MEM_COPYOUT)
138
139/*
140 * Enumerate NumaLink ports for an object. Special case if sz == 8,
141 * returns the required buffer size.
142 * arg object id
143 * sz sizeof buffer pointed to by p
144 * p pointer to array of struct sn_hwperf_port_info
145 */
146#define SN_HWPERF_ENUM_PORTS (13|SN_HWPERF_OP_MEM_COPYOUT)
147
148/*
149 * SET/GET memory mapped registers. These operations can execute
150 * on a designated logical cpu number, using either an IPI or via
151 * task migration. If the cpu number is SN_HWPERF_ANY_CPU, then
152 * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
153 *
154 * arg bitmap of ipi flag, cpu number and object id
155 * sz sizeof buffer pointed to by p
156 * p pointer to array of struct sn_hwperf_data
157 */
158#define SN_HWPERF_SET_MMRS (14|SN_HWPERF_OP_MEM_COPYIN)
159#define SN_HWPERF_GET_MMRS (15|SN_HWPERF_OP_MEM_COPYOUT| \
160 SN_HWPERF_OP_MEM_COPYIN)
161/*
162 * Lock a shared object
163 * arg object id
164 * sz don't care
165 * p don't care
166 */
167#define SN_HWPERF_ACQUIRE 16
168
169/*
170 * Unlock a shared object
171 * arg object id
172 * sz don't care
173 * p don't care
174 */
175#define SN_HWPERF_RELEASE 17
176
177/*
178 * Break a lock on a shared object
179 * arg object id
180 * sz don't care
181 * p don't care
182 */
183#define SN_HWPERF_FORCE_RELEASE 18
184
185/*
186 * ioctl requests on "sn_hwperf" that do not call SAL
187 */
188
189/*
190 * get cpu info as an array of hwperf_object_info_t.
191 * id is logical CPU number, name is description, location
192 * is geoid (e.g. 001c04#1c). Special case if sz == 8,
193 * returns the required buffer size.
194 *
195 * arg don't care
196 * sz sizeof buffer pointed to by p
197 * p pointer to array of struct sn_hwperf_object_info
198 */
199#define SN_HWPERF_GET_CPU_INFO (100|SN_HWPERF_OP_MEM_COPYOUT)
200
201/*
202 * Given an object id, return its node number (aka cnode).
203 * arg object id
204 * sz 8
205 * p pointer to u64 integer
206 */
207#define SN_HWPERF_GET_OBJ_NODE (101|SN_HWPERF_OP_MEM_COPYOUT)
208
209/*
210 * Given a node number (cnode), return its nasid.
211 * arg ordinal node number (aka cnodeid)
212 * sz 8
213 * p pointer to u64 integer
214 */
215#define SN_HWPERF_GET_NODE_NASID (102|SN_HWPERF_OP_MEM_COPYOUT)
216
217/* return codes */
218#define SN_HWPERF_OP_OK 0
219#define SN_HWPERF_OP_NOMEM 1
220#define SN_HWPERF_OP_NO_PERM 2
221#define SN_HWPERF_OP_IO_ERROR 3
222#define SN_HWPERF_OP_BUSY 4
223#define SN_HWPERF_OP_RECONFIGURE 253
224#define SN_HWPERF_OP_INVAL 254
225
226#endif /* SN_HWPERF_H */
diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h
new file mode 100644
index 000000000000..685435af170d
--- /dev/null
+++ b/include/asm-ia64/sn/sn_cpuid.h
@@ -0,0 +1,144 @@
1/*
2 *
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
8 */
9
10
11#ifndef _ASM_IA64_SN_SN_CPUID_H
12#define _ASM_IA64_SN_SN_CPUID_H
13
14#include <linux/config.h>
15#include <linux/smp.h>
16#include <asm/sn/addrs.h>
17#include <asm/sn/pda.h>
18#include <asm/intrinsics.h>
19
20
21/*
22 * Functions for converting between cpuids, nodeids and NASIDs.
23 *
24 * These are for SGI platforms only.
25 *
26 */
27
28
29
30
31/*
32 * Definitions of terms (these definitions are for IA64 ONLY. Other architectures
33 * use cpuid/cpunum quite differently):
34 *
35 * CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
36 * the cpu. The value cpuid has no significance on IA64 other than
37 * the boot cpu is 0.
38 * smp_processor_id() returns the cpuid of the current cpu.
39 *
40 * CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
41 * This is the same as 31:24 of the processor LID register
42 * hard_smp_processor_id()- cpu_physical_id of current processor
43 * cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
44 * cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid>
45 * * not real efficient - don't use in perf critical code
46 *
47 * SLICE - a number in the range of 0 - 3 (typically) that represents the
48 * cpu number on a brick.
49 *
50 * SUBNODE - (almost obsolete) the number of the FSB that a cpu is
51 * connected to. This is also the same as the PI number. Usually 0 or 1.
52 *
53 * NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no
54 * significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
55 *
56 *
57 * The macros convert between cpu physical ids & slice/nasid/cnodeid.
58 * These terms are described below:
59 *
60 *
61 * Brick
62 * ----- ----- ----- ----- CPU
63 * | 0 | | 1 | | 0 | | 1 | SLICE
64 * ----- ----- ----- -----
65 * | | | |
66 * | | | |
67 * 0 | | 2 0 | | 2 FSB SLOT
68 * ------- -------
69 * | |
70 * | |
71 * | |
72 * ------------ -------------
73 * | | | |
74 * | SHUB | | SHUB | NASID (0..MAX_NASIDS)
75 * | |----- | | CNODEID (0..num_compact_nodes-1)
76 * | | | |
77 * | | | |
78 * ------------ -------------
79 * | |
80 *
81 *
82 */
83
84#ifndef CONFIG_SMP
85#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
86#endif
87
88
89#define get_node_number(addr) NASID_GET(addr)
90
91/*
92 * NOTE: on non-MP systems, only cpuid 0 exists
93 */
94
95extern short physical_node_map[]; /* indexed by nasid to get cnode */
96
97/*
98 * Macros for retrieving info about current cpu
99 */
100#define get_nasid() (nodepda->phys_cpuid[smp_processor_id()].nasid)
101#define get_subnode() (nodepda->phys_cpuid[smp_processor_id()].subnode)
102#define get_slice() (nodepda->phys_cpuid[smp_processor_id()].slice)
103#define get_cnode() (nodepda->phys_cpuid[smp_processor_id()].cnode)
104#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
105
106/*
107 * Macros for retrieving info about an arbitrary cpu
108 * cpuid - logical cpu id
109 */
110#define cpuid_to_nasid(cpuid) (nodepda->phys_cpuid[cpuid].nasid)
111#define cpuid_to_subnode(cpuid) (nodepda->phys_cpuid[cpuid].subnode)
112#define cpuid_to_slice(cpuid) (nodepda->phys_cpuid[cpuid].slice)
113#define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)])
114
115
116/*
117 * Don't use the following in performance critical code. They require scans
118 * of potentially large tables.
119 */
120extern int nasid_slice_to_cpuid(int, int);
121#define nasid_slice_to_cpu_physical_id(nasid, slice) \
122 cpu_physical_id(nasid_slice_to_cpuid(nasid, slice))
123
124/*
125 * cnodeid_to_nasid - convert a cnodeid to a NASID
126 * Macro relies on pg_data for a node being on the node itself.
127 * Just extract the NASID from the pointer.
128 *
129 */
130#define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid]
131
132/*
133 * nasid_to_cnodeid - convert a NASID to a cnodeid
134 */
135#define nasid_to_cnodeid(nasid) (physical_node_map[nasid])
136
137/*
138 * partition_coherence_id - get the coherence ID of the current partition
139 */
140extern u8 sn_coherency_id;
141#define partition_coherence_id() (sn_coherency_id)
142
143#endif /* _ASM_IA64_SN_SN_CPUID_H */
144
diff --git a/include/asm-ia64/sn/sn_fru.h b/include/asm-ia64/sn/sn_fru.h
new file mode 100644
index 000000000000..8c21ac3f0156
--- /dev/null
+++ b/include/asm-ia64/sn/sn_fru.h
@@ -0,0 +1,44 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_SN_FRU_H
9#define _ASM_IA64_SN_SN_FRU_H
10
11#define MAX_DIMMS 8 /* max # of dimm banks */
12#define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */
13
14typedef unsigned char confidence_t;
15
/* Per-node memory failure-confidence record (FRU analysis). */
16typedef struct kf_mem_s {
17 confidence_t km_confidence; /* confidence level that the memory is bad
18 * is this necessary ?
19 */
20 confidence_t km_dimm[MAX_DIMMS];
21 /* confidence level that dimm[i] is bad
22 *I think this is the right number
23 */
24
25} kf_mem_t;
26
/* Per-cpu failure-confidence record (FRU analysis). */
27typedef struct kf_cpu_s {
28 confidence_t kc_confidence; /* confidence level that cpu is bad */
29 confidence_t kc_icache; /* confidence level that instr. cache is bad */
30 confidence_t kc_dcache; /* confidence level that data cache is bad */
31 confidence_t kc_scache; /* confidence level that sec. cache is bad */
32 confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
33} kf_cpu_t;
34
35
/* Per-PCI-bus failure-confidence record (FRU analysis). */
36typedef struct kf_pci_bus_s {
37 confidence_t kpb_belief; /* confidence level that the pci bus is bad */
38 confidence_t kpb_pcidev_belief[MAX_PCIDEV];
39 /* confidence level that the pci dev is bad */
40} kf_pci_bus_t;
41
42
43#endif /* _ASM_IA64_SN_SN_FRU_H */
44
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
new file mode 100644
index 000000000000..88c31b53dc09
--- /dev/null
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -0,0 +1,1015 @@
1#ifndef _ASM_IA64_SN_SN_SAL_H
2#define _ASM_IA64_SN_SN_SAL_H
3
4/*
5 * System Abstraction Layer definitions for IA64
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All rights reserved.
12 */
13
14
15#include <linux/config.h>
16#include <asm/sal.h>
17#include <asm/sn/sn_cpuid.h>
18#include <asm/sn/arch.h>
19#include <asm/sn/geo.h>
20#include <asm/sn/nodepda.h>
21#include <asm/sn/shub_mmr.h>
22
23// SGI Specific Calls
24#define SN_SAL_POD_MODE 0x02000001
25#define SN_SAL_SYSTEM_RESET 0x02000002
26#define SN_SAL_PROBE 0x02000003
27#define SN_SAL_GET_MASTER_NASID 0x02000004
28#define SN_SAL_GET_KLCONFIG_ADDR 0x02000005
29#define SN_SAL_LOG_CE 0x02000006
30#define SN_SAL_REGISTER_CE 0x02000007
31#define SN_SAL_GET_PARTITION_ADDR 0x02000009
32#define SN_SAL_XP_ADDR_REGION 0x0200000f
33#define SN_SAL_NO_FAULT_ZONE_VIRTUAL 0x02000010
34#define SN_SAL_NO_FAULT_ZONE_PHYSICAL 0x02000011
35#define SN_SAL_PRINT_ERROR 0x02000012
36#define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant
37#define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant
38#define SN_SAL_GET_SN_INFO 0x0200001c
39#define SN_SAL_GET_SAPIC_INFO 0x0200001d
40#define SN_SAL_CONSOLE_PUTC 0x02000021
41#define SN_SAL_CONSOLE_GETC 0x02000022
42#define SN_SAL_CONSOLE_PUTS 0x02000023
43#define SN_SAL_CONSOLE_GETS 0x02000024
44#define SN_SAL_CONSOLE_GETS_TIMEOUT 0x02000025
45#define SN_SAL_CONSOLE_POLL 0x02000026
46#define SN_SAL_CONSOLE_INTR 0x02000027
47#define SN_SAL_CONSOLE_PUTB 0x02000028
48#define SN_SAL_CONSOLE_XMIT_CHARS 0x0200002a
49#define SN_SAL_CONSOLE_READC 0x0200002b
50#define SN_SAL_SYSCTL_MODID_GET 0x02000031
51#define SN_SAL_SYSCTL_GET 0x02000032
52#define SN_SAL_SYSCTL_IOBRICK_MODULE_GET 0x02000033
53#define SN_SAL_SYSCTL_IO_PORTSPEED_GET 0x02000035
54#define SN_SAL_SYSCTL_SLAB_GET 0x02000036
55#define SN_SAL_BUS_CONFIG 0x02000037
56#define SN_SAL_SYS_SERIAL_GET 0x02000038
57#define SN_SAL_PARTITION_SERIAL_GET 0x02000039
58#define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a
59#define SN_SAL_SYSTEM_POWER_DOWN 0x0200003b
60#define SN_SAL_GET_MASTER_BASEIO_NASID 0x0200003c
61#define SN_SAL_COHERENCE 0x0200003d
62#define SN_SAL_MEMPROTECT 0x0200003e
63#define SN_SAL_SYSCTL_FRU_CAPTURE 0x0200003f
64
65#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant
66#define SN_SAL_IROUTER_OP 0x02000043
67#define SN_SAL_IOIF_INTERRUPT 0x0200004a
68#define SN_SAL_HWPERF_OP 0x02000050 // lock
69#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051
70
71#define SN_SAL_IOIF_SLOT_ENABLE 0x02000053
72#define SN_SAL_IOIF_SLOT_DISABLE 0x02000054
73#define SN_SAL_IOIF_GET_HUBDEV_INFO 0x02000055
74#define SN_SAL_IOIF_GET_PCIBUS_INFO 0x02000056
75#define SN_SAL_IOIF_GET_PCIDEV_INFO 0x02000057
76#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058
77
78#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
79
80
81/*
82 * Service-specific constants
83 */
84
85/* Console interrupt manipulation */
86 /* action codes */
87#define SAL_CONSOLE_INTR_OFF 0 /* turn the interrupt off */
88#define SAL_CONSOLE_INTR_ON 1 /* turn the interrupt on */
89#define SAL_CONSOLE_INTR_STATUS 2 /* retrieve the interrupt status */
90 /* interrupt specification & status return codes */
91#define SAL_CONSOLE_INTR_XMIT 1 /* output interrupt */
92#define SAL_CONSOLE_INTR_RECV 2 /* input interrupt */
93
94/* interrupt handling */
95#define SAL_INTR_ALLOC 1
96#define SAL_INTR_FREE 2
97
98/*
99 * IRouter (i.e. generalized system controller) operations
100 */
101#define SAL_IROUTER_OPEN 0 /* open a subchannel */
102#define SAL_IROUTER_CLOSE 1 /* close a subchannel */
103#define SAL_IROUTER_SEND 2 /* send part of an IRouter packet */
104#define SAL_IROUTER_RECV 3 /* receive part of an IRouter packet */
105#define SAL_IROUTER_INTR_STATUS 4 /* check the interrupt status for
106 * an open subchannel
107 */
108#define SAL_IROUTER_INTR_ON 5 /* enable an interrupt */
109#define SAL_IROUTER_INTR_OFF 6 /* disable an interrupt */
110#define SAL_IROUTER_INIT 7 /* initialize IRouter driver */
111
112/* IRouter interrupt mask bits */
113#define SAL_IROUTER_INTR_XMIT SAL_CONSOLE_INTR_XMIT
114#define SAL_IROUTER_INTR_RECV SAL_CONSOLE_INTR_RECV
115
116
117/*
118 * SAL Error Codes
119 */
120#define SALRET_MORE_PASSES 1
121#define SALRET_OK 0
122#define SALRET_NOT_IMPLEMENTED (-1)
123#define SALRET_INVALID_ARG (-2)
124#define SALRET_ERROR (-3)
125
126
127/**
128 * sn_sal_rev_major - get the major SGI SAL revision number
129 *
130 * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
131 * This routine simply extracts the major value from the
132 * @ia64_sal_systab structure constructed by ia64_sal_init().
133 */
134static inline int
135sn_sal_rev_major(void)
136{
137 struct ia64_sal_systab *systab = efi.sal_systab;
138
139 return (int)systab->sal_b_rev_major;
140}
141
142/**
143 * sn_sal_rev_minor - get the minor SGI SAL revision number
144 *
145 * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
146 * This routine simply extracts the minor value from the
147 * @ia64_sal_systab structure constructed by ia64_sal_init().
148 */
149static inline int
150sn_sal_rev_minor(void)
151{
152 struct ia64_sal_systab *systab = efi.sal_systab;
153
154 return (int)systab->sal_b_rev_minor;
155}
156
157/*
158 * Specify the minimum PROM revision required for this kernel.
159 * Note that they're stored in hex format...
160 */
161#define SN_SAL_MIN_MAJOR 0x4 /* SN2 kernels need at least PROM 4.0 */
162#define SN_SAL_MIN_MINOR 0x0
163
164/*
165 * Returns the master console nasid, if the call fails, return an illegal
166 * value.
167 */
168static inline u64
169ia64_sn_get_console_nasid(void)
170{
171 struct ia64_sal_retval ret_stuff;
172
173 ret_stuff.status = 0;
174 ret_stuff.v0 = 0;
175 ret_stuff.v1 = 0;
176 ret_stuff.v2 = 0;
177 SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
178
179 if (ret_stuff.status < 0)
180 return ret_stuff.status;
181
182 /* Master console nasid is in 'v0' */
183 return ret_stuff.v0;
184}
185
186/*
187 * Returns the master baseio nasid, if the call fails, return an illegal
188 * value.
189 */
190static inline u64
191ia64_sn_get_master_baseio_nasid(void)
192{
193 struct ia64_sal_retval ret_stuff;
194
195 ret_stuff.status = 0;
196 ret_stuff.v0 = 0;
197 ret_stuff.v1 = 0;
198 ret_stuff.v2 = 0;
199 SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
200
201 if (ret_stuff.status < 0)
202 return ret_stuff.status;
203
204 /* Master baseio nasid is in 'v0' */
205 return ret_stuff.v0;
206}
207
208static inline char *
209ia64_sn_get_klconfig_addr(nasid_t nasid)
210{
211 struct ia64_sal_retval ret_stuff;
212 int cnodeid;
213
214 cnodeid = nasid_to_cnodeid(nasid);
215 ret_stuff.status = 0;
216 ret_stuff.v0 = 0;
217 ret_stuff.v1 = 0;
218 ret_stuff.v2 = 0;
219 SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
220
221 /*
222 * We should panic if a valid cnode nasid does not produce
223 * a klconfig address.
224 */
225 if (ret_stuff.status != 0) {
226 panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status);
227 }
228 return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
229}
230
231/*
232 * Returns the next console character.
233 */
234static inline u64
235ia64_sn_console_getc(int *ch)
236{
237 struct ia64_sal_retval ret_stuff;
238
239 ret_stuff.status = 0;
240 ret_stuff.v0 = 0;
241 ret_stuff.v1 = 0;
242 ret_stuff.v2 = 0;
243 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
244
245 /* character is in 'v0' */
246 *ch = (int)ret_stuff.v0;
247
248 return ret_stuff.status;
249}
250
251/*
252 * Read a character from the SAL console device, after a previous interrupt
253 * or poll operation has given us to know that a character is available
254 * to be read.
255 */
256static inline u64
257ia64_sn_console_readc(void)
258{
259 struct ia64_sal_retval ret_stuff;
260
261 ret_stuff.status = 0;
262 ret_stuff.v0 = 0;
263 ret_stuff.v1 = 0;
264 ret_stuff.v2 = 0;
265 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
266
267 /* character is in 'v0' */
268 return ret_stuff.v0;
269}
270
271/*
272 * Sends the given character to the console.
273 */
274static inline u64
275ia64_sn_console_putc(char ch)
276{
277 struct ia64_sal_retval ret_stuff;
278
279 ret_stuff.status = 0;
280 ret_stuff.v0 = 0;
281 ret_stuff.v1 = 0;
282 ret_stuff.v2 = 0;
283 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
284
285 return ret_stuff.status;
286}
287
288/*
289 * Sends the given buffer to the console.
290 */
291static inline u64
292ia64_sn_console_putb(const char *buf, int len)
293{
294 struct ia64_sal_retval ret_stuff;
295
296 ret_stuff.status = 0;
297 ret_stuff.v0 = 0;
298 ret_stuff.v1 = 0;
299 ret_stuff.v2 = 0;
300 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
301
302 if ( ret_stuff.status == 0 ) {
303 return ret_stuff.v0;
304 }
305 return (u64)0;
306}
307
308/*
309 * Print a platform error record
310 */
311static inline u64
312ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
313{
314 struct ia64_sal_retval ret_stuff;
315
316 ret_stuff.status = 0;
317 ret_stuff.v0 = 0;
318 ret_stuff.v1 = 0;
319 ret_stuff.v2 = 0;
320 SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
321
322 return ret_stuff.status;
323}
324
325/*
326 * Check for Platform errors
327 */
328static inline u64
329ia64_sn_plat_cpei_handler(void)
330{
331 struct ia64_sal_retval ret_stuff;
332
333 ret_stuff.status = 0;
334 ret_stuff.v0 = 0;
335 ret_stuff.v1 = 0;
336 ret_stuff.v2 = 0;
337 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
338
339 return ret_stuff.status;
340}
341
342/*
343 * Checks for console input.
344 */
345static inline u64
346ia64_sn_console_check(int *result)
347{
348 struct ia64_sal_retval ret_stuff;
349
350 ret_stuff.status = 0;
351 ret_stuff.v0 = 0;
352 ret_stuff.v1 = 0;
353 ret_stuff.v2 = 0;
354 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
355
356 /* result is in 'v0' */
357 *result = (int)ret_stuff.v0;
358
359 return ret_stuff.status;
360}
361
362/*
363 * Checks console interrupt status
364 */
365static inline u64
366ia64_sn_console_intr_status(void)
367{
368 struct ia64_sal_retval ret_stuff;
369
370 ret_stuff.status = 0;
371 ret_stuff.v0 = 0;
372 ret_stuff.v1 = 0;
373 ret_stuff.v2 = 0;
374 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
375 0, SAL_CONSOLE_INTR_STATUS,
376 0, 0, 0, 0, 0);
377
378 if (ret_stuff.status == 0) {
379 return ret_stuff.v0;
380 }
381
382 return 0;
383}
384
385/*
386 * Enable an interrupt on the SAL console device.
387 */
388static inline void
389ia64_sn_console_intr_enable(uint64_t intr)
390{
391 struct ia64_sal_retval ret_stuff;
392
393 ret_stuff.status = 0;
394 ret_stuff.v0 = 0;
395 ret_stuff.v1 = 0;
396 ret_stuff.v2 = 0;
397 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
398 intr, SAL_CONSOLE_INTR_ON,
399 0, 0, 0, 0, 0);
400}
401
402/*
403 * Disable an interrupt on the SAL console device.
404 */
405static inline void
406ia64_sn_console_intr_disable(uint64_t intr)
407{
408 struct ia64_sal_retval ret_stuff;
409
410 ret_stuff.status = 0;
411 ret_stuff.v0 = 0;
412 ret_stuff.v1 = 0;
413 ret_stuff.v2 = 0;
414 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
415 intr, SAL_CONSOLE_INTR_OFF,
416 0, 0, 0, 0, 0);
417}
418
419/*
420 * Sends a character buffer to the console asynchronously.
421 */
422static inline u64
423ia64_sn_console_xmit_chars(char *buf, int len)
424{
425 struct ia64_sal_retval ret_stuff;
426
427 ret_stuff.status = 0;
428 ret_stuff.v0 = 0;
429 ret_stuff.v1 = 0;
430 ret_stuff.v2 = 0;
431 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
432 (uint64_t)buf, (uint64_t)len,
433 0, 0, 0, 0, 0);
434
435 if (ret_stuff.status == 0) {
436 return ret_stuff.v0;
437 }
438
439 return 0;
440}
441
442/*
443 * Returns the iobrick module Id
444 */
445static inline u64
446ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
447{
448 struct ia64_sal_retval ret_stuff;
449
450 ret_stuff.status = 0;
451 ret_stuff.v0 = 0;
452 ret_stuff.v1 = 0;
453 ret_stuff.v2 = 0;
454 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
455
456 /* result is in 'v0' */
457 *result = (int)ret_stuff.v0;
458
459 return ret_stuff.status;
460}
461
462/**
463 * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
464 *
465 * SN_SAL_POD_MODE actually takes an argument, but it's always
466 * 0 when we call it from the kernel, so we don't have to expose
467 * it to the caller.
468 */
469static inline u64
470ia64_sn_pod_mode(void)
471{
472 struct ia64_sal_retval isrv;
473 SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
474 if (isrv.status)
475 return 0;
476 return isrv.v0;
477}
478
479/**
480 * ia64_sn_probe_mem - read from memory safely
481 * @addr: address to probe
482 * @size: number bytes to read (1,2,4,8)
483 * @data_ptr: address to store value read by probe (-1 returned if probe fails)
484 *
485 * Call into the SAL to do a memory read. If the read generates a machine
486 * check, this routine will recover gracefully and return -1 to the caller.
487 * @addr is usually a kernel virtual address in uncached space (i.e. the
488 * address starts with 0xc), but if called in physical mode, @addr should
489 * be a physical address.
490 *
491 * Return values:
492 * 0 - probe successful
493 * 1 - probe failed (generated MCA)
494 * 2 - Bad arg
495 * <0 - PAL error
496 */
static inline u64
ia64_sn_probe_mem(long addr, long size, void *data_ptr)
{
	struct ia64_sal_retval isrv;

	/* SAL performs the (possibly MCA-generating) read on our behalf. */
	SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);

	if (data_ptr) {
		/* Store the probed value with the width the caller asked for;
		 * the raw value arrives in v0. */
		switch (size) {
		case 1:
			*((u8*)data_ptr) = (u8)isrv.v0;
			break;
		case 2:
			*((u16*)data_ptr) = (u16)isrv.v0;
			break;
		case 4:
			*((u32*)data_ptr) = (u32)isrv.v0;
			break;
		case 8:
			*((u64*)data_ptr) = (u64)isrv.v0;
			break;
		default:
			/* unsupported width: report "bad arg" per the
			 * contract in the header comment above */
			isrv.status = 2;
		}
	}
	return isrv.status;
}
524
525/*
526 * Retrieve the system serial number as an ASCII string.
527 */
static inline u64
ia64_sn_sys_serial_get(char *buf)
{
	/* SAL copies the ASCII system serial number into 'buf'.  No size
	 * is passed, so the caller must provide a sufficiently large
	 * buffer (see sn_system_serial_number_string below). */
	struct ia64_sal_retval ret_stuff;
	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
	return ret_stuff.status;
}
535
536extern char sn_system_serial_number_string[];
537extern u64 sn_partition_serial_number;
538
539static inline char *
540sn_system_serial_number(void) {
541 if (sn_system_serial_number_string[0]) {
542 return(sn_system_serial_number_string);
543 } else {
544 ia64_sn_sys_serial_get(sn_system_serial_number_string);
545 return(sn_system_serial_number_string);
546 }
547}
548
549
550/*
551 * Returns a unique id number for this system and partition (suitable for
552 * use with license managers), based in part on the system serial number.
553 */
554static inline u64
555ia64_sn_partition_serial_get(void)
556{
557 struct ia64_sal_retval ret_stuff;
558 SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
559 if (ret_stuff.status != 0)
560 return 0;
561 return ret_stuff.v0;
562}
563
564static inline u64
565sn_partition_serial_number_val(void) {
566 if (sn_partition_serial_number) {
567 return(sn_partition_serial_number);
568 } else {
569 return(sn_partition_serial_number = ia64_sn_partition_serial_get());
570 }
571}
572
573/*
574 * Returns the partition id of the nasid passed in as an argument,
575 * or INVALID_PARTID if the partition id cannot be retrieved.
576 */
static inline partid_t
ia64_sn_sysctl_partition_get(nasid_t nasid)
{
	/* Map 'nasid' to its owning partition id via SAL; INVALID_PARTID
	 * on any SAL error. */
	struct ia64_sal_retval ret_stuff;
	SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
		 0, 0, 0, 0, 0, 0);
	if (ret_stuff.status != 0)
		return INVALID_PARTID;
	return ((partid_t)ret_stuff.v0);
}
587
588/*
589 * Returns the partition id of the current processor.
590 */
591
592extern partid_t sn_partid;
593
594static inline partid_t
595sn_local_partid(void) {
596 if (sn_partid < 0) {
597 return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
598 } else {
599 return sn_partid;
600 }
601}
602
603/*
604 * Register or unregister a physical address range being referenced across
605 * a partition boundary for which certain SAL errors should be scanned for,
606 * cleaned up and ignored. This is of value for kernel partitioning code only.
607 * Values for the operation argument:
608 * 1 = register this address range with SAL
609 * 0 = unregister this address range with SAL
610 *
611 * SAL maintains a reference count on an address range in case it is registered
612 * multiple times.
613 *
614 * On success, returns the reference count of the address range after the SAL
615 * call has performed the current registration/unregistration. Returns a
616 * negative value if an error occurred.
617 */
static inline int
sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
{
	/* operation: 1 = register [paddr, paddr+len) with SAL,
	 * 0 = unregister.  Returns SAL's refcount for the range on
	 * success, negative on error (see header comment above). */
	struct ia64_sal_retval ret_stuff;
	SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
		 0, 0, 0, 0);
	return ret_stuff.status;
}
626
627/*
628 * Register or unregister an instruction range for which SAL errors should
629 * be ignored. If an error occurs while in the registered range, SAL jumps
630 * to return_addr after ignoring the error. Values for the operation argument:
631 * 1 = register this instruction range with SAL
632 * 0 = unregister this instruction range with SAL
633 *
634 * Returns 0 on success, or a negative value if an error occurred.
635 */
636static inline int
637sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
638 int virtual, int operation)
639{
640 struct ia64_sal_retval ret_stuff;
641 u64 call;
642 if (virtual) {
643 call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
644 } else {
645 call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
646 }
647 SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
648 0, 0, 0);
649 return ret_stuff.status;
650}
651
652/*
653 * Change or query the coherence domain for this partition. Each cpu-based
654 * nasid is represented by a bit in an array of 64-bit words:
655 * 0 = not in this partition's coherency domain
656 * 1 = in this partition's coherency domain
657 *
658 * It is not possible for the local system's nasids to be removed from
659 * the coherency domain. Purpose of the domain arguments:
660 * new_domain = set the coherence domain to the given nasids
661 * old_domain = return the current coherence domain
662 *
663 * Returns 0 on success, or a negative value if an error occurred.
664 */
static inline int
sn_change_coherence(u64 *new_domain, u64 *old_domain)
{
	/* Pointers to the nasid bit arrays are passed straight through to
	 * SAL; see the header comment above for the bit semantics. */
	struct ia64_sal_retval ret_stuff;
	SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
		 0, 0, 0);
	return ret_stuff.status;
}
673
674/*
675 * Change memory access protections for a physical address range.
676 * nasid_array is not used on Altix, but may be in future architectures.
677 * Available memory protection access classes are defined after the function.
678 */
static inline int
sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
{
	struct ia64_sal_retval ret_stuff;
	int cnodeid;
	unsigned long irq_flags;

	/* cnodeid is only needed by the (currently disabled) bist_lock
	 * serialization below. */
	cnodeid = nasid_to_cnodeid(get_node_number(paddr));
	// spin_lock(&NODEPDA(cnodeid)->bist_lock);
	/* Disable local interrupts around the call; NOLOCK presumably
	 * means SAL_CALL_NOLOCK takes no lock of its own -- confirm. */
	local_irq_save(irq_flags);
	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
		 perms, 0, 0, 0);
	local_irq_restore(irq_flags);
	// spin_unlock(&NODEPDA(cnodeid)->bist_lock);
	return ret_stuff.status;
}
695#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
696#define SN_MEMPROT_ACCESS_CLASS_1 0x2520c2
697#define SN_MEMPROT_ACCESS_CLASS_2 0x14a1ca
698#define SN_MEMPROT_ACCESS_CLASS_3 0x14a290
699#define SN_MEMPROT_ACCESS_CLASS_6 0x084080
700#define SN_MEMPROT_ACCESS_CLASS_7 0x021080
701
702/*
703 * Turns off system power.
704 */
static inline void
ia64_sn_power_down(void)
{
	struct ia64_sal_retval ret_stuff;
	SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
	/* Spin until the power actually drops, in case SAL returns. */
	while(1);
	/* never returns */
}
713
714/**
715 * ia64_sn_fru_capture - tell the system controller to capture hw state
716 *
717 * This routine will call the SAL which will tell the system controller(s)
718 * to capture hw mmr information from each SHub in the system.
719 */
720static inline u64
721ia64_sn_fru_capture(void)
722{
723 struct ia64_sal_retval isrv;
724 SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
725 if (isrv.status)
726 return 0;
727 return isrv.v0;
728}
729
730/*
731 * Performs an operation on a PCI bus or slot -- power up, power down
732 * or reset.
733 */
static inline u64
ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type,
			      u64 bus, char slot,
			      u64 action)
{
	struct ia64_sal_retval rv = {0, 0, 0, 0};

	SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
		 bus, (u64) slot, 0, 0);
	/* NOTE(review): return convention is inverted relative to the
	 * other wrappers in this file -- v0 is returned when status is
	 * non-zero (presumably an extended error code) and 0 on success.
	 * Confirm against the SAL spec before "fixing". */
	if (rv.status)
		return rv.v0;
	return 0;
}
747
748
749/*
750 * Open a subchannel for sending arbitrary data to the system
751 * controller network via the system controller device associated with
752 * 'nasid'. Return the subchannel number or a negative error code.
753 */
754static inline int
755ia64_sn_irtr_open(nasid_t nasid)
756{
757 struct ia64_sal_retval rv;
758 SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
759 0, 0, 0, 0, 0);
760 return (int) rv.v0;
761}
762
763/*
764 * Close system controller subchannel 'subch' previously opened on 'nasid'.
765 */
static inline int
ia64_sn_irtr_close(nasid_t nasid, int subch)
{
	/* Close subchannel 'subch' previously opened on 'nasid';
	 * returns the SAL status. */
	struct ia64_sal_retval rv;
	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
			   (u64) nasid, (u64) subch, 0, 0, 0, 0);
	return (int) rv.status;
}
774
775/*
776 * Read data from system controller associated with 'nasid' on
777 * subchannel 'subch'. The buffer to be filled is pointed to by
778 * 'buf', and its capacity is in the integer pointed to by 'len'. The
779 * referent of 'len' is set to the number of bytes read by the SAL
780 * call. The return value is either SALRET_OK (for bytes read) or
781 * SALRET_ERROR (for error or "no data available").
782 */
static inline int
ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
{
	/* On entry *len is the capacity of 'buf'; SAL updates it to the
	 * number of bytes read (see header comment above).  Returns the
	 * SAL status (SALRET_OK or SALRET_ERROR). */
	struct ia64_sal_retval rv;
	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
			   (u64) nasid, (u64) subch, (u64) buf, (u64) len,
			   0, 0);
	return (int) rv.status;
}
792
793/*
794 * Write data to the system controller network via the system
 * controller associated with 'nasid' on subchannel 'subch'. The
796 * buffer to be written out is pointed to by 'buf', and 'len' is the
797 * number of bytes to be written. The return value is either the
798 * number of bytes written (which could be zero) or a negative error
799 * code.
800 */
801static inline int
802ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
803{
804 struct ia64_sal_retval rv;
805 SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
806 (u64) nasid, (u64) subch, (u64) buf, (u64) len,
807 0, 0);
808 return (int) rv.v0;
809}
810
811/*
812 * Check whether any interrupts are pending for the system controller
813 * associated with 'nasid' and its subchannel 'subch'. The return
814 * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
815 * SAL_IROUTER_INTR_RECV).
816 */
static inline int
ia64_sn_irtr_intr(nasid_t nasid, int subch)
{
	/* Returns (in v0) a mask of pending interrupts for this
	 * subchannel: SAL_IROUTER_INTR_XMIT and/or SAL_IROUTER_INTR_RECV. */
	struct ia64_sal_retval rv;
	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
			   (u64) nasid, (u64) subch, 0, 0, 0, 0);
	return (int) rv.v0;
}
825
826/*
827 * Enable the interrupt indicated by the intr parameter (either
828 * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
829 */
830static inline int
831ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
832{
833 struct ia64_sal_retval rv;
834 SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
835 (u64) nasid, (u64) subch, intr, 0, 0, 0);
836 return (int) rv.v0;
837}
838
839/*
840 * Disable the interrupt indicated by the intr parameter (either
841 * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
842 */
static inline int
ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
{
	/* Disable the irouter interrupt(s) selected by 'intr'; mirror of
	 * ia64_sn_irtr_intr_enable() above. */
	struct ia64_sal_retval rv;
	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
			   (u64) nasid, (u64) subch, intr, 0, 0, 0);
	return (int) rv.v0;
}
851
852/**
853 * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
854 * @nasid: NASID of node to read
855 * @index: FIT entry index to be retrieved (0..n)
856 * @fitentry: 16 byte buffer where FIT entry will be stored.
857 * @banbuf: optional buffer for retrieving banner
858 * @banlen: length of banner buffer
859 *
860 * Access to the physical PROM chips needs to be serialized since reads and
861 * writes can't occur at the same time, so we need to call into the SAL when
862 * we want to look at the FIT entries on the chips.
863 *
864 * Returns:
865 * %SALRET_OK if ok
866 * %SALRET_INVALID_ARG if index too big
867 * %SALRET_NOT_IMPLEMENTED if running on older PROM
868 * ??? if nasid invalid OR banner buffer not large enough
869 */
static inline int
ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
		      u64 banlen)
{
	/* PROM access must be serialized; SAL does the serialized read
	 * of FIT entry 'index' for us (see kernel-doc above for the
	 * argument and return-code contract). */
	struct ia64_sal_retval rv;
	SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
			banbuf, banlen, 0, 0);
	return (int) rv.status;
}
879
880/*
881 * Initialize the SAL components of the system controller
882 * communication driver; specifically pass in a sizable buffer that
883 * can be used for allocation of subchannel queues as new subchannels
884 * are opened. "buf" points to the buffer, and "len" specifies its
885 * length.
886 */
static inline int
ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
{
	/* Give SAL a buffer ('buf', 'len' bytes) from which it can carve
	 * subchannel queues as subchannels are opened.  Returns SAL status. */
	struct ia64_sal_retval rv;
	SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
			   (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
	return (int) rv.status;
}
895
896/*
897 * Returns the nasid, subnode & slice corresponding to a SAPIC ID
898 *
899 * In:
900 * arg0 - SN_SAL_GET_SAPIC_INFO
901 * arg1 - sapicid (lid >> 16)
902 * Out:
903 * v0 - nasid
904 * v1 - subnode
905 * v2 - slice
906 */
static inline u64
ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
{
	struct ia64_sal_retval ret_stuff;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	ret_stuff.v1 = 0;
	ret_stuff.v2 = 0;
	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);

/***** BEGIN HACK - temp til old proms no longer supported ********/
	/* Old PROMs lack this call: decode nasid/subnode/slice directly
	 * from the sapicid bit layout instead. */
	if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
		if (nasid) *nasid = sapicid & 0xfff;
		if (subnode) *subnode = (sapicid >> 13) & 1;
		if (slice) *slice = (sapicid >> 12) & 3;
		return 0;
	}
/***** END HACK *******/

	if (ret_stuff.status < 0)
		return ret_stuff.status;

	/* Each output pointer is optional; results are in v0/v1/v2. */
	if (nasid) *nasid = (int) ret_stuff.v0;
	if (subnode) *subnode = (int) ret_stuff.v1;
	if (slice) *slice = (int) ret_stuff.v2;
	return 0;
}
935
936/*
937 * Returns information about the HUB/SHUB.
938 * In:
939 * arg0 - SN_SAL_GET_SN_INFO
940 * arg1 - 0 (other values reserved for future use)
941 * Out:
942 * v0
943 * [7:0] - shub type (0=shub1, 1=shub2)
944 * [15:8] - Log2 max number of nodes in entire system (includes
945 * C-bricks, I-bricks, etc)
946 * [23:16] - Log2 of nodes per sharing domain
947 * [31:24] - partition ID
948 * [39:32] - coherency_id
949 * [47:40] - regionsize
950 * v1
951 * [15:0] - nasid mask (ex., 0x7ff for 11 bit nasid)
 * [23:16] - bit position of low nasid bit
953 */
954static inline u64
955ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift,
956 u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg)
957{
958 struct ia64_sal_retval ret_stuff;
959
960 ret_stuff.status = 0;
961 ret_stuff.v0 = 0;
962 ret_stuff.v1 = 0;
963 ret_stuff.v2 = 0;
964 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
965
966/***** BEGIN HACK - temp til old proms no longer supported ********/
967 if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
968 int nasid = get_sapicid() & 0xfff;;
969#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL
970#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48
971 if (shubtype) *shubtype = 0;
972 if (nasid_bitmask) *nasid_bitmask = 0x7ff;
973 if (nasid_shift) *nasid_shift = 38;
974 if (systemsize) *systemsize = 11;
975 if (sharing_domain_size) *sharing_domain_size = 9;
976 if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
977 if (coher) *coher = nasid >> 9;
978 if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
979 SH_SHUB_ID_NODES_PER_BIT_SHFT;
980 return 0;
981 }
982/***** END HACK *******/
983
984 if (ret_stuff.status < 0)
985 return ret_stuff.status;
986
987 if (shubtype) *shubtype = ret_stuff.v0 & 0xff;
988 if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff;
989 if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff;
990 if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff;
991 if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff;
992 if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff;
993 if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff);
994 if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff;
995 return 0;
996}
997
998/*
999 * This is the access point to the Altix PROM hardware performance
1000 * and status monitoring interface. For info on using this, see
1001 * include/asm-ia64/sn/sn2/sn_hwperf.h
1002 */
1003static inline int
1004ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
1005 u64 a3, u64 a4, int *v0)
1006{
1007 struct ia64_sal_retval rv;
1008 SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
1009 opcode, a0, a1, a2, a3, a4);
1010 if (v0)
1011 *v0 = (int) rv.v0;
1012 return (int) rv.status;
1013}
1014
1015#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/include/asm-ia64/sn/sndrv.h b/include/asm-ia64/sn/sndrv.h
new file mode 100644
index 000000000000..aa00d42cde32
--- /dev/null
+++ b/include/asm-ia64/sn/sndrv.h
@@ -0,0 +1,47 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#ifndef _ASM_IA64_SN_SNDRV_H
10#define _ASM_IA64_SN_SNDRV_H
11
12/* ioctl commands */
13#define SNDRV_GET_ROUTERINFO 1
14#define SNDRV_GET_INFOSIZE 2
15#define SNDRV_GET_HUBINFO 3
16#define SNDRV_GET_FLASHLOGSIZE 4
17#define SNDRV_SET_FLASHSYNC 5
18#define SNDRV_GET_FLASHLOGDATA 6
19#define SNDRV_GET_FLASHLOGALL 7
20
21#define SNDRV_SET_HISTOGRAM_TYPE 14
22
23#define SNDRV_ELSC_COMMAND 19
24#define SNDRV_CLEAR_LOG 20
25#define SNDRV_INIT_LOG 21
26#define SNDRV_GET_PIMM_PSC 22
27#define SNDRV_SET_PARTITION 23
28#define SNDRV_GET_PARTITION 24
29
30/* see synergy_perf_ioctl() */
31#define SNDRV_GET_SYNERGY_VERSION 30
32#define SNDRV_GET_SYNERGY_STATUS 31
33#define SNDRV_GET_SYNERGYINFO 32
34#define SNDRV_SYNERGY_APPEND 33
35#define SNDRV_SYNERGY_ENABLE 34
36#define SNDRV_SYNERGY_FREQ 35
37
38/* Devices */
39#define SNDRV_UKNOWN_DEVICE -1
40#define SNDRV_ROUTER_DEVICE 1
41#define SNDRV_HUB_DEVICE 2
42#define SNDRV_ELSC_NVRAM_DEVICE 3
43#define SNDRV_ELSC_CONTROLLER_DEVICE 4
44#define SNDRV_SYSCTL_SUBCH 5
45#define SNDRV_SYNERGY_DEVICE 6
46
47#endif /* _ASM_IA64_SN_SNDRV_H */
diff --git a/include/asm-ia64/sn/types.h b/include/asm-ia64/sn/types.h
new file mode 100644
index 000000000000..586ed47cae9c
--- /dev/null
+++ b/include/asm-ia64/sn/types.h
@@ -0,0 +1,25 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
7 * Copyright (C) 1999 by Ralf Baechle
8 */
9#ifndef _ASM_IA64_SN_TYPES_H
10#define _ASM_IA64_SN_TYPES_H
11
12#include <linux/types.h>
13
14typedef unsigned long cpuid_t;
15typedef signed short nasid_t; /* node id in numa-as-id space */
16typedef signed char partid_t; /* partition ID type */
17typedef unsigned int moduleid_t; /* user-visible module number type */
18typedef unsigned int cmoduleid_t; /* kernel compact module id type */
19typedef signed char slabid_t;
20typedef u64 nic_t;
21typedef unsigned long iopaddr_t;
22typedef unsigned long paddr_t;
23typedef short cnodeid_t;
24
25#endif /* _ASM_IA64_SN_TYPES_H */
diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h
new file mode 100644
index 000000000000..21a9f10d6baa
--- /dev/null
+++ b/include/asm-ia64/socket.h
@@ -0,0 +1,59 @@
1#ifndef _ASM_IA64_SOCKET_H
2#define _ASM_IA64_SOCKET_H
3
4/*
5 * Socket related defines.
6 *
7 * Based on <asm-i386/socket.h>.
8 *
9 * Modified 1998-2000
10 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
11 */
12
13#include <asm/sockios.h>
14
15/* For setsockopt(2) */
16#define SOL_SOCKET 1
17
18#define SO_DEBUG 1
19#define SO_REUSEADDR 2
20#define SO_TYPE 3
21#define SO_ERROR 4
22#define SO_DONTROUTE 5
23#define SO_BROADCAST 6
24#define SO_SNDBUF 7
25#define SO_RCVBUF 8
26#define SO_KEEPALIVE 9
27#define SO_OOBINLINE 10
28#define SO_NO_CHECK 11
29#define SO_PRIORITY 12
30#define SO_LINGER 13
31#define SO_BSDCOMPAT 14
32/* To add :#define SO_REUSEPORT 15 */
33#define SO_PASSCRED 16
34#define SO_PEERCRED 17
35#define SO_RCVLOWAT 18
36#define SO_SNDLOWAT 19
37#define SO_RCVTIMEO 20
38#define SO_SNDTIMEO 21
39
40/* Security levels - as per NRL IPv6 - don't actually do anything */
41#define SO_SECURITY_AUTHENTICATION 22
42#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
43#define SO_SECURITY_ENCRYPTION_NETWORK 24
44
45#define SO_BINDTODEVICE 25
46
47/* Socket filtering */
48#define SO_ATTACH_FILTER 26
49#define SO_DETACH_FILTER 27
50
51#define SO_PEERNAME 28
52#define SO_TIMESTAMP 29
53#define SCM_TIMESTAMP SO_TIMESTAMP
54
55#define SO_ACCEPTCONN 30
56
57#define SO_PEERSEC 31
58
59#endif /* _ASM_IA64_SOCKET_H */
diff --git a/include/asm-ia64/sockios.h b/include/asm-ia64/sockios.h
new file mode 100644
index 000000000000..cf94857c8a54
--- /dev/null
+++ b/include/asm-ia64/sockios.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_IA64_SOCKIOS_H
2#define _ASM_IA64_SOCKIOS_H
3
4/*
5 * Socket-level I/O control calls.
6 *
7 * Based on <asm-i386/sockios.h>.
8 *
9 * Modified 1998, 1999
10 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
11 */
12#define FIOSETOWN 0x8901
13#define SIOCSPGRP 0x8902
14#define FIOGETOWN 0x8903
15#define SIOCGPGRP 0x8904
16#define SIOCATMARK 0x8905
17#define SIOCGSTAMP 0x8906 /* Get stamp */
18
19#endif /* _ASM_IA64_SOCKIOS_H */
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
new file mode 100644
index 000000000000..909936f25512
--- /dev/null
+++ b/include/asm-ia64/spinlock.h
@@ -0,0 +1,208 @@
1#ifndef _ASM_IA64_SPINLOCK_H
2#define _ASM_IA64_SPINLOCK_H
3
4/*
5 * Copyright (C) 1998-2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
8 *
9 * This file is used for SMP configurations only.
10 */
11
12#include <linux/compiler.h>
13#include <linux/kernel.h>
14
15#include <asm/atomic.h>
16#include <asm/bitops.h>
17#include <asm/intrinsics.h>
18#include <asm/system.h>
19
/* SMP spinlock: 'lock' is 0 when free, non-zero when held (see the
 * cmpxchg4.acq sequences below). */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	/* NOTE(review): not touched in this file; presumably used by the
	 * generic kernel lock-break logic -- confirm in linux/spinlock.h */
	unsigned int break_lock;
#endif
} spinlock_t;
26
27#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
28#define spin_lock_init(x) ((x)->lock = 0)
29
30#ifdef ASM_SUPPORTED
31/*
32 * Try to get the lock. If we fail to get the lock, make a non-standard call to
33 * ia64_spinlock_contention(). We do not use a normal call because that would force all
34 * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
35 * carefully coded to touch only those registers that spin_lock() marks "clobbered".
36 */
37
38#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
39
static inline void
_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
	/* The lock word address is pinned in r31 so the out-of-line
	 * contention path (see the comment above) can find it. */
	register volatile unsigned int *ptr asm ("r31") = &lock->lock;

#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* Fast path: cmpxchg4.acq swaps 1 into the lock word; if the old
	 * value was non-zero (p14), branch to the contention routine with
	 * the return ip in r28 and 'flags' in r27. */
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "mov r27=%2\n\t"
		      "(p14) br.cond.spnt.many b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	/* Same fast path, using brl to reach the contention routine
	 * directly (available on McKinley and later). */
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov r27=%2\n\t"
		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#else
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* mis-declare, so we get the entry-point, not its function descriptor: */
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.call.spnt.many b6 = b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	/* gcc >= 3.3, non-Itanium: br.call/brl.call variant of the slow
	 * path (ia64_spinlock_contention returns like a normal call). */
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#endif
}
96#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
97#else /* !ASM_SUPPORTED */
98#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
99# define _raw_spin_lock(x) \
100do { \
101 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
102 __u64 ia64_spinlock_val; \
103 ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
104 if (unlikely(ia64_spinlock_val)) { \
105 do { \
106 while (*ia64_spinlock_ptr) \
107 ia64_barrier(); \
108 ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
109 } while (ia64_spinlock_val); \
110 } \
111} while (0)
112#endif /* !ASM_SUPPORTED */
113
114#define spin_is_locked(x) ((x)->lock != 0)
115#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
116#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
117#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
118
/* Reader/writer lock: the low 31 bits count active readers, bit 31 is
 * the write lock (see _raw_read_lock/_raw_write_lock below, which treat
 * the word as a signed int -- negative means a writer holds it). */
typedef struct {
	volatile unsigned int read_counter	: 31;
	volatile unsigned int write_lock	:  1;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
126#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
127
128#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
129#define read_can_lock(rw) (*(volatile int *)(rw) >= 0)
130#define write_can_lock(rw) (*(volatile int *)(rw) == 0)
131
132#define _raw_read_lock(rw) \
133do { \
134 rwlock_t *__read_lock_ptr = (rw); \
135 \
136 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
137 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
138 while (*(volatile int *)__read_lock_ptr < 0) \
139 cpu_relax(); \
140 } \
141} while (0)
142
143#define _raw_read_unlock(rw) \
144do { \
145 rwlock_t *__read_lock_ptr = (rw); \
146 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
147} while (0)
148
149#ifdef ASM_SUPPORTED
150#define _raw_write_lock(rw) \
151do { \
152 __asm__ __volatile__ ( \
153 "mov ar.ccv = r0\n" \
154 "dep r29 = -1, r0, 31, 1;;\n" \
155 "1:\n" \
156 "ld4 r2 = [%0];;\n" \
157 "cmp4.eq p0,p7 = r0,r2\n" \
158 "(p7) br.cond.spnt.few 1b \n" \
159 "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
160 "cmp4.eq p0,p7 = r0, r2\n" \
161 "(p7) br.cond.spnt.few 1b;;\n" \
162 :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
163} while(0)
164
165#define _raw_write_trylock(rw) \
166({ \
167 register long result; \
168 \
169 __asm__ __volatile__ ( \
170 "mov ar.ccv = r0\n" \
171 "dep r29 = -1, r0, 31, 1;;\n" \
172 "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
173 : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
174 (result == 0); \
175})
176
177#else /* !ASM_SUPPORTED */
178
179#define _raw_write_lock(l) \
180({ \
181 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
182 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
183 do { \
184 while (*ia64_write_lock_ptr) \
185 ia64_barrier(); \
186 ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
187 } while (ia64_val); \
188})
189
190#define _raw_write_trylock(rw) \
191({ \
192 __u64 ia64_val; \
193 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
194 ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
195 (ia64_val == 0); \
196})
197
198#endif /* !ASM_SUPPORTED */
199
200#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
201
202#define _raw_write_unlock(x) \
203({ \
204 smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
205 clear_bit(31, (x)); \
206})
207
208#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/include/asm-ia64/stat.h b/include/asm-ia64/stat.h
new file mode 100644
index 000000000000..367bb90cdffa
--- /dev/null
+++ b/include/asm-ia64/stat.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_IA64_STAT_H
2#define _ASM_IA64_STAT_H
3
4/*
5 * Modified 1998, 1999
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
7 */
8
/*
 * Native 64-bit stat buffer for ia64; time stamps carry separate
 * nanosecond fields (see STAT_HAVE_NSEC below).
 */
 9struct stat {
 10 unsigned long st_dev;
 11 unsigned long st_ino;
 12 unsigned long st_nlink;
 13 unsigned int st_mode;
 14 unsigned int st_uid;
 15 unsigned int st_gid;
 16 unsigned int __pad0; /* keeps st_rdev 8-byte aligned */
 17 unsigned long st_rdev;
 18 unsigned long st_size;
 19 unsigned long st_atime;
 20 unsigned long st_atime_nsec;
 21 unsigned long st_mtime;
 22 unsigned long st_mtime_nsec;
 23 unsigned long st_ctime;
 24 unsigned long st_ctime_nsec;
 25 unsigned long st_blksize;
 26 long st_blocks;
 27 unsigned long __unused[3]; /* reserved for future expansion */
 28};
 29
/* The st_*_nsec fields above provide nanosecond time-stamp resolution. */
 30#define STAT_HAVE_NSEC 1
 31
/*
 * Legacy stat layout with 32-bit fields and no nanosecond time stamps.
 */
 32struct ia64_oldstat {
 33 unsigned int st_dev;
 34 unsigned int st_ino;
 35 unsigned int st_mode;
 36 unsigned int st_nlink;
 37 unsigned int st_uid;
 38 unsigned int st_gid;
 39 unsigned int st_rdev;
 40 unsigned int __pad1; /* alignment padding before st_size */
 41 unsigned long st_size;
 42 unsigned long st_atime;
 43 unsigned long st_mtime;
 44 unsigned long st_ctime;
 45 unsigned int st_blksize;
 46 int st_blocks;
 47 unsigned int __unused1;
 48 unsigned int __unused2;
 49};
50
51#endif /* _ASM_IA64_STAT_H */
diff --git a/include/asm-ia64/statfs.h b/include/asm-ia64/statfs.h
new file mode 100644
index 000000000000..811097974f31
--- /dev/null
+++ b/include/asm-ia64/statfs.h
@@ -0,0 +1,62 @@
1#ifndef _ASM_IA64_STATFS_H
2#define _ASM_IA64_STATFS_H
3
4/*
5 * Based on <asm-i386/statfs.h>.
6 *
7 * Modified 1998, 1999, 2003
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */
10
11#ifndef __KERNEL_STRICT_NAMES
12# include <linux/types.h>
13typedef __kernel_fsid_t fsid_t;
14#endif
15
16/*
17 * This is ugly --- we're already 64-bit, so just duplicate the definitions
18 */
/*
 * Native statfs buffer: ia64 is 64-bit, so every counter is a plain long
 * (see the note above about duplicating the definitions).
 */
 19struct statfs {
 20 long f_type;
 21 long f_bsize;
 22 long f_blocks;
 23 long f_bfree;
 24 long f_bavail;
 25 long f_files;
 26 long f_ffree;
 27 __kernel_fsid_t f_fsid;
 28 long f_namelen;
 29 long f_frsize;
 30 long f_spare[5];
 31};
 32
 33
/*
 * statfs64 duplicates statfs field-for-field: on a 64-bit architecture
 * there is nothing wider to report.
 */
 34struct statfs64 {
 35 long f_type;
 36 long f_bsize;
 37 long f_blocks;
 38 long f_bfree;
 39 long f_bavail;
 40 long f_files;
 41 long f_ffree;
 42 __kernel_fsid_t f_fsid;
 43 long f_namelen;
 44 long f_frsize;
 45 long f_spare[5];
 46};
 47
/*
 * Layout used when servicing statfs64 for 32-bit compat callers; packed
 * so it matches the 32-bit user-space arrangement byte-for-byte.
 */
 48struct compat_statfs64 {
 49 __u32 f_type;
 50 __u32 f_bsize;
 51 __u64 f_blocks;
 52 __u64 f_bfree;
 53 __u64 f_bavail;
 54 __u64 f_files;
 55 __u64 f_ffree;
 56 __kernel_fsid_t f_fsid;
 57 __u32 f_namelen;
 58 __u32 f_frsize;
 59 __u32 f_spare[5];
 60} __attribute__((packed));
61
62#endif /* _ASM_IA64_STATFS_H */
diff --git a/include/asm-ia64/string.h b/include/asm-ia64/string.h
new file mode 100644
index 000000000000..43502d3b57e5
--- /dev/null
+++ b/include/asm-ia64/string.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_IA64_STRING_H
2#define _ASM_IA64_STRING_H
3
4/*
5 * Here is where we want to put optimized versions of the string
6 * routines.
7 *
8 * Copyright (C) 1998-2000, 2002 Hewlett-Packard Co
9 * David Mosberger-Tang <davidm@hpl.hp.com>
10 */
11
12#include <linux/config.h> /* remove this once we remove the A-step workaround... */
13
/* Tell <linux/string.h> that ia64 supplies optimized assembly versions. */
 14#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */
 15#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */
 16#define __HAVE_ARCH_MEMCPY 1 /* see arch/ia64/lib/memcpy.S */
 17
/* Prototypes for the assembly implementations referenced above. */
 18extern __kernel_size_t strlen (const char *);
 19extern void *memcpy (void *, const void *, __kernel_size_t);
 20extern void *memset (void *, int, __kernel_size_t);
21
22#endif /* _ASM_IA64_STRING_H */
diff --git a/include/asm-ia64/suspend.h b/include/asm-ia64/suspend.h
new file mode 100644
index 000000000000..b05bbb6074e2
--- /dev/null
+++ b/include/asm-ia64/suspend.h
@@ -0,0 +1 @@
/* dummy (must be non-empty to prevent prejudicial removal...) */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
new file mode 100644
index 000000000000..6f516e76d1f0
--- /dev/null
+++ b/include/asm-ia64/system.h
@@ -0,0 +1,295 @@
1#ifndef _ASM_IA64_SYSTEM_H
2#define _ASM_IA64_SYSTEM_H
3
4/*
5 * System defines. Note that this is included both from .c and .S
6 * files, so it does only defines, not any C code. This is based
7 * on information published in the Processor Abstraction Layer
8 * and the System Abstraction Layer manual.
9 *
10 * Copyright (C) 1998-2003 Hewlett-Packard Co
11 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
13 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
14 */
15#include <linux/config.h>
16
17#include <asm/kregs.h>
18#include <asm/page.h>
19#include <asm/pal.h>
20#include <asm/percpu.h>
21
22#define GATE_ADDR __IA64_UL_CONST(0xa000000000000000)
23/*
24 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
25 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
26 */
27#define KERNEL_START __IA64_UL_CONST(0xa000000100000000)
28#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
29
30#ifndef __ASSEMBLY__
31
32#include <linux/kernel.h>
33#include <linux/types.h>
34
/*
 * One PCI interrupt-routing entry: maps a device (segment/bus/id/pin)
 * to its assigned IRQ (per the ACPI encoding noted below).
 */
 35struct pci_vector_struct {
 36 __u16 segment; /* PCI Segment number */
 37 __u16 bus; /* PCI Bus number */
 38 __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
 39 __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
 40 __u32 irq; /* IRQ assigned */
 41};
42
/*
 * Boot parameter block handed to the kernel by the boot loader; address
 * fields are physical addresses (per the per-field comments below).
 */
 43extern struct ia64_boot_param {
 44 __u64 command_line; /* physical address of command line arguments */
 45 __u64 efi_systab; /* physical address of EFI system table */
 46 __u64 efi_memmap; /* physical address of EFI memory map */
 47 __u64 efi_memmap_size; /* size of EFI memory map */
 48 __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */
 49 __u32 efi_memdesc_version; /* memory descriptor version */
 50 struct {
 51 __u16 num_cols; /* number of columns on console output device */
 52 __u16 num_rows; /* number of rows on console output device */
 53 __u16 orig_x; /* cursor's x position */
 54 __u16 orig_y; /* cursor's y position */
 55 } console_info;
 56 __u64 fpswa; /* physical address of the fpswa interface */
 57 __u64 initrd_start;
 58 __u64 initrd_size;
 59} *ia64_boot_param;
60
61/*
62 * Macros to force memory ordering. In these descriptions, "previous"
63 * and "subsequent" refer to program order; "visible" means that all
64 * architecturally visible effects of a memory access have occurred
65 * (at a minimum, this means the memory has been read or written).
66 *
67 * wmb(): Guarantees that all preceding stores to memory-
68 * like regions are visible before any subsequent
69 * stores and that all following stores will be
70 * visible only after all previous stores.
71 * rmb(): Like wmb(), but for reads.
72 * mb(): wmb()/rmb() combo, i.e., all previous memory
73 * accesses are visible before all subsequent
74 * accesses and vice versa. This is also known as
75 * a "fence."
76 *
77 * Note: "mb()" and its variants cannot be used as a fence to order
78 * accesses to memory mapped I/O registers. For that, mf.a needs to
79 * be used. However, we don't want to always use mf.a because (a)
80 * it's (presumably) much slower than mf and (b) mf.a is supported for
81 * sequential memory pages only.
82 */
/* ia64 has only a full memory fence (mf), so rmb/wmb both map to mb. */
 83#define mb() ia64_mf()
 84#define rmb() mb()
 85#define wmb() mb()
/* Defined as a no-op here: no barrier emitted for data-dependent reads. */
 86#define read_barrier_depends() do { } while(0)
 87
 88#ifdef CONFIG_SMP
 89# define smp_mb() mb()
 90# define smp_rmb() rmb()
 91# define smp_wmb() wmb()
 92# define smp_read_barrier_depends() read_barrier_depends()
 93#else
/* Uniprocessor: a compiler barrier is sufficient. */
 94# define smp_mb() barrier()
 95# define smp_rmb() barrier()
 96# define smp_wmb() barrier()
 97# define smp_read_barrier_depends() do { } while(0)
 98#endif
 99
100/*
101 * XXX check on these---I suspect what Linus really wants here is
102 * acquire vs release semantics but we can't discuss this stuff with
103 * Linus just yet. Grrr...
104 */
/* Store the value, then issue a full fence (both variants use mb()). */
105#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
106#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
107
108#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
109
110/*
111 * The group barrier in front of the rsm & ssm are necessary to ensure
112 * that none of the previous instructions in the same group are
113 * affected by the rsm/ssm.
114 */
115/* For spinlocks etc */
116
117/*
118 * - clearing psr.i is implicitly serialized (visible by next insn)
119 * - setting psr.i requires data serialization
120 * - we need a stop-bit before reading PSR because we sometimes
121 * write a floating-point register right before reading the PSR
122 * and that writes to PSR.mfl
123 */
/* Save the current PSR into X, then clear psr.i (disable interrupts). */
124#define __local_irq_save(x) \
125do { \
126 ia64_stop(); \
127 (x) = ia64_getreg(_IA64_REG_PSR); \
128 ia64_stop(); \
129 ia64_rsm(IA64_PSR_I); \
130} while (0)
131
/* Disable interrupts without saving the previous state. */
132#define __local_irq_disable() \
133do { \
134 ia64_stop(); \
135 ia64_rsm(IA64_PSR_I); \
136} while (0)
137
/* Restore only the interrupt-enable bit (psr.i) from the saved flags X. */
138#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
139
140#ifdef CONFIG_IA64_DEBUG_IRQ
141
/* IP of the most recent interrupt-disable, for IRQ debugging. */
142 extern unsigned long last_cli_ip;
143
144# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP)
145
/* Debug variant: record the caller's IP when this call actually disables
 * interrupts (psr.i was set before). */
146# define local_irq_save(x) \
147do { \
148 unsigned long psr; \
149 \
150 __local_irq_save(psr); \
151 if (psr & IA64_PSR_I) \
152 __save_ip(); \
153 (x) = psr; \
154} while (0)
155
156# define local_irq_disable() do { unsigned long x; local_irq_save(x); } while (0)
157
/* Debug variant: record the IP when restoring transitions from enabled
 * to disabled. */
158# define local_irq_restore(x) \
159do { \
160 unsigned long old_psr, psr = (x); \
161 \
162 local_save_flags(old_psr); \
163 __local_irq_restore(psr); \
164 if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) \
165 __save_ip(); \
166} while (0)
167
168#else /* !CONFIG_IA64_DEBUG_IRQ */
169# define local_irq_save(x) __local_irq_save(x)
170# define local_irq_disable() __local_irq_disable()
171# define local_irq_restore(x) __local_irq_restore(x)
172#endif /* !CONFIG_IA64_DEBUG_IRQ */
173
/* Enabling requires data serialization (srlz.d); see the note above. */
174#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
175#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
176
/* True when psr.i is clear (interrupts disabled on this CPU). */
177#define irqs_disabled() \
178({ \
179 unsigned long __ia64_id_flags; \
180 local_save_flags(__ia64_id_flags); \
181 (__ia64_id_flags & IA64_PSR_I) == 0; \
182})
183
184#ifdef __KERNEL__
185
186#define prepare_to_switch() do { } while(0)
187
188#ifdef CONFIG_IA32_SUPPORT
189# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
190#else
191# define IS_IA32_PROCESS(regs) 0
192struct task_struct;
193static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
194static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
195#endif
196
197/*
198 * Context switch from one thread to another. If the two threads have
199 * different address spaces, schedule() has already taken care of
200 * switching to the new address space by calling switch_mm().
201 *
202 * Disabling access to the fph partition and the debug-register
203 * context switch MUST be done before calling ia64_switch_to() since a
204 * newly created thread returns directly to
205 * ia64_ret_from_syscall_clear_r8.
206 */
207extern struct task_struct *ia64_switch_to (void *next_task);
208
209struct task_struct;
210
211extern void ia64_save_extra (struct task_struct *task);
212extern void ia64_load_extra (struct task_struct *task);
213
214#ifdef CONFIG_PERFMON
215 DECLARE_PER_CPU(unsigned long, pfm_syst_info);
216# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
217#else
218# define PERFMON_IS_SYSWIDE() (0)
219#endif
220
/*
 * Non-zero when task T carries extra per-task state (debug registers,
 * perfmon, or IA-32 emulation state) that the generic switch path does
 * not handle by itself.
 */
221#define IA64_HAS_EXTRA_STATE(t) \
222 ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
223 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
224
/*
 * Core context switch: save/load any extra state, then set psr.dfh so
 * NEXT traps on its first fph (high FP partition) access unless it is
 * already the local FPU owner (lazy FP restore), and finally perform
 * the low-level register switch.
 */
225#define __switch_to(prev,next,last) do { \
226 if (IA64_HAS_EXTRA_STATE(prev)) \
227 ia64_save_extra(prev); \
228 if (IA64_HAS_EXTRA_STATE(next)) \
229 ia64_load_extra(next); \
230 ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
231 (last) = ia64_switch_to((next)); \
232} while (0)
233
234#ifdef CONFIG_SMP
235/*
236 * In the SMP case, we save the fph state when context-switching away from a thread that
237 * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can
238 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
239 * the latest fph state from another CPU. In other words: eager save, lazy restore.
240 */
241# define switch_to(prev,next,last) do { \
242 if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
243 ia64_psr(ia64_task_regs(prev))->mfh = 0; \
244 (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
245 __ia64_save_fpu((prev)->thread.fph); \
246 } \
247 __switch_to(prev, next, last); \
248} while (0)
249#else
250# define switch_to(prev,next,last) __switch_to(prev, next, last)
251#endif
252
253/*
254 * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
255 * because that could cause a deadlock. Here is an example by Erich Focht:
256 *
257 * Example:
258 * CPU#0:
259 * schedule()
260 * -> spin_lock_irq(&rq->lock)
261 * -> context_switch()
262 * -> wrap_mmu_context()
263 * -> read_lock(&tasklist_lock)
264 *
265 * CPU#1:
266 * sys_wait4() or release_task() or forget_original_parent()
267 * -> write_lock(&tasklist_lock)
268 * -> do_notify_parent()
269 * -> wake_up_parent()
270 * -> try_to_wake_up()
271 * -> spin_lock_irq(&parent_rq->lock)
272 *
273 * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
274 * of that CPU which will not be released, because there we wait for the
275 * tasklist_lock to become available.
276 */
/*
 * Take NEXT's switch_lock before dropping the runqueue lock (see the
 * deadlock discussion above).  task_running() treats a task as running
 * while its switch_lock is still held.
 */
277#define prepare_arch_switch(rq, next) \
278do { \
279 spin_lock(&(next)->switch_lock); \
280 spin_unlock(&(rq)->lock); \
281} while (0)
282#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
283#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
284
285#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
286
287void cpu_idle_wait(void);
288
289#define arch_align_stack(x) (x)
290
291#endif /* __KERNEL__ */
292
293#endif /* __ASSEMBLY__ */
294
295#endif /* _ASM_IA64_SYSTEM_H */
diff --git a/include/asm-ia64/termbits.h b/include/asm-ia64/termbits.h
new file mode 100644
index 000000000000..b9e843f7dc42
--- /dev/null
+++ b/include/asm-ia64/termbits.h
@@ -0,0 +1,182 @@
1#ifndef _ASM_IA64_TERMBITS_H
2#define _ASM_IA64_TERMBITS_H
3
4/*
5 * Based on <asm-i386/termbits.h>.
6 *
7 * Modified 1999
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 *
10 * 99/01/28 Added new baudrates
11 */
12
13#include <linux/posix_types.h>
14
/* Terminal control-character, speed, and flag types (POSIX termios model). */
 15typedef unsigned char cc_t;
 16typedef unsigned int speed_t;
 17typedef unsigned int tcflag_t;
 18
/* Number of entries in the c_cc[] control-character array. */
 19#define NCCS 19
/* Terminal state, based on <asm-i386/termbits.h> (see header comment). */
 20struct termios {
 21 tcflag_t c_iflag; /* input mode flags */
 22 tcflag_t c_oflag; /* output mode flags */
 23 tcflag_t c_cflag; /* control mode flags */
 24 tcflag_t c_lflag; /* local mode flags */
 25 cc_t c_line; /* line discipline */
 26 cc_t c_cc[NCCS]; /* control characters */
 27};
28
29/* c_cc characters */
30#define VINTR 0
31#define VQUIT 1
32#define VERASE 2
33#define VKILL 3
34#define VEOF 4
35#define VTIME 5
36#define VMIN 6
37#define VSWTC 7
38#define VSTART 8
39#define VSTOP 9
40#define VSUSP 10
41#define VEOL 11
42#define VREPRINT 12
43#define VDISCARD 13
44#define VWERASE 14
45#define VLNEXT 15
46#define VEOL2 16
47
48/* c_iflag bits */
49#define IGNBRK 0000001
50#define BRKINT 0000002
51#define IGNPAR 0000004
52#define PARMRK 0000010
53#define INPCK 0000020
54#define ISTRIP 0000040
55#define INLCR 0000100
56#define IGNCR 0000200
57#define ICRNL 0000400
58#define IUCLC 0001000
59#define IXON 0002000
60#define IXANY 0004000
61#define IXOFF 0010000
62#define IMAXBEL 0020000
63#define IUTF8 0040000
64
65/* c_oflag bits */
66#define OPOST 0000001
67#define OLCUC 0000002
68#define ONLCR 0000004
69#define OCRNL 0000010
70#define ONOCR 0000020
71#define ONLRET 0000040
72#define OFILL 0000100
73#define OFDEL 0000200
74#define NLDLY 0000400
75#define NL0 0000000
76#define NL1 0000400
77#define CRDLY 0003000
78#define CR0 0000000
79#define CR1 0001000
80#define CR2 0002000
81#define CR3 0003000
82#define TABDLY 0014000
83#define TAB0 0000000
84#define TAB1 0004000
85#define TAB2 0010000
86#define TAB3 0014000
87#define XTABS 0014000
88#define BSDLY 0020000
89#define BS0 0000000
90#define BS1 0020000
91#define VTDLY 0040000
92#define VT0 0000000
93#define VT1 0040000
94#define FFDLY 0100000
95#define FF0 0000000
96#define FF1 0100000
97
98/* c_cflag bit meaning */
99#define CBAUD 0010017
100#define B0 0000000 /* hang up */
101#define B50 0000001
102#define B75 0000002
103#define B110 0000003
104#define B134 0000004
105#define B150 0000005
106#define B200 0000006
107#define B300 0000007
108#define B600 0000010
109#define B1200 0000011
110#define B1800 0000012
111#define B2400 0000013
112#define B4800 0000014
113#define B9600 0000015
114#define B19200 0000016
115#define B38400 0000017
116#define EXTA B19200
117#define EXTB B38400
118#define CSIZE 0000060
119#define CS5 0000000
120#define CS6 0000020
121#define CS7 0000040
122#define CS8 0000060
123#define CSTOPB 0000100
124#define CREAD 0000200
125#define PARENB 0000400
126#define PARODD 0001000
127#define HUPCL 0002000
128#define CLOCAL 0004000
129#define CBAUDEX 0010000
130#define B57600 0010001
131#define B115200 0010002
132#define B230400 0010003
133#define B460800 0010004
134#define B500000 0010005
135#define B576000 0010006
136#define B921600 0010007
137#define B1000000 0010010
138#define B1152000 0010011
139#define B1500000 0010012
140#define B2000000 0010013
141#define B2500000 0010014
142#define B3000000 0010015
143#define B3500000 0010016
144#define B4000000 0010017
145#define CIBAUD 002003600000 /* input baud rate (not used) */
146#define CMSPAR 010000000000 /* mark or space (stick) parity */
147#define CRTSCTS 020000000000 /* flow control */
148
149/* c_lflag bits */
150#define ISIG 0000001
151#define ICANON 0000002
152#define XCASE 0000004
153#define ECHO 0000010
154#define ECHOE 0000020
155#define ECHOK 0000040
156#define ECHONL 0000100
157#define NOFLSH 0000200
158#define TOSTOP 0000400
159#define ECHOCTL 0001000
160#define ECHOPRT 0002000
161#define ECHOKE 0004000
162#define FLUSHO 0010000
163#define PENDIN 0040000
164#define IEXTEN 0100000
165
166/* tcflow() and TCXONC use these */
167#define TCOOFF 0
168#define TCOON 1
169#define TCIOFF 2
170#define TCION 3
171
172/* tcflush() and TCFLSH use these */
173#define TCIFLUSH 0
174#define TCOFLUSH 1
175#define TCIOFLUSH 2
176
177/* tcsetattr uses these */
178#define TCSANOW 0
179#define TCSADRAIN 1
180#define TCSAFLUSH 2
181
182#endif /* _ASM_IA64_TERMBITS_H */
diff --git a/include/asm-ia64/termios.h b/include/asm-ia64/termios.h
new file mode 100644
index 000000000000..42c95693240c
--- /dev/null
+++ b/include/asm-ia64/termios.h
@@ -0,0 +1,113 @@
1#ifndef _ASM_IA64_TERMIOS_H
2#define _ASM_IA64_TERMIOS_H
3
4/*
5 * Modified 1999
6 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
7 *
8 * 99/01/28 Added N_IRDA and N_SMSBLOCK
9 */
10
11#include <asm/termbits.h>
12#include <asm/ioctls.h>
13
/* Terminal window dimensions, in character cells and in pixels. */
 14struct winsize {
 15 unsigned short ws_row;
 16 unsigned short ws_col;
 17 unsigned short ws_xpixel;
 18 unsigned short ws_ypixel;
 19};
 20
/* Number of control characters in the legacy termio structure. */
 21#define NCC 8
/* Legacy terminal state: the older, 16-bit-flag counterpart of termios. */
 22struct termio {
 23 unsigned short c_iflag; /* input mode flags */
 24 unsigned short c_oflag; /* output mode flags */
 25 unsigned short c_cflag; /* control mode flags */
 26 unsigned short c_lflag; /* local mode flags */
 27 unsigned char c_line; /* line discipline */
 28 unsigned char c_cc[NCC]; /* control characters */
 29};
30
31/* modem lines */
32#define TIOCM_LE 0x001
33#define TIOCM_DTR 0x002
34#define TIOCM_RTS 0x004
35#define TIOCM_ST 0x008
36#define TIOCM_SR 0x010
37#define TIOCM_CTS 0x020
38#define TIOCM_CAR 0x040
39#define TIOCM_RNG 0x080
40#define TIOCM_DSR 0x100
41#define TIOCM_CD TIOCM_CAR
42#define TIOCM_RI TIOCM_RNG
43#define TIOCM_OUT1 0x2000
44#define TIOCM_OUT2 0x4000
45#define TIOCM_LOOP 0x8000
46
47/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
48
49/* line disciplines */
50#define N_TTY 0
51#define N_SLIP 1
52#define N_MOUSE 2
53#define N_PPP 3
54#define N_STRIP 4
55#define N_AX25 5
56#define N_X25 6 /* X.25 async */
57#define N_6PACK 7
58#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
59#define N_R3964 9 /* Reserved for Simatic R3964 module */
60#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
61#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
62#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS msgs */
63#define N_HDLC 13 /* synchronous HDLC */
64#define N_SYNC_PPP 14 /* synchronous PPP */
65#define N_HCI 15 /* Bluetooth HCI UART */
66
67# ifdef __KERNEL__
68
69/* intr=^C quit=^\ erase=del kill=^U
70 eof=^D vtime=\0 vmin=\1 sxtc=\0
71 start=^Q stop=^S susp=^Z eol=\0
72 reprint=^R discard=^U werase=^W lnext=^V
73 eol2=\0
74*/
75#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
76
77/*
78 * Translate a "termio" structure into a "termios". Ugh.
79 */
/*
 * Copy one 16-bit termio flag field from user space (via get_user) into
 * the low half of the matching 32-bit termios field.
 */
 80#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
 81 unsigned short __tmp; \
 82 get_user(__tmp,&(termio)->x); \
 83 *(unsigned short *) &(termios)->x = __tmp; \
 84}
 85
/* Expression value is the return of the final copy_from_user() (0 = ok). */
 86#define user_termio_to_kernel_termios(termios, termio) \
 87({ \
 88 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
 89 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
 90 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
 91 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
 92 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
 93})
 94
 95/*
 96 * Translate a "termios" structure into a "termio". Ugh.
 97 */
/* The 32-bit flag fields are truncated to 16 bits by put_user() here. */
 98#define kernel_termios_to_user_termio(termio, termios) \
 99({ \
100 put_user((termios)->c_iflag, &(termio)->c_iflag); \
101 put_user((termios)->c_oflag, &(termio)->c_oflag); \
102 put_user((termios)->c_cflag, &(termio)->c_cflag); \
103 put_user((termios)->c_lflag, &(termio)->c_lflag); \
104 put_user((termios)->c_line, &(termio)->c_line); \
105 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
106})
107
/* Whole-structure copies for the modern termios ioctls. */
108#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
109#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
110
111# endif /* __KERNEL__ */
112
113#endif /* _ASM_IA64_TERMIOS_H */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
new file mode 100644
index 000000000000..8d5b7e77028c
--- /dev/null
+++ b/include/asm-ia64/thread_info.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (C) 2002-2003 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 */
5#ifndef _ASM_IA64_THREAD_INFO_H
6#define _ASM_IA64_THREAD_INFO_H
7
8#include <asm/offsets.h>
9#include <asm/processor.h>
10#include <asm/ptrace.h>
11
12#define PREEMPT_ACTIVE_BIT 30
13#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
14
15#ifndef __ASSEMBLY__
16
17/*
18 * On IA-64, we want to keep the task structure and kernel stack together, so they can be
19 * mapped by a single TLB entry and so they can be addressed by the "current" pointer
20 * without having to do pointer masking.
21 */
/*
 * Per-task kernel bookkeeping; kept in the same allocation as the
 * task structure / kernel stack (see the comment above).
 */
 22struct thread_info {
 23 struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */
 24 struct exec_domain *exec_domain;/* execution domain */
 25 __u32 flags; /* thread_info flags (see TIF_*) */
 26 __u32 cpu; /* current CPU */
 27 mm_segment_t addr_limit; /* user-level address space limit */
 28 __s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
 29 struct restart_block restart_block; /* state for restartable syscalls */
 30 struct {
 31 int signo;
 32 int code;
 33 void __user *addr;
 34 unsigned long start_time;
 35 pid_t pid;
 36 } sigdelayed; /* Saved information for TIF_SIGDELAYED */
 37};
38
/* Kernel stack (and thus thread_info allocation) size on ia64. */
 39#define THREAD_SIZE KERNEL_STACK_SIZE
 40
/* Static initializer for the boot task's thread_info. */
 41#define INIT_THREAD_INFO(tsk) \
 42{ \
 43 .task = &tsk, \
 44 .exec_domain = &default_exec_domain, \
 45 .flags = 0, \
 46 .cpu = 0, \
 47 .addr_limit = KERNEL_DS, \
 48 .preempt_count = 0, \
 49 .restart_block = { \
 50 .fn = do_no_restart_syscall, \
 51 }, \
 52}
 53
 54/* how to get the thread information struct from C */
 55#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
 56#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
/* thread_info lives inside the task allocation, so there is nothing to free. */
 57#define free_thread_info(ti) /* nothing */
 58
 59#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
/* Task struct + stack come from one higher-order page allocation. */
 60#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
 61#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
62
63#endif /* !__ASSEMBLY */
64
65/*
66 * thread information flags
67 * - these are process state flags that various assembly files may need to access
68 * - pending work-to-be-done flags are in least-significant 16 bits, other flags
69 * in top 16 bits
70 */
71#define TIF_NOTIFY_RESUME 0 /* resumption notification requested */
72#define TIF_SIGPENDING 1 /* signal pending */
73#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
74#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
75#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
76#define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */
77#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
78#define TIF_MEMDIE 17
79
80#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
81#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
82#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
83#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
84#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
85#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
86#define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED)
87#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
88
89/* "work to do on user-return" bits */
90#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
91/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
92#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
93
94#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h
new file mode 100644
index 000000000000..414aae060440
--- /dev/null
+++ b/include/asm-ia64/timex.h
@@ -0,0 +1,40 @@
1#ifndef _ASM_IA64_TIMEX_H
2#define _ASM_IA64_TIMEX_H
3
4/*
5 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8/*
9 * 2001/01/18 davidm Removed CLOCK_TICK_RATE. It makes no sense on IA-64.
10 * Also removed cacheflush_time as it's entirely unused.
11 */
12
13#include <asm/intrinsics.h>
14#include <asm/processor.h>
15
16typedef unsigned long cycles_t;
17
18/*
19 * For performance reasons, we don't want to define CLOCK_TICK_TRATE as
20 * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George
21 * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock. The time
22 * calculation assumes that you will use enough of these so that your tick size <= 1/HZ.
23 * If the calculation shows that your CLOCK_TICK_RATE can not supply exactly 1/HZ ticks,
24 * the actual value is calculated and used to update the wall clock each jiffie. Setting
25 * the CLOCK_TICK_RATE to x*HZ insures that the calculation will find no errors. Hence we
26 * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about
27 * 100MHz.
28 */
29#define CLOCK_TICK_RATE (HZ * 100000UL)
30
31static inline cycles_t
32get_cycles (void)
33{
34 cycles_t ret;
35
36 ret = ia64_getreg(_IA64_REG_AR_ITC);
37 return ret;
38}
39
40#endif /* _ASM_IA64_TIMEX_H */
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
new file mode 100644
index 000000000000..3a9a6d1be75c
--- /dev/null
+++ b/include/asm-ia64/tlb.h
@@ -0,0 +1,245 @@
1#ifndef _ASM_IA64_TLB_H
2#define _ASM_IA64_TLB_H
3/*
4 * Based on <asm-generic/tlb.h>.
5 *
6 * Copyright (C) 2002-2003 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 */
9/*
10 * Removing a translation from a page table (including TLB-shootdown) is a four-step
11 * procedure:
12 *
13 * (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
14 * (this is a no-op on ia64).
15 * (2) Clear the relevant portions of the page-table
16 * (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
17 * (4) Release the pages that were freed up in step (2).
18 *
19 * Note that the ordering of these steps is crucial to avoid races on MP machines.
20 *
21 * The Linux kernel defines several platform-specific hooks for TLB-shootdown. When
22 * unmapping a portion of the virtual address space, these hooks are called according to
23 * the following template:
24 *
25 * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
26 * {
27 * for each vma that needs a shootdown do {
28 * tlb_start_vma(tlb, vma);
29 * for each page-table-entry PTE that needs to be removed do {
30 * tlb_remove_tlb_entry(tlb, pte, address);
31 * if (pte refers to a normal page) {
32 * tlb_remove_page(tlb, page);
33 * }
34 * }
35 * tlb_end_vma(tlb, vma);
36 * }
37 * }
38 * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
39 */
40#include <linux/config.h>
41#include <linux/mm.h>
42#include <linux/pagemap.h>
43#include <linux/swap.h>
44
45#include <asm/pgalloc.h>
46#include <asm/processor.h>
47#include <asm/tlbflush.h>
48#include <asm/machvec.h>
49
50#ifdef CONFIG_SMP
51# define FREE_PTE_NR 2048
52# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
53#else
54# define FREE_PTE_NR 0
55# define tlb_fast_mode(tlb) (1)
56#endif
57
/*
 * Per-CPU gather state for a TLB-shootdown pass (see the big comment at
 * the top of this file for the calling protocol).
 */
 58struct mmu_gather {
 59 struct mm_struct *mm;
 60 unsigned int nr; /* == ~0U => fast mode */
 61 unsigned char fullmm; /* non-zero means full mm flush */
 62 unsigned char need_flush; /* really unmapped some PTEs? */
 63 unsigned long freed; /* number of pages freed */
 64 unsigned long start_addr; /* start of pending flush range; ~0UL = none recorded yet */
 65 unsigned long end_addr; /* end of pending flush range */
 66 struct page *pages[FREE_PTE_NR]; /* pages whose freeing is deferred until after the flush */
 67};
68
69/* Users of the generic TLB shootdown code must declare this storage space. */
70DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
71
72/*
73 * Flush the TLB for address range START to END and, if not in fast mode, release the
74 * freed pages that where gathered up to this point.
75 */
/*
 * ia64_tlb_flush_mmu: flush the TLB for [START, END) and, when not in
 * fast mode, release the pages gathered so far.  Clears need_flush and
 * resets nr/start_addr so the gather can accumulate a new batch.
 */
 76static inline void
 77ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 78{
 79 unsigned int nr;
 80
 81 if (!tlb->need_flush)
 82 return;
 83 tlb->need_flush = 0;
 84
 85 if (tlb->fullmm) {
 86 /*
 87 * Tearing down the entire address space. This happens both as a result
 88 * of exit() and execve(). The latter case necessitates the call to
 89 * flush_tlb_mm() here.
 90 */
 91 flush_tlb_mm(tlb->mm);
 92 } else if (unlikely (end - start >= 1024*1024*1024*1024UL
 93 || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
 94 {
 95 /*
 96 * If we flush more than a tera-byte or across regions, we're probably
 97 * better off just flushing the entire TLB(s). This should be very rare
 98 * and is not worth optimizing for.
 99 */
100 flush_tlb_all();
101 } else {
102 /*
103 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
104 * vma pointer.
105 */
106 struct vm_area_struct vma;
107
108 vma.vm_mm = tlb->mm;
109 /* flush the address range from the tlb: */
110 flush_tlb_range(&vma, start, end);
111 /* now flush the virt. page-table area mapping the address range: */
112 flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
113 }
114
115 /* lastly, release the freed pages */
116 nr = tlb->nr;
117 if (!tlb_fast_mode(tlb)) {
118 unsigned long i;
119 tlb->nr = 0;
120 tlb->start_addr = ~0UL;
121 for (i = 0; i < nr; ++i)
122 free_page_and_swap_cache(tlb->pages[i]);
123 }
124}
125
126/*
127 * Return a pointer to an initialized struct mmu_gather.
128 */
/*
 * tlb_gather_mmu: grab this CPU's mmu_gather and initialize it for a new
 * unmap operation over MM.  FULL_MM_FLUSH is non-zero when the whole
 * address space is being torn down.
 */
129static inline struct mmu_gather *
130tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
131{
132 struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);
133
134 tlb->mm = mm;
135 /*
136 * Use fast mode if only 1 CPU is online.
137 *
138 * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
139 * doesn't work because of speculative accesses and software prefetching: the page
140 * table of "mm" may (and usually is) the currently active page table and even
141 * though the kernel won't do any user-space accesses during the TLB shoot down, a
142 * compiler might use speculation or lfetch.fault on what happens to be a valid
143 * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
144 * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
145 * problems. (We could make fast-mode work by switching the current task to a
146 * different "mm" during the shootdown.) --davidm 08/02/2002
147 */
148 tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
149 tlb->fullmm = full_mm_flush;
150 tlb->freed = 0;
151 tlb->start_addr = ~0UL;
152 return tlb;
153}
154
155/*
156 * Called at the end of the shootdown operation to free up any resources that were
157 * collected. The page table lock is still held at this point.
158 */
/*
 * tlb_finish_mmu: complete the shootdown -- charge the freed pages
 * against the mm's RSS (clamped so the counter cannot go negative),
 * perform the final flush/release, and trim the page-table cache.
 */
159static inline void
160tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
161{
162 unsigned long freed = tlb->freed;
163 struct mm_struct *mm = tlb->mm;
164 unsigned long rss = get_mm_counter(mm, rss);
165
166 if (rss < freed)
167 freed = rss;
168 add_mm_counter(mm, rss, -freed);
169 /*
170 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
171 * tlb->end_addr.
172 */
173 ia64_tlb_flush_mmu(tlb, start, end);
174
175 /* keep the page table cache within bounds */
176 check_pgt_cache();
177}
178
179static inline unsigned int
180tlb_is_full_mm(struct mmu_gather *tlb)
181{
182 return tlb->fullmm;
183}
184
185/*
186 * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
187 * must be delayed until after the TLB has been flushed (see comments at the beginning of
188 * this file).
189 */
190static inline void
191tlb_remove_page (struct mmu_gather *tlb, struct page *page)
192{
193 tlb->need_flush = 1;
194
195 if (tlb_fast_mode(tlb)) {
 /* single online CPU: no shootdown needed, free immediately */
196 free_page_and_swap_cache(page);
197 return;
198 }
 /* defer the free; flush the whole batch once the array fills up */
199 tlb->pages[tlb->nr++] = page;
200 if (tlb->nr >= FREE_PTE_NR)
201 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
202}
203
204/*
205 * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
206 * PTE, not just those pointing to (normal) physical memory.
207 */
208static inline void
209__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
210{
211 if (tlb->start_addr == ~0UL)
212 tlb->start_addr = address;
213 tlb->end_addr = address + PAGE_SIZE;
214}
215
/* Let the platform do any TLB-related work after task migration. */
216#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
217
/* Range tracking happens per-PTE on ia64, so the VMA hooks are no-ops. */
218#define tlb_start_vma(tlb, vma) do { } while (0)
219#define tlb_end_vma(tlb, vma) do { } while (0)
220
/* Each wrapper below marks the gather dirty before delegating. */
221#define tlb_remove_tlb_entry(tlb, ptep, addr) \
222do { \
223 tlb->need_flush = 1; \
224 __tlb_remove_tlb_entry(tlb, ptep, addr); \
225} while (0)
226
227#define pte_free_tlb(tlb, ptep) \
228do { \
229 tlb->need_flush = 1; \
230 __pte_free_tlb(tlb, ptep); \
231} while (0)
232
233#define pmd_free_tlb(tlb, ptep) \
234do { \
235 tlb->need_flush = 1; \
236 __pmd_free_tlb(tlb, ptep); \
237} while (0)
238
239#define pud_free_tlb(tlb, pudp) \
240do { \
241 tlb->need_flush = 1; \
242 __pud_free_tlb(tlb, pudp); \
243} while (0)
244
245#endif /* _ASM_IA64_TLB_H */
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
new file mode 100644
index 000000000000..b65c62702724
--- /dev/null
+++ b/include/asm-ia64/tlbflush.h
@@ -0,0 +1,99 @@
1#ifndef _ASM_IA64_TLBFLUSH_H
2#define _ASM_IA64_TLBFLUSH_H
3
4/*
5 * Copyright (C) 2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/config.h>
10
11#include <linux/mm.h>
12
13#include <asm/intrinsics.h>
14#include <asm/mmu_context.h>
15#include <asm/page.h>
16
17/*
18 * Now for some TLB flushing routines. This is the kind of stuff that
19 * can be very expensive, so try to avoid them whenever possible.
20 */
21
22/*
23 * Flush everything (kernel mapping may also have changed due to
24 * vmalloc/vfree).
25 */
26extern void local_flush_tlb_all (void);
27
28#ifdef CONFIG_SMP
29 extern void smp_flush_tlb_all (void);
30 extern void smp_flush_tlb_mm (struct mm_struct *mm);
31# define flush_tlb_all() smp_flush_tlb_all()
32#else
33# define flush_tlb_all() local_flush_tlb_all()
34#endif
35
/*
 * Finish an mm-wide flush on this CPU. If MM is the mm currently active
 * here, re-activate its context (presumably installing fresh region IDs so
 * stale translations become unreachable -- see activate_context() in
 * asm/mmu_context.h). For any other mm there is nothing to do locally.
 */
static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}
42
/*
 * Flush a specified user mapping. This is called, e.g., as a result of fork() and
 * exit(). fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/* Invalidate the mm's context; a fresh one is presumably assigned on the
	 * next activation (see mmu_context.h), orphaning all old TLB entries. */
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
65
66extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
67
/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	/* SMP: reuse the ranged flush for a single page-aligned page. */
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		/* Purge the local TLB entry; the page-size field of ptc.l is
		 * encoded as log2(page size) shifted into place. */
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		/* Not the live mm: cheaper to invalidate its context so all of
		 * its stale entries die when it is next activated. */
		vma->vm_mm->context = 0;
#endif
}
83
/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated. The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h). Kept as an intentionally empty stub because generic
	 * mm code still invokes this hook.
	 */
}
96
97#define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */
98
99#endif /* _ASM_IA64_TLBFLUSH_H */
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
new file mode 100644
index 000000000000..21cf351fd05c
--- /dev/null
+++ b/include/asm-ia64/topology.h
@@ -0,0 +1,90 @@
1/*
2 * linux/include/asm-ia64/topology.h
3 *
4 * Copyright (C) 2002, Erich Focht, NEC
5 *
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13#ifndef _ASM_IA64_TOPOLOGY_H
14#define _ASM_IA64_TOPOLOGY_H
15
16#include <asm/acpi.h>
17#include <asm/numa.h>
18#include <asm/smp.h>
19
20#ifdef CONFIG_NUMA
21/*
22 * Returns the number of the node containing CPU 'cpu'
23 */
24#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
25
26/*
27 * Returns a bitmask of CPUs on Node 'node'.
28 */
29#define node_to_cpumask(node) (node_to_cpu_mask[node])
30
31/*
32 * Returns the number of the node containing Node 'nid'.
33 * Not implemented here. Multi-level hierarchies detected with
34 * the help of node_distance().
35 */
36#define parent_node(nid) (nid)
37
/*
 * Returns the number of the first CPU on Node 'node'.
 *
 * Use first_cpu() rather than __ffs(): node_to_cpumask() yields a cpumask_t,
 * which is a multi-word bitmap structure once NR_CPUS > BITS_PER_LONG,
 * whereas __ffs() only accepts a plain unsigned long.
 */
#define node_to_first_cpu(node)	(first_cpu(node_to_cpumask(node)))
42
43void build_cpu_to_node_map(void);
44
45/* sched_domains SD_NODE_INIT for IA64 NUMA machines */
46#define SD_NODE_INIT (struct sched_domain) { \
47 .span = CPU_MASK_NONE, \
48 .parent = NULL, \
49 .groups = NULL, \
50 .min_interval = 80, \
51 .max_interval = 320, \
52 .busy_factor = 320, \
53 .imbalance_pct = 125, \
54 .cache_hot_time = (10*1000000), \
55 .cache_nice_tries = 1, \
56 .per_cpu_gain = 100, \
57 .flags = SD_LOAD_BALANCE \
58 | SD_BALANCE_EXEC \
59 | SD_BALANCE_NEWIDLE \
60 | SD_WAKE_IDLE \
61 | SD_WAKE_BALANCE, \
62 .last_balance = jiffies, \
63 .balance_interval = 1, \
64 .nr_balance_failed = 0, \
65}
66
67/* sched_domains SD_ALLNODES_INIT for IA64 NUMA machines */
68#define SD_ALLNODES_INIT (struct sched_domain) { \
69 .span = CPU_MASK_NONE, \
70 .parent = NULL, \
71 .groups = NULL, \
72 .min_interval = 80, \
73 .max_interval = 320, \
74 .busy_factor = 320, \
75 .imbalance_pct = 125, \
76 .cache_hot_time = (10*1000000), \
77 .cache_nice_tries = 1, \
78 .per_cpu_gain = 100, \
79 .flags = SD_LOAD_BALANCE \
80 | SD_BALANCE_EXEC, \
81 .last_balance = jiffies, \
82 .balance_interval = 100*(63+num_online_cpus())/64, \
83 .nr_balance_failed = 0, \
84}
85
86#endif /* CONFIG_NUMA */
87
88#include <asm-generic/topology.h>
89
90#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/include/asm-ia64/types.h b/include/asm-ia64/types.h
new file mode 100644
index 000000000000..a677565aa954
--- /dev/null
+++ b/include/asm-ia64/types.h
@@ -0,0 +1,75 @@
1#ifndef _ASM_IA64_TYPES_H
2#define _ASM_IA64_TYPES_H
3
4/*
5 * This file is never included by application software unless explicitly requested (e.g.,
6 * via linux/types.h) in which case the application is Linux specific so (user-) name
7 * space pollution is not a major issue. However, for interoperability, libraries still
8 * need to be careful to avoid a name clashes.
9 *
10 * Based on <asm-alpha/types.h>.
11 *
12 * Modified 1998-2000, 2002
13 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
14 */
15
16#ifdef __ASSEMBLY__
17# define __IA64_UL(x) (x)
18# define __IA64_UL_CONST(x) x
19
20# ifdef __KERNEL__
21# define BITS_PER_LONG 64
22# endif
23
24#else
25# define __IA64_UL(x) ((unsigned long)(x))
26# define __IA64_UL_CONST(x) x##UL
27
28typedef unsigned int umode_t;
29
30/*
31 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
32 * header files exported to user space
33 */
34
35typedef __signed__ char __s8;
36typedef unsigned char __u8;
37
38typedef __signed__ short __s16;
39typedef unsigned short __u16;
40
41typedef __signed__ int __s32;
42typedef unsigned int __u32;
43
44typedef __signed__ long __s64;
45typedef unsigned long __u64;
46
47/*
48 * These aren't exported outside the kernel to avoid name space clashes
49 */
50# ifdef __KERNEL__
51
52typedef __s8 s8;
53typedef __u8 u8;
54
55typedef __s16 s16;
56typedef __u16 u16;
57
58typedef __s32 s32;
59typedef __u32 u32;
60
61typedef __s64 s64;
62typedef __u64 u64;
63
64#define BITS_PER_LONG 64
65
66/* DMA addresses are 64-bits wide, in general. */
67
68typedef u64 dma_addr_t;
69
70typedef unsigned short kmem_bufctl_t;
71
72# endif /* __KERNEL__ */
73#endif /* !__ASSEMBLY__ */
74
75#endif /* _ASM_IA64_TYPES_H */
diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h
new file mode 100644
index 000000000000..8edd9a90949c
--- /dev/null
+++ b/include/asm-ia64/uaccess.h
@@ -0,0 +1,408 @@
1#ifndef _ASM_IA64_UACCESS_H
2#define _ASM_IA64_UACCESS_H
3
4/*
5 * This file defines various macros to transfer memory areas across
6 * the user/kernel boundary. This needs to be done carefully because
7 * this code is executed in kernel mode and uses user-specified
8 * addresses. Thus, we need to be careful not to let the user to
9 * trick us into accessing kernel memory that would normally be
10 * inaccessible. This code is also fairly performance sensitive,
11 * so we want to spend as little time doing safety checks as
12 * possible.
13 *
14 * To make matters a bit more interesting, these macros sometimes also
15 * called from within the kernel itself, in which case the address
16 * validity check must be skipped. The get_fs() macro tells us what
17 * to do: if get_fs()==USER_DS, checking is performed, if
18 * get_fs()==KERNEL_DS, checking is bypassed.
19 *
20 * Note that even if the memory area specified by the user is in a
21 * valid address range, it is still possible that we'll get a page
22 * fault while accessing it. This is handled by filling out an
23 * exception handler fixup entry for each instruction that has the
24 * potential to fault. When such a fault occurs, the page fault
25 * handler checks to see whether the faulting instruction has a fixup
26 * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
27 * then resumes execution at the continuation point.
28 *
29 * Based on <asm-alpha/uaccess.h>.
30 *
31 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
32 * David Mosberger-Tang <davidm@hpl.hp.com>
33 */
34
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/string.h>	/* memset() for the copy_from_user() zero-fill path */

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>
44
45/*
46 * For historical reasons, the following macros are grossly misnamed:
47 */
48#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
49#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
50
51#define VERIFY_READ 0
52#define VERIFY_WRITE 1
53
54#define get_ds() (KERNEL_DS)
55#define get_fs() (current_thread_info()->addr_limit)
56#define set_fs(x) (current_thread_info()->addr_limit = (x))
57
58#define segment_eq(a, b) ((a).seg == (b).seg)
59
60/*
61 * When accessing user memory, we need to make sure the entire area really is in
62 * user-level space. In order to do this efficiently, we make sure that the page at
63 * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
64 * point inside the virtually mapped linear page table.
65 */
66#define __access_ok(addr, size, segment) \
67({ \
68 __chk_user_ptr(addr); \
69 (likely((unsigned long) (addr) <= (segment).seg) \
70 && ((segment).seg == KERNEL_DS.seg \
71 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
72})
73#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
74
75/* this function will go away soon - use access_ok() instead */
76static inline int __deprecated
77verify_area (int type, const void __user *addr, unsigned long size)
78{
79 return access_ok(type, addr, size) ? 0 : -EFAULT;
80}
81
82/*
83 * These are the main single-value transfer routines. They automatically
84 * use the right size if we just have the right pointer type.
85 *
86 * Careful to not
87 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
88 * (b) require any knowledge of processes at this stage
89 */
90#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
91#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
92
93/*
94 * The "__xxx" versions do not do address space checking, useful when
95 * doing multiple accesses to the same area (the programmer has to do the
96 * checks by hand with "access_ok()")
97 */
98#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
99#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
100
extern long __put_user_unaligned_unknown (void);

/*
 * Store X through a possibly-misaligned user pointer by splitting the access
 * into two naturally-aligned halves of the next-smaller size, recursing down
 * through __put_user(). Evaluates to 0 on success, non-zero if any partial
 * store faulted. Little-endian: the low half goes to the lower address.
 * An unsupported size becomes a link-time error via the extern above.
 */
#define __put_user_unaligned(x, ptr)								\
({												\
	long __ret;										\
	switch (sizeof(*(ptr))) {								\
		case 1: __ret = __put_user((x), (ptr)); break;					\
		case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))				\
			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
		case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))				\
			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
		case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))				\
			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
		default: __ret = __put_user_unaligned_unknown();				\
	}											\
	__ret;											\
})

extern long __get_user_unaligned_unknown (void);

/*
 * Counterpart of __put_user_unaligned(): fetches a misaligned value in two
 * aligned half-width loads.
 * NOTE(review): the second half passes the shifted expression "(x) >> 8"
 * etc. as __get_user's destination operand, which is not an lvalue in the
 * usual sense -- it is unclear from this file how the halves are meant to
 * recombine into X; confirm against __get_user's expansion before relying
 * on the 2/4/8-byte cases.
 */
#define __get_user_unaligned(x, ptr)								\
({												\
	long __ret;										\
	switch (sizeof(*(ptr))) {								\
		case 1: __ret = __get_user((x), (ptr)); break;					\
		case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))				\
			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
		case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))				\
			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
		case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))				\
			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
		default: __ret = __get_user_unaligned_unknown();				\
	}											\
	__ret;											\
})
136
#ifdef ASM_SUPPORTED
  /* Dummy wide type: its "m" constraint spans enough bytes to cover any
   * 1- to 8-byte user access without gcc narrowing the operand. */
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata. */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

/*
 * Load N bytes from user address ADDR into VAL. On a fault, the exception
 * fixup machinery (see the file-header comment) sets r8 = -EFAULT and
 * r9 = 0, which ERR and VAL then pick up; on success r8 keeps its preloaded
 * 0. The first "[1:]" tag marks the faulting load (referenced as 1b in the
 * .xdata4 entry); the second "[1:]" marks the continuation point (1f).
 */
# define __get_user_size(val, addr, n, err)							\
do {												\
	register long __gu_r8 asm ("r8") = 0;							\
	register long __gu_r9 asm ("r9");							\
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
	     "[1:]"										\
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
	(err) = __gu_r8;									\
	(val) = __gu_r9;									\
} while (0)

/*
 * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it. This
 * is because they do not write to any memory gcc knows about, so there are no aliasing
 * issues.
 */
# define __put_user_size(val, addr, n, err)							\
do {												\
	register long __pu_r8 asm ("r8") = 0;							\
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					\
		      "[1:]"									\
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		\
	(err) = __pu_r8;									\
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
/*
 * Intrinsics fallback for compilers without inline-asm support: __ld_user /
 * __st_user presumably emit the guarded access plus the __ex_table entry
 * (see asm/gcc_intrin.h), with the same r8 (error) / r9 (value) convention
 * read back through ia64_getreg().
 */
# define __get_user_size(val, addr, n, err)				\
do {									\
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
	(err) = ia64_getreg(_IA64_REG_R8);				\
	(val) = ia64_getreg(_IA64_REG_R9);				\
} while (0)
# define __put_user_size(val, addr, n, err)							\
do {												\
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val));	\
	(err) = ia64_getreg(_IA64_REG_R8);							\
} while (0)
#endif /* !ASM_SUPPORTED */
185
extern void __get_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while
 * using r8/r9.
 *
 * Common body of __get_user() and get_user(). PTR and SIZE are captured
 * exactly once up front; the access check consults the cached __gu_size
 * instead of re-expanding the raw SIZE argument, which would otherwise be
 * evaluated a second time (with any side effects, and possible r8/r9
 * clobbers the comment above warns about). Yields 0 on success and -EFAULT
 * on failure; X is zeroed on failure because __gu_val stays 0.
 */
#define __do_get_user(check, x, ptr, size, segment)					\
({											\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
	__typeof__ (size) __gu_size = (size);						\
	long __gu_err = -EFAULT, __gu_val = 0;						\
											\
	if (!check || __access_ok(__gu_ptr, __gu_size, segment))			\
		switch (__gu_size) {							\
		      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
		      case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
		      case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
		      case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
		      default: __get_user_unknown(); break;				\
		}									\
	(x) = (__typeof__(*(__gu_ptr))) __gu_val;					\
	__gu_err;									\
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)
213
extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
 *
 * Common body of __put_user() and put_user(): X, PTR and SIZE are each
 * captured once into locals before r8 becomes live inside __put_user_size().
 * Yields 0 on success, -EFAULT when the access check fails or the store
 * faults; an unsupported SIZE becomes a link-time error via
 * __put_user_unknown().
 */
#define __do_put_user(check, x, ptr, size, segment)					\
({											\
	__typeof__ (x) __pu_x = (x);							\
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
	__typeof__ (size) __pu_size = (size);						\
	long __pu_err = -EFAULT;							\
											\
	if (!check || __access_ok(__pu_ptr, __pu_size, segment))			\
		switch (__pu_size) {							\
		      case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
		      case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
		      case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;	\
		      case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;	\
		      default: __put_user_unknown(); break;				\
		}									\
	__pu_err;									\
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
240
241/*
242 * Complex access routines
243 */
244extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
245 unsigned long count);
246
247static inline unsigned long
248__copy_to_user (void __user *to, const void *from, unsigned long count)
249{
250 return __copy_user(to, (void __user *) from, count);
251}
252
253static inline unsigned long
254__copy_from_user (void *to, const void __user *from, unsigned long count)
255{
256 return __copy_user((void __user *) to, from, count);
257}
258
/* No special in-atomic variants here: the plain copies are used directly. */
#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user

/*
 * Checked copy of N bytes from kernel FROM to user TO. Evaluates to the
 * residual count from __copy_user() (0 on full success); if the destination
 * range fails __access_ok(), nothing is copied and N is returned unchanged.
 */
#define copy_to_user(to, from, n)							\
({											\
	void __user *__cu_to = (to);							\
	const void *__cu_from = (from);							\
	long __cu_len = (n);								\
											\
	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
		__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);	\
	__cu_len;									\
})
271
/*
 * Checked copy of N bytes from user FROM into kernel TO. Evaluates to the
 * residual count (0 on full success). When the source range fails
 * __access_ok(), the destination is now zero-filled instead of being left
 * untouched: callers commonly treat a short copy_from_user() as "the first
 * part of the buffer is valid", so leaving the buffer as-is would expose
 * stale (potentially sensitive) kernel memory. This matches the semantics
 * the generic copy_from_user() contract requires and the fix mainline later
 * applied to this very macro.
 */
#define copy_from_user(to, from, n)							\
({											\
	void *__cu_to = (to);								\
	const void __user *__cu_from = (from);						\
	long __cu_len = (n);								\
											\
	__chk_user_ptr(__cu_from);							\
	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
		__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);	\
	else										\
		memset(__cu_to, 0, __cu_len);						\
	__cu_len;									\
})
283
284#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
285
286static inline unsigned long
287copy_in_user (void __user *to, const void __user *from, unsigned long n)
288{
289 if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
290 n = __copy_user(to, from, n);
291 return n;
292}
293
extern unsigned long __do_clear_user (void __user *, unsigned long);

/* Zero N bytes of user memory with no access check (caller must have done
 * access_ok()); evaluates to __do_clear_user()'s residual count. */
#define __clear_user(to, n)		__do_clear_user(to, n)

/* Checked variant: returns N unchanged when the range fails __access_ok(). */
#define clear_user(to, n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok(to, __cu_len, get_fs()))		\
		__cu_len = __do_clear_user(to, __cu_len);	\
	__cu_len;						\
})
305
306
/*
 * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
 * strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

/*
 * Checked copy of a NUL-terminated user string into kernel TO (at most N
 * bytes). Only the start address is validated (size 0 in __access_ok());
 * faults encountered mid-string are caught by the exception-fixup tables
 * inside __strncpy_from_user() and surface as -EFAULT.
 */
#define strncpy_from_user(to, from, n)					\
({									\
	const char __user * __sfu_from = (from);			\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok(__sfu_from, 0, get_fs()))			\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})
321
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char __user *);

/*
 * Checked strlen of a user string. As above, only the start address is
 * validated (size 0); mid-string faults are handled via the exception
 * tables and reported as 0.
 */
#define strlen_user(str)				\
({							\
	const char __user *__su_str = (str);		\
	unsigned long __su_ret = 0;			\
	if (__access_ok(__su_str, 0, get_fs()))		\
		__su_ret = __strlen_user(__su_str);	\
	__su_ret;					\
})
333
/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

/*
 * Bounded, checked strlen of a user string. Like strlen_user(), only the
 * start address is validated (size 0); faults past it are handled by the
 * exception-fixup machinery and reported as 0.
 */
#define strnlen_user(str, len)					\
({								\
	const char __user *__su_str = (str);			\
	unsigned long __su_ret = 0;				\
	if (__access_ok(__su_str, 0, get_fs()))			\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;						\
})
349
350/* Generic code can't deal with the location-relative format that we use for compactness. */
351#define ARCH_HAS_SORT_EXTABLE
352#define ARCH_HAS_SEARCH_EXTABLE
353
354struct exception_table_entry {
355 int addr; /* location-relative address of insn this fixup is for */
356 int cont; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
357};
358
359extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
360extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
361
/*
 * Check whether the faulting instruction (cr_iip plus the slot number from
 * psr.ri) has a fixup entry. If so, apply the fixup and return 1; otherwise
 * return 0 so the caller can escalate (e.g. die or deliver a signal).
 */
static inline int
ia64_done_with_exception (struct pt_regs *regs)
{
	const struct exception_table_entry *e;
	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
	if (e) {
		ia64_handle_exception(regs, e);
		return 1;
	}
	return 0;
}
373
#define ARCH_HAS_TRANSLATE_MEM_PTR	1
/*
 * Translate a /dev/mem physical address P into a kernel-virtual pointer,
 * selecting the uncached alias when the backing page is marked uncached.
 */
static __inline__ char *
xlate_dev_mem_ptr (unsigned long p)
{
	struct page *page;
	char * ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		/* Uncached pages must be accessed through the uncached identity region. */
		ptr = (char *)p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}
389
390/*
391 * Convert a virtual cached kernel memory pointer to an uncached pointer
392 */
393static __inline__ char *
394xlate_dev_kmem_ptr (char * p)
395{
396 struct page *page;
397 char * ptr;
398
399 page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
400 if (PageUncached(page))
401 ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
402 else
403 ptr = p;
404
405 return ptr;
406}
407
408#endif /* _ASM_IA64_UACCESS_H */
diff --git a/include/asm-ia64/ucontext.h b/include/asm-ia64/ucontext.h
new file mode 100644
index 000000000000..bf573dc8ca6a
--- /dev/null
+++ b/include/asm-ia64/ucontext.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_IA64_UCONTEXT_H
2#define _ASM_IA64_UCONTEXT_H
3
4struct ucontext {
5 struct sigcontext uc_mcontext;
6};
7
8#define uc_link uc_mcontext.sc_gr[0] /* wrong type; nobody cares */
9#define uc_sigmask uc_mcontext.sc_sigmask
10#define uc_stack uc_mcontext.sc_stack
11
12#endif /* _ASM_IA64_UCONTEXT_H */
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h
new file mode 100644
index 000000000000..bb8559888103
--- /dev/null
+++ b/include/asm-ia64/unaligned.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_IA64_UNALIGNED_H
2#define _ASM_IA64_UNALIGNED_H
3
4#include <asm-generic/unaligned.h>
5
6#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
new file mode 100644
index 000000000000..33e26c557c5c
--- /dev/null
+++ b/include/asm-ia64/unistd.h
@@ -0,0 +1,399 @@
1#ifndef _ASM_IA64_UNISTD_H
2#define _ASM_IA64_UNISTD_H
3
4/*
5 * IA-64 Linux syscall numbers and inline-functions.
6 *
7 * Copyright (C) 1998-2005 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11#include <asm/break.h>
12
13#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL
14
15#define __NR_ni_syscall 1024
16#define __NR_exit 1025
17#define __NR_read 1026
18#define __NR_write 1027
19#define __NR_open 1028
20#define __NR_close 1029
21#define __NR_creat 1030
22#define __NR_link 1031
23#define __NR_unlink 1032
24#define __NR_execve 1033
25#define __NR_chdir 1034
26#define __NR_fchdir 1035
27#define __NR_utimes 1036
28#define __NR_mknod 1037
29#define __NR_chmod 1038
30#define __NR_chown 1039
31#define __NR_lseek 1040
32#define __NR_getpid 1041
33#define __NR_getppid 1042
34#define __NR_mount 1043
35#define __NR_umount 1044
36#define __NR_setuid 1045
37#define __NR_getuid 1046
38#define __NR_geteuid 1047
39#define __NR_ptrace 1048
40#define __NR_access 1049
41#define __NR_sync 1050
42#define __NR_fsync 1051
43#define __NR_fdatasync 1052
44#define __NR_kill 1053
45#define __NR_rename 1054
46#define __NR_mkdir 1055
47#define __NR_rmdir 1056
48#define __NR_dup 1057
49#define __NR_pipe 1058
50#define __NR_times 1059
51#define __NR_brk 1060
52#define __NR_setgid 1061
53#define __NR_getgid 1062
54#define __NR_getegid 1063
55#define __NR_acct 1064
56#define __NR_ioctl 1065
57#define __NR_fcntl 1066
58#define __NR_umask 1067
59#define __NR_chroot 1068
60#define __NR_ustat 1069
61#define __NR_dup2 1070
62#define __NR_setreuid 1071
63#define __NR_setregid 1072
64#define __NR_getresuid 1073
65#define __NR_setresuid 1074
66#define __NR_getresgid 1075
67#define __NR_setresgid 1076
68#define __NR_getgroups 1077
69#define __NR_setgroups 1078
70#define __NR_getpgid 1079
71#define __NR_setpgid 1080
72#define __NR_setsid 1081
73#define __NR_getsid 1082
74#define __NR_sethostname 1083
75#define __NR_setrlimit 1084
76#define __NR_getrlimit 1085
77#define __NR_getrusage 1086
78#define __NR_gettimeofday 1087
79#define __NR_settimeofday 1088
80#define __NR_select 1089
81#define __NR_poll 1090
82#define __NR_symlink 1091
83#define __NR_readlink 1092
84#define __NR_uselib 1093
85#define __NR_swapon 1094
86#define __NR_swapoff 1095
87#define __NR_reboot 1096
88#define __NR_truncate 1097
89#define __NR_ftruncate 1098
90#define __NR_fchmod 1099
91#define __NR_fchown 1100
92#define __NR_getpriority 1101
93#define __NR_setpriority 1102
94#define __NR_statfs 1103
95#define __NR_fstatfs 1104
96#define __NR_gettid 1105
97#define __NR_semget 1106
98#define __NR_semop 1107
99#define __NR_semctl 1108
100#define __NR_msgget 1109
101#define __NR_msgsnd 1110
102#define __NR_msgrcv 1111
103#define __NR_msgctl 1112
104#define __NR_shmget 1113
105#define __NR_shmat 1114
106#define __NR_shmdt 1115
107#define __NR_shmctl 1116
108/* also known as klogctl() in GNU libc: */
109#define __NR_syslog 1117
110#define __NR_setitimer 1118
111#define __NR_getitimer 1119
112/* 1120 was __NR_old_stat */
113/* 1121 was __NR_old_lstat */
114/* 1122 was __NR_old_fstat */
115#define __NR_vhangup 1123
116#define __NR_lchown 1124
117#define __NR_remap_file_pages 1125
118#define __NR_wait4 1126
119#define __NR_sysinfo 1127
120#define __NR_clone 1128
121#define __NR_setdomainname 1129
122#define __NR_uname 1130
123#define __NR_adjtimex 1131
124/* 1132 was __NR_create_module */
125#define __NR_init_module 1133
126#define __NR_delete_module 1134
127/* 1135 was __NR_get_kernel_syms */
128/* 1136 was __NR_query_module */
129#define __NR_quotactl 1137
130#define __NR_bdflush 1138
131#define __NR_sysfs 1139
132#define __NR_personality 1140
133#define __NR_afs_syscall 1141
134#define __NR_setfsuid 1142
135#define __NR_setfsgid 1143
136#define __NR_getdents 1144
137#define __NR_flock 1145
138#define __NR_readv 1146
139#define __NR_writev 1147
140#define __NR_pread64 1148
141#define __NR_pwrite64 1149
142#define __NR__sysctl 1150
143#define __NR_mmap 1151
144#define __NR_munmap 1152
145#define __NR_mlock 1153
146#define __NR_mlockall 1154
147#define __NR_mprotect 1155
148#define __NR_mremap 1156
149#define __NR_msync 1157
150#define __NR_munlock 1158
151#define __NR_munlockall 1159
152#define __NR_sched_getparam 1160
153#define __NR_sched_setparam 1161
154#define __NR_sched_getscheduler 1162
155#define __NR_sched_setscheduler 1163
156#define __NR_sched_yield 1164
157#define __NR_sched_get_priority_max 1165
158#define __NR_sched_get_priority_min 1166
159#define __NR_sched_rr_get_interval 1167
160#define __NR_nanosleep 1168
161#define __NR_nfsservctl 1169
162#define __NR_prctl 1170
163/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */
164#define __NR_mmap2 1172
165#define __NR_pciconfig_read 1173
166#define __NR_pciconfig_write 1174
167#define __NR_perfmonctl 1175
168#define __NR_sigaltstack 1176
169#define __NR_rt_sigaction 1177
170#define __NR_rt_sigpending 1178
171#define __NR_rt_sigprocmask 1179
172#define __NR_rt_sigqueueinfo 1180
173#define __NR_rt_sigreturn 1181
174#define __NR_rt_sigsuspend 1182
175#define __NR_rt_sigtimedwait 1183
176#define __NR_getcwd 1184
177#define __NR_capget 1185
178#define __NR_capset 1186
179#define __NR_sendfile 1187
180#define __NR_getpmsg 1188
181#define __NR_putpmsg 1189
182#define __NR_socket 1190
183#define __NR_bind 1191
184#define __NR_connect 1192
185#define __NR_listen 1193
186#define __NR_accept 1194
187#define __NR_getsockname 1195
188#define __NR_getpeername 1196
189#define __NR_socketpair 1197
190#define __NR_send 1198
191#define __NR_sendto 1199
192#define __NR_recv 1200
193#define __NR_recvfrom 1201
194#define __NR_shutdown 1202
195#define __NR_setsockopt 1203
196#define __NR_getsockopt 1204
197#define __NR_sendmsg 1205
198#define __NR_recvmsg 1206
199#define __NR_pivot_root 1207
200#define __NR_mincore 1208
201#define __NR_madvise 1209
202#define __NR_stat 1210
203#define __NR_lstat 1211
204#define __NR_fstat 1212
205#define __NR_clone2 1213
206#define __NR_getdents64 1214
207#define __NR_getunwind 1215
208#define __NR_readahead 1216
209#define __NR_setxattr 1217
210#define __NR_lsetxattr 1218
211#define __NR_fsetxattr 1219
212#define __NR_getxattr 1220
213#define __NR_lgetxattr 1221
214#define __NR_fgetxattr 1222
215#define __NR_listxattr 1223
216#define __NR_llistxattr 1224
217#define __NR_flistxattr 1225
218#define __NR_removexattr 1226
219#define __NR_lremovexattr 1227
220#define __NR_fremovexattr 1228
221#define __NR_tkill 1229
222#define __NR_futex 1230
223#define __NR_sched_setaffinity 1231
224#define __NR_sched_getaffinity 1232
225#define __NR_set_tid_address 1233
226#define __NR_fadvise64 1234
227#define __NR_tgkill 1235
228#define __NR_exit_group 1236
229#define __NR_lookup_dcookie 1237
230#define __NR_io_setup 1238
231#define __NR_io_destroy 1239
232#define __NR_io_getevents 1240
233#define __NR_io_submit 1241
234#define __NR_io_cancel 1242
235#define __NR_epoll_create 1243
236#define __NR_epoll_ctl 1244
237#define __NR_epoll_wait 1245
238#define __NR_restart_syscall 1246
239#define __NR_semtimedop 1247
240#define __NR_timer_create 1248
241#define __NR_timer_settime 1249
242#define __NR_timer_gettime 1250
243#define __NR_timer_getoverrun 1251
244#define __NR_timer_delete 1252
245#define __NR_clock_settime 1253
246#define __NR_clock_gettime 1254
247#define __NR_clock_getres 1255
248#define __NR_clock_nanosleep 1256
249#define __NR_fstatfs64 1257
250#define __NR_statfs64 1258
251#define __NR_mbind 1259
252#define __NR_get_mempolicy 1260
253#define __NR_set_mempolicy 1261
254#define __NR_mq_open 1262
255#define __NR_mq_unlink 1263
256#define __NR_mq_timedsend 1264
257#define __NR_mq_timedreceive 1265
258#define __NR_mq_notify 1266
259#define __NR_mq_getsetattr 1267
260#define __NR_kexec_load 1268
261#define __NR_vserver 1269
262#define __NR_waitid 1270
263#define __NR_add_key 1271
264#define __NR_request_key 1272
265#define __NR_keyctl 1273
266
267#ifdef __KERNEL__
268
269#include <linux/config.h>
270
271#define NR_syscalls 256 /* length of syscall table */
272
273#define __ARCH_WANT_SYS_RT_SIGACTION
274
275#ifdef CONFIG_IA32_SUPPORT
276# define __ARCH_WANT_SYS_FADVISE64
277# define __ARCH_WANT_SYS_GETPGRP
278# define __ARCH_WANT_SYS_LLSEEK
279# define __ARCH_WANT_SYS_NICE
280# define __ARCH_WANT_SYS_OLD_GETRLIMIT
281# define __ARCH_WANT_SYS_OLDUMOUNT
282# define __ARCH_WANT_SYS_SIGPENDING
283# define __ARCH_WANT_SYS_SIGPROCMASK
284# define __ARCH_WANT_COMPAT_SYS_TIME
285#endif
286
287#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
288
289#include <linux/types.h>
290#include <linux/linkage.h>
291#include <linux/compiler.h>
292
293extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
294
295#ifdef __KERNEL_SYSCALLS__
296
297#include <linux/compiler.h>
298#include <linux/string.h>
299#include <linux/signal.h>
300#include <asm/ptrace.h>
301#include <linux/stringify.h>
302#include <linux/syscalls.h>
303
/*
 * Thin in-kernel wrappers around the syscall implementations, available only
 * under __KERNEL_SYSCALLS__.
 *
 * NOTE(review): the parameter names below read (name, mode, flags), but
 * sys_open() takes (filename, flags, mode). Arguments are passed through
 * positionally, so the *second* argument here actually reaches sys_open()'s
 * flags parameter -- the names look swapped; confirm before relying on them.
 */
static inline long
open (const char * name, int mode, int flags)
{
	return sys_open(name, mode, flags);
}

static inline long
dup (int fd)
{
	return sys_dup(fd);
}

static inline long
close (int fd)
{
	return sys_close(fd);
}

static inline off_t
lseek (int fd, off_t off, int whence)
{
	return sys_lseek(fd, off, whence);
}

/* Terminate the current task via sys_exit(); does not return to the caller. */
static inline void
_exit (int value)
{
	sys_exit(value);
}

#define exit(x) _exit(x)

static inline long
write (int fd, const char * buf, size_t nr)
{
	return sys_write(fd, buf, nr);
}

static inline long
read (int fd, char * buf, size_t nr)
{
	return sys_read(fd, buf, nr);
}


static inline long
setsid (void)
{
	return sys_setsid();
}

/* wait4() with a NULL rusage pointer. */
static inline pid_t
waitpid (int pid, int * wait_stat, int flags)
{
	return sys_wait4(pid, wait_stat, flags, NULL);
}
360
361
362extern int execve (const char *filename, char *const av[], char *const ep[]);
363extern pid_t clone (unsigned long flags, void *sp);
364
365#endif /* __KERNEL_SYSCALLS__ */
366
367asmlinkage unsigned long sys_mmap(
368 unsigned long addr, unsigned long len,
369 int prot, int flags,
370 int fd, long off);
371asmlinkage unsigned long sys_mmap2(
372 unsigned long addr, unsigned long len,
373 int prot, int flags,
374 int fd, long pgoff);
375struct pt_regs;
376struct sigaction;
377long sys_execve(char __user *filename, char __user * __user *argv,
378 char __user * __user *envp, struct pt_regs *regs);
379asmlinkage long sys_pipe(void);
380asmlinkage long sys_ptrace(long request, pid_t pid,
381 unsigned long addr, unsigned long data);
382asmlinkage long sys_rt_sigaction(int sig,
383 const struct sigaction __user *act,
384 struct sigaction __user *oact,
385 size_t sigsetsize);
386
387/*
388 * "Conditional" syscalls
389 *
390 * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in
391 * kernel/sys_ni.c. This version causes warnings because the declaration isn't a
392 * proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
393 * declarations have prototypes at the moment.
394 */
395#define cond_syscall(x) asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
396
397#endif /* !__ASSEMBLY__ */
398#endif /* __KERNEL__ */
399#endif /* _ASM_IA64_UNISTD_H */
diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h
new file mode 100644
index 000000000000..61426ad3ecdb
--- /dev/null
+++ b/include/asm-ia64/unwind.h
@@ -0,0 +1,240 @@
1#ifndef _ASM_IA64_UNWIND_H
2#define _ASM_IA64_UNWIND_H
3
4/*
5 * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * A simple API for unwinding kernel stacks. This is used for
9 * debugging and error reporting purposes. The kernel doesn't need
10 * full-blown stack unwinding with all the bells and whistles, so there
11 * is not much point in implementing the full IA-64 unwind API (though
12 * it would of course be possible to implement the kernel API on top
13 * of it).
14 */
15
16struct task_struct; /* forward declaration */
17struct switch_stack; /* forward declaration */
18
/* IA-64 application registers (ar.*) reachable through unw_access_ar(). */
19enum unw_application_register {
20	UNW_AR_BSP,		/* ar.bsp: RSE backing store pointer */
21	UNW_AR_BSPSTORE,	/* ar.bspstore: backing store write pointer */
22	UNW_AR_PFS,		/* ar.pfs: previous function state */
23	UNW_AR_RNAT,		/* ar.rnat: RSE NaT collection */
24	UNW_AR_UNAT,		/* ar.unat: user NaT collection */
25	UNW_AR_LC,		/* ar.lc: loop count */
26	UNW_AR_EC,		/* ar.ec: epilog count */
27	UNW_AR_FPSR,		/* ar.fpsr: floating-point status register */
28	UNW_AR_RSC,		/* ar.rsc: RSE configuration */
29	UNW_AR_CCV,		/* ar.ccv: cmpxchg compare value */
30	UNW_AR_CSD,		/* ar.csd: compare-and-store data */
31	UNW_AR_SSD		/* ar.ssd: store data (used with ar.csd) */
32};
33
34/*
35 * The following declarations are private to the unwind
36 * implementation:
37 */
38
/* Address range of one stack region (private to the unwinder). */
39struct unw_stack {
40	unsigned long limit;	/* lower bound of the valid address range */
41	unsigned long top;	/* upper bound of the valid address range */
42};
43
44#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0)
45
46/*
47 * No user of this module should ever access this structure directly
48 * as it is subject to change. It is declared here solely so we can
49 * use automatic variables.
50 */
51struct unw_frame_info {
52	struct unw_stack regstk;	/* register backing-store stack bounds */
53	struct unw_stack memstk;	/* memory stack bounds */
54	unsigned int flags;		/* UNW_FLAG_* bits (see above) */
55	short hint;			/* internal; presumably unwind-script cache hint — see unwind.c */
56	short prev_script;		/* internal; presumably last script index used — see unwind.c */
57
58	/* current frame info: */
59	unsigned long bsp;		/* backing store pointer value */
60	unsigned long sp;		/* stack pointer value */
61	unsigned long psp;		/* previous sp value */
62	unsigned long ip;		/* instruction pointer value */
63	unsigned long pr;		/* current predicate values */
64	unsigned long *cfm_loc;		/* cfm save location (or NULL) */
65	unsigned long pt;		/* struct pt_regs location */
66
67	struct task_struct *task;	/* task being unwound */
68	struct switch_stack *sw;	/* switch_stack the unwind started from */
69
	/*
	 * Save locations of preserved state.  A NULL pointer means the
	 * corresponding register was not saved in the current frame
	 * (cf. the NULL check in unw_get_rp() below).
	 */
70	/* preserved state: */
71	unsigned long *bsp_loc;		/* previous bsp save location */
72	unsigned long *bspstore_loc;
73	unsigned long *pfs_loc;
74	unsigned long *rnat_loc;
75	unsigned long *rp_loc;		/* return pointer save location (or NULL) */
76	unsigned long *pri_unat_loc;
77	unsigned long *unat_loc;
78	unsigned long *pr_loc;
79	unsigned long *lc_loc;
80	unsigned long *fpsr_loc;
81	struct unw_ireg {
82		unsigned long *loc;
83		struct unw_ireg_nat {
84			long type : 3;		/* enum unw_nat_type */
85			signed long off : 61;	/* NaT word is at loc+nat.off */
86		} nat;
87	} r4, r5, r6, r7;		/* the four preserved general registers */
88	unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
89	struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
90};
91
92/*
93 * The official API follows below:
94 */
95
/*
 * One entry of an IA-64 unwind table: the [start,end) code range it
 * covers plus the location of its unwind info, all as offsets
 * (presumably relative to the segment_base handed to
 * unw_add_unwind_table() — confirm against unwind.c).
 */
96struct unw_table_entry {
97	u64 start_offset;
98	u64 end_offset;
99	u64 info_offset;
100};
101
102/*
103 * Initialize unwind support.
104 */
105extern void unw_init (void);
106
107extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
108 const void *table_start, const void *table_end);
109
110extern void unw_remove_unwind_table (void *handle);
111
112/*
113 * Prepare to unwind blocked task t.
114 */
115extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
116
117/*
118 * Prepare to unwind from interruption.  The pt-regs and switch-stack structures must
119 * be "adjacent" (no state modifications between pt-regs and switch-stack).
120 */
121extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
122 struct pt_regs *pt, struct switch_stack *sw);
123
124extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
125 struct switch_stack *sw);
126
127/*
128 * Prepare to unwind the currently running thread.
129 */
130extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg);
131
132/*
133 * Unwind to the previous frame.  Returns 0 if successful, negative
134 * number in case of an error.
135 */
136extern int unw_unwind (struct unw_frame_info *info);
137
138/*
139 * Unwind until the return pointer is in user-land (or until an error
140 * occurs). Returns 0 if successful, negative number in case of
141 * error.
142 */
143extern int unw_unwind_to_user (struct unw_frame_info *info);
144
145#define unw_is_intr_frame(info) (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)
146
147static inline int
148unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
149{
150 *valp = (info)->ip;
151 return 0;
152}
153
154static inline int
155unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
156{
157 *valp = (info)->sp;
158 return 0;
159}
160
161static inline int
162unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
163{
164 *valp = (info)->psp;
165 return 0;
166}
167
168static inline int
169unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
170{
171 *valp = (info)->bsp;
172 return 0;
173}
174
175static inline int
176unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
177{
178 *valp = *(info)->cfm_loc;
179 return 0;
180}
181
182static inline int
183unw_set_cfm (struct unw_frame_info *info, unsigned long val)
184{
185 *(info)->cfm_loc = val;
186 return 0;
187}
188
189static inline int
190unw_get_rp (struct unw_frame_info *info, unsigned long *val)
191{
192 if (!info->rp_loc)
193 return -1;
194 *val = *info->rp_loc;
195 return 0;
196}
197
198extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int);
199extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int);
200extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int);
201extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int);
202extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int);
203
/* Write general register n (value plus NaT bit) in the current frame. */
static inline int
unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
{
	unsigned long val = v;
	char natbit = nat;

	/* final argument 1 selects write access (0 would read) */
	return unw_access_gr(i, n, &val, &natbit, 1);
}
209
/* Write branch register n in the current frame. */
static inline int
unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
{
	unsigned long val = v;

	/* final argument 1 selects write access */
	return unw_access_br(i, n, &val, 1);
}
215
216static inline int
217unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
218{
219 return unw_access_fr(i, n, &v, 1);
220}
221
/* Write application register n (see enum unw_application_register). */
static inline int
unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
{
	unsigned long val = v;

	/* final argument 1 selects write access */
	return unw_access_ar(i, n, &val, 1);
}
227
/* Write the predicate registers of the current frame. */
static inline int
unw_set_pr (struct unw_frame_info *i, unsigned long v)
{
	unsigned long val = v;

	/* final argument 1 selects write access */
	return unw_access_pr(i, &val, 1);
}
233
234#define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0)
235#define unw_get_br(i,n,v) unw_access_br(i,n,v,0)
236#define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0)
237#define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0)
238#define unw_get_pr(i,v) unw_access_pr(i,v,0)
239
240#endif /* _ASM_IA64_UNWIND_H */
diff --git a/include/asm-ia64/user.h b/include/asm-ia64/user.h
new file mode 100644
index 000000000000..78e5a20140aa
--- /dev/null
+++ b/include/asm-ia64/user.h
@@ -0,0 +1,58 @@
1#ifndef _ASM_IA64_USER_H
2#define _ASM_IA64_USER_H
3
4/*
5 * Core file format: The core file is written in such a way that gdb
6 * can understand it and provide useful information to the user (under
7 * linux we use the `trad-core' bfd). The file contents are as
8 * follows:
9 *
10 * upage: 1 page consisting of a user struct that tells gdb
11 * what is present in the file. Directly after this is a
12 * copy of the task_struct, which is currently not used by gdb,
13 * but it may come in handy at some point. All of the registers
14 * are stored as part of the upage. The upage should always be
15 * only one page long.
16 * data: The data segment follows next. We use current->end_text to
17 * current->brk to pick up all of the user variables, plus any memory
18 * that may have been sbrk'ed. No attempt is made to determine if a
19 * page is demand-zero or if a page is totally unused, we just cover
20 * the entire range. All of the addresses are rounded in such a way
21 * that an integral number of pages is written.
22 * stack: We need the stack information in order to get a meaningful
23 * backtrace. We need to write the data from usp to
24 * current->start_stack, so we round each of these in order to be able
25 * to write an integer number of pages.
26 *
27 * Modified 1998, 1999, 2001
28 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
29 */
30
31#include <linux/ptrace.h>
32#include <linux/types.h>
33
34#include <asm/page.h>
35
36#define EF_SIZE 3072 /* XXX fix me */
37
/*
 * Legacy a.out-style core-dump "upage" layout consumed by gdb; see the
 * file-header comment above for the overall core-file format.
 */
38struct user {
39	unsigned long regs[EF_SIZE/8+32];	/* integer and fp regs */
40	size_t u_tsize;		/* text size (pages) */
41	size_t u_dsize;		/* data size (pages) */
42	size_t u_ssize;		/* stack size (pages) */
43	unsigned long start_code;	/* text starting address */
44	unsigned long start_data;	/* data starting address */
45	unsigned long start_stack;	/* stack starting address */
46	long int signal;	/* signal causing core dump */
	/* NOTE(review): `struct regs` is not declared in this header; it is
	 * only ever used as an opaque pointer here — confirm it is declared
	 * (or intentionally dangling) elsewhere before dereferencing. */
47	struct regs * u_ar0;	/* help gdb find registers */
48	unsigned long magic;	/* identifies a core file */
49	char u_comm[32];	/* user command name */
50};
51
52#define NBPG PAGE_SIZE
53#define UPAGES 1
54#define HOST_TEXT_START_ADDR (u.start_code)
55#define HOST_DATA_START_ADDR (u.start_data)
56#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
57
58#endif /* _ASM_IA64_USER_H */
diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h
new file mode 100644
index 000000000000..da55c91246e3
--- /dev/null
+++ b/include/asm-ia64/ustack.h
@@ -0,0 +1,16 @@
1#ifndef _ASM_IA64_USTACK_H
2#define _ASM_IA64_USTACK_H
3
4/*
5 * Constants for the user stack size
6 */
7
8#include <asm/page.h>
9
10/* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
11#define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
12/* Make a default stack size of 2GB */
13#define DEFAULT_USER_STACK_SIZE (1UL << 31)
14#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
15
16#endif /* _ASM_IA64_USTACK_H */
diff --git a/include/asm-ia64/vga.h b/include/asm-ia64/vga.h
new file mode 100644
index 000000000000..1f446d6841f6
--- /dev/null
+++ b/include/asm-ia64/vga.h
@@ -0,0 +1,22 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 * (c) 1999 Asit Mallick <asit.k.mallick@intel.com>
6 * (c) 1999 Don Dugger <don.dugger@intel.com>
7 */
8
9#ifndef __ASM_IA64_VGA_H_
10#define __ASM_IA64_VGA_H_
11
12/*
13 * On the PC, we can just recalculate addresses and then access the
14 * videoram directly without any black magic.
15 */
16
17#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
18
19#define vga_readb(x) (*(x))
20#define vga_writeb(x,y) (*(y) = (x))
21
22#endif /* __ASM_IA64_VGA_H_ */
diff --git a/include/asm-ia64/xor.h b/include/asm-ia64/xor.h
new file mode 100644
index 000000000000..41fb8744d17a
--- /dev/null
+++ b/include/asm-ia64/xor.h
@@ -0,0 +1,33 @@
1/*
2 * include/asm-ia64/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for IA-64.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16
17extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *);
18extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *,
19 unsigned long *);
20extern void xor_ia64_4(unsigned long, unsigned long *, unsigned long *,
21 unsigned long *, unsigned long *);
22extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *,
23 unsigned long *, unsigned long *, unsigned long *);
24
/*
 * Template plugging the hand-optimized ia64 XOR routines (declared
 * above) into the generic RAID-5 xor framework; registered for speed
 * testing via the XOR_TRY_TEMPLATES / xor_speed() hook below.
 */
25static struct xor_block_template xor_block_ia64 = {
26	.name =	"ia64",
27	.do_2 =	xor_ia64_2,
28	.do_3 =	xor_ia64_3,
29	.do_4 =	xor_ia64_4,
30	.do_5 =	xor_ia64_5,
31};
32
33#define XOR_TRY_TEMPLATES xor_speed(&xor_block_ia64)