Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/acpi.h                      |    5
-rw-r--r--  include/asm-ia64/fcntl.h                     |    3
-rw-r--r--  include/asm-ia64/io.h                        |    4
-rw-r--r--  include/asm-ia64/mmu.h                       |    8
-rw-r--r--  include/asm-ia64/mmu_context.h               |   61
-rw-r--r--  include/asm-ia64/page.h                      |   27
-rw-r--r--  include/asm-ia64/pal.h                       |   21
-rw-r--r--  include/asm-ia64/pgtable.h                   |   13
-rw-r--r--  include/asm-ia64/rwsem.h                     |   35
-rw-r--r--  include/asm-ia64/sn/addrs.h                  |  112
-rw-r--r--  include/asm-ia64/sn/geo.h                    |    3
-rw-r--r--  include/asm-ia64/sn/intr.h                   |    3
-rw-r--r--  include/asm-ia64/sn/nodepda.h                |    3
-rw-r--r--  include/asm-ia64/sn/pcibus_provider_defs.h   |    8
-rw-r--r--  include/asm-ia64/sn/pda.h                    |    1
-rw-r--r--  include/asm-ia64/sn/sn2/sn_hwperf.h          |   10
-rw-r--r--  include/asm-ia64/sn/sn_sal.h                 |   60
-rw-r--r--  include/asm-ia64/sn/tioce.h                  |  740
-rw-r--r--  include/asm-ia64/sn/tioce_provider.h         |   66
-rw-r--r--  include/asm-ia64/socket.h                    |    2
-rw-r--r--  include/asm-ia64/spinlock.h                  |   33
-rw-r--r--  include/asm-ia64/system.h                    |    5
22 files changed, 1046 insertions, 177 deletions
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 4c06d455139c..3a544ffc5008 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -116,6 +116,11 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 
 extern u16 ia64_acpiid_to_sapicid[];
 
+/*
+ * Refer Intel ACPI _PDC support document for bit definitions
+ */
+#define ACPI_PDC_EST_CAPABILITY_SMP	0x8
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
diff --git a/include/asm-ia64/fcntl.h b/include/asm-ia64/fcntl.h
index c9f8d835d0cc..cee16ea1780a 100644
--- a/include/asm-ia64/fcntl.h
+++ b/include/asm-ia64/fcntl.h
@@ -81,6 +81,7 @@ struct flock {
 
 #define F_LINUX_SPECIFIC_BASE	1024
 
-#define force_o_largefile()	( ! (current->personality & PER_LINUX32) )
+#define force_o_largefile()	\
+		(personality(current->personality) != PER_LINUX32)
 
 #endif /* _ASM_IA64_FCNTL_H */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 54e7637a326c..cf772a67f858 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -23,7 +23,7 @@
 #define __SLOW_DOWN_IO	do { } while (0)
 #define SLOW_DOWN_IO	do { } while (0)
 
-#define __IA64_UNCACHED_OFFSET	0xc000000000000000UL	/* region 6 */
+#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)
 
 /*
  * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
@@ -41,7 +41,7 @@
 #define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
 #define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))
 
-#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | (p & 0xfff))
+#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
 
 struct io_space {
 	unsigned long mmio_base;	/* base in MMIO space */
diff --git a/include/asm-ia64/mmu.h b/include/asm-ia64/mmu.h
index ae1525352a25..611432ba579c 100644
--- a/include/asm-ia64/mmu.h
+++ b/include/asm-ia64/mmu.h
@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number.  We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number.  We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index e3e5fededb04..8d6e72f7b08e 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -19,6 +19,7 @@
 
 #define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
 
+# include <asm/page.h>
 # ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -55,34 +56,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
+	nv_mm_context_t context = mm->context;
 
-	if (context)
-		return context;
-
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
 		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	/*
+	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
 
@@ -104,13 +117,13 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
 	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
 
-	old_rr4 = ia64_get_rr(0x8000000000000000UL);
+	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
 	rid = context << 3;	/* make space for encoding the region number */
 	rid_incr = 1 << 8;
 
@@ -122,6 +135,10 @@ reload_context (mm_context_t context)
 	rr4 = rr0 + 4*rid_incr;
 #ifdef CONFIG_HUGETLB_PAGE
 	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
+
+# if RGN_HPAGE != 4
+#  error "reload_context assumes RGN_HPAGE is 4"
+# endif
 #endif
 
 	ia64_set_rr(0x0000000000000000UL, rr0);
@@ -138,7 +155,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +174,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.
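For readers skimming the hunk above: the reworked get_mmu_context()/delayed_tlb_flush() pair follows a conventional check, lock, re-check shape. A minimal user-space sketch of that pattern (illustration only, not kernel code; a pthread mutex stands in for ia64_ctx.lock):

/* Unlocked fast path, then re-check under the lock before allocating. */
#include <pthread.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ctx_next = 1, ctx_limit = 1UL << 18;

static unsigned long get_ctx(volatile unsigned long *slot)
{
	unsigned long ctx = *slot;		/* cheap unlocked test */

	if (ctx == 0) {
		pthread_mutex_lock(&ctx_lock);
		ctx = *slot;			/* re-check: another CPU may have won the race */
		if (ctx == 0) {
			if (ctx_next >= ctx_limit)
				ctx_next = 1;	/* the kernel wraps and flushes TLBs here */
			*slot = ctx = ctx_next++;
		}
		pthread_mutex_unlock(&ctx_lock);
	}
	return ctx;
}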
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 08894f73abf0..9edffad8c28b 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -13,6 +13,19 @@
 #include <asm/types.h>
 
 /*
+ * The top three bits of an IA64 address are its Region Number.
+ * Different regions are assigned to different purposes.
+ */
+#define RGN_SHIFT	(61)
+#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)
+#define RGN_BITS	(RGN_BASE(-1))
+
+#define RGN_KERNEL	7	/* Identity mapped region */
+#define RGN_UNCACHED	6	/* Identity mapped I/O region */
+#define RGN_GATE	5	/* Gate page, Kernel text, etc */
+#define RGN_HPAGE	4	/* For Huge TLB pages */
+
+/*
  * PAGE_SHIFT determines the actual kernel page size.
  */
 #if defined(CONFIG_IA64_PAGE_SIZE_4KB)
@@ -36,10 +49,9 @@
 
 #define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
 
+
 #ifdef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE		(4UL)	/* note: this is hardcoded in reload_context()!*/
-# define REGION_SHIFT		61
-# define HPAGE_REGION_BASE	(REGION_HPAGE << REGION_SHIFT)
+# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
 # define HPAGE_SHIFT		hpage_shift
 # define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
 # define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
@@ -130,16 +142,13 @@ typedef union ia64_va {
 #define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
 #define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
 
-#define REGION_SIZE		REGION_NUMBER(1)
-#define REGION_KERNEL		7
-
 #ifdef CONFIG_HUGETLB_PAGE
 # define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)		\
 				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 # define is_hugepage_only_range(mm, addr, len)		\
-	 (REGION_NUMBER(addr) == REGION_HPAGE &&	\
-	  REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
+	 (REGION_NUMBER(addr) == RGN_HPAGE &&		\
+	  REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
 extern unsigned int hpage_shift;
 #endif
 
@@ -197,7 +206,7 @@ get_order (unsigned long size)
 # define __pgprot(x)	(x)
 #endif /* !STRICT_MM_TYPECHECKS */
 
-#define PAGE_OFFSET			__IA64_UL_CONST(0xe000000000000000)
+#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE |					\
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
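A quick arithmetic check on the new region macros, using only constants that appear elsewhere in this diff: RGN_BASE(r) is just the region number shifted into bits 63..61, so the symbolic bases reduce to the old hard-coded values.

	RGN_BASE(RGN_KERNEL)	/* 7UL << 61 = 0xe000000000000000, the old PAGE_OFFSET */
	RGN_BASE(RGN_UNCACHED)	/* 6UL << 61 = 0xc000000000000000, the old __IA64_UNCACHED_OFFSET */
	RGN_BASE(RGN_GATE)	/* 5UL << 61 = 0xa000000000000000, the old VMALLOC/gate base */
	RGN_BASE(RGN_HPAGE)	/* 4UL << 61 = 0x8000000000000000, the old HPAGE_REGION_BASE */
	RGN_BITS		/* RGN_BASE(-1) = 0xe000000000000000, the old REGION_BITS mask */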
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 2303a10ee595..e828377ad295 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -75,6 +75,8 @@
 #define PAL_CACHE_READ		259	/* read tag & data of cacheline for diagnostic testing */
 #define PAL_CACHE_WRITE		260	/* write tag & data of cacheline for diagnostic testing */
 #define PAL_VM_TR_READ		261	/* read contents of translation register */
+#define PAL_GET_PSTATE		262	/* get the current P-state */
+#define PAL_SET_PSTATE		263	/* set the P-state */
 
 #ifndef __ASSEMBLY__
 
@@ -1111,6 +1113,25 @@ ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
 	return iprv.status;
 }
 
+/* Get the current P-state information */
+static inline s64
+ia64_pal_get_pstate (u64 *pstate_index)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_GET_PSTATE, 0, 0, 0);
+	*pstate_index = iprv.v0;
+	return iprv.status;
+}
+
+/* Set the P-state */
+static inline s64
+ia64_pal_set_pstate (u64 pstate_index)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
+	return iprv.status;
+}
+
 /* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
  * suspended, but cache and TLB coherency is maintained.
  */
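A short usage sketch for the two wrappers added above (illustrative only; it simply reads the current P-state index back and writes it unchanged, and the surrounding driver code is assumed, not shown in this diff):

	u64 pstate;
	s64 status = ia64_pal_get_pstate(&pstate);	/* PAL_GET_PSTATE, index 262 */

	if (status == 0)
		status = ia64_pal_set_pstate(pstate);	/* PAL_SET_PSTATE, index 263 */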
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 48586e08f432..2e34c06e6777 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -204,21 +204,18 @@ ia64_phys_addr_valid (unsigned long addr)
 #define set_pte(ptep, pteval)	(*(ptep) = (pteval))
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-#define RGN_SIZE	(1UL << 61)
-#define RGN_KERNEL	7
-
-#define VMALLOC_START		0xa000000200000000UL
+#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT	(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 # define VMALLOC_END		vmalloc_end
   extern unsigned long vmalloc_end;
 #else
-# define VMALLOC_END		(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 #endif
 
 /* fs/proc/kcore.c */
-#define	kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
-#define	kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
+#define	kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
+#define	kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
 
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index 6ece5061dc19..e18b5ab0cb75 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
  * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
  *
  * Based on asm-i386/rwsem.h and other architecture implementation.
  *
@@ -11,9 +12,9 @@
  *
  * The lock count is initialized to 0 (no active and no waiting lockers).
  *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
- * of an uncontended lock. Readers increment by 1 and see a positive value
- * when uncontended, negative if there are writers (and maybe) readers
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
+ * the case of an uncontended lock. Readers increment by 1 and see a positive
+ * value when uncontended, negative if there are writers (and maybe) readers
  * waiting (in which case it goes to sleep).
  */
@@ -29,7 +30,7 @@
  * the semaphore definition
  */
 struct rw_semaphore {
-	signed int		count;
+	signed long		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if RWSEM_DEBUG
@@ -37,10 +38,10 @@ struct rw_semaphore {
 #endif
 };
 
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS		__IA64_UL_CONST(0x0000000000000001)
+#define RWSEM_ACTIVE_MASK		__IA64_UL_CONST(0x00000000ffffffff)
+#define RWSEM_WAITING_BIAS		-__IA64_UL_CONST(0x0000000100000000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
@@ -83,7 +84,7 @@ init_rwsem (struct rw_semaphore *sem)
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
 
 	if (result < 0)
 		rwsem_down_read_failed(sem);
@@ -95,7 +96,7 @@ __down_read (struct rw_semaphore *sem)
 static inline void
 __down_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -112,7 +113,7 @@ __down_write (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
 
 	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -124,7 +125,7 @@ __up_read (struct rw_semaphore *sem)
 static inline void
 __up_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -141,7 +142,7 @@ __up_write (struct rw_semaphore *sem)
 static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
 			return 1;
@@ -156,7 +157,7 @@ __down_read_trylock (struct rw_semaphore *sem)
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-	int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
 			      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -167,7 +168,7 @@ __down_write_trylock (struct rw_semaphore *sem)
 static inline void
 __downgrade_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -182,7 +183,7 @@ __downgrade_write (struct rw_semaphore *sem)
  * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
  * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
  */
-#define rwsem_atomic_add(delta, sem)	atomic_add(delta, (atomic_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic_add_return(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
 #endif /* _ASM_IA64_RWSEM_H */
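The switch to a 64-bit count widens the bias constants; a small worked example (not part of the header) showing that an uncontended writer lands on exactly the 0xffffffff00000001 value mentioned in the comment above:

#include <stdio.h>

#define ACTIVE_BIAS		0x0000000000000001L
#define WAITING_BIAS		(-0x0000000100000000L)
#define ACTIVE_WRITE_BIAS	(WAITING_BIAS + ACTIVE_BIAS)

int main(void)
{
	long count = 0;				/* RWSEM_UNLOCKED_VALUE */

	count += ACTIVE_WRITE_BIAS;		/* uncontended __down_write */
	printf("%#lx\n", (unsigned long)count);	/* prints 0xffffffff00000001 (negative) */

	count -= ACTIVE_WRITE_BIAS;		/* __up_write */
	printf("%ld\n", count);			/* back to 0: lock is free again */
	return 0;
}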
diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
index 103d745dc5f2..2c32e4b77b54 100644
--- a/include/asm-ia64/sn/addrs.h
+++ b/include/asm-ia64/sn/addrs.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1992-1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_IA64_SN_ADDRS_H
@@ -65,7 +65,6 @@
 
 #define NASID_MASK		((u64)NASID_BITMASK << NASID_SHIFT)
 #define AS_MASK			((u64)AS_BITMASK << AS_SHIFT)
-#define REGION_BITS		0xe000000000000000UL
 
 
 /*
@@ -79,38 +78,30 @@
 #define AS_CAC_SPACE		(AS_CAC_VAL << AS_SHIFT)
 
 
-/*
- * Base addresses for various address ranges.
- */
-#define CACHED			0xe000000000000000UL
-#define UNCACHED		0xc000000000000000UL
-#define UNCACHED_PHYS		0x8000000000000000UL
-
-
 /*
  * Virtual Mode Local & Global MMR space.
  */
 #define SH1_LOCAL_MMR_OFFSET	0x8000000000UL
 #define SH2_LOCAL_MMR_OFFSET	0x0200000000UL
 #define LOCAL_MMR_OFFSET	(is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
-#define LOCAL_MMR_SPACE		(UNCACHED | LOCAL_MMR_OFFSET)
-#define LOCAL_PHYS_MMR_SPACE	(UNCACHED_PHYS | LOCAL_MMR_OFFSET)
+#define LOCAL_MMR_SPACE		(__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
+#define LOCAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
 
 #define SH1_GLOBAL_MMR_OFFSET	0x0800000000UL
 #define SH2_GLOBAL_MMR_OFFSET	0x0300000000UL
 #define GLOBAL_MMR_OFFSET	(is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
-#define GLOBAL_MMR_SPACE	(UNCACHED | GLOBAL_MMR_OFFSET)
+#define GLOBAL_MMR_SPACE	(__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
 
 /*
  * Physical mode addresses
  */
-#define GLOBAL_PHYS_MMR_SPACE	(UNCACHED_PHYS | GLOBAL_MMR_OFFSET)
+#define GLOBAL_PHYS_MMR_SPACE	(RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
 
 
 /*
  * Clear region & AS bits.
  */
-#define TO_PHYS_MASK		(~(REGION_BITS | AS_MASK))
+#define TO_PHYS_MASK		(~(RGN_BITS | AS_MASK))
 
 
 /*
@@ -126,6 +117,7 @@
 #define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
 #define GLOBAL_CAC_ADDR(n,a)	(CAC_BASE | REMOTE_ADDR(n,a))
 #define CHANGE_NASID(n,x)	((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
+#define IS_TIO_NASID(n)		((n) & 1)
 
 
 /* non-II mmr's start at top of big window space (4G) */
@@ -134,10 +126,10 @@
 /*
  * general address defines
  */
-#define CAC_BASE		(CACHED   | AS_CAC_SPACE)
-#define AMO_BASE		(UNCACHED | AS_AMO_SPACE)
-#define AMO_PHYS_BASE		(UNCACHED_PHYS | AS_AMO_SPACE)
-#define GET_BASE		(CACHED   | AS_GET_SPACE)
+#define CAC_BASE		(PAGE_OFFSET | AS_CAC_SPACE)
+#define AMO_BASE		(__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
+#define AMO_PHYS_BASE		(RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
+#define GET_BASE		(PAGE_OFFSET | AS_GET_SPACE)
 
 /*
  * Convert Memory addresses between various addressing modes.
@@ -155,17 +147,35 @@
  * the chiplet id is zero.  If we implement TIO-TIO dma, we might need
  * to insert a chiplet id into this macro.  However, it is our belief
  * right now that this chiplet id will be ICE, which is also zero.
- * Nasid starts on bit 40.
  */
-#define PHYS_TO_TIODMA(x)	( (((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
-#define PHYS_TO_DMA(x)		( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
+#define SH1_TIO_PHYS_TO_DMA(x) \
+	((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
+
+#define SH2_NETWORK_BANK_OFFSET(x) \
+	((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
+
+#define SH2_NETWORK_BANK_SELECT(x) \
+	((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \
+		>> (sn_hub_info->nasid_shift - 4)) << 36)
+
+#define SH2_NETWORK_ADDRESS(x) \
+	(SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
+
+#define SH2_TIO_PHYS_TO_DMA(x) \
+	(((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x))
+
+#define PHYS_TO_TIODMA(x) \
+	(is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
+
+#define PHYS_TO_DMA(x) \
+	((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
 
 
 /*
  * Macros to test for address type.
  */
-#define IS_AMO_ADDRESS(x)	(((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE)
-#define IS_AMO_PHYS_ADDRESS(x)	(((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE)
+#define IS_AMO_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
+#define IS_AMO_PHYS_ADDRESS(x)	(((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
 
 
 /*
@@ -180,18 +190,20 @@
 #define TIO_SWIN_BASE(n, w)		(TIO_IO_BASE(n) + \
					    ((u64) (w) << TIO_SWIN_SIZE_BITS))
 #define NODE_IO_BASE(n)			(GLOBAL_MMR_SPACE | NASID_SPACE(n))
-#define TIO_IO_BASE(n)			(UNCACHED | NASID_SPACE(n))
+#define TIO_IO_BASE(n)			(__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
 #define BWIN_SIZE			(1UL << BWIN_SIZE_BITS)
 #define NODE_BWIN_BASE0(n)		(NODE_IO_BASE(n) + BWIN_SIZE)
 #define NODE_BWIN_BASE(n, w)		(NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
 #define RAW_NODE_SWIN_BASE(n, w)	(NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
 #define BWIN_WIDGET_MASK		0x7
 #define BWIN_WINDOWNUM(x)		(((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define SH1_IS_BIG_WINDOW_ADDR(x)	((x) & BWIN_TOP)
 
 #define TIO_BWIN_WINDOW_SELECT_MASK	0x7
 #define TIO_BWIN_WINDOWNUM(x)		(((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
 
-
+#define TIO_HWIN_SHIFT_BITS		33
+#define TIO_HWIN(x)			(NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
 
 /*
  * The following definitions pertain to the IO special address
@@ -216,10 +228,6 @@
 #define TIO_SWIN_WIDGETNUM(x)		(((x)  >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
 
 
-#define TIO_IOSPACE_ADDR(n,x)					\
-	/* Move in the Chiplet ID for TIO Local Block MMR */	\
-	(REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2))
-
 /*
  * The following macros produce the correct base virtual address for
  * the hub registers. The REMOTE_HUB_* macro produce
@@ -234,18 +242,40 @@
  * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
  * They're always safe.
  */
+/* Shub1 TIO & MMR addressing macros */
+#define SH1_TIO_IOSPACE_ADDR(n,x)	\
+	GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_BWIN_MMR(n,x)	\
+	GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_SWIN_MMR(n,x)	\
+	(NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
+
+#define SH1_REMOTE_MMR(n,x)		\
+	(SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) :	\
+		SH1_REMOTE_SWIN_MMR(n,x))
+
+/* Shub1 TIO & MMR addressing macros */
+#define SH2_TIO_IOSPACE_ADDR(n,x)	\
+	((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
+
+#define SH2_REMOTE_MMR(n,x)		\
+	GLOBAL_MMR_ADDR(n,x)
+
+
+/* TIO & MMR addressing macros that work on both shub1 & shub2 */
+#define TIO_IOSPACE_ADDR(n,x)		\
+	((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) :	\
		 SH2_TIO_IOSPACE_ADDR(n,x)))
+
+#define SH_REMOTE_MMR(n,x)		\
+	(is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
+
 #define REMOTE_HUB_ADDR(n,x)						\
-	((n & 1) ?							\
-	/* TIO: */							\
-	(is_shub2() ?							\
-	/* TIO on Shub2 */						\
-	(volatile u64 *)(TIO_IOSPACE_ADDR(n,x))				\
-	: /* TIO on shub1 */						\
-	(volatile u64 *)(GLOBAL_MMR_ADDR(n,x)))				\
-									\
-	: /* SHUB1 and SHUB2 MMRs: */					\
-	(((x) & BWIN_TOP) ? ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x)))	\
-	: ((volatile u64 *)(NODE_SWIN_BASE(n,1) + 0x800000 + (x)))))
+	(IS_TIO_NASID(n) ?  ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) :	\
+	 ((volatile u64*)SH_REMOTE_MMR(n,x)))
+
 
 #define HUB_L(x)			(*((volatile typeof(*x) *)x))
 #define HUB_S(x,d)			(*((volatile typeof(*x) *)x) = (d))
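The REMOTE_HUB_ADDR() rewrite above replaces the open-coded conditional with named helpers; an equivalent spelled-out form (illustration only, using just the macros defined in this hunk):

	volatile u64 *mmr;

	if (IS_TIO_NASID(nasid))	/* odd NASIDs address TIO chips */
		mmr = TIO_IOSPACE_ADDR(nasid, offset);
	else
		mmr = (volatile u64 *)SH_REMOTE_MMR(nasid, offset);
	/* same result as: mmr = REMOTE_HUB_ADDR(nasid, offset); */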
diff --git a/include/asm-ia64/sn/geo.h b/include/asm-ia64/sn/geo.h
index 84b254603b8d..f083c9434066 100644
--- a/include/asm-ia64/sn/geo.h
+++ b/include/asm-ia64/sn/geo.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_IA64_SN_GEO_H
@@ -108,7 +108,6 @@ typedef union geoid_u {
 #define INVALID_SLAB		(slabid_t)-1
 #define INVALID_SLOT		(slotid_t)-1
 #define INVALID_MODULE		((moduleid_t)-1)
-#define INVALID_PARTID		((partid_t)-1)
 
 static inline slabid_t geo_slab(geoid_t g)
 {
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h
index e190dd4213d5..e35074f526d9 100644
--- a/include/asm-ia64/sn/intr.h
+++ b/include/asm-ia64/sn/intr.h
@@ -12,13 +12,12 @@
 #include <linux/rcupdate.h>
 
 #define SGI_UART_VECTOR		(0xe9)
-#define SGI_PCIBR_ERROR		(0x33)
 
 /* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
 #define SGI_XPC_ACTIVATE	(0x30)
 #define SGI_II_ERROR		(0x31)
 #define SGI_XBOW_ERROR		(0x32)
-#define SGI_PCIBR_ERROR		(0x33)
+#define SGI_PCIASIC_ERROR	(0x33)
 #define SGI_ACPI_SCI_INT	(0x34)
 #define SGI_TIOCA_ERROR		(0x35)
 #define SGI_TIO_ERROR		(0x36)
diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h
index 7138b1eafd6b..47bb8100fd00 100644
--- a/include/asm-ia64/sn/nodepda.h
+++ b/include/asm-ia64/sn/nodepda.h
@@ -37,7 +37,6 @@ struct phys_cpuid {
 
 struct nodepda_s {
 	void *pdinfo;		/* Platform-dependent per-node info */
-	spinlock_t		bist_lock;
 
 	/*
 	 * The BTEs on this node are shared by the local cpus
@@ -55,6 +54,8 @@ struct nodepda_s {
 	 * Array of physical cpu identifiers. Indexed by cpuid.
 	 */
 	struct phys_cpuid	phys_cpuid[NR_CPUS];
+	spinlock_t		ptc_lock ____cacheline_aligned_in_smp;
+	spinlock_t		bist_lock;
 };
 
 typedef struct nodepda_s nodepda_t;
diff --git a/include/asm-ia64/sn/pcibus_provider_defs.h b/include/asm-ia64/sn/pcibus_provider_defs.h
index 976f5eff0539..ad0e8e8ae53f 100644
--- a/include/asm-ia64/sn/pcibus_provider_defs.h
+++ b/include/asm-ia64/sn/pcibus_provider_defs.h
@@ -18,8 +18,9 @@
 #define PCIIO_ASIC_TYPE_PIC	2
 #define PCIIO_ASIC_TYPE_TIOCP	3
 #define PCIIO_ASIC_TYPE_TIOCA	4
+#define PCIIO_ASIC_TYPE_TIOCE	5
 
-#define PCIIO_ASIC_MAX_TYPES	5
+#define PCIIO_ASIC_MAX_TYPES	6
 
 /*
  * Common pciio bus provider data.  There should be one of these as the
@@ -30,7 +31,8 @@
 struct pcibus_bussoft {
 	uint32_t		bs_asic_type;	/* chipset type */
 	uint32_t		bs_xid;		/* xwidget id */
-	uint64_t		bs_persist_busnum; /* Persistent Bus Number */
+	uint32_t		bs_persist_busnum; /* Persistent Bus Number */
+	uint32_t		bs_persist_segment; /* Segment Number */
 	uint64_t		bs_legacy_io;	/* legacy io pio addr */
 	uint64_t		bs_legacy_mem;	/* legacy mem pio addr */
 	uint64_t		bs_base;	/* widget base */
@@ -47,6 +49,8 @@ struct sn_pcibus_provider {
 	dma_addr_t	(*dma_map_consistent)(struct pci_dev *, unsigned long, size_t);
 	void		(*dma_unmap)(struct pci_dev *, dma_addr_t, int);
 	void *		(*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
+	void		(*force_interrupt)(struct sn_irq_info *);
+	void		(*target_interrupt)(struct sn_irq_info *);
 };
 
 extern struct sn_pcibus_provider *sn_pci_provider[];
diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h
index ea5590c76ca4..1c5108d44d8b 100644
--- a/include/asm-ia64/sn/pda.h
+++ b/include/asm-ia64/sn/pda.h
@@ -39,7 +39,6 @@ typedef struct pda_s {
 	unsigned long pio_write_status_val;
 	volatile unsigned long *pio_shub_war_cam_addr;
 
-	unsigned long	sn_soft_irr[4];
 	unsigned long	sn_in_service_ivecs[4];
 	int		sn_lb_int_war_ticks;
 	int		sn_last_irq;
diff --git a/include/asm-ia64/sn/sn2/sn_hwperf.h b/include/asm-ia64/sn/sn2/sn_hwperf.h
index df75f4c4aec3..291ef3d69da2 100644
--- a/include/asm-ia64/sn/sn2/sn_hwperf.h
+++ b/include/asm-ia64/sn/sn2/sn_hwperf.h
@@ -43,6 +43,7 @@ struct sn_hwperf_object_info {
 
 /* macros for object classification */
 #define SN_HWPERF_IS_NODE(x)		((x) && strstr((x)->name, "SHub"))
+#define SN_HWPERF_IS_NODE_SHUB2(x)	((x) && strstr((x)->name, "SHub 2."))
 #define SN_HWPERF_IS_IONODE(x)		((x) && strstr((x)->name, "TIO"))
 #define SN_HWPERF_IS_ROUTER(x)		((x) && strstr((x)->name, "Router"))
 #define SN_HWPERF_IS_NL3ROUTER(x)	((x) && strstr((x)->name, "NL3Router"))
@@ -214,6 +215,15 @@ struct sn_hwperf_ioctl_args {
  */
 #define SN_HWPERF_GET_NODE_NASID	(102|SN_HWPERF_OP_MEM_COPYOUT)
 
+/*
+ * Given a node id, determine the id of the nearest node with CPUs
+ * and the id of the nearest node that has memory. The argument
+ * node would normally be a "headless" node, e.g. an "IO node".
+ * Return 0 on success.
+ */
+extern int sn_hwperf_get_nearest_node(cnodeid_t node,
+	cnodeid_t *near_mem, cnodeid_t *near_cpu);
+
 /* return codes */
 #define SN_HWPERF_OP_OK			0
 #define SN_HWPERF_OP_NOMEM		1
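A hedged usage sketch for the helper declared above (the caller and its printk are hypothetical, not taken from this diff):

	static int report_nearest(cnodeid_t ionode)
	{
		cnodeid_t near_mem, near_cpu;
		int e = sn_hwperf_get_nearest_node(ionode, &near_mem, &near_cpu);

		if (e == 0)
			printk(KERN_DEBUG "node %d: nearest mem node %d, nearest cpu node %d\n",
			       (int)ionode, (int)near_mem, (int)near_cpu);
		return e;
	}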
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 27976d223186..e67825ad1930 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -55,7 +55,6 @@
 #define  SN_SAL_BUS_CONFIG			   0x02000037
 #define  SN_SAL_SYS_SERIAL_GET			   0x02000038
 #define  SN_SAL_PARTITION_SERIAL_GET		   0x02000039
-#define  SN_SAL_SYSCTL_PARTITION_GET		   0x0200003a
 #define  SN_SAL_SYSTEM_POWER_DOWN		   0x0200003b
 #define  SN_SAL_GET_MASTER_BASEIO_NASID		   0x0200003c
 #define  SN_SAL_COHERENCE			   0x0200003d
@@ -78,7 +77,8 @@
 
 #define  SN_SAL_HUB_ERROR_INTERRUPT		   0x02000060
 #define  SN_SAL_BTE_RECOVER			   0x02000061
-#define  SN_SAL_IOIF_GET_PCI_TOPOLOGY		   0x02000062
+#define  SN_SAL_RESERVED_DO_NOT_USE		   0x02000062
+#define  SN_SAL_IOIF_GET_PCI_TOPOLOGY		   0x02000064
 
 /*
  * Service-specific constants
@@ -586,35 +586,6 @@ sn_partition_serial_number_val(void) {
 }
 
 /*
- * Returns the partition id of the nasid passed in as an argument,
- * or INVALID_PARTID if the partition id cannot be retrieved.
- */
-static inline partid_t
-ia64_sn_sysctl_partition_get(nasid_t nasid)
-{
-	struct ia64_sal_retval ret_stuff;
-	ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
-				0, 0, 0, 0, 0, 0);
-	if (ret_stuff.status != 0)
-	    return INVALID_PARTID;
-	return ((partid_t)ret_stuff.v0);
-}
-
-/*
- * Returns the partition id of the current processor.
- */
-
-extern partid_t sn_partid;
-
-static inline partid_t
-sn_local_partid(void) {
-	if (unlikely(sn_partid < 0)) {
-		sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()));
-	}
-	return sn_partid;
-}
-
-/*
  * Returns the physical address of the partition's reserved page through
  * an iterative number of calls.
  *
@@ -749,7 +720,8 @@ ia64_sn_power_down(void)
 {
 	struct ia64_sal_retval ret_stuff;
 	SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
-	while(1);
+	while(1)
+		cpu_relax();
 	/* never returns */
 }
 
@@ -1018,24 +990,6 @@ ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift,
 	ret_stuff.v2 = 0;
 	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
 
-/***** BEGIN HACK - temp til old proms no longer supported ********/
-	if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
-		int nasid = get_sapicid() & 0xfff;;
-#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL
-#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48
-		if (shubtype) *shubtype = 0;
-		if (nasid_bitmask) *nasid_bitmask = 0x7ff;
-		if (nasid_shift) *nasid_shift = 38;
-		if (systemsize) *systemsize = 11;
-		if (sharing_domain_size) *sharing_domain_size = 9;
-		if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
-		if (coher) *coher = nasid >> 9;
-		if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
-			SH_SHUB_ID_NODES_PER_BIT_SHFT;
-		return 0;
-	}
-/***** END HACK *******/
-
 	if (ret_stuff.status < 0)
 		return ret_stuff.status;
 
@@ -1068,12 +1022,10 @@ ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
 }
 
 static inline int
-ia64_sn_ioif_get_pci_topology(u64 rack, u64 bay, u64 slot, u64 slab,
-			      u64 buf, u64 len)
+ia64_sn_ioif_get_pci_topology(u64 buf, u64 len)
 {
 	struct ia64_sal_retval rv;
-	SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY,
-			rack, bay, slot, slab, buf, len, 0);
+	SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0);
 	return (int) rv.status;
 }
 
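The PCI-topology call now takes only a buffer and length; a sketch of an updated caller (assuming, as with most SN SAL calls, that the buffer is handed over as a physical address -- an assumption, since this diff does not show the caller):

	static int fetch_pci_topology(void *buf, u64 len)
	{
		return ia64_sn_ioif_get_pci_topology(__pa(buf), len);
	}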
diff --git a/include/asm-ia64/sn/tioce.h b/include/asm-ia64/sn/tioce.h
new file mode 100644
index 000000000000..22879853e46c
--- /dev/null
+++ b/include/asm-ia64/sn/tioce.h
@@ -0,0 +1,740 @@
1/**************************************************************************
2 * *
3 * Unpublished copyright (c) 2005, Silicon Graphics, Inc. *
4 * THIS IS UNPUBLISHED CONFIDENTIAL AND PROPRIETARY SOURCE CODE OF SGI. *
5 * *
6 * The copyright notice above does not evidence any actual or intended *
7 * publication or disclosure of this source code, which includes *
8 * information that is confidential and/or proprietary, and is a trade *
9 * secret, of Silicon Graphics, Inc. ANY REPRODUCTION, MODIFICATION, *
10 * DISTRIBUTION, PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH *
11 * USE OF THIS SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF *
12 * SILICON GRAPHICS, INC. IS STRICTLY PROHIBITED, AND IN VIOLATION OF *
13 * APPLICABLE LAWS AND INTERNATIONAL TREATIES. THE RECEIPT OR *
14 * POSSESSION OF THIS SOURCE CODE AND/OR RELATED INFORMATION DOES NOT *
15 * CONVEY OR IMPLY ANY RIGHTS TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS *
16 * CONTENTS, OR TO MANUFACTURE, USE, OR SELL ANYTHING THAT IT MAY *
17 * DESCRIBE, IN WHOLE OR IN PART. *
18 * *
19 **************************************************************************/
20
21#ifndef __ASM_IA64_SN_TIOCE_H__
22#define __ASM_IA64_SN_TIOCE_H__
23
24/* CE ASIC part & mfgr information */
25#define TIOCE_PART_NUM 0xCE00
26#define TIOCE_MFGR_NUM 0x36
27#define TIOCE_REV_A 0x1
28
29/* CE Virtual PPB Vendor/Device IDs */
30#define CE_VIRT_PPB_VENDOR_ID 0x10a9
31#define CE_VIRT_PPB_DEVICE_ID 0x4002
32
33/* CE Host Bridge Vendor/Device IDs */
34#define CE_HOST_BRIDGE_VENDOR_ID 0x10a9
35#define CE_HOST_BRIDGE_DEVICE_ID 0x4003
36
37
38#define TIOCE_NUM_M40_ATES 4096
39#define TIOCE_NUM_M3240_ATES 2048
40#define TIOCE_NUM_PORTS 2
41
42/*
43 * Register layout for TIOCE. MMR offsets are shown at the far right of the
44 * structure definition.
45 */
46typedef volatile struct tioce {
47 /*
48 * ADMIN : Administration Registers
49 */
50 uint64_t ce_adm_id; /* 0x000000 */
51 uint64_t ce_pad_000008; /* 0x000008 */
52 uint64_t ce_adm_dyn_credit_status; /* 0x000010 */
53 uint64_t ce_adm_last_credit_status; /* 0x000018 */
54 uint64_t ce_adm_credit_limit; /* 0x000020 */
55 uint64_t ce_adm_force_credit; /* 0x000028 */
56 uint64_t ce_adm_control; /* 0x000030 */
57 uint64_t ce_adm_mmr_chn_timeout; /* 0x000038 */
58 uint64_t ce_adm_ssp_ure_timeout; /* 0x000040 */
59 uint64_t ce_adm_ssp_dre_timeout; /* 0x000048 */
60 uint64_t ce_adm_ssp_debug_sel; /* 0x000050 */
61 uint64_t ce_adm_int_status; /* 0x000058 */
62 uint64_t ce_adm_int_status_alias; /* 0x000060 */
63 uint64_t ce_adm_int_mask; /* 0x000068 */
64 uint64_t ce_adm_int_pending; /* 0x000070 */
65 uint64_t ce_adm_force_int; /* 0x000078 */
66 uint64_t ce_adm_ure_ups_buf_barrier_flush; /* 0x000080 */
67 uint64_t ce_adm_int_dest[15]; /* 0x000088 -- 0x0000F8 */
68 uint64_t ce_adm_error_summary; /* 0x000100 */
69 uint64_t ce_adm_error_summary_alias; /* 0x000108 */
70 uint64_t ce_adm_error_mask; /* 0x000110 */
71 uint64_t ce_adm_first_error; /* 0x000118 */
72 uint64_t ce_adm_error_overflow; /* 0x000120 */
73 uint64_t ce_adm_error_overflow_alias; /* 0x000128 */
74 uint64_t ce_pad_000130[2]; /* 0x000130 -- 0x000138 */
75 uint64_t ce_adm_tnum_error; /* 0x000140 */
76 uint64_t ce_adm_mmr_err_detail; /* 0x000148 */
77 uint64_t ce_adm_msg_sram_perr_detail; /* 0x000150 */
78 uint64_t ce_adm_bap_sram_perr_detail; /* 0x000158 */
79 uint64_t ce_adm_ce_sram_perr_detail; /* 0x000160 */
80 uint64_t ce_adm_ce_credit_oflow_detail; /* 0x000168 */
81 uint64_t ce_adm_tx_link_idle_max_timer; /* 0x000170 */
82 uint64_t ce_adm_pcie_debug_sel; /* 0x000178 */
83 uint64_t ce_pad_000180[16]; /* 0x000180 -- 0x0001F8 */
84
85 uint64_t ce_adm_pcie_debug_sel_top; /* 0x000200 */
86 uint64_t ce_adm_pcie_debug_lat_sel_lo_top; /* 0x000208 */
87 uint64_t ce_adm_pcie_debug_lat_sel_hi_top; /* 0x000210 */
88 uint64_t ce_adm_pcie_debug_trig_sel_top; /* 0x000218 */
89 uint64_t ce_adm_pcie_debug_trig_lat_sel_lo_top; /* 0x000220 */
90 uint64_t ce_adm_pcie_debug_trig_lat_sel_hi_top; /* 0x000228 */
91 uint64_t ce_adm_pcie_trig_compare_top; /* 0x000230 */
92 uint64_t ce_adm_pcie_trig_compare_en_top; /* 0x000238 */
93 uint64_t ce_adm_ssp_debug_sel_top; /* 0x000240 */
94 uint64_t ce_adm_ssp_debug_lat_sel_lo_top; /* 0x000248 */
95 uint64_t ce_adm_ssp_debug_lat_sel_hi_top; /* 0x000250 */
96 uint64_t ce_adm_ssp_debug_trig_sel_top; /* 0x000258 */
97 uint64_t ce_adm_ssp_debug_trig_lat_sel_lo_top; /* 0x000260 */
98 uint64_t ce_adm_ssp_debug_trig_lat_sel_hi_top; /* 0x000268 */
99 uint64_t ce_adm_ssp_trig_compare_top; /* 0x000270 */
100 uint64_t ce_adm_ssp_trig_compare_en_top; /* 0x000278 */
101 uint64_t ce_pad_000280[48]; /* 0x000280 -- 0x0003F8 */
102
103 uint64_t ce_adm_bap_ctrl; /* 0x000400 */
104 uint64_t ce_pad_000408[127]; /* 0x000408 -- 0x0007F8 */
105
106 uint64_t ce_msg_buf_data63_0[35]; /* 0x000800 -- 0x000918 */
107 uint64_t ce_pad_000920[29]; /* 0x000920 -- 0x0009F8 */
108
109 uint64_t ce_msg_buf_data127_64[35]; /* 0x000A00 -- 0x000B18 */
110 uint64_t ce_pad_000B20[29]; /* 0x000B20 -- 0x000BF8 */
111
112 uint64_t ce_msg_buf_parity[35]; /* 0x000C00 -- 0x000D18 */
113 uint64_t ce_pad_000D20[29]; /* 0x000D20 -- 0x000DF8 */
114
115 uint64_t ce_pad_000E00[576]; /* 0x000E00 -- 0x001FF8 */
116
117 /*
118 * LSI : LSI's PCI Express Link Registers (Link#1 and Link#2)
119 * Link#1 MMRs at start at 0x002000, Link#2 MMRs at 0x003000
120 * NOTE: the comment offsets at far right: let 'z' = {2 or 3}
121 */
122 #define ce_lsi(link_num) ce_lsi[link_num-1]
123 struct ce_lsi_reg {
124 uint64_t ce_lsi_lpu_id; /* 0x00z000 */
125 uint64_t ce_lsi_rst; /* 0x00z008 */
126 uint64_t ce_lsi_dbg_stat; /* 0x00z010 */
127 uint64_t ce_lsi_dbg_cfg; /* 0x00z018 */
128 uint64_t ce_lsi_ltssm_ctrl; /* 0x00z020 */
129 uint64_t ce_lsi_lk_stat; /* 0x00z028 */
130 uint64_t ce_pad_00z030[2]; /* 0x00z030 -- 0x00z038 */
131 uint64_t ce_lsi_int_and_stat; /* 0x00z040 */
132 uint64_t ce_lsi_int_mask; /* 0x00z048 */
133 uint64_t ce_pad_00z050[22]; /* 0x00z050 -- 0x00z0F8 */
134 uint64_t ce_lsi_lk_perf_cnt_sel; /* 0x00z100 */
135 uint64_t ce_pad_00z108; /* 0x00z108 */
136 uint64_t ce_lsi_lk_perf_cnt_ctrl; /* 0x00z110 */
137 uint64_t ce_pad_00z118; /* 0x00z118 */
138 uint64_t ce_lsi_lk_perf_cnt1; /* 0x00z120 */
139 uint64_t ce_lsi_lk_perf_cnt1_test; /* 0x00z128 */
140 uint64_t ce_lsi_lk_perf_cnt2; /* 0x00z130 */
141 uint64_t ce_lsi_lk_perf_cnt2_test; /* 0x00z138 */
142 uint64_t ce_pad_00z140[24]; /* 0x00z140 -- 0x00z1F8 */
143 uint64_t ce_lsi_lk_lyr_cfg; /* 0x00z200 */
144 uint64_t ce_lsi_lk_lyr_status; /* 0x00z208 */
145 uint64_t ce_lsi_lk_lyr_int_stat; /* 0x00z210 */
146 uint64_t ce_lsi_lk_ly_int_stat_test; /* 0x00z218 */
147 uint64_t ce_lsi_lk_ly_int_stat_mask; /* 0x00z220 */
148 uint64_t ce_pad_00z228[3]; /* 0x00z228 -- 0x00z238 */
149 uint64_t ce_lsi_fc_upd_ctl; /* 0x00z240 */
150 uint64_t ce_pad_00z248[3]; /* 0x00z248 -- 0x00z258 */
151 uint64_t ce_lsi_flw_ctl_upd_to_timer; /* 0x00z260 */
152 uint64_t ce_lsi_flw_ctl_upd_timer0; /* 0x00z268 */
153 uint64_t ce_lsi_flw_ctl_upd_timer1; /* 0x00z270 */
154 uint64_t ce_pad_00z278[49]; /* 0x00z278 -- 0x00z3F8 */
155 uint64_t ce_lsi_freq_nak_lat_thrsh; /* 0x00z400 */
156 uint64_t ce_lsi_ack_nak_lat_tmr; /* 0x00z408 */
157 uint64_t ce_lsi_rply_tmr_thr; /* 0x00z410 */
158 uint64_t ce_lsi_rply_tmr; /* 0x00z418 */
159 uint64_t ce_lsi_rply_num_stat; /* 0x00z420 */
160 uint64_t ce_lsi_rty_buf_max_addr; /* 0x00z428 */
161 uint64_t ce_lsi_rty_fifo_ptr; /* 0x00z430 */
162 uint64_t ce_lsi_rty_fifo_rd_wr_ptr; /* 0x00z438 */
163 uint64_t ce_lsi_rty_fifo_cred; /* 0x00z440 */
164 uint64_t ce_lsi_seq_cnt; /* 0x00z448 */
165 uint64_t ce_lsi_ack_sent_seq_num; /* 0x00z450 */
166 uint64_t ce_lsi_seq_cnt_fifo_max_addr; /* 0x00z458 */
167 uint64_t ce_lsi_seq_cnt_fifo_ptr; /* 0x00z460 */
168 uint64_t ce_lsi_seq_cnt_rd_wr_ptr; /* 0x00z468 */
169 uint64_t ce_lsi_tx_lk_ts_ctl; /* 0x00z470 */
170 uint64_t ce_pad_00z478; /* 0x00z478 */
171 uint64_t ce_lsi_mem_addr_ctl; /* 0x00z480 */
172 uint64_t ce_lsi_mem_d_ld0; /* 0x00z488 */
173 uint64_t ce_lsi_mem_d_ld1; /* 0x00z490 */
174 uint64_t ce_lsi_mem_d_ld2; /* 0x00z498 */
175 uint64_t ce_lsi_mem_d_ld3; /* 0x00z4A0 */
176 uint64_t ce_lsi_mem_d_ld4; /* 0x00z4A8 */
177 uint64_t ce_pad_00z4B0[2]; /* 0x00z4B0 -- 0x00z4B8 */
178 uint64_t ce_lsi_rty_d_cnt; /* 0x00z4C0 */
179 uint64_t ce_lsi_seq_buf_cnt; /* 0x00z4C8 */
180 uint64_t ce_lsi_seq_buf_bt_d; /* 0x00z4D0 */
181 uint64_t ce_pad_00z4D8; /* 0x00z4D8 */
182 uint64_t ce_lsi_ack_lat_thr; /* 0x00z4E0 */
183 uint64_t ce_pad_00z4E8[3]; /* 0x00z4E8 -- 0x00z4F8 */
184 uint64_t ce_lsi_nxt_rcv_seq_1_cntr; /* 0x00z500 */
185 uint64_t ce_lsi_unsp_dllp_rcvd; /* 0x00z508 */
186 uint64_t ce_lsi_rcv_lk_ts_ctl; /* 0x00z510 */
187 uint64_t ce_pad_00z518[29]; /* 0x00z518 -- 0x00z5F8 */
188 uint64_t ce_lsi_phy_lyr_cfg; /* 0x00z600 */
189 uint64_t ce_pad_00z608; /* 0x00z608 */
190 uint64_t ce_lsi_phy_lyr_int_stat; /* 0x00z610 */
191 uint64_t ce_lsi_phy_lyr_int_stat_test; /* 0x00z618 */
192 uint64_t ce_lsi_phy_lyr_int_mask; /* 0x00z620 */
193 uint64_t ce_pad_00z628[11]; /* 0x00z628 -- 0x00z678 */
194 uint64_t ce_lsi_rcv_phy_cfg; /* 0x00z680 */
195 uint64_t ce_lsi_rcv_phy_stat1; /* 0x00z688 */
196 uint64_t ce_lsi_rcv_phy_stat2; /* 0x00z690 */
197 uint64_t ce_lsi_rcv_phy_stat3; /* 0x00z698 */
198 uint64_t ce_lsi_rcv_phy_int_stat; /* 0x00z6A0 */
199 uint64_t ce_lsi_rcv_phy_int_stat_test; /* 0x00z6A8 */
200 uint64_t ce_lsi_rcv_phy_int_mask; /* 0x00z6B0 */
201 uint64_t ce_pad_00z6B8[9]; /* 0x00z6B8 -- 0x00z6F8 */
202 uint64_t ce_lsi_tx_phy_cfg; /* 0x00z700 */
203 uint64_t ce_lsi_tx_phy_stat; /* 0x00z708 */
204 uint64_t ce_lsi_tx_phy_int_stat; /* 0x00z710 */
205 uint64_t ce_lsi_tx_phy_int_stat_test; /* 0x00z718 */
206 uint64_t ce_lsi_tx_phy_int_mask; /* 0x00z720 */
207 uint64_t ce_lsi_tx_phy_stat2; /* 0x00z728 */
208 uint64_t ce_pad_00z730[10]; /* 0x00z730 -- 0x00z77F */
209 uint64_t ce_lsi_ltssm_cfg1; /* 0x00z780 */
210 uint64_t ce_lsi_ltssm_cfg2; /* 0x00z788 */
211 uint64_t ce_lsi_ltssm_cfg3; /* 0x00z790 */
212 uint64_t ce_lsi_ltssm_cfg4; /* 0x00z798 */
213 uint64_t ce_lsi_ltssm_cfg5; /* 0x00z7A0 */
214 uint64_t ce_lsi_ltssm_stat1; /* 0x00z7A8 */
215 uint64_t ce_lsi_ltssm_stat2; /* 0x00z7B0 */
216 uint64_t ce_lsi_ltssm_int_stat; /* 0x00z7B8 */
217 uint64_t ce_lsi_ltssm_int_stat_test; /* 0x00z7C0 */
218 uint64_t ce_lsi_ltssm_int_mask; /* 0x00z7C8 */
219 uint64_t ce_lsi_ltssm_stat_wr_en; /* 0x00z7D0 */
220 uint64_t ce_pad_00z7D8[5]; /* 0x00z7D8 -- 0x00z7F8 */
221 uint64_t ce_lsi_gb_cfg1; /* 0x00z800 */
222 uint64_t ce_lsi_gb_cfg2; /* 0x00z808 */
223 uint64_t ce_lsi_gb_cfg3; /* 0x00z810 */
224 uint64_t ce_lsi_gb_cfg4; /* 0x00z818 */
225 uint64_t ce_lsi_gb_stat; /* 0x00z820 */
226 uint64_t ce_lsi_gb_int_stat; /* 0x00z828 */
227 uint64_t ce_lsi_gb_int_stat_test; /* 0x00z830 */
228 uint64_t ce_lsi_gb_int_mask; /* 0x00z838 */
229 uint64_t ce_lsi_gb_pwr_dn1; /* 0x00z840 */
230 uint64_t ce_lsi_gb_pwr_dn2; /* 0x00z848 */
231 uint64_t ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */
232 } ce_lsi[2];
233
234 uint64_t ce_pad_004000[10]; /* 0x004000 -- 0x004048 */
235
236 /*
237 * CRM: Coretalk Receive Module Registers
238 */
239 uint64_t ce_crm_debug_mux; /* 0x004050 */
240 uint64_t ce_pad_004058; /* 0x004058 */
241 uint64_t ce_crm_ssp_err_cmd_wrd; /* 0x004060 */
242 uint64_t ce_crm_ssp_err_addr; /* 0x004068 */
243 uint64_t ce_crm_ssp_err_syn; /* 0x004070 */
244
245 uint64_t ce_pad_004078[499]; /* 0x004078 -- 0x005008 */
246
247 /*
248 * CXM: Coretalk Xmit Module Registers
249 */
250 uint64_t ce_cxm_dyn_credit_status; /* 0x005010 */
251 uint64_t ce_cxm_last_credit_status; /* 0x005018 */
252 uint64_t ce_cxm_credit_limit; /* 0x005020 */
253 uint64_t ce_cxm_force_credit; /* 0x005028 */
254 uint64_t ce_cxm_disable_bypass; /* 0x005030 */
255 uint64_t ce_pad_005038[3]; /* 0x005038 -- 0x005048 */
256 uint64_t ce_cxm_debug_mux; /* 0x005050 */
257
258 uint64_t ce_pad_005058[501]; /* 0x005058 -- 0x005FF8 */
259
260 /*
261 * DTL: Downstream Transaction Layer Regs (Link#1 and Link#2)
262 * DTL: Link#1 MMRs at start at 0x006000, Link#2 MMRs at 0x008000
263 * DTL: the comment offsets at far right: let 'y' = {6 or 8}
264 *
265 * UTL: Downstream Transaction Layer Regs (Link#1 and Link#2)
266 * UTL: Link#1 MMRs at start at 0x007000, Link#2 MMRs at 0x009000
267 * UTL: the comment offsets at far right: let 'z' = {7 or 9}
268 */
269 #define ce_dtl(link_num) ce_dtl_utl[link_num-1]
270 #define ce_utl(link_num) ce_dtl_utl[link_num-1]
271 struct ce_dtl_utl_reg {
272 /* DTL */
273 uint64_t ce_dtl_dtdr_credit_limit; /* 0x00y000 */
274 uint64_t ce_dtl_dtdr_credit_force; /* 0x00y008 */
275 uint64_t ce_dtl_dyn_credit_status; /* 0x00y010 */
276 uint64_t ce_dtl_dtl_last_credit_stat; /* 0x00y018 */
277 uint64_t ce_dtl_dtl_ctrl; /* 0x00y020 */
278 uint64_t ce_pad_00y028[5]; /* 0x00y028 -- 0x00y048 */
279 uint64_t ce_dtl_debug_sel; /* 0x00y050 */
280 uint64_t ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */
281
282 /* UTL */
283 uint64_t ce_utl_utl_ctrl; /* 0x00z000 */
284 uint64_t ce_utl_debug_sel; /* 0x00z008 */
285 uint64_t ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */
286 } ce_dtl_utl[2];
287
288 uint64_t ce_pad_00A000[514]; /* 0x00A000 -- 0x00B008 */
289
290 /*
291 * URE: Upstream Request Engine
292 */
293 uint64_t ce_ure_dyn_credit_status; /* 0x00B010 */
294 uint64_t ce_ure_last_credit_status; /* 0x00B018 */
295 uint64_t ce_ure_credit_limit; /* 0x00B020 */
296 uint64_t ce_pad_00B028; /* 0x00B028 */
297 uint64_t ce_ure_control; /* 0x00B030 */
298 uint64_t ce_ure_status; /* 0x00B038 */
299 uint64_t ce_pad_00B040[2]; /* 0x00B040 -- 0x00B048 */
300 uint64_t ce_ure_debug_sel; /* 0x00B050 */
301 uint64_t ce_ure_pcie_debug_sel; /* 0x00B058 */
302 uint64_t ce_ure_ssp_err_cmd_wrd; /* 0x00B060 */
303 uint64_t ce_ure_ssp_err_addr; /* 0x00B068 */
304 uint64_t ce_ure_page_map; /* 0x00B070 */
305 uint64_t ce_ure_dir_map[TIOCE_NUM_PORTS]; /* 0x00B078 */
306 uint64_t ce_ure_pipe_sel1; /* 0x00B088 */
307 uint64_t ce_ure_pipe_mask1; /* 0x00B090 */
308 uint64_t ce_ure_pipe_sel2; /* 0x00B098 */
309 uint64_t ce_ure_pipe_mask2; /* 0x00B0A0 */
310 uint64_t ce_ure_pcie1_credits_sent; /* 0x00B0A8 */
311 uint64_t ce_ure_pcie1_credits_used; /* 0x00B0B0 */
312 uint64_t ce_ure_pcie1_credit_limit; /* 0x00B0B8 */
313 uint64_t ce_ure_pcie2_credits_sent; /* 0x00B0C0 */
314 uint64_t ce_ure_pcie2_credits_used; /* 0x00B0C8 */
315 uint64_t ce_ure_pcie2_credit_limit; /* 0x00B0D0 */
316 uint64_t ce_ure_pcie_force_credit; /* 0x00B0D8 */
317 uint64_t ce_ure_rd_tnum_val; /* 0x00B0E0 */
318 uint64_t ce_ure_rd_tnum_rsp_rcvd; /* 0x00B0E8 */
319 uint64_t ce_ure_rd_tnum_esent_timer; /* 0x00B0F0 */
320 uint64_t ce_ure_rd_tnum_error; /* 0x00B0F8 */
321 uint64_t ce_ure_rd_tnum_first_cl; /* 0x00B100 */
322 uint64_t ce_ure_rd_tnum_link_buf; /* 0x00B108 */
323 uint64_t ce_ure_wr_tnum_val; /* 0x00B110 */
324 uint64_t ce_ure_sram_err_addr0; /* 0x00B118 */
325 uint64_t ce_ure_sram_err_addr1; /* 0x00B120 */
326 uint64_t ce_ure_sram_err_addr2; /* 0x00B128 */
327 uint64_t ce_ure_sram_rd_addr0; /* 0x00B130 */
328 uint64_t ce_ure_sram_rd_addr1; /* 0x00B138 */
329 uint64_t ce_ure_sram_rd_addr2; /* 0x00B140 */
330 uint64_t ce_ure_sram_wr_addr0; /* 0x00B148 */
331 uint64_t ce_ure_sram_wr_addr1; /* 0x00B150 */
332 uint64_t ce_ure_sram_wr_addr2; /* 0x00B158 */
333 uint64_t ce_ure_buf_flush10; /* 0x00B160 */
334 uint64_t ce_ure_buf_flush11; /* 0x00B168 */
335 uint64_t ce_ure_buf_flush12; /* 0x00B170 */
336 uint64_t ce_ure_buf_flush13; /* 0x00B178 */
337 uint64_t ce_ure_buf_flush20; /* 0x00B180 */
338 uint64_t ce_ure_buf_flush21; /* 0x00B188 */
339 uint64_t ce_ure_buf_flush22; /* 0x00B190 */
340 uint64_t ce_ure_buf_flush23; /* 0x00B198 */
341 uint64_t ce_ure_pcie_control1; /* 0x00B1A0 */
342 uint64_t ce_ure_pcie_control2; /* 0x00B1A8 */
343
344 uint64_t ce_pad_00B1B0[458]; /* 0x00B1B0 -- 0x00BFF8 */
345
346 /* Upstream Data Buffer, Port1 */
347 struct ce_ure_maint_ups_dat1_data {
348 uint64_t data63_0[512]; /* 0x00C000 -- 0x00CFF8 */
349 uint64_t data127_64[512]; /* 0x00D000 -- 0x00DFF8 */
350 uint64_t parity[512]; /* 0x00E000 -- 0x00EFF8 */
351 } ce_ure_maint_ups_dat1;
352
353 /* Upstream Header Buffer, Port1 */
354 struct ce_ure_maint_ups_hdr1_data {
355 uint64_t data63_0[512]; /* 0x00F000 -- 0x00FFF8 */
356 uint64_t data127_64[512]; /* 0x010000 -- 0x010FF8 */
357 uint64_t parity[512]; /* 0x011000 -- 0x011FF8 */
358 } ce_ure_maint_ups_hdr1;
359
360 /* Upstream Data Buffer, Port2 */
361 struct ce_ure_maint_ups_dat2_data {
362 uint64_t data63_0[512]; /* 0x012000 -- 0x012FF8 */
363 uint64_t data127_64[512]; /* 0x013000 -- 0x013FF8 */
364 uint64_t parity[512]; /* 0x014000 -- 0x014FF8 */
365 } ce_ure_maint_ups_dat2;
366
367 /* Upstream Header Buffer, Port2 */
368 struct ce_ure_maint_ups_hdr2_data {
369 uint64_t data63_0[512]; /* 0x015000 -- 0x015FF8 */
370 uint64_t data127_64[512]; /* 0x016000 -- 0x016FF8 */
371 uint64_t parity[512]; /* 0x017000 -- 0x017FF8 */
372 } ce_ure_maint_ups_hdr2;
373
374 /* Downstream Data Buffer */
375 struct ce_ure_maint_dns_dat_data {
376 uint64_t data63_0[512]; /* 0x018000 -- 0x018FF8 */
377 uint64_t data127_64[512]; /* 0x019000 -- 0x019FF8 */
378 uint64_t parity[512]; /* 0x01A000 -- 0x01AFF8 */
379 } ce_ure_maint_dns_dat;
380
381 /* Downstream Header Buffer */
382 struct ce_ure_maint_dns_hdr_data {
383 uint64_t data31_0[64]; /* 0x01B000 -- 0x01B1F8 */
384 uint64_t data95_32[64]; /* 0x01B200 -- 0x01B3F8 */
385 uint64_t parity[64]; /* 0x01B400 -- 0x01B5F8 */
386 } ce_ure_maint_dns_hdr;
387
388 /* RCI Buffer Data */
389 struct ce_ure_maint_rci_data {
390 uint64_t data41_0[64]; /* 0x01B600 -- 0x01B7F8 */
391 uint64_t data69_42[64]; /* 0x01B800 -- 0x01B9F8 */
392 } ce_ure_maint_rci;
393
394 /* Response Queue */
395 uint64_t ce_ure_maint_rspq[64]; /* 0x01BA00 -- 0x01BBF8 */
396
397 uint64_t ce_pad_01C000[4224]; /* 0x01BC00 -- 0x023FF8 */
398
399 /* Admin Build-a-Packet Buffer */
400 struct ce_adm_maint_bap_buf_data {
401 uint64_t data63_0[258]; /* 0x024000 -- 0x024808 */
402 uint64_t data127_64[258]; /* 0x024810 -- 0x025018 */
403 uint64_t parity[258]; /* 0x025020 -- 0x025828 */
404 } ce_adm_maint_bap_buf;
405
406 uint64_t ce_pad_025830[5370]; /* 0x025830 -- 0x02FFF8 */
407
408 /* URE: 40bit PMU ATE Buffer */ /* 0x030000 -- 0x037FF8 */
409 uint64_t ce_ure_ate40[TIOCE_NUM_M40_ATES];
410
411 /* URE: 32/40bit PMU ATE Buffer */ /* 0x038000 -- 0x03BFF8 */
412 uint64_t ce_ure_ate3240[TIOCE_NUM_M3240_ATES];
413
414 uint64_t ce_pad_03C000[2050]; /* 0x03C000 -- 0x040008 */
415
416 /*
417 * DRE: Downstream Request Engine
418 */
419 uint64_t ce_dre_dyn_credit_status1; /* 0x040010 */
420 uint64_t ce_dre_dyn_credit_status2; /* 0x040018 */
421 uint64_t ce_dre_last_credit_status1; /* 0x040020 */
422 uint64_t ce_dre_last_credit_status2; /* 0x040028 */
423 uint64_t ce_dre_credit_limit1; /* 0x040030 */
424 uint64_t ce_dre_credit_limit2; /* 0x040038 */
425 uint64_t ce_dre_force_credit1; /* 0x040040 */
426 uint64_t ce_dre_force_credit2; /* 0x040048 */
427 uint64_t ce_dre_debug_mux1; /* 0x040050 */
428 uint64_t ce_dre_debug_mux2; /* 0x040058 */
429 uint64_t ce_dre_ssp_err_cmd_wrd; /* 0x040060 */
430 uint64_t ce_dre_ssp_err_addr; /* 0x040068 */
431 uint64_t ce_dre_comp_err_cmd_wrd; /* 0x040070 */
432 uint64_t ce_dre_comp_err_addr; /* 0x040078 */
433 uint64_t ce_dre_req_status; /* 0x040080 */
434 uint64_t ce_dre_config1; /* 0x040088 */
435 uint64_t ce_dre_config2; /* 0x040090 */
436 uint64_t ce_dre_config_req_status; /* 0x040098 */
437 uint64_t ce_pad_0400A0[12]; /* 0x0400A0 -- 0x0400F8 */
438 uint64_t ce_dre_dyn_fifo; /* 0x040100 */
439 uint64_t ce_pad_040108[3]; /* 0x040108 -- 0x040118 */
440 uint64_t ce_dre_last_fifo; /* 0x040120 */
441
442 uint64_t ce_pad_040128[27]; /* 0x040128 -- 0x0401F8 */
443
444 /* DRE Downstream Head Queue */
445 struct ce_dre_maint_ds_head_queue {
446 uint64_t data63_0[32]; /* 0x040200 -- 0x0402F8 */
447 uint64_t data127_64[32]; /* 0x040300 -- 0x0403F8 */
448 uint64_t parity[32]; /* 0x040400 -- 0x0404F8 */
449 } ce_dre_maint_ds_head_q;
450
451 uint64_t ce_pad_040500[352]; /* 0x040500 -- 0x040FF8 */
452
453 /* DRE Downstream Data Queue */
454 struct ce_dre_maint_ds_data_queue {
455 uint64_t data63_0[256]; /* 0x041000 -- 0x0417F8 */
456 uint64_t ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */
457 uint64_t data127_64[256]; /* 0x042000 -- 0x0427F8 */
458 uint64_t ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */
459 uint64_t parity[256]; /* 0x043000 -- 0x0437F8 */
460 uint64_t ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */
461 } ce_dre_maint_ds_data_q;
462
463 /* DRE URE Upstream Response Queue */
464 struct ce_dre_maint_ure_us_rsp_queue {
465 uint64_t data63_0[8]; /* 0x044000 -- 0x044038 */
466 uint64_t ce_pad_044040[24]; /* 0x044040 -- 0x0440F8 */
467 uint64_t data127_64[8]; /* 0x044100 -- 0x044138 */
468 uint64_t ce_pad_044140[24]; /* 0x044140 -- 0x0441F8 */
469 uint64_t parity[8]; /* 0x044200 -- 0x044238 */
470 uint64_t ce_pad_044240[24]; /* 0x044240 -- 0x0442F8 */
471 } ce_dre_maint_ure_us_rsp_q;
472
473 uint64_t ce_dre_maint_us_wrt_rsp[32];/* 0x044300 -- 0x0443F8 */
474
475 uint64_t ce_end_of_struct; /* 0x044400 */
476} tioce_t;
477
478
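A minimal usage sketch for the structure above, assuming ce_base is the kernel's MMIO mapping of the CE ASIC (name assumed): the ce_dtl()/ce_utl() helpers simply index the shared ce_dtl_utl[] block by link number.

	volatile tioce_t *ce_mmr = (volatile tioce_t *)ce_base;	/* ce_base: assumed MMR mapping */
	uint64_t dtl_ctrl, utl_ctrl;

	dtl_ctrl = ce_mmr->ce_dtl(1).ce_dtl_dtl_ctrl;	/* link #1 DTL control */
	utl_ctrl = ce_mmr->ce_utl(2).ce_utl_utl_ctrl;	/* link #2 UTL control */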
479/* ce_adm_int_mask/ce_adm_int_status register bit defines */
480#define CE_ADM_INT_CE_ERROR_SHFT 0
481#define CE_ADM_INT_LSI1_IP_ERROR_SHFT 1
482#define CE_ADM_INT_LSI2_IP_ERROR_SHFT 2
483#define CE_ADM_INT_PCIE_ERROR_SHFT 3
484#define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT 4
485#define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT 5
486#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT 6
487#define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT 7
488#define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT 8
489#define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT 9
490#define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT 10
491#define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT 11
492#define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT 12
493#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT 13
494#define CE_ADM_INT_PCIE_MSG_SHFT 14 /*see int_dest_14*/
495#define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT 14
496#define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT 15
497#define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT 16
498#define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT 17
499#define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT 22
500#define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT 23
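A sketch of how these shift values combine into a bit mask, assuming ce_mmr points at the mapped tioce_t and ce_adm_int_status is the status MMR named in the comment above (bit polarity and handling follow the CE hardware spec):

	uint64_t int_bits;

	int_bits = (1ULL << CE_ADM_INT_CE_ERROR_SHFT) |
		   (1ULL << CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT) |
		   (1ULL << CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT);

	if (ce_mmr->ce_adm_int_status & int_bits) {
		/* a CE internal error or a hot-plug event is pending */
	}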
501
502/* ce_adm_force_int register bit defines */
503#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT 0
504#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT 1
505#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT 2
506#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT 3
507#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT 4
508#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT 5
509#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT 6
510#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT 7
511#define CE_ADM_FORCE_INT_ALWAYS_SHFT 8
512
513/* ce_adm_int_dest register bit masks & shifts */
514#define INTR_VECTOR_SHFT 56
515
516/* ce_adm_error_mask and ce_adm_error_summary register bit masks */
517#define CE_ADM_ERR_CRM_SSP_REQ_INVALID (0x1ULL << 0)
518#define CE_ADM_ERR_SSP_REQ_HEADER (0x1ULL << 1)
519#define CE_ADM_ERR_SSP_RSP_HEADER (0x1ULL << 2)
520#define CE_ADM_ERR_SSP_PROTOCOL_ERROR (0x1ULL << 3)
521#define CE_ADM_ERR_SSP_SBE (0x1ULL << 4)
522#define CE_ADM_ERR_SSP_MBE (0x1ULL << 5)
523#define CE_ADM_ERR_CXM_CREDIT_OFLOW (0x1ULL << 6)
524#define CE_ADM_ERR_DRE_SSP_REQ_INVAL (0x1ULL << 7)
525#define CE_ADM_ERR_SSP_REQ_LONG (0x1ULL << 8)
526#define CE_ADM_ERR_SSP_REQ_OFLOW (0x1ULL << 9)
527#define CE_ADM_ERR_SSP_REQ_SHORT (0x1ULL << 10)
528#define CE_ADM_ERR_SSP_REQ_SIDEBAND (0x1ULL << 11)
529#define CE_ADM_ERR_SSP_REQ_ADDR_ERR (0x1ULL << 12)
530#define CE_ADM_ERR_SSP_REQ_BAD_BE (0x1ULL << 13)
531#define CE_ADM_ERR_PCIE_COMPL_TIMEOUT (0x1ULL << 14)
532#define CE_ADM_ERR_PCIE_UNEXP_COMPL (0x1ULL << 15)
533#define CE_ADM_ERR_PCIE_ERR_COMPL (0x1ULL << 16)
534#define CE_ADM_ERR_DRE_CREDIT_OFLOW (0x1ULL << 17)
535#define CE_ADM_ERR_DRE_SRAM_PE (0x1ULL << 18)
536#define CE_ADM_ERR_SSP_RSP_INVALID (0x1ULL << 19)
537#define CE_ADM_ERR_SSP_RSP_LONG (0x1ULL << 20)
538#define CE_ADM_ERR_SSP_RSP_SHORT (0x1ULL << 21)
539#define CE_ADM_ERR_SSP_RSP_SIDEBAND (0x1ULL << 22)
540#define CE_ADM_ERR_URE_SSP_RSP_UNEXP (0x1ULL << 23)
541#define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT (0x1ULL << 24)
542#define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT (0x1ULL << 25)
543#define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT (0x1ULL << 26)
544#define CE_ADM_ERR_URE_ATE40_PAGE_FAULT (0x1ULL << 27)
545#define CE_ADM_ERR_URE_CREDIT_OFLOW (0x1ULL << 28)
546#define CE_ADM_ERR_URE_SRAM_PE (0x1ULL << 29)
547#define CE_ADM_ERR_ADM_SSP_RSP_UNEXP (0x1ULL << 30)
548#define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT (0x1ULL << 31)
549#define CE_ADM_ERR_MMR_ACCESS_ERROR (0x1ULL << 32)
550#define CE_ADM_ERR_MMR_ADDR_ERROR (0x1ULL << 33)
551#define CE_ADM_ERR_ADM_CREDIT_OFLOW (0x1ULL << 34)
552#define CE_ADM_ERR_ADM_SRAM_PE (0x1ULL << 35)
553#define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR (0x1ULL << 36)
554#define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 37)
555#define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 38)
556#define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 39)
557#define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR (0x1ULL << 40)
558#define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR (0x1ULL << 41)
559#define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 42)
560#define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 43)
561#define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR (0x1ULL << 44)
562#define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR (0x1ULL << 45)
563#define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR (0x1ULL << 46)
564#define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 47)
565#define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 48)
566#define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 49)
567#define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR (0x1ULL << 50)
568#define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR (0x1ULL << 51)
569#define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 52)
570#define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 53)
571#define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR (0x1ULL << 54)
572#define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR (0x1ULL << 55)
573#define CE_ADM_ERR_PORT1_PCIE_COR_ERR (0x1ULL << 56)
574#define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR (0x1ULL << 57)
575#define CE_ADM_ERR_PORT1_PCIE_FAT_ERR (0x1ULL << 58)
576#define CE_ADM_ERR_PORT2_PCIE_COR_ERR (0x1ULL << 59)
577#define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR (0x1ULL << 60)
578#define CE_ADM_ERR_PORT2_PCIE_FAT_ERR (0x1ULL << 61)
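A sketch of decoding the error summary against these masks, assuming ce_mmr points at the mapped tioce_t and ce_adm_error_summary is the summary MMR named in the comment above:

	uint64_t errs = ce_mmr->ce_adm_error_summary;

	if (errs & (CE_ADM_ERR_PORT1_PCIE_FAT_ERR | CE_ADM_ERR_PORT2_PCIE_FAT_ERR)) {
		/* a fatal PCIe error was reported on one of the ports */
	}
	if (errs & CE_ADM_ERR_URE_ATE3240_PAGE_FAULT) {
		/* an upstream DMA missed in the 32/40-bit ATE map */
	}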
579
580/* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */
581#define FLUSH_SEL_PORT1_PIPE0_SHFT 0
582#define FLUSH_SEL_PORT1_PIPE1_SHFT 4
583#define FLUSH_SEL_PORT1_PIPE2_SHFT 8
584#define FLUSH_SEL_PORT1_PIPE3_SHFT 12
585#define FLUSH_SEL_PORT2_PIPE0_SHFT 16
586#define FLUSH_SEL_PORT2_PIPE1_SHFT 20
587#define FLUSH_SEL_PORT2_PIPE2_SHFT 24
588#define FLUSH_SEL_PORT2_PIPE3_SHFT 28
589
590/* ce_dre_config1 register bit masks and shifts */
591#define CE_DRE_RO_ENABLE (0x1ULL << 0)
592#define CE_DRE_DYN_RO_ENABLE (0x1ULL << 1)
593#define CE_DRE_SUP_CONFIG_COMP_ERROR (0x1ULL << 2)
594#define CE_DRE_SUP_IO_COMP_ERROR (0x1ULL << 3)
595#define CE_DRE_ADDR_MODE_SHFT 4
596
597/* ce_dre_config_req_status register bit masks */
598#define CE_DRE_LAST_CONFIG_COMPLETION (0x7ULL << 0)
599#define CE_DRE_DOWNSTREAM_CONFIG_ERROR (0x1ULL << 3)
600#define CE_DRE_CONFIG_COMPLETION_VALID (0x1ULL << 4)
601#define CE_DRE_CONFIG_REQUEST_ACTIVE (0x1ULL << 5)
602
603/* ce_ure_control register bit masks & shifts */
604#define CE_URE_RD_MRG_ENABLE (0x1ULL << 0)
605#define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4)
606#define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5)
607#define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24)
608#define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32)
609#define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33)
610#define CE_URE_UPS_DAT2_PAR_DISABLE (0x1ULL << 34)
611#define CE_URE_UPS_HDR2_PAR_DISABLE (0x1ULL << 35)
612#define CE_URE_ATE_PAR_DISABLE (0x1ULL << 36)
613#define CE_URE_RCI_PAR_DISABLE (0x1ULL << 37)
614#define CE_URE_RSPQ_PAR_DISABLE (0x1ULL << 38)
615#define CE_URE_DNS_DAT_PAR_DISABLE (0x1ULL << 39)
616#define CE_URE_DNS_HDR_PAR_DISABLE (0x1ULL << 40)
617#define CE_URE_MALFORM_DISABLE (0x1ULL << 44)
618#define CE_URE_UNSUP_DISABLE (0x1ULL << 45)
619
620/* ce_ure_page_map register bit masks & shifts */
621#define CE_URE_ATE3240_ENABLE (0x1ULL << 0)
622#define CE_URE_ATE40_ENABLE (0x1ULL << 1)
623#define CE_URE_PAGESIZE_SHFT 4
624#define CE_URE_PAGESIZE_MASK (0x7ULL << CE_URE_PAGESIZE_SHFT)
625#define CE_URE_4K_PAGESIZE (0x0ULL << CE_URE_PAGESIZE_SHFT)
626#define CE_URE_16K_PAGESIZE (0x1ULL << CE_URE_PAGESIZE_SHFT)
627#define CE_URE_64K_PAGESIZE (0x2ULL << CE_URE_PAGESIZE_SHFT)
628#define CE_URE_128K_PAGESIZE (0x3ULL << CE_URE_PAGESIZE_SHFT)
629#define CE_URE_256K_PAGESIZE (0x4ULL << CE_URE_PAGESIZE_SHFT)
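A sketch of selecting the 32/40-bit ATE page size with these fields, assuming ce_mmr points at the mapped tioce_t (ce_ure_page_map is the register at 0x00B070 above):

	uint64_t page_map = ce_mmr->ce_ure_page_map;

	page_map &= ~CE_URE_PAGESIZE_MASK;
	page_map |= CE_URE_256K_PAGESIZE | CE_URE_ATE3240_ENABLE;
	ce_mmr->ce_ure_page_map = page_map;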
630
631/* ce_ure_pipe_sel register bit masks & shifts */
632#define PKT_TRAFIC_SHRT 16
633#define BUS_SRC_ID_SHFT 8
634#define DEV_SRC_ID_SHFT 3
635#define FNC_SRC_ID_SHFT 0
636#define CE_URE_TC_MASK (0x07ULL << PKT_TRAFIC_SHRT)
637#define CE_URE_BUS_MASK (0xFFULL << BUS_SRC_ID_SHFT)
638#define CE_URE_DEV_MASK (0x1FULL << DEV_SRC_ID_SHFT)
639#define CE_URE_FNC_MASK (0x07ULL << FNC_SRC_ID_SHFT)
640#define CE_URE_PIPE_BUS(b) (((uint64_t)(b) << BUS_SRC_ID_SHFT) & \
641 CE_URE_BUS_MASK)
642#define CE_URE_PIPE_DEV(d) (((uint64_t)(d) << DEV_SRC_ID_SHFT) & \
643 CE_URE_DEV_MASK)
644#define CE_URE_PIPE_FNC(f) (((uint64_t)(f) << FNC_SRC_ID_SHFT) & \
645 CE_URE_FNC_MASK)
646
647#define CE_URE_SEL1_SHFT 0
648#define CE_URE_SEL2_SHFT 20
649#define CE_URE_SEL3_SHFT 40
650#define CE_URE_SEL1_MASK (0x7FFFFULL << CE_URE_SEL1_SHFT)
651#define CE_URE_SEL2_MASK (0x7FFFFULL << CE_URE_SEL2_SHFT)
652#define CE_URE_SEL3_MASK (0x7FFFFULL << CE_URE_SEL3_SHFT)
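A sketch of composing a pipe-select entry from these fields, assuming ce_mmr points at the mapped tioce_t; the bus/device/function values are illustrative only:

	uint64_t pipe;

	pipe = CE_URE_PIPE_BUS(0x40) | CE_URE_PIPE_DEV(2) | CE_URE_PIPE_FNC(0);
	ce_mmr->ce_ure_pipe_sel1 = (pipe << CE_URE_SEL1_SHFT) & CE_URE_SEL1_MASK;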
653
654
655/* ce_ure_pipe_mask register bit masks & shifts */
656#define CE_URE_MASK1_SHFT 0
657#define CE_URE_MASK2_SHFT 20
658#define CE_URE_MASK3_SHFT 40
659#define CE_URE_MASK1_MASK (0x7FFFFULL << CE_URE_MASK1_SHFT)
660#define CE_URE_MASK2_MASK (0x7FFFFULL << CE_URE_MASK2_SHFT)
661#define CE_URE_MASK3_MASK (0x7FFFFULL << CE_URE_MASK3_SHFT)
662
663
664/* ce_ure_pcie_control1 register bit masks & shifts */
665#define CE_URE_SI (0x1ULL << 0)
666#define CE_URE_ELAL_SHFT 4
667#define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT)
668#define CE_URE_ELAL1_SHFT 8
669#define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT)
670#define CE_URE_SCC (0x1ULL << 12)
671#define CE_URE_PN1_SHFT 16
672#define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT)
673#define CE_URE_PN2_SHFT 24
674#define CE_URE_PN2_MASK (0xFFULL << CE_URE_PN2_SHFT)
675#define CE_URE_PN1_SET(n) (((uint64_t)(n) << CE_URE_PN1_SHFT) & \
676 CE_URE_PN1_MASK)
677#define CE_URE_PN2_SET(n) (((uint64_t)(n) << CE_URE_PN2_SHFT) & \
678 CE_URE_PN2_MASK)
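A sketch of programming the two PCIe port numbers with the _SET/_MASK helpers, assuming ce_mmr points at the mapped tioce_t and using illustrative values:

	uint64_t ctl1 = ce_mmr->ce_ure_pcie_control1;

	ctl1 &= ~(CE_URE_PN1_MASK | CE_URE_PN2_MASK);
	ctl1 |= CE_URE_PN1_SET(0x10) | CE_URE_PN2_SET(0x11);
	ce_mmr->ce_ure_pcie_control1 = ctl1;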
679
680/* ce_ure_pcie_control2 register bit masks & shifts */
681#define CE_URE_ABP (0x1ULL << 0)
682#define CE_URE_PCP (0x1ULL << 1)
683#define CE_URE_MSP (0x1ULL << 2)
684#define CE_URE_AIP (0x1ULL << 3)
685#define CE_URE_PIP (0x1ULL << 4)
686#define CE_URE_HPS (0x1ULL << 5)
687#define CE_URE_HPC (0x1ULL << 6)
688#define CE_URE_SPLV_SHFT 7
689#define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT)
690#define CE_URE_SPLS_SHFT 15
691#define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT)
692#define CE_URE_PSN1_SHFT 19
693#define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT)
694#define CE_URE_PSN2_SHFT 32
695#define CE_URE_PSN2_MASK (0x1FFFULL << CE_URE_PSN2_SHFT)
696#define CE_URE_PSN1_SET(n) (((uint64_t)(n) << CE_URE_PSN1_SHFT) & \
697 CE_URE_PSN1_MASK)
698#define CE_URE_PSN2_SET(n) (((uint64_t)(n) << CE_URE_PSN2_SHFT) & \
699 CE_URE_PSN2_MASK)
700
701/*
702 * PIO address space ranges for CE
703 */
704
705/* Local CE Registers Space */
706#define CE_PIO_MMR 0x00000000
707#define CE_PIO_MMR_LEN 0x04000000
708
709/* PCI Compatible Config Space */
710#define CE_PIO_CONFIG_SPACE 0x04000000
711#define CE_PIO_CONFIG_SPACE_LEN 0x04000000
712
713/* PCI I/O Space Alias */
714#define CE_PIO_IO_SPACE_ALIAS 0x08000000
715#define CE_PIO_IO_SPACE_ALIAS_LEN 0x08000000
716
717/* PCI Enhanced Config Space */
718#define CE_PIO_E_CONFIG_SPACE 0x10000000
719#define CE_PIO_E_CONFIG_SPACE_LEN 0x10000000
720
721/* PCI I/O Space */
722#define CE_PIO_IO_SPACE 0x100000000
723#define CE_PIO_IO_SPACE_LEN 0x100000000
724
725/* PCI MEM Space */
726#define CE_PIO_MEM_SPACE 0x200000000
727#define CE_PIO_MEM_SPACE_LEN TIO_HWIN_SIZE
728
729
730/*
731 * CE PCI Enhanced Config Space shifts & masks
732 */
733#define CE_E_CONFIG_BUS_SHFT 20
734#define CE_E_CONFIG_BUS_MASK (0xFF << CE_E_CONFIG_BUS_SHFT)
735#define CE_E_CONFIG_DEVICE_SHFT 15
736#define CE_E_CONFIG_DEVICE_MASK (0x1F << CE_E_CONFIG_DEVICE_SHFT)
737#define CE_E_CONFIG_FUNC_SHFT 12
738#define CE_E_CONFIG_FUNC_MASK (0x7 << CE_E_CONFIG_FUNC_SHFT)
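A sketch of forming an enhanced config space PIO offset from these fields; bus 1, device 3, function 0 and the 0x100 register offset are illustrative, and the bits below CE_E_CONFIG_FUNC_SHFT are assumed to carry the register offset:

	uint64_t cfg_offset;

	cfg_offset = CE_PIO_E_CONFIG_SPACE |
		     ((1UL << CE_E_CONFIG_BUS_SHFT)    & CE_E_CONFIG_BUS_MASK) |
		     ((3UL << CE_E_CONFIG_DEVICE_SHFT) & CE_E_CONFIG_DEVICE_MASK) |
		     ((0UL << CE_E_CONFIG_FUNC_SHFT)   & CE_E_CONFIG_FUNC_MASK) |
		     0x100;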
739
740#endif /* __ASM_IA64_SN_TIOCE_H__ */
diff --git a/include/asm-ia64/sn/tioce_provider.h b/include/asm-ia64/sn/tioce_provider.h
new file mode 100644
index 000000000000..7f63dec0a79a
--- /dev/null
+++ b/include/asm-ia64/sn/tioce_provider.h
@@ -0,0 +1,66 @@
1/**************************************************************************
2 * Copyright (C) 2005, Silicon Graphics, Inc. *
3 * *
4 * These coded instructions, statements, and computer programs contain *
5 * unpublished proprietary information of Silicon Graphics, Inc., and *
6 * are protected by Federal copyright law. They may not be disclosed *
7 * to third parties or copied or duplicated in any form, in whole or *
8 * in part, without the prior written consent of Silicon Graphics, Inc. *
9 * *
10 **************************************************************************/
11
12#ifndef _ASM_IA64_SN_CE_PROVIDER_H
13#define _ASM_IA64_SN_CE_PROVIDER_H
14
15#include <asm/sn/pcibus_provider_defs.h>
16#include <asm/sn/tioce.h>
17
18/*
19 * Common TIOCE structure shared between the prom and kernel
20 *
21 * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE
22 * PROM VERSION.
23 */
24struct tioce_common {
25 struct pcibus_bussoft ce_pcibus; /* common pciio header */
26
27 uint32_t ce_rev;
28 uint64_t ce_kernel_private;
29 uint64_t ce_prom_private;
30};
31
32struct tioce_kernel {
33 struct tioce_common *ce_common;
34 spinlock_t ce_lock;
35 struct list_head ce_dmamap_list;
36
37 uint64_t ce_ate40_shadow[TIOCE_NUM_M40_ATES];
38 uint64_t ce_ate3240_shadow[TIOCE_NUM_M3240_ATES];
39 uint32_t ce_ate3240_pagesize;
40
41 uint8_t ce_port1_secondary;
42
43 /* per-port resources */
44 struct {
45 int dirmap_refcnt;
46 uint64_t dirmap_shadow;
47 } ce_port[TIOCE_NUM_PORTS];
48};
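A sketch of how the shadow arrays are intended to pair with the hardware ATE tables in tioce_t, assuming ce_kern/ce_mmr pointers, an ATE index i, and an entry value ate (all names assumed):

	ce_kern->ce_ate3240_shadow[i] = ate;	/* software copy, readable without MMIO */
	ce_mmr->ce_ure_ate3240[i] = ate;	/* hardware table in the CE MMR space */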
49
50struct tioce_dmamap {
51 struct list_head ce_dmamap_list; /* headed by tioce_kernel */
52 uint32_t refcnt;
53
54 uint64_t nbytes; /* # bytes mapped */
55
56 uint64_t ct_start; /* coretalk start address */
57 uint64_t pci_start; /* bus start address */
58
59 uint64_t *ate_hw; /* hw ptr of first ate in map */
 60	uint64_t	*ate_shadow;	/* shadow ptr of first ate */
 61	uint16_t	ate_count;	/* # ates in the map */
62};
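A sketch of walking the per-CE DMA map list headed by tioce_kernel, using the standard <linux/list.h> iterator; ce_kern and ct_addr are assumed names:

	struct tioce_dmamap *map;

	list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
		if (ct_addr >= map->ct_start &&
		    ct_addr < map->ct_start + map->nbytes) {
			map->refcnt++;	/* reuse the existing mapping */
			break;
		}
	}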
63
64extern int tioce_init_provider(void);
65
66#endif /* __ASM_IA64_SN_CE_PROVIDER_H */
diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h
index 21a9f10d6baa..a255006fb7b5 100644
--- a/include/asm-ia64/socket.h
+++ b/include/asm-ia64/socket.h
@@ -23,6 +23,8 @@
23#define SO_BROADCAST 6 23#define SO_BROADCAST 6
24#define SO_SNDBUF 7 24#define SO_SNDBUF 7
25#define SO_RCVBUF 8 25#define SO_RCVBUF 8
26#define SO_SNDBUFFORCE 32
27#define SO_RCVBUFFORCE 33
26#define SO_KEEPALIVE 9 28#define SO_KEEPALIVE 9
27#define SO_OOBINLINE 10 29#define SO_OOBINLINE 10
28#define SO_NO_CHECK 11 30#define SO_NO_CHECK 11
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 909936f25512..d2430aa0d49d 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -93,7 +93,15 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
93# endif /* CONFIG_MCKINLEY */ 93# endif /* CONFIG_MCKINLEY */
94#endif 94#endif
95} 95}
96
96#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) 97#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
98
99/* Unlock by doing an ordered store and releasing the cacheline with nta */
100static inline void _raw_spin_unlock(spinlock_t *x) {
101 barrier();
102 asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
103}
104
97#else /* !ASM_SUPPORTED */ 105#else /* !ASM_SUPPORTED */
98#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 106#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
99# define _raw_spin_lock(x) \ 107# define _raw_spin_lock(x) \
@@ -109,16 +117,16 @@ do { \
109 } while (ia64_spinlock_val); \ 117 } while (ia64_spinlock_val); \
110 } \ 118 } \
111} while (0) 119} while (0)
120#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
112#endif /* !ASM_SUPPORTED */ 121#endif /* !ASM_SUPPORTED */
113 122
114#define spin_is_locked(x) ((x)->lock != 0) 123#define spin_is_locked(x) ((x)->lock != 0)
115#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
116#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) 124#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
117#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) 125#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
118 126
119typedef struct { 127typedef struct {
120 volatile unsigned int read_counter : 31; 128 volatile unsigned int read_counter : 24;
121 volatile unsigned int write_lock : 1; 129 volatile unsigned int write_lock : 8;
122#ifdef CONFIG_PREEMPT 130#ifdef CONFIG_PREEMPT
123 unsigned int break_lock; 131 unsigned int break_lock;
124#endif 132#endif
@@ -174,6 +182,13 @@ do { \
174 (result == 0); \ 182 (result == 0); \
175}) 183})
176 184
185static inline void _raw_write_unlock(rwlock_t *x)
186{
187 u8 *y = (u8 *)x;
188 barrier();
189 asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
190}
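With read_counter:24 and write_lock:8 (see the rwlock_t change above), the write lock occupies the most significant byte of the 32-bit word on little-endian ia64, which is why the st1.rel.nta targets byte offset 3. A C-level sketch of the same effect (the asm form adds the .rel ordering and the .nta non-temporal hint):

	barrier();
	x->write_lock = 0;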
191
177#else /* !ASM_SUPPORTED */ 192#else /* !ASM_SUPPORTED */
178 193
179#define _raw_write_lock(l) \ 194#define _raw_write_lock(l) \
@@ -195,14 +210,14 @@ do { \
195 (ia64_val == 0); \ 210 (ia64_val == 0); \
196}) 211})
197 212
213static inline void _raw_write_unlock(rwlock_t *x)
214{
215 barrier();
216 x->write_lock = 0;
217}
218
198#endif /* !ASM_SUPPORTED */ 219#endif /* !ASM_SUPPORTED */
199 220
200#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) 221#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
201 222
202#define _raw_write_unlock(x) \
203({ \
204 smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
205 clear_bit(31, (x)); \
206})
207
208#endif /* _ASM_IA64_SPINLOCK_H */ 223#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index cd2cf76b2db1..33256db4a7cf 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -19,12 +19,13 @@
19#include <asm/pal.h> 19#include <asm/pal.h>
20#include <asm/percpu.h> 20#include <asm/percpu.h>
21 21
22#define GATE_ADDR __IA64_UL_CONST(0xa000000000000000) 22#define GATE_ADDR RGN_BASE(RGN_GATE)
23
23/* 24/*
24 * 0xa000000000000000+2*PERCPU_PAGE_SIZE 25 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
25 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) 26 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
26 */ 27 */
27#define KERNEL_START __IA64_UL_CONST(0xa000000100000000) 28#define KERNEL_START (GATE_ADDR+0x100000000)
28#define PERCPU_ADDR (-PERCPU_PAGE_SIZE) 29#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
29 30
30#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__