Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/acpi.h          5
-rw-r--r--  include/asm-ia64/mmu.h           8
-rw-r--r--  include/asm-ia64/mmu_context.h  54
-rw-r--r--  include/asm-ia64/pal.h          21
-rw-r--r--  include/asm-ia64/rwsem.h        35
-rw-r--r--  include/asm-ia64/spinlock.h     33
6 files changed, 105 insertions, 51 deletions
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 4c06d455139c..3a544ffc5008 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -116,6 +116,11 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 
 extern u16 ia64_acpiid_to_sapicid[];
 
+/*
+ * Refer Intel ACPI _PDC support document for bit definitions
+ */
+#define ACPI_PDC_EST_CAPABILITY_SMP	0x8
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
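The new ACPI_PDC_EST_CAPABILITY_SMP bit is one of the capability flags a processor driver can report to firmware via the ACPI _PDC method (here: Enhanced SpeedStep support on SMP systems). A minimal standalone sketch of setting such a bit follows; it only illustrates the bit arithmetic, and build_pdc_capabilities() is a hypothetical helper, not the kernel's real _PDC buffer handling.

#include <stdint.h>
#include <stdio.h>

#define ACPI_PDC_EST_CAPABILITY_SMP 0x8	/* from include/asm-ia64/acpi.h */

/* Hypothetical illustration: OR the EST-on-SMP capability into a _PDC
 * capability word before it would be handed to firmware. */
static uint32_t build_pdc_capabilities(int est_supported)
{
	uint32_t caps = 0;

	if (est_supported)
		caps |= ACPI_PDC_EST_CAPABILITY_SMP;
	return caps;
}

int main(void)
{
	printf("_PDC capabilities: 0x%x\n", build_pdc_capabilities(1));
	return 0;
}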
diff --git a/include/asm-ia64/mmu.h b/include/asm-ia64/mmu.h
index ae1525352a25..611432ba579c 100644
--- a/include/asm-ia64/mmu.h
+++ b/include/asm-ia64/mmu.h
@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number. We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number. We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
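nv_mm_context_t is the non-volatile counterpart of mm_context_t: once mm->context has been read, the rest of the code can work on an ordinary unsigned long that the compiler may keep in a register instead of re-reading the volatile field. A minimal standalone sketch of that idea, assuming only the two typedefs above (snapshot() is an illustrative name):

#include <stdio.h>

typedef volatile unsigned long mm_context_t;	/* shared value, may change under us */
typedef unsigned long nv_mm_context_t;		/* private snapshot, ordinary long */

/* One volatile read, then work on the plain copy. */
static nv_mm_context_t snapshot(mm_context_t *ctx)
{
	nv_mm_context_t c = *ctx;
	return c;
}

int main(void)
{
	mm_context_t ctx = 42;

	printf("context snapshot: %lu\n", snapshot(&ctx));
	return 0;
}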
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index e3e5fededb04..0680d163be97 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -55,34 +55,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
+	nv_mm_context_t context = mm->context;
 
-	if (context)
-		return context;
-
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
 		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
 
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.
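The rewritten get_mmu_context() is a double-checked allocation: the fast path reads mm->context without the lock, and the slow path re-checks it under ia64_ctx.lock before handing out a new context number; delayed_tlb_flush() now runs on every call (rather than in activate_mm()) and clears the per-CPU flag under the same lock. A standalone pthread sketch of the same locking pattern, with hypothetical stand-ins (get_context, ctx_next) for the ia64-specific pieces and wrap-around handling omitted:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ctx_next = 1;		/* stands in for ia64_ctx.next */

struct mm {
	volatile unsigned long context;		/* 0 means "not assigned yet" */
};

/* Double-checked allocation, as in the patched get_mmu_context(). */
static unsigned long get_context(struct mm *mm)
{
	unsigned long context = mm->context;

	if (context == 0) {
		pthread_mutex_lock(&ctx_lock);
		context = mm->context;		/* re-check under the lock */
		if (context == 0)
			mm->context = context = ctx_next++;
		pthread_mutex_unlock(&ctx_lock);
	}
	return context;
}

int main(void)
{
	struct mm mm = { 0 };

	printf("context = %lu\n", get_context(&mm));
	printf("context = %lu (unchanged on the second call)\n", get_context(&mm));
	return 0;
}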
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 2303a10ee595..e828377ad295 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -75,6 +75,8 @@
 #define PAL_CACHE_READ		259	/* read tag & data of cacheline for diagnostic testing */
 #define PAL_CACHE_WRITE		260	/* write tag & data of cacheline for diagnostic testing */
 #define PAL_VM_TR_READ		261	/* read contents of translation register */
+#define PAL_GET_PSTATE		262	/* get the current P-state */
+#define PAL_SET_PSTATE		263	/* set the P-state */
 
 #ifndef __ASSEMBLY__
 
@@ -1111,6 +1113,25 @@ ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
 	return iprv.status;
 }
 
+/* Get the current P-state information */
+static inline s64
+ia64_pal_get_pstate (u64 *pstate_index)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_GET_PSTATE, 0, 0, 0);
+	*pstate_index = iprv.v0;
+	return iprv.status;
+}
+
+/* Set the P-state */
+static inline s64
+ia64_pal_set_pstate (u64 pstate_index)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
+	return iprv.status;
+}
+
 /* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
  * suspended, but cache and TLB coherency is maintained.
  */
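Both new wrappers follow the usual PAL convention: PAL_CALL_STK issues the firmware call, iprv.status is the return code (0 on success), and PAL_GET_PSTATE returns the current P-state index in iprv.v0. The standalone sketch below shows the calling pattern only; the two functions are replaced by stubs because the real ones require PAL firmware, and the "index 0 = highest performance" comment reflects the usual P-state convention rather than anything defined in this header.

#include <stdio.h>

typedef long s64;
typedef unsigned long u64;

/* Stubs standing in for the wrappers added to asm-ia64/pal.h. */
static s64 ia64_pal_get_pstate(u64 *pstate_index) { *pstate_index = 2; return 0; }
static s64 ia64_pal_set_pstate(u64 pstate_index)  { (void)pstate_index; return 0; }

int main(void)
{
	u64 cur;

	if (ia64_pal_get_pstate(&cur) == 0)
		printf("current P-state index: %lu\n", cur);

	/* Ask for P-state 0, conventionally the highest-performance state. */
	if (ia64_pal_set_pstate(0) != 0)
		printf("PAL_SET_PSTATE failed\n");
	return 0;
}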
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index 6ece5061dc19..e18b5ab0cb75 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
  * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
  *
  * Based on asm-i386/rwsem.h and other architecture implementation.
  *
@@ -11,9 +12,9 @@
  *
  * The lock count is initialized to 0 (no active and no waiting lockers).
  *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
- * of an uncontended lock. Readers increment by 1 and see a positive value
- * when uncontended, negative if there are writers (and maybe) readers
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
+ * the case of an uncontended lock. Readers increment by 1 and see a positive
+ * value when uncontended, negative if there are writers (and maybe) readers
  * waiting (in which case it goes to sleep).
  */
 
@@ -29,7 +30,7 @@
  * the semaphore definition
  */
 struct rw_semaphore {
-	signed int		count;
+	signed long		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if RWSEM_DEBUG
@@ -37,10 +38,10 @@ struct rw_semaphore {
 #endif
 };
 
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS		__IA64_UL_CONST(0x0000000000000001)
+#define RWSEM_ACTIVE_MASK		__IA64_UL_CONST(0x00000000ffffffff)
+#define RWSEM_WAITING_BIAS		-__IA64_UL_CONST(0x0000000100000000)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
@@ -83,7 +84,7 @@ init_rwsem (struct rw_semaphore *sem)
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
 
 	if (result < 0)
 		rwsem_down_read_failed(sem);
@@ -95,7 +96,7 @@ __down_read (struct rw_semaphore *sem)
 static inline void
 __down_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -112,7 +113,7 @@ __down_write (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-	int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
 
 	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -124,7 +125,7 @@ __up_read (struct rw_semaphore *sem)
 static inline void
 __up_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -141,7 +142,7 @@ __up_write (struct rw_semaphore *sem)
 static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
-	int tmp;
+	long tmp;
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
 			return 1;
@@ -156,7 +157,7 @@ __down_read_trylock (struct rw_semaphore *sem)
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-	int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
 			      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -167,7 +168,7 @@ __down_write_trylock (struct rw_semaphore *sem)
 static inline void
 __downgrade_write (struct rw_semaphore *sem)
 {
-	int old, new;
+	long old, new;
 
 	do {
 		old = sem->count;
@@ -182,7 +183,7 @@ __downgrade_write (struct rw_semaphore *sem)
 * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
 */
-#define rwsem_atomic_add(delta, sem)	atomic_add(delta, (atomic_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic_add_return(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
 #endif /* _ASM_IA64_RWSEM_H */
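The count encoding is unchanged by this patch, only widened to 64 bits: the low 32 bits (RWSEM_ACTIVE_MASK) count active lockers, and RWSEM_WAITING_BIAS pulls the high 32 bits negative while writers are involved, so an uncontended down_write() leaves count at 0xffffffff00000001, matching the updated comment. A small standalone check of that arithmetic (it assumes a 64-bit long, as on ia64):

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0x0000000000000000UL
#define RWSEM_ACTIVE_BIAS	0x0000000000000001UL
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffUL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000UL)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_WRITE_BIAS;	/* uncontended down_write() */
	printf("after down_write: 0x%016lx (negative)\n", (unsigned long)count);

	count -= RWSEM_ACTIVE_WRITE_BIAS;	/* up_write() */
	count += RWSEM_ACTIVE_READ_BIAS;	/* uncontended down_read() */
	printf("after down_read:  0x%016lx (positive => no writers)\n",
	       (unsigned long)count);
	return 0;
}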
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 909936f25512..d2430aa0d49d 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -93,7 +93,15 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 # endif /* CONFIG_MCKINLEY */
 #endif
 }
+
 #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+
+/* Unlock by doing an ordered store and releasing the cacheline with nta */
+static inline void _raw_spin_unlock(spinlock_t *x) {
+	barrier();
+	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+}
+
 #else /* !ASM_SUPPORTED */
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 # define _raw_spin_lock(x) \
@@ -109,16 +117,16 @@ do { \
 		} while (ia64_spinlock_val); \
 	} \
 } while (0)
+#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
 #define spin_is_locked(x)	((x)->lock != 0)
-#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
 #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
 
 typedef struct {
-	volatile unsigned int read_counter	: 31;
-	volatile unsigned int write_lock	:  1;
+	volatile unsigned int read_counter	: 24;
+	volatile unsigned int write_lock	:  8;
 #ifdef CONFIG_PREEMPT
 	unsigned int break_lock;
 #endif
@@ -174,6 +182,13 @@ do { \
 	(result == 0); \
 })
 
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+	u8 *y = (u8 *)x;
+	barrier();
+	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
+}
+
 #else /* !ASM_SUPPORTED */
 
 #define _raw_write_lock(l) \
@@ -195,14 +210,14 @@ do { \
 	(ia64_val == 0); \
 })
 
+static inline void _raw_write_unlock(rwlock_t *x)
+{
+	barrier();
+	x->write_lock = 0;
+}
+
 #endif /* !ASM_SUPPORTED */
 
 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
-#define _raw_write_unlock(x) \
-({ \
-	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */ \
-	clear_bit(31, (x)); \
-})
-
 #endif /* _ASM_IA64_SPINLOCK_H */
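The narrower read_counter (24 bits) and wider write_lock (8 bits) are what make the byte-wide release possible: on little-endian ia64 the write_lock bits occupy the most significant byte of the 32-bit word, i.e. byte offset 3, which is exactly where the new "st1.rel.nta [%0] = r0" with y+3 stores its zero. The standalone demo below checks that layout; it mirrors only the two bit-fields of rwlock_t and assumes GCC's little-endian bit-field allocation, which is compiler/ABI dependent.

#include <stdio.h>
#include <string.h>

typedef struct {
	volatile unsigned int read_counter	: 24;
	volatile unsigned int write_lock	: 8;
} rwlock_demo_t;	/* rwlock_t minus the CONFIG_PREEMPT field */

int main(void)
{
	rwlock_demo_t l = { .read_counter = 5, .write_lock = 1 };
	unsigned char bytes[4];

	memcpy(bytes, (void *)&l, sizeof(bytes));
	printf("byte 3 before release: 0x%02x\n", bytes[3]);	/* 0x01 here */

	/* C equivalent of "st1.rel.nta [x+3] = r0": zero the top byte only. */
	((volatile unsigned char *)&l)[3] = 0;
	printf("write_lock=%u read_counter=%u after release\n",
	       (unsigned)l.write_lock, (unsigned)l.read_counter);	/* 0 and 5 */
	return 0;
}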