Diffstat (limited to 'include/asm-ia64')
-rw-r--r--   include/asm-ia64/acpi.h        |  2
-rw-r--r--   include/asm-ia64/agp.h         |  1
-rw-r--r--   include/asm-ia64/bitops.h      | 50
-rw-r--r--   include/asm-ia64/compat.h      |  2
-rw-r--r--   include/asm-ia64/gcc_intrin.h  |  4
-rw-r--r--   include/asm-ia64/mca.h         |  6
-rw-r--r--   include/asm-ia64/mca_asm.h     |  3
-rw-r--r--   include/asm-ia64/percpu.h      | 57
-rw-r--r--   include/asm-ia64/pgalloc.h     | 16
-rw-r--r--   include/asm-ia64/processor.h   |  5
-rw-r--r--   include/asm-ia64/sal.h         | 14
-rw-r--r--   include/asm-ia64/sn/xpc.h      |  6
-rw-r--r--   include/asm-ia64/socket.h      |  2
13 files changed, 71 insertions, 97 deletions
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 81bcd5e51789..cd1cc39b5599 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -127,6 +127,8 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #endif

+#define acpi_unlazy_tlb(x)
+
 #endif /*__KERNEL__*/

 #endif /*_ASM_ACPI_H*/
diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h
index 4e517f0e6afa..c11fdd8ab4d7 100644
--- a/include/asm-ia64/agp.h
+++ b/include/asm-ia64/agp.h
@@ -15,7 +15,6 @@
  */
 #define map_page_into_agp(page)		/* nothing */
 #define unmap_page_from_agp(page)	/* nothing */
-#define flush_agp_mappings()		/* nothing */
 #define flush_agp_cache()		mb()

 /* Convert a physical address to an address suitable for the GART. */
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index a1b9719f5fbb..953d3df9dd22 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -122,38 +122,40 @@ clear_bit_unlock (int nr, volatile void *addr)
 }

 /**
- * __clear_bit_unlock - Non-atomically clear a bit with release
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
  *
- * This is like clear_bit_unlock, but the implementation uses a store
+ * Similarly to clear_bit_unlock, the implementation uses a store
  * with release semantics. See also __raw_spin_unlock().
  */
 static __inline__ void
-__clear_bit_unlock(int nr, volatile void *addr)
+__clear_bit_unlock(int nr, void *addr)
 {
-	__u32 mask, new;
-	volatile __u32 *m;
+	__u32 * const m = (__u32 *) addr + (nr >> 5);
+	__u32 const new = *m & ~(1 << (nr & 31));

-	m = (volatile __u32 *)addr + (nr >> 5);
-	mask = ~(1 << (nr & 31));
-	new = *m & mask;
-	barrier();
 	ia64_st4_rel_nta(m, new);
 }

 /**
  * __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
  */
 static __inline__ void
 __clear_bit (int nr, volatile void *addr)
 {
-	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
-	__u32 m = 1 << (nr & 31);
-	*p &= ~m;
+	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }

 /**
  * change_bit - Toggle a bit in memory
- * @nr: Bit to clear
+ * @nr: Bit to toggle
  * @addr: Address to start counting from
  *
  * change_bit() is atomic and may not be reordered.
@@ -178,7 +180,7 @@ change_bit (int nr, volatile void *addr)

 /**
  * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
+ * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
@@ -197,7 +199,7 @@ __change_bit (int nr, volatile void *addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_set_bit (int nr, volatile void *addr)
@@ -247,11 +249,11 @@ __test_and_set_bit (int nr, volatile void *addr)

 /**
  * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_clear_bit (int nr, volatile void *addr)
@@ -272,7 +274,7 @@ test_and_clear_bit (int nr, volatile void *addr)

 /**
  * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
@@ -292,11 +294,11 @@ __test_and_clear_bit(int nr, volatile void * addr)

 /**
  * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
  */
 static __inline__ int
 test_and_change_bit (int nr, volatile void *addr)
@@ -315,8 +317,12 @@ test_and_change_bit (int nr, volatile void *addr)
 	return (old & bit) != 0;
 }

-/*
- * WARNING: non atomic version.
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
  */
 static __inline__ int
 __test_and_change_bit (int nr, void *addr)
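The kernel-doc added above spells out the split between the atomic bit operations and their double-underscore variants. As a rough illustration (not part of this patch; the flag word and functions below are hypothetical), the atomic forms are safe against concurrent updates of the same word, while the non-atomic forms are only valid when the word cannot be touched concurrently:

	static unsigned long hypothetical_flags;	/* example state word */

	static int try_claim(void)
	{
		/* atomic; on ia64 this also implies the acquire side of the barrier */
		return !test_and_set_bit(0, &hypothetical_flags);
	}

	static void reset_under_lock(void)
	{
		/* caller already serializes access, so the cheaper
		 * non-atomic form is enough; it may be reordered */
		__clear_bit(0, &hypothetical_flags);
	}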
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index 0f6e5264ab8f..dfcf75b8426d 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -181,7 +181,7 @@ struct compat_shmid64_ds {
 /*
  * A pointer passed in from user mode. This should not be used for syscall parameters,
  * just declare them as pointers because the syscall entry code will have appropriately
- * comverted them already.
+ * converted them already.
  */
 typedef u32 compat_uptr_t;

diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index e58d3298fa10..de2ed2cbdd84 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -24,7 +24,9 @@
 extern void ia64_bad_param_for_setreg (void);
 extern void ia64_bad_param_for_getreg (void);

-register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+#ifdef __KERNEL__
+register unsigned long ia64_r13 asm ("r13") __used;
+#endif

 #define ia64_setreg(regnum, val)					\
 ({									\
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 823553bf12e6..f1663aa94a52 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -3,9 +3,9 @@
  * Purpose: Machine check handling specific defines
  *
  * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- * Copyright (C) Russ Anderson (rja@sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
  */

 #ifndef _ASM_IA64_MCA_H
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 76203f9a8718..dd2a5b134390 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -1,8 +1,9 @@
 /*
  * File:	mca_asm.h
+ * Purpose:	Machine check handling specific defines
  *
  * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
  * Copyright (C) 2000 Hewlett-Packard Co.
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index c4f1e328a5ba..77f30b664b4e 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -15,69 +15,36 @@

 #include <linux/threads.h>

-#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
-# define __SMALL_ADDR_AREA	__attribute__((__model__ (__small__)))
-#else
-# define __SMALL_ADDR_AREA
-#endif
-
-#define DECLARE_PER_CPU(type, name)				\
-	extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name)				\
-	__attribute__((__section__(".data.percpu")))		\
-	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
-	__attribute__((__section__(".data.percpu.shared_aligned")))	\
-	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name	\
-	____cacheline_aligned_in_smp
-#else
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
-	DEFINE_PER_CPU(type, name)
-#endif
-
-/*
- * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
- * external routine, to avoid include-hell.
- */
-#ifdef CONFIG_SMP
-
-extern unsigned long __per_cpu_offset[NR_CPUS];
-#define per_cpu_offset(x) (__per_cpu_offset[x])

-/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
-DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
+#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+# define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
+#endif

-#define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
-#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
-#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+#define __my_cpu_offset	__ia64_per_cpu_var(local_per_cpu_offset)

-extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
-extern void setup_per_cpu_areas (void);
 extern void *per_cpu_init(void);

 #else /* ! SMP */

-#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var)		per_cpu__##var
-#define __raw_get_cpu_var(var)		per_cpu__##var
+#define PER_CPU_ATTRIBUTES	__attribute__((__section__(".data.percpu")))
+
 #define per_cpu_init()			(__phys_per_cpu_start)

 #endif /* SMP */

-#define EXPORT_PER_CPU_SYMBOL(var)	EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var)	EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 /*
  * Be extremely careful when taking the address of this variable! Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
  * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
  * more efficient.
  */
-#define __ia64_per_cpu_var(var)	(per_cpu__##var)
+#define __ia64_per_cpu_var(var)	per_cpu__##var
+
+#include <asm-generic/percpu.h>
+
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

 #endif /* !__ASSEMBLY__ */

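The comment retained above warns that __ia64_per_cpu_var() yields the virtually remapped alias rather than the canonical per-CPU address. A minimal sketch of the intended split (not from this commit; the variable and functions are hypothetical):

	DEFINE_PER_CPU(unsigned long, hypothetical_counter);

	static void bump(void)
	{
		/* cheap access through the remapped per-CPU page */
		__ia64_per_cpu_var(hypothetical_counter)++;
	}

	static unsigned long *canonical_addr(void)
	{
		/* canonical address, safe to hand to other CPUs */
		return &__get_cpu_var(hypothetical_counter);
	}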
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 67552cad5173..556d988123ac 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -27,7 +27,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }

-static inline void pgd_free(pgd_t * pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	quicklist_free(0, NULL, pgd);
 }
@@ -44,11 +44,11 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }

-static inline void pud_free(pud_t * pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
 	quicklist_free(0, NULL, pud);
 }
-#define __pud_free_tlb(tlb, pud)	pud_free(pud)
+#define __pud_free_tlb(tlb, pud)	pud_free((tlb)->mm, pud)
 #endif /* CONFIG_PGTABLE_4 */

 static inline void
@@ -62,12 +62,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }

-static inline void pmd_free(pmd_t * pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
 	quicklist_free(0, NULL, pmd);
 }

-#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
+#define __pmd_free_tlb(tlb, pmd)	pmd_free((tlb)->mm, pmd)

 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
@@ -94,12 +94,12 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }

-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
 	quicklist_free_page(0, NULL, pte);
 }

-static inline void pte_free_kernel(pte_t * pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
 	quicklist_free(0, NULL, pte);
 }
@@ -109,6 +109,6 @@ static inline void check_pgt_cache(void)
 	quicklist_trim(0, NULL, 25, 16);
 }

-#define __pte_free_tlb(tlb, pte)	pte_free(pte)
+#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, pte)

 #endif /* _ASM_IA64_PGALLOC_H */
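All of the page-table free helpers above now take the mm_struct, matching the updated __*_free_tlb() wrappers. A minimal caller sketch under the new signatures (hypothetical, not part of this patch):

	static void hypothetical_pgd_cycle(struct mm_struct *mm)
	{
		pgd_t *pgd = pgd_alloc(mm);

		if (pgd)
			pgd_free(mm, pgd);	/* mm is now part of the interface */
	}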
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index be3b0ae43270..741f7ecb986a 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -31,7 +31,8 @@
  * each (assuming 8KB page size), for a total of 8TB of user virtual
  * address space.
  */
-#define TASK_SIZE		(current->thread.task_size)
+#define TASK_SIZE_OF(tsk)	((tsk)->thread.task_size)
+#define TASK_SIZE		TASK_SIZE_OF(current)

 /*
  * This decides where the kernel will search for a free chunk of vm
@@ -472,7 +473,7 @@ ia64_set_psr (__u64 psr)
 {
 	ia64_stop();
 	ia64_setreg(_IA64_REG_PSR_L, psr);
-	ia64_srlz_d();
+	ia64_srlz_i();
 }

 /*
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 1f5412d6f9bb..2251118894ae 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -649,17 +649,6 @@ typedef struct err_rec {
  * Now define a couple of inline functions for improved type checking
  * and convenience.
  */
-static inline long
-ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
-		    unsigned long *drift_info)
-{
-	struct ia64_sal_retval isrv;
-
-	SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-	*ticks_per_second = isrv.v0;
-	*drift_info = isrv.v1;
-	return isrv.status;
-}

 extern s64 ia64_sal_cache_flush (u64 cache_type);
 extern void __init check_sal_cache_flush (void);
@@ -841,6 +830,9 @@ extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
 				   u64, u64, u64, u64, u64);
 extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
 				      u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+		    unsigned long *drift_info);
#ifdef CONFIG_HOTPLUG_CPU
 /*
  * System Abstraction Layer Specification
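With ia64_sal_freq_base() reduced to an extern declaration here, callers are unchanged; a sketch of a typical call (hypothetical wrapper, not part of this patch, using the existing SAL_FREQ_BASE_PLATFORM selector):

	static long hypothetical_read_platform_freq(unsigned long *ticks_per_second,
						    unsigned long *drift)
	{
		/* returns the SAL status; 0 means the outputs are valid */
		return ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, ticks_per_second, drift);
	}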
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 8e5d7de9c632..3c0900ab8003 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1211,11 +1211,13 @@ xpc_IPI_init(int index)
 static inline enum xpc_retval
 xpc_map_bte_errors(bte_result_t error)
 {
+	if (error == BTE_SUCCESS)
+		return xpcSuccess;
+
 	if (is_shub2()) {
 		if (BTE_VALID_SH2_ERROR(error))
 			return xpcBteSh2Start + error;
-		else
-			return xpcBteUnmappedError;
+		return xpcBteUnmappedError;
 	}
 	switch (error) {
 	case BTE_SUCCESS: return xpcSuccess;
diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h
index 9e42ce43cfbe..d5ef0aa3e312 100644
--- a/include/asm-ia64/socket.h
+++ b/include/asm-ia64/socket.h
@@ -61,4 +61,6 @@
 #define SO_TIMESTAMPNS		35
 #define SCM_TIMESTAMPNS		SO_TIMESTAMPNS

+#define SO_MARK			36
+
 #endif /* _ASM_IA64_SOCKET_H */