Diffstat (limited to 'include/asm-ia64')
36 files changed, 342 insertions, 318 deletions
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index f7a517654308..d734585a23cf 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -111,7 +111,11 @@ extern int additional_cpus;
 
 #ifdef CONFIG_ACPI_NUMA
 /* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
+#ifdef CONFIG_IA64_NR_NODES
+#define MAX_PXM_DOMAINS CONFIG_IA64_NR_NODES
+#else
 #define MAX_PXM_DOMAINS (256)
+#endif
 extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #endif
diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h
index 77af457f4ad7..edf2cebb2969 100644
--- a/include/asm-ia64/asmmacro.h
+++ b/include/asm-ia64/asmmacro.h
@@ -38,6 +38,10 @@ name:
 
 /*
  * Helper macros for accessing user memory.
+ *
+ * When adding any new .section/.previous entries here, make sure to
+ * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
+ * unpleasant things will happen.
  */
 
 	.section "__ex_table", "a"		// declare section & section attributes
@@ -51,6 +55,17 @@ name:
 [99:]	x
 
 /*
+ * Tag MCA recoverable instruction ranges.
+ */
+
+	.section "__mca_table", "a"		// declare section & section attributes
+	.previous
+
+# define MCA_RECOVER_RANGE(y)			\
+	.xdata4 "__mca_table", y-., 99f-.;	\
+  [99:]
+
+/*
  * Mark instructions that need a load of a virtual address patched to be
  * a load of a physical address.  We use this either in critical performance
  * path (ivt.S - TLB miss processing) or in places where it might not be
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index d3e0dfa99e1f..569ec7574baf 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)	\
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
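For readers skimming the hunk above: the new atomic_add_unless() body is the standard cmpxchg retry loop (re-read, bail out on the forbidden value, try the swap, retry on contention). A rough user-space C11 sketch of the same pattern, using <stdatomic.h> as a stand-in for the kernel's atomic_t API (the function name here is illustrative, not part of the patched header):

#include <stdatomic.h>
#include <stdbool.h>

/* Add 'a' to *v unless *v == u; returns true if the add happened. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (c == u)		/* forbidden value reached: give up */
			break;
		/* try to swap c -> c + a; on failure c is reloaded with the
		   current value and we go around again */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c != u;
}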
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 36d0fb95ea89..90921e162793 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -5,8 +5,8 @@
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *
- * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
- *	    scheduler patch
+ * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
+ * O(1) scheduler patch
  */
 
 #include <linux/compiler.h>
@@ -25,9 +25,9 @@
  * restricted to acting on a single-word quantity.
  *
  * The address must be (at least) "long" aligned.
- * Note that there are driver (e.g., eepro100) which use these operations to operate on
- * hw-defined data-structures, so we can't easily change these operations to force a
- * bigger alignment.
+ * Note that there are driver (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
  *
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
@@ -284,8 +284,8 @@ test_bit (int nr, const volatile void *addr)
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
- * Returns the bit-number (0..63) of the first (least significant) zero bit.  Undefined if
- * no zero exists, so code should check against ~0UL first...
+ * Returns the bit-number (0..63) of the first (least significant) zero bit.
+ * Undefined if no zero exists, so code should check against ~0UL first...
 */
 static inline unsigned long
 ffz (unsigned long x)
@@ -345,13 +345,14 @@ fls (int t)
 	x |= x >> 16;
 	return ia64_popcnt(x);
 }
-#define fls64(x)   generic_fls64(x)
+
+#include <asm-generic/bitops/fls64.h>
 
 /*
- * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
- * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
- * "int" values only and the result value is the bit number + 1.  ffs(0) is defined to
- * return zero.
+ * ffs: find first bit set. This is defined the same way as the libc and
+ * compiler builtin ffs routines, therefore differs in spirit from the above
+ * ffz (man ffs): it operates on "int" values only and the result value is the
+ * bit number + 1. ffs(0) is defined to return zero.
 */
 #define ffs(x)	__builtin_ffs(x)
 
@@ -373,51 +374,17 @@ hweight64 (unsigned long x)
 
 #endif /* __KERNEL__ */
 
-extern int __find_next_zero_bit (const void *addr, unsigned long size,
-			unsigned long offset);
-extern int __find_next_bit(const void *addr, unsigned long size,
-			unsigned long offset);
-
-#define find_next_zero_bit(addr, size, offset) \
-			__find_next_zero_bit((addr), (size), (offset))
-#define find_next_bit(addr, size, offset) \
-			__find_next_bit((addr), (size), (offset))
-
-/*
- * The optimizer actually does good code for this case..
- */
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#include <asm-generic/bitops/find.h>
 
 #ifdef __KERNEL__
 
-#define __clear_bit(nr, addr)		clear_bit(nr, addr)
+#include <asm-generic/bitops/ext2-non-atomic.h>
 
-#define ext2_set_bit			test_and_set_bit
 #define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
-#define ext2_clear_bit			test_and_clear_bit
 #define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
-#define ext2_test_bit			test_bit
-#define ext2_find_first_zero_bit	find_first_zero_bit
-#define ext2_find_next_zero_bit		find_next_zero_bit
-
-/* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr)			set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr)			test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
-
-static inline int
-sched_find_first_bit (unsigned long *b)
-{
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return 64 + __ffs(b[1]);
-	return __ffs(b[2]) + 128;
-}
+#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/sched.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
index 40dd25195d65..f0a104db8f20 100644
--- a/include/asm-ia64/cache.h
+++ b/include/asm-ia64/cache.h
@@ -25,4 +25,6 @@
 # define SMP_CACHE_BYTES	(1 << 3)
 #endif
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif /* _ASM_IA64_CACHE_H */
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index c0b19106665c..40d01d80610d 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -189,6 +189,12 @@ compat_ptr (compat_uptr_t uptr)
 	return (void __user *) (unsigned long) uptr;
 }
 
+static inline compat_uptr_t
+ptr_to_compat(void __user *uptr)
+{
+	return (u32)(unsigned long)uptr;
+}
+
 static __inline__ void __user *
 compat_alloc_user_space (long len)
 {
diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h
new file mode 100644
index 000000000000..f3efaa229525
--- /dev/null
+++ b/include/asm-ia64/dmi.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <asm/io.h>
+
+#endif
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a7122d850177..d069b6acddce 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -5,113 +5,10 @@
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
 *
 */
-#include <asm/types.h>
-
-void __lfetch(int lfhint, void *y);
-void __lfetch_excl(int lfhint, void *y);
-void __lfetch_fault(int lfhint, void *y);
-void __lfetch_fault_excl(int lfhint, void *y);
-
-/* In the following, whichFloatReg should be an integer from 0-127 */
-void __ldfs(const int whichFloatReg, void *src);
-void __ldfd(const int whichFloatReg, void *src);
-void __ldfe(const int whichFloatReg, void *src);
-void __ldf8(const int whichFloatReg, void *src);
-void __ldf_fill(const int whichFloatReg, void *src);
-void __stfs(void *dst, const int whichFloatReg);
-void __stfd(void *dst, const int whichFloatReg);
-void __stfe(void *dst, const int whichFloatReg);
-void __stf8(void *dst, const int whichFloatReg);
-void __stf_spill(void *dst, const int whichFloatReg);
-
-void __st1_rel(void *dst, const __s8 value);
-void __st2_rel(void *dst, const __s16 value);
-void __st4_rel(void *dst, const __s32 value);
-void __st8_rel(void *dst, const __s64 value);
-__u8 __ld1_acq(void *src);
-__u16 __ld2_acq(void *src);
-__u32 __ld4_acq(void *src);
-__u64 __ld8_acq(void *src);
-
-__u64 __fetchadd4_acq(__u32 *addend, const int increment);
-__u64 __fetchadd4_rel(__u32 *addend, const int increment);
-__u64 __fetchadd8_acq(__u64 *addend, const int increment);
-__u64 __fetchadd8_rel(__u64 *addend, const int increment);
-
-__u64 __getf_exp(double d);
-
-/* OS Related Itanium(R) Intrinsics */
-
-/* The names to use for whichReg and whichIndReg below come from
-   the include file asm/ia64regs.h */
-
-__u64 __getIndReg(const int whichIndReg, __s64 index);
-__u64 __getReg(const int whichReg);
-
-void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
-void __setReg(const int whichReg, __u64 value);
-
-void __mf(void);
-void __mfa(void);
-void __synci(void);
-void __itcd(__s64 pa);
-void __itci(__s64 pa);
-void __itrd(__s64 whichTransReg, __s64 pa);
-void __itri(__s64 whichTransReg, __s64 pa);
-void __ptce(__s64 va);
-void __ptcl(__s64 va, __s64 pagesz);
-void __ptcg(__s64 va, __s64 pagesz);
-void __ptcga(__s64 va, __s64 pagesz);
-void __ptri(__s64 va, __s64 pagesz);
-void __ptrd(__s64 va, __s64 pagesz);
-void __invala (void);
-void __invala_gr(const int whichGeneralReg /* 0-127 */ );
-void __invala_fr(const int whichFloatReg /* 0-127 */ );
-void __nop(const int);
-void __fc(__u64 *addr);
-void __sum(int mask);
-void __rum(int mask);
-void __ssm(int mask);
-void __rsm(int mask);
-__u64 __thash(__s64);
-__u64 __ttag(__s64);
-__s64 __tpa(__s64);
-
-/* Intrinsics for implementing get/put_user macros */
-void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
-void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
-
-/* This intrinsic does not generate code, it creates a barrier across which
- * the compiler will not schedule data access instructions.
- */
-void __memory_barrier(void);
-
-void __isrlz(void);
-void __dsrlz(void);
-
-__u64 _m64_mux1(__u64 a, const int n);
-__u64 __thash(__u64);
-
-/* Lock and Atomic Operation Related Intrinsics */
-__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
-__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
-__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
-__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
-
-__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
-
-__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
-__s64 _m64_shrp(__s64 a, __s64 b, const int count);
-__s64 _m64_popcnt(__s64 a);
+#include <ia64intrin.h>
 
 #define ia64_barrier()	__memory_barrier()
 
@@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_getreg		__getReg
 #define ia64_setreg		__setReg
 
-#define ia64_hint(x)
+#define ia64_hint		__hint
+#define ia64_hint_pause		__hint_pause
 
-#define ia64_mux1_brcst	0
-#define ia64_mux1_mix		8
-#define ia64_mux1_shuf	9
-#define ia64_mux1_alt		10
-#define ia64_mux1_rev		11
+#define ia64_mux1_brcst		_m64_mux1_brcst
+#define ia64_mux1_mix		_m64_mux1_mix
+#define ia64_mux1_shuf		_m64_mux1_shuf
+#define ia64_mux1_alt		_m64_mux1_alt
+#define ia64_mux1_rev		_m64_mux1_rev
 
-#define ia64_mux1		_m64_mux1
+#define ia64_mux1(x,v)		_m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
 #define ia64_popcnt		_m64_popcnt
 #define ia64_getf_exp		__getf_exp
 #define ia64_shrp		_m64_shrp
@@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_stf8		__stf8
 #define ia64_stf_spill		__stf_spill
 
-#define ia64_mf		__mf
+#define ia64_mf			__mf
 #define ia64_mfa		__mfa
 
 #define ia64_fetchadd4_acq	__fetchadd4_acq
@@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a);
 
 /* Values for lfhint in __lfetch and __lfetch_fault */
 
-#define ia64_lfhint_none	0
-#define ia64_lfhint_nt1		1
-#define ia64_lfhint_nt2		2
-#define ia64_lfhint_nta		3
+#define ia64_lfhint_none	__lfhint_none
+#define ia64_lfhint_nt1		__lfhint_nt1
+#define ia64_lfhint_nt2		__lfhint_nt2
+#define ia64_lfhint_nta		__lfhint_nta
 
 #define ia64_lfetch		__lfetch
 #define ia64_lfetch_excl	__lfetch_excl
@@ -254,4 +152,6 @@ do { \
 	}								\
 } while (0)
 
+#define __builtin_trap()	__break(0);
+
 #endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index b64fdb985494..c2e3742108bb 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -88,8 +88,8 @@ phys_to_virt (unsigned long address)
 }
 
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
-extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count);
+extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
+extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count);
 
 /*
 * The following two macros are deprecated and scheduled for removal.
@@ -416,24 +416,18 @@ __writeq (unsigned long val, volatile void __iomem *addr)
 # define outl_p		outl
 #endif
 
-/*
- * An "address" in IO memory space is not clearly either an integer or a pointer. We will
- * accept both, thus the casts.
- *
- * On ia-64, we access the physical I/O memory space through the uncached kernel region.
- */
-static inline void __iomem *
-ioremap (unsigned long offset, unsigned long size)
-{
-	return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
-}
+extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 
 static inline void
 iounmap (volatile void __iomem *addr)
 {
 }
 
-#define ioremap_nocache(o,s) ioremap(o,s)
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
 
 # ifdef __KERNEL__
 
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index 8b01a083dde6..218c458ab60c 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -40,7 +40,7 @@ struct die_args {
 
 extern int register_die_notifier(struct notifier_block *);
 extern int unregister_die_notifier(struct notifier_block *);
-extern struct notifier_block *ia64die_chain;
+extern struct atomic_notifier_head ia64die_chain;
 
 enum die_val {
 	DIE_BREAK = 1,
@@ -81,7 +81,7 @@ static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
 		.signr	= sig
 	};
 
-	return notifier_call_chain(&ia64die_chain, val, &args);
+	return atomic_notifier_call_chain(&ia64die_chain, val, &args);
 }
 
 #endif
diff --git a/include/asm-ia64/linkage.h b/include/asm-ia64/linkage.h
index 14cd72cd8007..ef22a45c1890 100644
--- a/include/asm-ia64/linkage.h
+++ b/include/asm-ia64/linkage.h
@@ -1,6 +1,14 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
+#ifndef __ASSEMBLY__
+
 #define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
 
+#else
+
+#include <asm/asmmacro.h>
+
+#endif
+
 #endif
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index ca5ea994d688..c3e4ed8a3e17 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -20,6 +20,7 @@ struct scatterlist;
 struct page;
 struct mm_struct;
 struct pci_bus;
+struct task_struct;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -34,6 +35,7 @@ typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
 				       u8 size);
 typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
 					u8 size);
+typedef void ia64_mv_migrate_t(struct task_struct * task);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
@@ -85,6 +87,11 @@ machvec_noop_mm (struct mm_struct *mm)
 {
 }
 
+static inline void
+machvec_noop_task (struct task_struct *task)
+{
+}
+
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
 extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
@@ -146,6 +153,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_readw_relaxed	ia64_mv.readw_relaxed
 # define platform_readl_relaxed	ia64_mv.readl_relaxed
 # define platform_readq_relaxed	ia64_mv.readq_relaxed
+# define platform_migrate		ia64_mv.migrate
 # endif
 
 /* __attribute__((__aligned__(16))) is required to make size of the
@@ -194,6 +202,7 @@ struct ia64_machine_vector {
 	ia64_mv_readw_relaxed_t *readw_relaxed;
 	ia64_mv_readl_relaxed_t *readl_relaxed;
 	ia64_mv_readq_relaxed_t *readq_relaxed;
+	ia64_mv_migrate_t *migrate;
 } __attribute__((__aligned__(16))); /* align attrib? see above comment */
 
 #define MACHVEC_INIT(name)			\
@@ -238,6 +247,7 @@ struct ia64_machine_vector {
 	platform_readw_relaxed,			\
 	platform_readl_relaxed,			\
 	platform_readq_relaxed,			\
+	platform_migrate,			\
 }
 
 extern struct ia64_machine_vector ia64_mv;
@@ -386,5 +396,8 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
 #ifndef platform_readq_relaxed
 # define platform_readq_relaxed	__ia64_readq_relaxed
 #endif
+#ifndef platform_migrate
+# define platform_migrate machvec_noop_task
+#endif
 
 #endif /* _ASM_IA64_MACHVEC_H */
diff --git a/include/asm-ia64/machvec_dig.h b/include/asm-ia64/machvec_dig.h
index 4dc8522c974f..8a0752f40987 100644
--- a/include/asm-ia64/machvec_dig.h
+++ b/include/asm-ia64/machvec_dig.h
@@ -2,7 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_irq_init_t dig_irq_init;
 
 /*
 * This stuff has dual use!
@@ -13,6 +12,5 @@ extern ia64_mv_irq_init_t dig_irq_init;
 */
 #define platform_name		"dig"
 #define platform_setup		dig_setup
-#define platform_irq_init	dig_irq_init
 
 #endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index 03d00faf03b5..da1d43755afe 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002-2003, 2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
@@ -66,6 +66,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
 extern ia64_mv_dma_supported		sn_dma_supported;
+extern ia64_mv_migrate_t		sn_migrate;
 
 /*
 * This stuff has dual use!
@@ -115,6 +116,7 @@ extern ia64_mv_dma_supported sn_dma_supported;
 #define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
 #define platform_dma_mapping_error	sn_dma_mapping_error
 #define platform_dma_supported		sn_dma_supported
+#define platform_migrate		sn_migrate
 
 #include <asm/sn/io.h>
 
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index c7d9c9ed38ba..bfbbb8da79c7 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -131,6 +131,8 @@ struct ia64_mca_cpu {
 /* Array of physical addresses of each CPU's MCA area.  */
 extern unsigned long __per_cpu_mca[NR_CPUS];
 
+extern int cpe_vector;
+extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
index 458c1f7fbc18..5a3224f6af38 100644
--- a/include/asm-ia64/mutex.h
+++ b/include/asm-ia64/mutex.h
@@ -1,9 +1,92 @@
 /*
- * Pull in the generic implementation for the mutex fastpath.
+ * ia64 implementation of the mutex fastpath.
 *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		return fail_fn(count);
+	return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	int ret = ia64_fetchadd4_rel(count, 1);
+	if (unlikely(ret < 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
 */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (likely(cmpxchg_acq(count, 1, 0)) == 1)
+		return 1;
+	return 0;
+}
 
-#include <asm-generic/mutex-dec.h>
+#endif
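The new mutex.h above fills in the kernel's mutex fastpath hooks with ia64 fetchadd and cmpxchg intrinsics. As a hedged illustration of the count protocol those hooks assume (1 = unlocked, 0 = locked, negative = locked with waiters), here is a standalone C11 sketch; the slowpath stubs and the function names are placeholders, not the kernel API:

#include <stdatomic.h>

/* Placeholder slowpaths: the real kernel versions queue/sleep and wake waiters. */
static void lock_slowpath(atomic_int *count)   { (void)count; /* would block until free */ }
static void unlock_slowpath(atomic_int *count) { (void)count; /* would wake one waiter */ }

static void mutex_lock_sketch(atomic_int *count)
{
	/* fetch-and-subtract with acquire ordering, like ia64_fetchadd4_acq(count, -1);
	   the returned value is the count *before* the decrement */
	if (atomic_fetch_sub_explicit(count, 1, memory_order_acquire) != 1)
		lock_slowpath(count);	/* it was not 1, so someone already holds the lock */
}

static void mutex_unlock_sketch(atomic_int *count)
{
	/* release ordering, like ia64_fetchadd4_rel(count, 1); a negative previous
	   value means at least one task took the slowpath and may be waiting */
	if (atomic_fetch_add_explicit(count, 1, memory_order_release) < 0)
		unlock_slowpath(count);
}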
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 3ae128fe0823..dae6aeb7b119 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -23,7 +23,7 @@
 
 #include <asm/mmzone.h>
 
-extern u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 
 /* Stuff below this line could be architecture independent */
diff --git a/include/asm-ia64/numnodes.h b/include/asm-ia64/numnodes.h
index 21cff4da5485..e9d356f549d9 100644
--- a/include/asm-ia64/numnodes.h
+++ b/include/asm-ia64/numnodes.h
@@ -3,13 +3,18 @@
 
 #ifdef CONFIG_IA64_DIG
 /* Max 8 Nodes */
-#define NODES_SHIFT	3
+# define NODES_SHIFT	3
 #elif defined(CONFIG_IA64_HP_ZX1) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
 /* Max 32 Nodes */
-#define NODES_SHIFT	5
+# define NODES_SHIFT	5
 #elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
-/* Max 256 Nodes */
-#define NODES_SHIFT	8
+# if CONFIG_IA64_NR_NODES == 256
+# define NODES_SHIFT	8
+# elif CONFIG_IA64_NR_NODES <= 512
+# define NODES_SHIFT	9
+# elif CONFIG_IA64_NR_NODES <= 1024
+# define NODES_SHIFT	10
+# endif
 #endif
 
 #endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 5e6362a786b7..2087825eefa4 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -57,6 +57,8 @@
 
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 # define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifdef __ASSEMBLY__
@@ -104,17 +106,25 @@ extern int ia64_pfn_valid (unsigned long pfn);
 # define ia64_pfn_valid(pfn) 1
 #endif
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern struct page *vmem_map;
+#ifdef CONFIG_DISCONTIGMEM
+# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
+# define pfn_to_page(pfn)	(vmem_map + (pfn))
+#endif
+#endif
+
+#if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM)
+/* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */
+#include <asm-generic/memory_model.h>
+#endif
+
 #ifdef CONFIG_FLATMEM
 # define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
-# define page_to_pfn(page)	((unsigned long) (page - mem_map))
-# define pfn_to_page(pfn)	(mem_map + (pfn))
 #elif defined(CONFIG_DISCONTIGMEM)
-extern struct page *vmem_map;
 extern unsigned long min_low_pfn;
 extern unsigned long max_low_pfn;
 # define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
-# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
-# define pfn_to_page(pfn)	(vmem_map + (pfn))
 #endif
 
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
@@ -147,7 +157,7 @@ typedef union ia64_va {
 					| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 # define is_hugepage_only_range(mm, addr, len)	\
-	 (REGION_NUMBER(addr) == RGN_HPAGE &&	\
+	 (REGION_NUMBER(addr) == RGN_HPAGE ||	\
 	  REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
 extern unsigned int hpage_shift;
 #endif
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 7708ec669a33..37e52a2836b0 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -68,6 +68,7 @@
 #define PAL_SHUTDOWN		40	/* enter processor shutdown state */
 #define PAL_PREFETCH_VISIBILITY	41	/* Make Processor Prefetches Visible */
 #define PAL_LOGICAL_TO_PHYSICAL 42	/* returns information on logical to physical processor mapping */
+#define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by logical processor */
 
 #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
@@ -130,7 +131,7 @@ typedef u64 pal_cache_line_state_t;
 #define PAL_CACHE_LINE_STATE_MODIFIED	3	/* Modified */
 
 typedef struct pal_freq_ratio {
-	u64 den : 32, num : 32;	/* numerator & denominator */
+	u32 den, num;		/* numerator & denominator */
 } itc_ratio, proc_ratio;
 
 typedef union pal_cache_config_info_1_s {
@@ -151,10 +152,10 @@ typedef union pal_cache_config_info_1_s {
 
 typedef union pal_cache_config_info_2_s {
 	struct {
-		u64	cache_size	: 32,	/*cache size in bytes*/
+		u32	cache_size;		/*cache size in bytes*/
 
 
-			alias_boundary	: 8,	/* 39-32 aliased addr
+		u32	alias_boundary	: 8,	/* 39-32 aliased addr
 						 * separation for max
 						 * performance.
 						 */
@@ -1640,14 +1641,40 @@ ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
 
 	if (iprv.status == PAL_STATUS_SUCCESS)
 	{
-		if (proc_number == 0)
-			mapping->overview.overview_data = iprv.v0;
+		mapping->overview.overview_data = iprv.v0;
 		mapping->ppli1.ppli1_data = iprv.v1;
 		mapping->ppli2.ppli2_data = iprv.v2;
 	}
 
 	return iprv.status;
 }
+
+typedef struct pal_cache_shared_info_s
+{
+	u64 num_shared;
+	pal_proc_n_log_info1_t ppli1;
+	pal_proc_n_log_info2_t ppli2;
+} pal_cache_shared_info_t;
+
+/* Get information on logical to physical processor mappings. */
+static inline s64
+ia64_pal_cache_shared_info(u64 level,
+		u64 type,
+		u64 proc_number,
+		pal_cache_shared_info_t *info)
+{
+	struct ia64_pal_retval iprv;
+
+	PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
+
+	if (iprv.status == PAL_STATUS_SUCCESS) {
+		info->num_shared = iprv.v0;
+		info->ppli1.ppli1_data = iprv.v1;
+		info->ppli2.ppli2_data = iprv.v2;
+	}
+
+	return iprv.status;
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PAL_H */
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e2560c58384b..c0f8144f2349 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -314,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
 #define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
 #define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
-#define pte_mkhuge(pte)		(__pte(pte_val(pte) | _PAGE_P))
+#define pte_mkhuge(pte)		(__pte(pte_val(pte)))
 
 /*
 * Macro to a page protection value as "uncacheable".  Note that "protection" is really a
@@ -505,9 +505,6 @@ extern struct page *zero_page_memmap_ptr;
 #define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
 #define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
 #define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
-struct mmu_gather;
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
-		unsigned long end, unsigned long floor, unsigned long ceiling);
 #endif
 
 /*
diff --git a/include/asm-ia64/poll.h b/include/asm-ia64/poll.h
index 160258a0528d..bcaf9f140242 100644
--- a/include/asm-ia64/poll.h
+++ b/include/asm-ia64/poll.h
@@ -21,6 +21,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP	0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 23c8e1be1911..b3bd58e80690 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -50,7 +50,8 @@
 #define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
 #define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
 #define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
-					/* bit 5 is currently unused */
+#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
+							   sync at ctx sw */
 #define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
 #define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
 
@@ -180,7 +181,6 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
 #define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
 #define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
 
-extern void identify_cpu (struct cpuinfo_ia64 *);
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
 typedef struct {
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
index 608168d713d3..5e328ed5d01d 100644
--- a/include/asm-ia64/signal.h
+++ b/include/asm-ia64/signal.h
@@ -158,8 +158,6 @@ struct k_sigaction {
 
 #define ptrace_signal_deliver(regs, cookie) do { } while (0)
 
-void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
-
 #endif /* __KERNEL__ */
 
 # endif /* !__ASSEMBLY__ */
diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
index 2c32e4b77b54..1d9efe541662 100644
--- a/include/asm-ia64/sn/addrs.h
+++ b/include/asm-ia64/sn/addrs.h
@@ -283,5 +283,13 @@
 #define REMOTE_HUB_L(n, a)	HUB_L(REMOTE_HUB_ADDR((n), (a)))
 #define REMOTE_HUB_S(n, a, d)	HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
 
+/*
+ * Coretalk address breakdown
+ */
+#define CTALK_NASID_SHFT	40
+#define CTALK_NASID_MASK	(0x3FFFULL << CTALK_NASID_SHFT)
+#define CTALK_CID_SHFT		38
+#define CTALK_CID_MASK		(0x3ULL << CTALK_CID_SHFT)
+#define CTALK_NODE_OFFSET	0x3FFFFFFFFF
 
 #endif /* _ASM_IA64_SN_ADDRS_H */
diff --git a/include/asm-ia64/sn/l1.h b/include/asm-ia64/sn/l1.h
index e3b819110d47..344bf44bb356 100644
--- a/include/asm-ia64/sn/l1.h
+++ b/include/asm-ia64/sn/l1.h
@@ -34,6 +34,8 @@
 #define L1_BRICKTYPE_IA		0x6b	/* k */
 #define L1_BRICKTYPE_ATHENA	0x2b	/* + */
 #define L1_BRICKTYPE_DAYTONA	0x7a	/* z */
+#define L1_BRICKTYPE_1932	0x2c	/* . */
+#define L1_BRICKTYPE_191010	0x2e	/* , */
 
 /* board type response codes */
 #define L1_BOARDTYPE_IP69	0x0100	/* CA */
@@ -46,5 +48,4 @@
 #define L1_BOARDTYPE_DAYTONA	0x0800	/* AD */
 #define L1_BOARDTYPE_INVAL	(-1)	/* invalid brick type */
 
-
 #endif /* _ASM_IA64_SN_L1_H */
diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h
index a601d3af39b6..51260ab70d91 100644
--- a/include/asm-ia64/sn/pcibr_provider.h
+++ b/include/asm-ia64/sn/pcibr_provider.h
@@ -144,4 +144,5 @@ extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
 				 void *resp);
 extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
 				  int action, void *resp);
+extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
 #endif
diff --git a/include/asm-ia64/sn/pcidev.h b/include/asm-ia64/sn/pcidev.h
index 38cdffbc4c7b..eac3561574be 100644
--- a/include/asm-ia64/sn/pcidev.h
+++ b/include/asm-ia64/sn/pcidev.h
@@ -76,6 +76,7 @@ extern void sn_pci_controller_fixup(int segment, int busnum,
 				    struct pci_bus *bus);
 extern void sn_bus_store_sysdata(struct pci_dev *dev);
 extern void sn_bus_free_sysdata(void);
+extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
 extern void sn_pci_fixup_slot(struct pci_dev *dev);
 extern void sn_pci_unfixup_slot(struct pci_dev *dev);
 extern void sn_irq_lh_init(void);
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
index f40fd1a5510d..2d78f4c5a45e 100644
--- a/include/asm-ia64/sn/rw_mmr.h
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -3,15 +3,14 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2002-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc.  All Rights Reserved.
 */
 #ifndef _ASM_IA64_SN_RW_MMR_H
 #define _ASM_IA64_SN_RW_MMR_H
 
 
 /*
- * This file contains macros used to access MMR registers via
- * uncached physical addresses.
+ * This file that access MMRs via uncached physical addresses.
 *	pio_phys_read_mmr  - read an MMR
 *	pio_phys_write_mmr - write an MMR
 *	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
@@ -22,53 +21,8 @@
 */
 
 
-extern inline long
-pio_phys_read_mmr(volatile long *mmr)
-{
-	long val;
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "ld8.acq %0=[%1];;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     : "=r"(val)
-	     : "r"(mmr)
-	     : "r2");
-	return val;
-}
-
-
-
-extern inline void
-pio_phys_write_mmr(volatile long *mmr, long val)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr), "r"(val)
-	     : "r2", "memory");
-}
-
-extern inline void
-pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt | psr.ic;;"
-	     "cmp.ne p9,p0=%2,r0;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;"
-	     "(p9) st8.rel [%2]=%3;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
-	     : "p9", "r2", "memory");
-}
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
 
 #endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h
index ff33e3bd3f8e..30dcfa442e53 100644
--- a/include/asm-ia64/sn/sn_feature_sets.h
+++ b/include/asm-ia64/sn/sn_feature_sets.h
@@ -30,8 +30,7 @@ extern int sn_prom_feature_available(int id);
 
 #define PRF_PAL_CACHE_FLUSH_SAFE	0
 #define PRF_DEVICE_FLUSH_LIST		1
-
-
+#define PRF_HOTPLUG_SUPPORT		2
 
 /* --------------------- OS Features -------------------------------*/
 
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index e77f0c9b7d3d..bf4cc867a698 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h | |||
@@ -159,7 +159,7 @@ | |||
159 | static inline u32 | 159 | static inline u32 |
160 | sn_sal_rev(void) | 160 | sn_sal_rev(void) |
161 | { | 161 | { |
162 | struct ia64_sal_systab *systab = efi.sal_systab; | 162 | struct ia64_sal_systab *systab = __va(efi.sal_systab); |
163 | 163 | ||
164 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); | 164 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); |
165 | } | 165 | } |
@@ -907,18 +907,22 @@ ia64_sn_sysctl_tio_clock_reset(nasid_t nasid) | |||
907 | /* | 907 | /* |
908 | * Get the associated ioboard type for a given nasid. | 908 | * Get the associated ioboard type for a given nasid. |
909 | */ | 909 | */ |
910 | static inline int | 910 | static inline s64 |
911 | ia64_sn_sysctl_ioboard_get(nasid_t nasid) | 911 | ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard) |
912 | { | 912 | { |
913 | struct ia64_sal_retval rv; | 913 | struct ia64_sal_retval isrv; |
914 | SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD, | 914 | SAL_CALL_REENTRANT(isrv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD, |
915 | nasid, 0, 0, 0, 0, 0); | 915 | nasid, 0, 0, 0, 0, 0); |
916 | if (rv.v0 != 0) | 916 | if (isrv.v0 != 0) { |
917 | return (int)rv.v0; | 917 | *ioboard = isrv.v0; |
918 | if (rv.v1 != 0) | 918 | return isrv.status; |
919 | return (int)rv.v1; | 919 | } |
920 | 920 | if (isrv.v1 != 0) { | |
921 | return 0; | 921 | *ioboard = isrv.v1; |
922 | return isrv.status; | ||
923 | } | ||
924 | |||
925 | return isrv.status; | ||
922 | } | 926 | } |
923 | 927 | ||
924 | /** | 928 | /** |
@@ -1037,7 +1041,7 @@ ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift, | |||
1037 | 1041 | ||
1038 | /***** BEGIN HACK - temp til old proms no longer supported ********/ | 1042 | /***** BEGIN HACK - temp til old proms no longer supported ********/ |
1039 | if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { | 1043 | if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { |
1040 | int nasid = get_sapicid() & 0xfff;; | 1044 | int nasid = get_sapicid() & 0xfff; |
1041 | #define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL | 1045 | #define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL |
1042 | #define SH_SHUB_ID_NODES_PER_BIT_SHFT 48 | 1046 | #define SH_SHUB_ID_NODES_PER_BIT_SHFT 48 |
1043 | if (shubtype) *shubtype = 0; | 1047 | if (shubtype) *shubtype = 0; |
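
The ia64_sn_sysctl_ioboard_get() rework above turns the ioboard type into an out parameter and makes the SAL status the return value. A hypothetical caller, sketched under the assumption that <asm/sn/sn_sal.h> is included; the function and variable names are invented for illustration.

static u16 example_ioboard_type(nasid_t nasid)
{
	u16 ioboard = 0;

	if (ia64_sn_sysctl_ioboard_get(nasid, &ioboard) != 0)
		return 0;	/* SAL reported an error; no valid type */
	return ioboard;		/* filled from isrv.v0 or isrv.v1 */
}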
diff --git a/include/asm-ia64/sn/tioce.h b/include/asm-ia64/sn/tioce.h index d4c990712eac..893468e1b41b 100644 --- a/include/asm-ia64/sn/tioce.h +++ b/include/asm-ia64/sn/tioce.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | /* CE ASIC part & mfgr information */ | 12 | /* CE ASIC part & mfgr information */ |
13 | #define TIOCE_PART_NUM 0xCE00 | 13 | #define TIOCE_PART_NUM 0xCE00 |
14 | #define TIOCE_MFGR_NUM 0x36 | 14 | #define TIOCE_SRC_ID 0x01 |
15 | #define TIOCE_REV_A 0x1 | 15 | #define TIOCE_REV_A 0x1 |
16 | 16 | ||
17 | /* CE Virtual PPB Vendor/Device IDs */ | 17 | /* CE Virtual PPB Vendor/Device IDs */ |
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | /* CE Host Bridge Vendor/Device IDs */ | 21 | /* CE Host Bridge Vendor/Device IDs */ |
22 | #define CE_HOST_BRIDGE_VENDOR_ID 0x10a9 | 22 | #define CE_HOST_BRIDGE_VENDOR_ID 0x10a9 |
23 | #define CE_HOST_BRIDGE_DEVICE_ID 0x4003 | 23 | #define CE_HOST_BRIDGE_DEVICE_ID 0x4001 |
24 | 24 | ||
25 | 25 | ||
26 | #define TIOCE_NUM_M40_ATES 4096 | 26 | #define TIOCE_NUM_M40_ATES 4096 |
@@ -463,6 +463,25 @@ typedef volatile struct tioce { | |||
463 | u64 ce_end_of_struct; /* 0x044400 */ | 463 | u64 ce_end_of_struct; /* 0x044400 */ |
464 | } tioce_t; | 464 | } tioce_t; |
465 | 465 | ||
466 | /* ce_lsiX_gb_cfg1 register bit masks & shifts */ | ||
467 | #define CE_LSI_GB_CFG1_RXL0S_THS_SHFT 0 | ||
468 | #define CE_LSI_GB_CFG1_RXL0S_THS_MASK (0xffULL << 0) | ||
469 | #define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT 8 | ||
470 | #define CE_LSI_GB_CFG1_RXL0S_SMP_MASK (0xfULL << 8) | ||
471 | #define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT 12 | ||
472 | #define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK (0x7ULL << 12) | ||
473 | #define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT 15 | ||
474 | #define CE_LSI_GB_CFG1_RXL0S_FLT_MASK (0x1ULL << 15) | ||
475 | #define CE_LSI_GB_CFG1_LPBK_SEL_SHFT 16 | ||
476 | #define CE_LSI_GB_CFG1_LPBK_SEL_MASK (0x3ULL << 16) | ||
477 | #define CE_LSI_GB_CFG1_LPBK_EN_SHFT 18 | ||
478 | #define CE_LSI_GB_CFG1_LPBK_EN_MASK (0x1ULL << 18) | ||
479 | #define CE_LSI_GB_CFG1_RVRS_LB_SHFT 19 | ||
480 | #define CE_LSI_GB_CFG1_RVRS_LB_MASK (0x1ULL << 19) | ||
481 | #define CE_LSI_GB_CFG1_RVRS_CLK_SHFT 20 | ||
482 | #define CE_LSI_GB_CFG1_RVRS_CLK_MASK (0x3ULL << 20) | ||
483 | #define CE_LSI_GB_CFG1_SLF_TS_SHFT 24 | ||
484 | #define CE_LSI_GB_CFG1_SLF_TS_MASK (0xfULL << 24) | ||
466 | 485 | ||
467 | /* ce_adm_int_mask/ce_adm_int_status register bit defines */ | 486 | /* ce_adm_int_mask/ce_adm_int_status register bit defines */ |
468 | #define CE_ADM_INT_CE_ERROR_SHFT 0 | 487 | #define CE_ADM_INT_CE_ERROR_SHFT 0 |
@@ -592,6 +611,11 @@ typedef volatile struct tioce { | |||
592 | #define CE_URE_RD_MRG_ENABLE (0x1ULL << 0) | 611 | #define CE_URE_RD_MRG_ENABLE (0x1ULL << 0) |
593 | #define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4) | 612 | #define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4) |
594 | #define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5) | 613 | #define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5) |
614 | #define CE_URE_WRT_MRG_TIMER_SHFT 12 | ||
615 | #define CE_URE_WRT_MRG_TIMER_MASK (0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT) | ||
616 | #define CE_URE_WRT_MRG_TIMER(x) (((u64)(x) << \ | ||
617 | CE_URE_WRT_MRG_TIMER_SHFT) & \ | ||
618 | CE_URE_WRT_MRG_TIMER_MASK) | ||
595 | #define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24) | 619 | #define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24) |
596 | #define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32) | 620 | #define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32) |
597 | #define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33) | 621 | #define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33) |
@@ -653,8 +677,12 @@ typedef volatile struct tioce { | |||
653 | #define CE_URE_SI (0x1ULL << 0) | 677 | #define CE_URE_SI (0x1ULL << 0) |
654 | #define CE_URE_ELAL_SHFT 4 | 678 | #define CE_URE_ELAL_SHFT 4 |
655 | #define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT) | 679 | #define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT) |
680 | #define CE_URE_ELAL_SET(n) (((u64)(n) << CE_URE_ELAL_SHFT) & \ | ||
681 | CE_URE_ELAL_MASK) | ||
656 | #define CE_URE_ELAL1_SHFT 8 | 682 | #define CE_URE_ELAL1_SHFT 8 |
657 | #define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT) | 683 | #define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT) |
684 | #define CE_URE_ELAL1_SET(n) (((u64)(n) << CE_URE_ELAL1_SHFT) & \ | ||
685 | CE_URE_ELAL1_MASK) | ||
658 | #define CE_URE_SCC (0x1ULL << 12) | 686 | #define CE_URE_SCC (0x1ULL << 12) |
659 | #define CE_URE_PN1_SHFT 16 | 687 | #define CE_URE_PN1_SHFT 16 |
660 | #define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT) | 688 | #define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT) |
@@ -675,8 +703,12 @@ typedef volatile struct tioce { | |||
675 | #define CE_URE_HPC (0x1ULL << 6) | 703 | #define CE_URE_HPC (0x1ULL << 6) |
676 | #define CE_URE_SPLV_SHFT 7 | 704 | #define CE_URE_SPLV_SHFT 7 |
677 | #define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT) | 705 | #define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT) |
706 | #define CE_URE_SPLV_SET(n) (((u64)(n) << CE_URE_SPLV_SHFT) & \ | ||
707 | CE_URE_SPLV_MASK) | ||
678 | #define CE_URE_SPLS_SHFT 15 | 708 | #define CE_URE_SPLS_SHFT 15 |
679 | #define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT) | 709 | #define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT) |
710 | #define CE_URE_SPLS_SET(n) (((u64)(n) << CE_URE_SPLS_SHFT) & \ | ||
711 | CE_URE_SPLS_MASK) | ||
680 | #define CE_URE_PSN1_SHFT 19 | 712 | #define CE_URE_PSN1_SHFT 19 |
681 | #define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT) | 713 | #define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT) |
682 | #define CE_URE_PSN2_SHFT 32 | 714 | #define CE_URE_PSN2_SHFT 32 |
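
The new CE_URE_*_SET() and CE_URE_WRT_MRG_TIMER() helpers added above all follow the same pattern: shift the argument into the field position, then clip it with the field mask so an out-of-range value cannot spill into neighbouring bits. A small sketch using two of the helpers defined in the same hunk (the numeric values are placeholders, not recommended settings):

static u64 example_spl_fields(void)
{
	u64 val = 0;

	val |= CE_URE_SPLV_SET(0x28);	/* 8-bit field starting at bit 7 */
	val |= CE_URE_SPLS_SET(0x1);	/* 2-bit field starting at bit 15 */
	return val;			/* ready to be OR-ed into the register image */
}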
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index df7f5f4f3cde..aa3b8ace9030 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h | |||
@@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error) | |||
1227 | 1227 | ||
1228 | 1228 | ||
1229 | 1229 | ||
1230 | static inline void * | ||
1231 | xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | ||
1232 | { | ||
1233 | /* see if kmalloc will give us cacheline aligned memory by default */ | ||
1234 | *base = kmalloc(size, flags); | ||
1235 | if (*base == NULL) { | ||
1236 | return NULL; | ||
1237 | } | ||
1238 | if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { | ||
1239 | return *base; | ||
1240 | } | ||
1241 | kfree(*base); | ||
1242 | |||
1243 | /* nope, we'll have to do it ourselves */ | ||
1244 | *base = kmalloc(size + L1_CACHE_BYTES, flags); | ||
1245 | if (*base == NULL) { | ||
1246 | return NULL; | ||
1247 | } | ||
1248 | return (void *) L1_CACHE_ALIGN((u64) *base); | ||
1249 | } | ||
1250 | |||
1251 | |||
1252 | /* | 1230 | /* |
1253 | * Check to see if there is any channel activity to/from the specified | 1231 | * Check to see if there is any channel activity to/from the specified |
1254 | * partition. | 1232 | * partition. |
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 062538715623..2f3620593687 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h | |||
@@ -244,6 +244,13 @@ extern void ia64_load_extra (struct task_struct *task); | |||
244 | __ia64_save_fpu((prev)->thread.fph); \ | 244 | __ia64_save_fpu((prev)->thread.fph); \ |
245 | } \ | 245 | } \ |
246 | __switch_to(prev, next, last); \ | 246 | __switch_to(prev, next, last); \ |
247 | /* "next" in old context is "current" in new context */ \ | ||
248 | if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \ | ||
249 | (task_cpu(current) != \ | ||
250 | task_thread_info(current)->last_cpu))) { \ | ||
251 | platform_migrate(current); \ | ||
252 | task_thread_info(current)->last_cpu = task_cpu(current); \ | ||
253 | } \ | ||
247 | } while (0) | 254 | } while (0) |
248 | #else | 255 | #else |
249 | # define switch_to(prev,next,last) __switch_to(prev, next, last) | 256 | # define switch_to(prev,next,last) __switch_to(prev, next, last) |
@@ -258,6 +265,8 @@ void sched_cacheflush(void); | |||
258 | 265 | ||
259 | #define arch_align_stack(x) (x) | 266 | #define arch_align_stack(x) (x) |
260 | 267 | ||
268 | void default_idle(void); | ||
269 | |||
261 | #endif /* __KERNEL__ */ | 270 | #endif /* __KERNEL__ */ |
262 | 271 | ||
263 | #endif /* __ASSEMBLY__ */ | 272 | #endif /* __ASSEMBLY__ */ |
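
The block added to switch_to() above only fires for tasks flagged IA64_THREAD_MIGRATION, and only when the task has actually changed CPU since the last check. The same logic written as a stand-alone helper, purely for illustration (the helper name is invented and the real code remains open-coded in the macro; assumes the usual sched includes):

static inline void maybe_notify_migration(struct task_struct *p)
{
	if ((p->thread.flags & IA64_THREAD_MIGRATION) &&
	    task_cpu(p) != task_thread_info(p)->last_cpu) {
		platform_migrate(p);	/* platform hook, as in the macro above */
		task_thread_info(p)->last_cpu = task_cpu(p);
	}
}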
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 1d6518fe1f02..56394a2c7055 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h | |||
@@ -26,16 +26,10 @@ struct thread_info { | |||
26 | struct exec_domain *exec_domain;/* execution domain */ | 26 | struct exec_domain *exec_domain;/* execution domain */ |
27 | __u32 flags; /* thread_info flags (see TIF_*) */ | 27 | __u32 flags; /* thread_info flags (see TIF_*) */ |
28 | __u32 cpu; /* current CPU */ | 28 | __u32 cpu; /* current CPU */ |
29 | __u32 last_cpu; /* Last CPU thread ran on */ | ||
29 | mm_segment_t addr_limit; /* user-level address space limit */ | 30 | mm_segment_t addr_limit; /* user-level address space limit */ |
30 | int preempt_count; /* 0=preemptible, <0=BUG; will also serve as bh-counter */ | 31 | int preempt_count; /* 0=preemptible, <0=BUG; will also serve as bh-counter */ |
31 | struct restart_block restart_block; | 32 | struct restart_block restart_block; |
32 | struct { | ||
33 | int signo; | ||
34 | int code; | ||
35 | void __user *addr; | ||
36 | unsigned long start_time; | ||
37 | pid_t pid; | ||
38 | } sigdelayed; /* Saved information for TIF_SIGDELAYED */ | ||
39 | }; | 33 | }; |
40 | 34 | ||
41 | #define THREAD_SIZE KERNEL_STACK_SIZE | 35 | #define THREAD_SIZE KERNEL_STACK_SIZE |
@@ -89,7 +83,6 @@ struct thread_info { | |||
89 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 83 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
90 | #define TIF_SYSCALL_TRACE 3 /* syscall trace active */ | 84 | #define TIF_SYSCALL_TRACE 3 /* syscall trace active */ |
91 | #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ | 85 | #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ |
92 | #define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */ | ||
93 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 86 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
94 | #define TIF_MEMDIE 17 | 87 | #define TIF_MEMDIE 17 |
95 | #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ | 88 | #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ |
@@ -101,13 +94,12 @@ struct thread_info { | |||
101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 94 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
102 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 95 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
103 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 96 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
104 | #define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED) | ||
105 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | 97 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) |
106 | #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) | 98 | #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) |
107 | #define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED) | 99 | #define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED) |
108 | 100 | ||
109 | /* "work to do on user-return" bits */ | 101 | /* "work to do on user-return" bits */ |
110 | #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED) | 102 | #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) |
111 | /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ | 103 | /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ |
112 | #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) | 104 | #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) |
113 | 105 | ||
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 019956c613e4..36070c1014d8 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h | |||
@@ -285,12 +285,13 @@ | |||
285 | #define __NR_faccessat 1293 | 285 | #define __NR_faccessat 1293 |
286 | /* 1294, 1295 reserved for pselect/ppoll */ | 286 | /* 1294, 1295 reserved for pselect/ppoll */ |
287 | #define __NR_unshare 1296 | 287 | #define __NR_unshare 1296 |
288 | #define __NR_splice 1297 | ||
288 | 289 | ||
289 | #ifdef __KERNEL__ | 290 | #ifdef __KERNEL__ |
290 | 291 | ||
291 | #include <linux/config.h> | 292 | #include <linux/config.h> |
292 | 293 | ||
293 | #define NR_syscalls 273 /* length of syscall table */ | 294 | #define NR_syscalls 274 /* length of syscall table */ |
294 | 295 | ||
295 | #define __ARCH_WANT_SYS_RT_SIGACTION | 296 | #define __ARCH_WANT_SYS_RT_SIGACTION |
296 | 297 | ||
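
__NR_splice (1297) is wired up above and NR_syscalls bumped to 274. Since the C library of that era had no splice() wrapper, a user-space test would go through syscall() directly. A hypothetical sketch, assuming installed kernel headers that already define __NR_splice; the argument order follows the generic splice prototype (in-fd, in-offset, out-fd, out-offset, length, flags):

#include <unistd.h>
#include <sys/syscall.h>

static long do_splice(int fd_in, int fd_out, size_t len)
{
	/* NULL offsets: use the current file positions of both descriptors */
	return syscall(__NR_splice, fd_in, NULL, fd_out, NULL, len, 0U);
}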