Diffstat (limited to 'include/asm-ia64')

 include/asm-ia64/atomic.h       |   8
 include/asm-ia64/cache.h        |   2
 include/asm-ia64/intel_intrin.h | 134
 include/asm-ia64/machvec.h      |  13
 include/asm-ia64/machvec_sn2.h  |   4
 include/asm-ia64/mca.h          |   2
 include/asm-ia64/mutex.h        |  93
 include/asm-ia64/page.h         |   2
 include/asm-ia64/pgtable.h      |   5
 include/asm-ia64/processor.h    |   3
 include/asm-ia64/signal.h       |   2
 include/asm-ia64/sn/addrs.h     |   8
 include/asm-ia64/sn/rw_mmr.h    |  56
 include/asm-ia64/sn/tioce.h     |  36
 include/asm-ia64/sn/xpc.h       |  22
 include/asm-ia64/system.h       |   7
 include/asm-ia64/thread_info.h  |  12

 17 files changed, 193 insertions(+), 216 deletions(-)
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index d3e0dfa99e1f..569ec7574baf 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic_cmpxchg((v), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
 		c = old;					\
+	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
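
The hunk above rewrites atomic_add_unless() from a one-line while loop into an explicit cmpxchg retry loop annotated with likely()/unlikely() branch hints. A rough standalone C sketch of the pattern it implements (the function name is illustrative, not from the patch):

/* Add "a" to *v unless *v == u; nonzero return means the add happened. */
static int add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);
	for (;;) {
		int old;
		if (c == u)			/* hit the excluded value: give up */
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (old == c)			/* no race: the add took effect */
			break;
		c = old;			/* raced: retry with the fresh value */
	}
	return c != u;
}
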
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
index 40dd25195d65..f0a104db8f20 100644
--- a/include/asm-ia64/cache.h
+++ b/include/asm-ia64/cache.h
@@ -25,4 +25,6 @@
 # define SMP_CACHE_BYTES	(1 << 3)
 #endif
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif /* _ASM_IA64_CACHE_H */
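
The new __read_mostly annotation places a variable in the .data.read_mostly section, grouping rarely written globals together so they do not share cache lines with write-hot data. Typical use (the variable is illustrative, not from the patch):

/* Read on every interrupt, written only during boot: keep it away
 * from cache lines that see frequent stores. */
static int dma_channel_count __read_mostly = 8;
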
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a7122d850177..d069b6acddce 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -5,113 +5,10 @@
  *
  * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
  *
  */
-#include <asm/types.h>
-
-void __lfetch(int lfhint, void *y);
-void __lfetch_excl(int lfhint, void *y);
-void __lfetch_fault(int lfhint, void *y);
-void __lfetch_fault_excl(int lfhint, void *y);
-
-/* In the following, whichFloatReg should be an integer from 0-127 */
-void __ldfs(const int whichFloatReg, void *src);
-void __ldfd(const int whichFloatReg, void *src);
-void __ldfe(const int whichFloatReg, void *src);
-void __ldf8(const int whichFloatReg, void *src);
-void __ldf_fill(const int whichFloatReg, void *src);
-void __stfs(void *dst, const int whichFloatReg);
-void __stfd(void *dst, const int whichFloatReg);
-void __stfe(void *dst, const int whichFloatReg);
-void __stf8(void *dst, const int whichFloatReg);
-void __stf_spill(void *dst, const int whichFloatReg);
-
-void __st1_rel(void *dst, const __s8 value);
-void __st2_rel(void *dst, const __s16 value);
-void __st4_rel(void *dst, const __s32 value);
-void __st8_rel(void *dst, const __s64 value);
-__u8 __ld1_acq(void *src);
-__u16 __ld2_acq(void *src);
-__u32 __ld4_acq(void *src);
-__u64 __ld8_acq(void *src);
-
-__u64 __fetchadd4_acq(__u32 *addend, const int increment);
-__u64 __fetchadd4_rel(__u32 *addend, const int increment);
-__u64 __fetchadd8_acq(__u64 *addend, const int increment);
-__u64 __fetchadd8_rel(__u64 *addend, const int increment);
-
-__u64 __getf_exp(double d);
-
-/* OS Related Itanium(R) Intrinsics */
-
-/* The names to use for whichReg and whichIndReg below come from
-   the include file asm/ia64regs.h */
-
-__u64 __getIndReg(const int whichIndReg, __s64 index);
-__u64 __getReg(const int whichReg);
-
-void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
-void __setReg(const int whichReg, __u64 value);
-
-void __mf(void);
-void __mfa(void);
-void __synci(void);
-void __itcd(__s64 pa);
-void __itci(__s64 pa);
-void __itrd(__s64 whichTransReg, __s64 pa);
-void __itri(__s64 whichTransReg, __s64 pa);
-void __ptce(__s64 va);
-void __ptcl(__s64 va, __s64 pagesz);
-void __ptcg(__s64 va, __s64 pagesz);
-void __ptcga(__s64 va, __s64 pagesz);
-void __ptri(__s64 va, __s64 pagesz);
-void __ptrd(__s64 va, __s64 pagesz);
-void __invala (void);
-void __invala_gr(const int whichGeneralReg /* 0-127 */ );
-void __invala_fr(const int whichFloatReg /* 0-127 */ );
-void __nop(const int);
-void __fc(__u64 *addr);
-void __sum(int mask);
-void __rum(int mask);
-void __ssm(int mask);
-void __rsm(int mask);
-__u64 __thash(__s64);
-__u64 __ttag(__s64);
-__s64 __tpa(__s64);
-
-/* Intrinsics for implementing get/put_user macros */
-void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
-void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
-
-/* This intrinsic does not generate code, it creates a barrier across which
- * the compiler will not schedule data access instructions.
- */
-void __memory_barrier(void);
-
-void __isrlz(void);
-void __dsrlz(void);
-
-__u64 _m64_mux1(__u64 a, const int n);
-__u64 __thash(__u64);
-
-/* Lock and Atomic Operation Related Intrinsics */
-__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
-__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
-__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
-__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
-
-__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
-
-__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
-__s64 _m64_shrp(__s64 a, __s64 b, const int count);
-__s64 _m64_popcnt(__s64 a);
+#include <ia64intrin.h>
 
 #define ia64_barrier()	__memory_barrier()
 
@@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_getreg		__getReg
 #define ia64_setreg		__setReg
 
-#define ia64_hint(x)
+#define ia64_hint		__hint
+#define ia64_hint_pause		__hint_pause
 
-#define ia64_mux1_brcst		0
-#define ia64_mux1_mix		8
-#define ia64_mux1_shuf		9
-#define ia64_mux1_alt		10
-#define ia64_mux1_rev		11
+#define ia64_mux1_brcst		_m64_mux1_brcst
+#define ia64_mux1_mix		_m64_mux1_mix
+#define ia64_mux1_shuf		_m64_mux1_shuf
+#define ia64_mux1_alt		_m64_mux1_alt
+#define ia64_mux1_rev		_m64_mux1_rev
 
-#define ia64_mux1		_m64_mux1
+#define ia64_mux1(x,v)		_m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
 #define ia64_popcnt		_m64_popcnt
 #define ia64_getf_exp		__getf_exp
 #define ia64_shrp		_m64_shrp
@@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_stf8		__stf8
 #define ia64_stf_spill		__stf_spill
 
-#define ia64_mf	__mf
+#define ia64_mf		__mf
 #define ia64_mfa	__mfa
 
 #define ia64_fetchadd4_acq	__fetchadd4_acq
@@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a);
 
 /* Values for lfhint in __lfetch and __lfetch_fault */
 
-#define ia64_lfhint_none	0
-#define ia64_lfhint_nt1		1
-#define ia64_lfhint_nt2		2
-#define ia64_lfhint_nta		3
+#define ia64_lfhint_none	__lfhint_none
+#define ia64_lfhint_nt1		__lfhint_nt1
+#define ia64_lfhint_nt2		__lfhint_nt2
+#define ia64_lfhint_nta		__lfhint_nta
 
 #define ia64_lfetch		__lfetch
 #define ia64_lfetch_excl	__lfetch_excl
@@ -254,4 +152,6 @@ do { \
 	} \
 } while (0)
 
+#define __builtin_trap()	__break(0);
+
 #endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index ca5ea994d688..c3e4ed8a3e17 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -20,6 +20,7 @@ struct scatterlist;
 struct page;
 struct mm_struct;
 struct pci_bus;
+struct task_struct;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -34,6 +35,7 @@ typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
 				       u8 size);
 typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
 					u8 size);
+typedef void ia64_mv_migrate_t(struct task_struct * task);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
@@ -85,6 +87,11 @@ machvec_noop_mm (struct mm_struct *mm)
 {
 }
 
+static inline void
+machvec_noop_task (struct task_struct *task)
+{
+}
+
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
 extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
@@ -146,6 +153,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_readw_relaxed	ia64_mv.readw_relaxed
 #  define platform_readl_relaxed	ia64_mv.readl_relaxed
 #  define platform_readq_relaxed	ia64_mv.readq_relaxed
+#  define platform_migrate		ia64_mv.migrate
 # endif
 
 /* __attribute__((__aligned__(16))) is required to make size of the
@@ -194,6 +202,7 @@ struct ia64_machine_vector {
 	ia64_mv_readw_relaxed_t *readw_relaxed;
 	ia64_mv_readl_relaxed_t *readl_relaxed;
 	ia64_mv_readq_relaxed_t *readq_relaxed;
+	ia64_mv_migrate_t *migrate;
 } __attribute__((__aligned__(16))); /* align attrib? see above comment */
 
 #define MACHVEC_INIT(name)			\
@@ -238,6 +247,7 @@ struct ia64_machine_vector {
 	platform_readw_relaxed,			\
 	platform_readl_relaxed,			\
 	platform_readq_relaxed,			\
+	platform_migrate,			\
 }
 
 extern struct ia64_machine_vector ia64_mv;
@@ -386,5 +396,8 @@ extern ia64_mv_dma_supported		swiotlb_dma_supported;
 #ifndef platform_readq_relaxed
 # define platform_readq_relaxed	__ia64_readq_relaxed
 #endif
+#ifndef platform_migrate
+# define platform_migrate	machvec_noop_task
+#endif
 
 #endif /* _ASM_IA64_MACHVEC_H */
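
The machvec additions follow the header's existing pattern: a typedef for the hook, a slot in struct ia64_machine_vector, an entry in MACHVEC_INIT, and a no-op default so generic kernels can always call platform_migrate(). A minimal standalone sketch of that function-pointer dispatch (all names below are invented for illustration):

struct task;					/* stands in for struct task_struct */
typedef void mv_migrate_t(struct task *);

struct machine_vector {				/* stands in for ia64_machine_vector */
	mv_migrate_t *migrate;
};

static void noop_migrate(struct task *t) { }	/* plays machvec_noop_task */
static void sn2_migrate(struct task *t) { /* platform-specific sync */ }

static struct machine_vector mv = { .migrate = noop_migrate };

/* Generic code never checks which platform it is on: */
static void on_context_switch(struct task *t)
{
	mv.migrate(t);		/* resolves to noop_migrate or sn2_migrate */
}
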
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index 03d00faf03b5..da1d43755afe 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002-2003, 2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -66,6 +66,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
 extern ia64_mv_dma_supported		sn_dma_supported;
+extern ia64_mv_migrate_t		sn_migrate;
 
 /*
  * This stuff has dual use!
@@ -115,6 +116,7 @@ extern ia64_mv_dma_supported sn_dma_supported;
 #define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
 #define platform_dma_mapping_error	sn_dma_mapping_error
 #define platform_dma_supported		sn_dma_supported
+#define platform_migrate		sn_migrate
 
 #include <asm/sn/io.h>
 
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index c7d9c9ed38ba..bfbbb8da79c7 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -131,6 +131,8 @@ struct ia64_mca_cpu {
 /* Array of physical addresses of each CPU's MCA area.  */
 extern unsigned long __per_cpu_mca[NR_CPUS];
 
+extern int cpe_vector;
+extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
index 458c1f7fbc18..5a3224f6af38 100644
--- a/include/asm-ia64/mutex.h
+++ b/include/asm-ia64/mutex.h
@@ -1,9 +1,92 @@
 /*
- * Pull in the generic implementation for the mutex fastpath.
+ * ia64 implementation of the mutex fastpath.
  *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		return fail_fn(count);
+	return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	int ret = ia64_fetchadd4_rel(count, 1);
+	if (unlikely(ret < 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()		1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
  */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (likely(cmpxchg_acq(count, 1, 0)) == 1)
+		return 1;
+	return 0;
+}
 
-#include <asm-generic/mutex-dec.h>
+#endif
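
How these fastpaths are driven: the generic mutex layer keeps count at 1 for unlocked, 0 for locked, and negative when waiters exist, and passes its slowpath as fail_fn. A simplified sketch of the call pattern (the slowpath names are the generic kernel's entry points of that era; treat this as an illustration, not the kernel's actual code):

static void example_lock(struct mutex *lock)
{
	/* ia64_fetchadd4_acq drops count 1 -> 0 on the uncontended path;
	 * any other starting value falls through to the sleeping slowpath. */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

static void example_unlock(struct mutex *lock)
{
	/* ia64_fetchadd4_rel raises count back toward 1; a negative result
	 * means a waiter is queued and must be woken in the slowpath. */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
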
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 5e6362a786b7..3ab27333dae4 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -57,6 +57,8 @@
 
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 # define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifdef __ASSEMBLY__
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e2560c58384b..c0f8144f2349 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -314,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
 #define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
 #define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
-#define pte_mkhuge(pte)		(__pte(pte_val(pte) | _PAGE_P))
+#define pte_mkhuge(pte)		(__pte(pte_val(pte)))
 
 /*
  * Macro to a page protection value as "uncacheable".  Note that "protection" is really a
@@ -505,9 +505,6 @@ extern struct page *zero_page_memmap_ptr;
 #define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
 #define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
 #define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
-struct mmu_gather;
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
-		unsigned long end, unsigned long floor, unsigned long ceiling);
 #endif
 
 /*
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 23c8e1be1911..128fefd8056f 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -50,7 +50,8 @@
 #define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
 #define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
 #define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
-						/* bit 5 is currently unused */
+#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
+							   sync at ctx sw */
 #define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
 #define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
 
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
index 608168d713d3..5e328ed5d01d 100644
--- a/include/asm-ia64/signal.h
+++ b/include/asm-ia64/signal.h
@@ -158,8 +158,6 @@ struct k_sigaction {
 
 #define ptrace_signal_deliver(regs, cookie) do { } while (0)
 
-void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
-
 #endif /* __KERNEL__ */
 
 # endif /* !__ASSEMBLY__ */
diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
index 2c32e4b77b54..1d9efe541662 100644
--- a/include/asm-ia64/sn/addrs.h
+++ b/include/asm-ia64/sn/addrs.h
@@ -283,5 +283,13 @@
 #define REMOTE_HUB_L(n, a)	HUB_L(REMOTE_HUB_ADDR((n), (a)))
 #define REMOTE_HUB_S(n, a, d)	HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
 
+/*
+ * Coretalk address breakdown
+ */
+#define CTALK_NASID_SHFT	40
+#define CTALK_NASID_MASK	(0x3FFFULL << CTALK_NASID_SHFT)
+#define CTALK_CID_SHFT		38
+#define CTALK_CID_MASK		(0x3ULL << CTALK_CID_SHFT)
+#define CTALK_NODE_OFFSET	0x3FFFFFFFFF
 
 #endif /* _ASM_IA64_SN_ADDRS_H */
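
The new CTALK_* constants split a 64-bit Coretalk address into a 14-bit nasid at bits 53:40, a 2-bit chip id at bits 39:38, and a 38-bit node offset. A decode sketch using the masks added above (the helper name is illustrative, not from the patch):

#include <stdint.h>

/* Illustrative decode of a Coretalk address into its three fields. */
static void ctalk_decode(uint64_t addr, uint64_t *nasid, uint64_t *cid,
			 uint64_t *offset)
{
	*nasid  = (addr & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
	*cid    = (addr & CTALK_CID_MASK) >> CTALK_CID_SHFT;
	*offset = addr & CTALK_NODE_OFFSET;
}
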
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
index f40fd1a5510d..2d78f4c5a45e 100644
--- a/include/asm-ia64/sn/rw_mmr.h
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -3,15 +3,14 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2002-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
 #ifndef _ASM_IA64_SN_RW_MMR_H
 #define _ASM_IA64_SN_RW_MMR_H
 
 
 /*
- * This file contains macros used to access MMR registers via
- * uncached physical addresses.
+ * This file that access MMRs via uncached physical addresses.
  *	pio_phys_read_mmr  - read an MMR
  *	pio_phys_write_mmr - write an MMR
  *	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
@@ -22,53 +21,8 @@
  */
 
 
-extern inline long
-pio_phys_read_mmr(volatile long *mmr)
-{
-	long val;
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "ld8.acq %0=[%1];;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     : "=r"(val)
-	     : "r"(mmr)
-	     : "r2");
-	return val;
-}
-
-
-
-extern inline void
-pio_phys_write_mmr(volatile long *mmr, long val)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr), "r"(val)
-	     : "r2", "memory");
-}
-
-extern inline void
-pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt | psr.ic;;"
-	     "cmp.ne p9,p0=%2,r0;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;"
-	     "(p9) st8.rel [%2]=%3;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
-	     : "p9", "r2", "memory");
-}
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
 
 #endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/include/asm-ia64/sn/tioce.h b/include/asm-ia64/sn/tioce.h
index d4c990712eac..893468e1b41b 100644
--- a/include/asm-ia64/sn/tioce.h
+++ b/include/asm-ia64/sn/tioce.h
@@ -11,7 +11,7 @@
 
 /* CE ASIC part & mfgr information */
 #define TIOCE_PART_NUM			0xCE00
-#define TIOCE_MFGR_NUM			0x36
+#define TIOCE_SRC_ID			0x01
 #define TIOCE_REV_A			0x1
 
 /* CE Virtual PPB Vendor/Device IDs */
@@ -20,7 +20,7 @@
 
 /* CE Host Bridge Vendor/Device IDs */
 #define CE_HOST_BRIDGE_VENDOR_ID	0x10a9
-#define CE_HOST_BRIDGE_DEVICE_ID	0x4003
+#define CE_HOST_BRIDGE_DEVICE_ID	0x4001
 
 
 #define TIOCE_NUM_M40_ATES		4096
@@ -463,6 +463,25 @@ typedef volatile struct tioce {
 	u64	ce_end_of_struct;			/* 0x044400 */
 } tioce_t;
 
+/* ce_lsiX_gb_cfg1 register bit masks & shifts */
+#define CE_LSI_GB_CFG1_RXL0S_THS_SHFT	0
+#define CE_LSI_GB_CFG1_RXL0S_THS_MASK	(0xffULL << 0)
+#define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT	8
+#define CE_LSI_GB_CFG1_RXL0S_SMP_MASK	(0xfULL << 8);
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT	12
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK	(0x7ULL << 12)
+#define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT	15
+#define CE_LSI_GB_CFG1_RXL0S_FLT_MASK	(0x1ULL << 15)
+#define CE_LSI_GB_CFG1_LPBK_SEL_SHFT	16
+#define CE_LSI_GB_CFG1_LPBK_SEL_MASK	(0x3ULL << 16)
+#define CE_LSI_GB_CFG1_LPBK_EN_SHFT	18
+#define CE_LSI_GB_CFG1_LPBK_EN_MASK	(0x1ULL << 18)
+#define CE_LSI_GB_CFG1_RVRS_LB_SHFT	19
+#define CE_LSI_GB_CFG1_RVRS_LB_MASK	(0x1ULL << 19)
+#define CE_LSI_GB_CFG1_RVRS_CLK_SHFT	20
+#define CE_LSI_GB_CFG1_RVRS_CLK_MASK	(0x3ULL << 20)
+#define CE_LSI_GB_CFG1_SLF_TS_SHFT	24
+#define CE_LSI_GB_CFG1_SLF_TS_MASK	(0xfULL << 24)
 
 /* ce_adm_int_mask/ce_adm_int_status register bit defines */
 #define CE_ADM_INT_CE_ERROR_SHFT		0
@@ -592,6 +611,11 @@ typedef volatile struct tioce {
 #define CE_URE_RD_MRG_ENABLE		(0x1ULL << 0)
 #define CE_URE_WRT_MRG_ENABLE1		(0x1ULL << 4)
 #define CE_URE_WRT_MRG_ENABLE2		(0x1ULL << 5)
+#define CE_URE_WRT_MRG_TIMER_SHFT	12
+#define CE_URE_WRT_MRG_TIMER_MASK	(0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT)
+#define CE_URE_WRT_MRG_TIMER(x)		(((u64)(x) << \
+					  CE_URE_WRT_MRG_TIMER_SHFT) & \
+					 CE_URE_WRT_MRG_TIMER_MASK)
 #define CE_URE_RSPQ_BYPASS_DISABLE	(0x1ULL << 24)
 #define CE_URE_UPS_DAT1_PAR_DISABLE	(0x1ULL << 32)
 #define CE_URE_UPS_HDR1_PAR_DISABLE	(0x1ULL << 33)
@@ -653,8 +677,12 @@ typedef volatile struct tioce {
 #define CE_URE_SI			(0x1ULL << 0)
 #define CE_URE_ELAL_SHFT		4
 #define CE_URE_ELAL_MASK		(0x7ULL << CE_URE_ELAL_SHFT)
+#define CE_URE_ELAL_SET(n)		(((u64)(n) << CE_URE_ELAL_SHFT) & \
+					 CE_URE_ELAL_MASK)
 #define CE_URE_ELAL1_SHFT		8
 #define CE_URE_ELAL1_MASK		(0x7ULL << CE_URE_ELAL1_SHFT)
+#define CE_URE_ELAL1_SET(n)		(((u64)(n) << CE_URE_ELAL1_SHFT) & \
+					 CE_URE_ELAL1_MASK)
 #define CE_URE_SCC			(0x1ULL << 12)
 #define CE_URE_PN1_SHFT			16
 #define CE_URE_PN1_MASK			(0xFFULL << CE_URE_PN1_SHFT)
@@ -675,8 +703,12 @@ typedef volatile struct tioce {
 #define CE_URE_HPC			(0x1ULL << 6)
 #define CE_URE_SPLV_SHFT		7
 #define CE_URE_SPLV_MASK		(0xFFULL << CE_URE_SPLV_SHFT)
+#define CE_URE_SPLV_SET(n)		(((u64)(n) << CE_URE_SPLV_SHFT) & \
+					 CE_URE_SPLV_MASK)
 #define CE_URE_SPLS_SHFT		15
 #define CE_URE_SPLS_MASK		(0x3ULL << CE_URE_SPLS_SHFT)
+#define CE_URE_SPLS_SET(n)		(((u64)(n) << CE_URE_SPLS_SHFT) & \
+					 CE_URE_SPLS_MASK)
 #define CE_URE_PSN1_SHFT		19
 #define CE_URE_PSN1_MASK		(0x1FFFULL << CE_URE_PSN1_SHFT)
 #define CE_URE_PSN2_SHFT		32
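
The _SET(n) macros added in this file are the usual shift-and-mask idiom for composing hardware register fields: shift the value into position, then mask so an out-of-range n cannot spill into neighboring bits. For example, using the macros above (the combination is illustrative, not from the patch):

	u64 ure = CE_URE_ELAL_SET(3) | CE_URE_SPLV_SET(0x40) | CE_URE_SPLS_SET(1);
	/* ELAL occupies bits 6:4, SPLV bits 14:7, SPLS bits 16:15 -- the
	 * fields compose without overlap because each macro masks its
	 * own range. */
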
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index df7f5f4f3cde..aa3b8ace9030 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error)
 
 
 
-static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
-	/* see if kmalloc will give us cachline aligned memory by default */
-	*base = kmalloc(size, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
-		return *base;
-	}
-	kfree(*base);
-
-	/* nope, we'll have to do it ourselves */
-	*base = kmalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
-}
-
-
 /*
  * Check to see if there is any channel activity to/from the specified
  * partition.
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 062538715623..cd4233d66f15 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -244,6 +244,13 @@ extern void ia64_load_extra (struct task_struct *task);
 		__ia64_save_fpu((prev)->thread.fph);				\
 	}									\
 	__switch_to(prev, next, last);						\
+	/* "next" in old context is "current" in new context */		\
+	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	\
+		     (task_cpu(current) !=					\
+				      task_thread_info(current)->last_cpu))) {	\
+		platform_migrate(current);					\
+		task_thread_info(current)->last_cpu = task_cpu(current);	\
+	}									\
 } while (0)
 #else
 # define switch_to(prev,next,last)	__switch_to(prev, next, last)
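
In plain terms, the switch_to() hunk runs a platform hook the first time a flagged task resumes on a CPU other than the one recorded in its thread_info (the thread_info.h hunk below adds that last_cpu field). A standalone sketch of the pattern (names are illustrative, not from the patch):

struct task_sketch {
	unsigned long flags;
	int last_cpu;
};

#define MIGRATION_FLAG	(1UL << 5)	/* mirrors IA64_THREAD_MIGRATION */

static void after_switch(struct task_sketch *cur, int this_cpu,
			 void (*migrate_hook)(struct task_sketch *))
{
	if ((cur->flags & MIGRATION_FLAG) && this_cpu != cur->last_cpu) {
		migrate_hook(cur);		/* e.g. platform_migrate(current) */
		cur->last_cpu = this_cpu;	/* fire once per migration */
	}
}
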
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 1d6518fe1f02..56394a2c7055 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -26,16 +26,10 @@ struct thread_info {
 	struct exec_domain *exec_domain;/* execution domain */
 	__u32 flags;			/* thread_info flags (see TIF_*) */
 	__u32 cpu;			/* current CPU */
+	__u32 last_cpu;			/* Last CPU thread ran on */
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
-	struct {
-		int signo;
-		int code;
-		void __user *addr;
-		unsigned long start_time;
-		pid_t pid;
-	} sigdelayed;			/* Saved information for TIF_SIGDELAYED */
 };
 
 #define THREAD_SIZE			KERNEL_STACK_SIZE
@@ -89,7 +83,6 @@ struct thread_info {
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
 #define TIF_SYSCALL_TRACE	3	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
-#define TIF_SIGDELAYED		5	/* signal delayed from MCA/INIT/NMI/PMI context */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
@@ -101,13 +94,12 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
-#define _TIF_SIGDELAYED		(1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
 
 /* "work to do on user-return" bits */
-#define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
+#define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
