path: root/include/linux/poll.h
author	Lin Ming <ming.m.lin@intel.com>	2010-11-22 08:03:28 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2010-11-22 08:38:52 -0500
commit	691513f70d3957939a318da970987b876c720861 (patch)
tree	cdaf1826d5895d058e6606eb37a6322892d1d977 /include/linux/poll.h
parent	84e1c6bb38eb318e456558b610396d9f1afaabf0 (diff)
x86: Resume trampoline must be executable
commit 5bd5a452 (x86: Add NX protection for kernel data) marked the trampoline area NX - which unsurprisingly breaks resume and cpu hotplug.

Revert the portion of that commit which touches the trampoline.

Originally-from: Lin Ming <ming.m.lin@intel.com>
LKML-Reference: <1290410581.2405.24.camel@minggr.sh.intel.com>
Cc: Matthieu Castet <castet.matthieu@free.fr>
Cc: Siarhei Liakh <sliakh.lkml@gmail.com>
Cc: Xuxian Jiang <jiang@cs.ncsu.edu>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Tested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
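For illustration only, a minimal sketch of the idea behind the fix: keep the low-memory trampoline pages executable when NX protection is applied, here using the x86 set_memory_x() helper. This is not the actual patch; TRAMPOLINE_BASE, TRAMPOLINE_SIZE and mark_trampoline_executable() are hypothetical placeholder names.

/*
 * Hypothetical sketch: re-mark the real-mode trampoline range as
 * executable so resume/CPU hotplug can jump into it.  The symbol
 * names below are placeholders, not the kernel's actual ones.
 */
#include <linux/init.h>
#include <asm/cacheflush.h>	/* set_memory_x() */
#include <asm/page.h>		/* __va(), PAGE_SIZE, PAGE_SHIFT */

#define TRAMPOLINE_BASE		0x6000UL	/* placeholder physical address */
#define TRAMPOLINE_SIZE		PAGE_SIZE	/* placeholder size */

static void __init mark_trampoline_executable(void)
{
	unsigned long vaddr = (unsigned long)__va(TRAMPOLINE_BASE);

	/* NX must not cover the trampoline, or the CPU faults on resume */
	set_memory_x(vaddr, TRAMPOLINE_SIZE >> PAGE_SHIFT);
}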
Diffstat (limited to 'include/linux/poll.h')
0 files changed, 0 insertions, 0 deletions
#define atomic64_dec(v) atomic64_sub(1, v)

#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec()	membar_storeload_loadload();
#define smp_mb__after_atomic_dec()	membar_storeload_storestore();
#define smp_mb__before_atomic_inc()	membar_storeload_loadload();
#define smp_mb__after_atomic_inc()	membar_storeload_storestore();
#else
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#endif

#include <asm-generic/atomic.h>

#endif /* !(__ARCH_SPARC64_ATOMIC__) */
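As a usage note, a minimal userspace sketch (C11 <stdatomic.h>, not the sparc64 kernel primitives above) of the same compare-and-exchange retry pattern that atomic_add_unless() and atomic_inc_not_zero() implement; add_unless() is a hypothetical helper name used only for this example.

/*
 * Sketch of atomic_add_unless() semantics with C11 atomics:
 * add "a" to *v unless *v equals the forbidden value "u".
 * Returns true if the addition happened.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (c == u)		/* forbidden value: give up */
			return false;
		/* try to swap c -> c + a; on failure c is reloaded */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
}

int main(void)
{
	atomic_int refcount = 1;

	/* "inc_not_zero": take a reference only if one still exists */
	if (add_unless(&refcount, 1, 0))
		printf("got reference, refcount = %d\n", atomic_load(&refcount));

	return 0;
}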