author    Ingo Molnar <mingo@elte.hu>  2009-06-01 15:06:21 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-06-01 15:06:21 -0400
commit    3d58f48ba05caed9118bce62b3047f8683438835 (patch)
tree      94c911034f0e14ded73d3e9e6e9f8e22b6cad822 /arch/x86/include
parent    abfe0af9813153bae8c85d9bac966bafcb8ddab1 (diff)
parent    d9244b5d2fbfe9fa540024b410047af13ceec90f (diff)
Merge branch 'linus' into irq/numa
Conflicts:
	arch/mips/sibyte/bcm1480/irq.c
	arch/mips/sibyte/sb1250/irq.c

Merge reason: we gathered a few conflicts plus update to latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/paravirt.h		 2
-rw-r--r--	arch/x86/include/asm/percpu.h		10
-rw-r--r--	arch/x86/include/asm/ptrace.h		 7
-rw-r--r--	arch/x86/include/asm/spinlock.h		 4
4 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 378e3691c08c..a53da004e08e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop	((void *)_paravirt_nop)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d01..02ecb30982a3 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@ do { \
 	case 1:							\
 		asm(op "b %1,"__percpu_arg(0)			\
 		    : "+m" (var)				\
-		    : "ri" ((T__)val));				\
+		    : "qi" ((T__)(val)));			\
 		break;						\
 	case 2:							\
 		asm(op "w %1,"__percpu_arg(0)			\
 		    : "+m" (var)				\
-		    : "ri" ((T__)val));				\
+		    : "ri" ((T__)(val)));			\
 		break;						\
 	case 4:							\
 		asm(op "l %1,"__percpu_arg(0)			\
 		    : "+m" (var)				\
-		    : "ri" ((T__)val));				\
+		    : "ri" ((T__)(val)));			\
 		break;						\
 	case 8:							\
 		asm(op "q %1,"__percpu_arg(0)			\
 		    : "+m" (var)				\
-		    : "re" ((T__)val));				\
+		    : "re" ((T__)(val)));			\
 		break;						\
 	default: __bad_percpu_size();				\
 	}							\
@@ -109,7 +109,7 @@ do { \
 	switch (sizeof(var)) {					\
 	case 1:							\
 		asm(op "b "__percpu_arg(1)",%0"			\
-		    : "=r" (ret__)				\
+		    : "=q" (ret__)				\
 		    : "m" (var));				\
 		break;						\
 	case 2:							\
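
The percpu.h hunks above tighten the inline-asm constraints for the 1-byte cases: the generic "r" class lets 32-bit GCC pick %esi or %edi, which have no 8-bit sub-registers, so a "b"-suffixed instruction cannot be assembled; "q" restricts the choice to a/b/c/d. (The extra parentheses around val are routine macro hygiene.) A minimal standalone sketch of the same issue follows; the variable and function names are hypothetical and not taken from this commit.

/*
 * Sketch (not part of this commit): why byte-wide asm operands need "q".
 * In 32-bit mode only %eax/%ebx/%ecx/%edx have 8-bit sub-registers, so the
 * "q" constraint is required; the broader "r" class may hand GCC %esi/%edi,
 * which cannot be encoded in a "movb".
 */
static unsigned char example_byte;		/* hypothetical variable */

static inline void example_write_byte(unsigned char val)
{
	asm("movb %1,%0"
	    : "+m" (example_byte)
	    : "qi" (val));			/* "ri" here can fail to assemble on 32-bit */
}
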
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index e304b66abeea..624f133943ed 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)
 
 /*
  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  So regs will be the current sp.
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
  *
  * This is valid only for kernel mode traps.
  */
-static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (unsigned long)regs;
+	return (unsigned long)(&regs->sp);
 #else
 	return regs->sp;
 #endif
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e5e6caffec87..b7e5db876399 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif
+#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {