author     Linus Torvalds <torvalds@linux-foundation.org>	2009-12-05 18:32:03 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2009-12-05 18:32:03 -0500
commit     ef26b1691d11e17af205a4ff9c91458d931d11db (patch)
tree       5db199f404ca18f6c9e8617b684e9165ca365e30 /arch/x86/include/asm/system.h
parent     a77d2e081bbbccb38f42da45500dd089756efdfb (diff)
parent     7cff7ce94a7df2ccf5ac76b48ee0995fee2060df (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
include/linux/compiler-gcc4.h: Fix build bug - gcc-4.0.2 doesn't understand __builtin_object_size
x86/alternatives: No need for alternatives-asm.h to re-invent stuff already in asm.h
x86/alternatives: Check replacementlen <= instrlen at build time
x86, 64-bit: Set data segments to null after switching to 64-bit mode
x86: Clean up the loadsegment() macro
x86: Optimize loadsegment()
x86: Add missing might_fault() checks to copy_{to,from}_user()
x86-64: __copy_from_user_inatomic() adjustments
x86: Remove unused thread_return label from switch_to()
x86, 64-bit: Fix bstep_iret jump
x86: Don't use the strict copy checks when branch profiling is in use
x86, 64-bit: Move K8 B step iret fixup to fault entry asm
x86: Generate cmpxchg build failures
x86: Add a Kconfig option to turn the copy_from_user warnings into errors
x86: Turn the copy_from_user check into an (optional) compile time warning
x86: Use __builtin_memset and __builtin_memcpy for memset/memcpy
x86: Use __builtin_object_size() to validate the buffer size for copy_from_user()
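
Several of the commits above revolve around using gcc's __builtin_object_size() to validate copy_from_user() destination buffers at build time. As a rough sketch of the underlying technique -- not the kernel's actual implementation; checked_copy() and bad_copy_size() are hypothetical names used here for illustration:

#include <stddef.h>
#include <string.h>

/* Declared but never defined: if a call to it survives optimization,
 * gcc's error attribute turns it into a build-time diagnostic. */
extern void bad_copy_size(void)
	__attribute__((error("copy size exceeds destination buffer")));

static inline void checked_copy(void *dst, const void *src, size_t n)
{
	/* Compile-time known size of dst, or (size_t)-1 if unknown. */
	size_t sz = __builtin_object_size(dst, 0);

	if (sz != (size_t)-1 && __builtin_constant_p(n) && n > sz)
		bad_copy_size();	/* eliminated when the size fits */

	memcpy(dst, src, n);
}

When the compiler can prove n exceeds the destination's size, the call to bad_copy_size() remains in the code and the build fails; otherwise it is optimized away entirely. That is the same shape as the copy_from_user() checks above, with the Kconfig option deciding whether the diagnostic is a warning or an error.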
Diffstat (limited to 'arch/x86/include/asm/system.h')
-rw-r--r--  arch/x86/include/asm/system.h | 31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index f08f9737489..022a84386de 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -128,8 +128,6 @@ do { \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
 	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
 	     "call __switch_to\n\t" \
-	     ".globl thread_return\n" \
-	     "thread_return:\n\t" \
 	     "movq "__percpu_arg([current_task])",%%rsi\n\t" \
 	     __switch_canary \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t" \
@@ -157,19 +155,22 @@ extern void native_load_gs_index(unsigned);
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
  */
-#define loadsegment(seg, value) \
-	asm volatile("\n" \
-		"1:\t" \
-		"movl %k0,%%" #seg "\n" \
-		"2:\n" \
-		".section .fixup,\"ax\"\n" \
-		"3:\t" \
-		"movl %k1, %%" #seg "\n\t" \
-		"jmp 2b\n" \
-		".previous\n" \
-		_ASM_EXTABLE(1b,3b) \
-		: :"r" (value), "r" (0) : "memory")
-
+#define loadsegment(seg, value) \
+do { \
+	unsigned short __val = (value); \
+ \
+	asm volatile(" \n" \
+		     "1:	movl %k0,%%" #seg " \n" \
+ \
+		     ".section .fixup,\"ax\" \n" \
+		     "2:	xorl %k0,%k0 \n" \
+		     "	jmp 1b \n" \
+		     ".previous \n" \
+ \
+		     _ASM_EXTABLE(1b, 2b) \
+ \
+		     : "+r" (__val) : : "memory"); \
+} while (0)
 
 /*
  * Save a segment register away
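
The reworked loadsegment() inverts the fixup path: the old version loaded a second, always-zero register operand (%k1) and jumped forward past the faulting instruction, while the new one keeps a single read-write operand ("+r" (__val)), zeroes it in the fixup, and jumps back to retry, so the null selector is loaded through the same mov and the extra zero register is freed. A hypothetical caller, for illustration only:

/* If 'sel' is not a valid selector, the faulting mov is caught
 * through the exception table entry (_ASM_EXTABLE), the fixup
 * zeroes __val, and the retried mov loads the null selector into
 * %fs instead of faulting again. */
static void set_user_fs(unsigned short sel)
{
	loadsegment(fs, sel);
}

Wrapping the macro body in do { ... } while (0) also makes it behave as a single statement at the call site, which is the "clean up" half of the change alongside the "optimize" half above.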