Diffstat (limited to 'arch/x86/entry/calling.h')
-rw-r--r--  arch/x86/entry/calling.h | 40
1 file changed, 0 insertions, 40 deletions
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index d3fbe2dc03ea..efb0d1b1f15f 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,7 +6,6 @@
 #include <asm/percpu.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
-#include <asm/inst.h>
 
 /*
 
@@ -338,12 +337,6 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 .endm
 
-.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
-	rdgsbase \save_reg
-	GET_PERCPU_BASE \scratch_reg
-	wrgsbase \scratch_reg
-.endm
-
 #endif /* CONFIG_X86_64 */
 
 .macro STACKLEAK_ERASE
@@ -352,39 +345,6 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 .endm
 
-#ifdef CONFIG_SMP
-
-/*
- * CPU/node NR is loaded from the limit (size) field of a special segment
- * descriptor entry in GDT.
- */
-.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
-	movq	$__CPUNODE_SEG, \reg
-	lsl	\reg, \reg
-.endm
-
-/*
- * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
- * We normally use %gs for accessing per-CPU data, but we are setting up
- * %gs here and obviously can not use %gs itself to access per-CPU data.
- */
-.macro GET_PERCPU_BASE reg:req
-	ALTERNATIVE \
-		"LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \
-		"RDPID	\reg", \
-		X86_FEATURE_RDPID
-	andq	$VDSO_CPUNODE_MASK, \reg
-	movq	__per_cpu_offset(, \reg, 8), \reg
-.endm
-
-#else
-
-.macro GET_PERCPU_BASE reg:req
-	movq	pcpu_unit_offsets(%rip), \reg
-.endm
-
-#endif /* CONFIG_SMP */
-
 /*
  * This does 'call enter_from_user_mode' unless we can avoid it based on
  * kernel config or using the static jump infrastructure.
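
For reference, the per-CPU base recovery that the removed macros implemented works in two steps: obtain the current CPU number either via RDPID (which returns IA32_TSC_AUX) or, on CPUs without RDPID, from the limit field of a dedicated GDT descriptor read with LSL, then index __per_cpu_offset[] with it. The kernel publishes the same (node << 12) | cpu encoding to user space, so the LSL half of the trick can be demonstrated outside the kernel. The sketch below is a minimal user-space illustration, not kernel code; the selector value 0x7b and the 12-bit CPU field are assumptions taken from asm/segment.h (__CPUNODE_SEG, VDSO_CPUNODE_BITS/MASK), and it requires an x86-64 Linux kernel recent enough to install that GDT entry.

/*
 * cpunode.c - user-space sketch of the LSL-based CPU/node read that the
 * removed LOAD_CPU_AND_NODE_SEG_LIMIT macro performed (and that the vDSO
 * getcpu fallback still uses when RDPID is unavailable).
 *
 * Assumed constants (from asm/segment.h): selector 0x7b, 12-bit CPU field.
 * Build: gcc -O2 -o cpunode cpunode.c    (x86-64 Linux only)
 */
#include <stdio.h>

#define CPUNODE_SEG	0x7b	/* assumed __CPUNODE_SEG (GDT_ENTRY_CPUNODE*8 + 3) */
#define CPUNODE_BITS	12	/* assumed VDSO_CPUNODE_BITS */
#define CPUNODE_MASK	0xfff	/* assumed VDSO_CPUNODE_MASK */

static unsigned int read_cpunode(void)
{
	unsigned int p;

	/*
	 * LSL loads the limit field of the descriptor named by the selector;
	 * the kernel encodes (node << CPUNODE_BITS) | cpu there.  On kernels
	 * that do not set up this GDT entry the result is unspecified.
	 */
	asm volatile("lsl %1, %0" : "=r" (p) : "r" (CPUNODE_SEG));
	return p;
}

int main(void)
{
	unsigned int p = read_cpunode();

	printf("cpu=%u node=%u\n", p & CPUNODE_MASK, p >> CPUNODE_BITS);
	return 0;
}

In the removed kernel macro, this CPU number was then used to index __per_cpu_offset(, \reg, 8) to produce the GSBASE value for the current processor, and ALTERNATIVE selected RDPID over the LSL sequence when X86_FEATURE_RDPID is available; that indexing step has no user-space equivalent.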