 arch/x86/include/asm/pda.h       | 88
 arch/x86/include/asm/percpu.h    | 10
 arch/x86/kernel/vmlinux_64.lds.S |  1
 arch/x86/kernel/x8664_ksyms_64.c |  2
 4 files changed, 16 insertions(+), 85 deletions(-)
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index 66ae1043393d..e3d3a081d798 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -45,91 +45,15 @@ extern void pda_init(int);
 
 #define cpu_pda(cpu)		(&per_cpu(__pda, cpu))
 
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val)					\
-do {									\
-	typedef typeof(_proxy_pda.field) T__;				\
-	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
-	switch (sizeof(_proxy_pda.field)) {				\
-	case 2:								\
-		asm(op "w %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	case 4:								\
-		asm(op "l %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i" (pda_offset(field)));				\
-		break;							\
-	case 8:								\
-		asm(op "q %1,%%gs:%c2":					\
-		    "+m" (_proxy_pda.field) :				\
-		    "r" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	default:							\
-		__bad_pda_field();					\
-	}								\
-} while (0)
-
-#define pda_from_op(op, field)						\
-({									\
-	typeof(_proxy_pda.field) ret__;					\
-	switch (sizeof(_proxy_pda.field)) {				\
-	case 2:								\
-		asm(op "w %%gs:%c1,%0" :				\
-		    "=r" (ret__) :					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	case 4:								\
-		asm(op "l %%gs:%c1,%0":					\
-		    "=r" (ret__):					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	case 8:								\
-		asm(op "q %%gs:%c1,%0":					\
-		    "=r" (ret__) :					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	default:							\
-		__bad_pda_field();					\
-	}								\
-	ret__;								\
-})
-
-#define read_pda(field)		pda_from_op("mov", field)
-#define write_pda(field, val)	pda_to_op("mov", field, val)
-#define add_pda(field, val)	pda_to_op("add", field, val)
-#define sub_pda(field, val)	pda_to_op("sub", field, val)
-#define or_pda(field, val)	pda_to_op("or", field, val)
+#define read_pda(field)		x86_read_percpu(__pda.field)
+#define write_pda(field, val)	x86_write_percpu(__pda.field, val)
+#define add_pda(field, val)	x86_add_percpu(__pda.field, val)
+#define sub_pda(field, val)	x86_sub_percpu(__pda.field, val)
+#define or_pda(field, val)	x86_or_percpu(__pda.field, val)
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define test_and_clear_bit_pda(bit, field)				\
-({									\
-	int old__;							\
-	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
-		     : "=r" (old__), "+m" (_proxy_pda.field)		\
-		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
-	old__;								\
-})
+	x86_test_and_clear_bit_percpu(bit, __pda.field)
 
 #endif
 
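The pda_to_op()/pda_from_op() macros deleted above all work the same way: switch on sizeof(field) so that each PDA access is emitted as one instruction with the correct operand-size suffix, addressed through %gs. The following stand-alone sketch of that sizeof-dispatch idiom is not part of the patch: it drops the segment prefix so it builds and runs in user space on x86-64 with gcc, and the names demo_write(), demo_pcpu and demo_area are invented for illustration. With optimization enabled the compiler folds the constant switch, so only the matching asm statement survives at each call site.

/*
 * Stand-alone illustration only (hypothetical names, no %gs segment),
 * compiled and run in user space on x86-64 with gcc.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_pcpu {
	uint16_t w;
	uint32_t l;
	uint64_t q;
};

static struct demo_pcpu demo_area;

/* Pick the operand-size suffix from the field's size, mirroring the
 * sizeof() switch in the pda_to_op() macro removed above. */
#define demo_write(field, val)						\
do {									\
	typedef typeof(demo_area.field) T__;				\
	if (0) { T__ tmp__ = (val); (void)tmp__; } /* type check */	\
	switch (sizeof(demo_area.field)) {				\
	case 2:								\
		asm("movw %1,%0" : "+m" (demo_area.field)		\
		    : "ri" ((uint16_t)(val)));				\
		break;							\
	case 4:								\
		asm("movl %1,%0" : "+m" (demo_area.field)		\
		    : "ri" ((uint32_t)(val)));				\
		break;							\
	case 8:								\
		asm("movq %1,%0" : "+m" (demo_area.field)		\
		    : "r" ((uint64_t)(val)));				\
		break;							\
	}								\
} while (0)

int main(void)
{
	demo_write(w, 0x1234);		/* compiles to a movw */
	demo_write(q, 0xdeadbeefULL);	/* compiles to a movq */
	printf("w=%x q=%llx\n", demo_area.w,
	       (unsigned long long)demo_area.q);
	return 0;
}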
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 556f84b9ea96..328b31a429d7 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -121,6 +121,16 @@ do { \
 #define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
 #define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
 
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var)			\
+({									\
+	int old__;							\
+	asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0"	\
+		     : "=r" (old__)					\
+		     : "dIr" (bit), "i" (&per_cpu__##var) : "memory");	\
+	old__;								\
+})
+
 #ifdef CONFIG_X86_64
 extern void load_pda_offset(int cpu);
 #else
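The x86_test_and_clear_bit_percpu() macro added above relies on a two-instruction sequence: btr copies the selected bit into the carry flag and clears it in the addressed word, then sbbl %0,%0 turns the carry flag into 0 or -1, which becomes the returned old value. As the comment says, there is no lock prefix, so the read-modify-write is only safe while nothing else can race for the same per-cpu word. The sketch below, not part of the patch, shows the same idiom without the per-cpu segment prefix; demo_test_and_clear_bit() and demo_flags are invented names, and an explicit btrl suffix is used because the demo operates on a plain unsigned int.

#include <stdio.h>

static unsigned int demo_flags = 1u << 3;	/* bit 3 starts out set */

/* btr: CF = old bit, bit cleared in memory; sbbl %0,%0: old__ = CF ? -1 : 0 */
#define demo_test_and_clear_bit(bit, var)				\
({									\
	int old__;							\
	asm volatile("btrl %2,%1\n\tsbbl %0,%0"				\
		     : "=r" (old__), "+m" (var)				\
		     : "Ir" (bit) : "memory");				\
	old__;								\
})

int main(void)
{
	/* First call observes bit 3 set and clears it. */
	printf("first:  %d\n", !!demo_test_and_clear_bit(3, demo_flags));
	/* Second call observes it already clear. */
	printf("second: %d\n", !!demo_test_and_clear_bit(3, demo_flags));
	printf("flags:  %#x\n", demo_flags);
	return 0;
}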
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index d2a0baa87d1b..a09abb8fb97f 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -14,7 +14,6 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
-_proxy_pda = 1;
 PHDRS {
 	text PT_LOAD FLAGS(5);	/* R_E */
 	data PT_LOAD FLAGS(7);	/* RWE */
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 695e426aa354..3909e3ba5ce3 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(init_level4_pgt);
 EXPORT_SYMBOL(load_gs_index);
-
-EXPORT_SYMBOL(_proxy_pda);