author		Tejun Heo <tj@kernel.org>	2009-01-13 06:41:35 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-16 08:20:22 -0500
commit		49357d19e4fb31e28796eaff83499e7584c26878 (patch)
tree		c0b425a5f3a187ae25ebc662816aa7c95c9cc59f /arch/x86/include
parent		b12d8db8fbfaed1e8222a15333a3645599636854 (diff)
x86: convert pda ops to wrappers around x86 percpu accessors
pda is now a percpu variable and there's no reason it can't use plain
x86 percpu accessors.  Add x86_test_and_clear_bit_percpu() and replace
pda op implementations with wrappers around x86 percpu accessors.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
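To make the shape of the change concrete, here is a minimal userspace model
of the wrapper structure the patch introduces. All names in it (pda_model,
model_read_pda, the cpunumber field) are invented for this sketch, and plain
assignment stands in for the %gs-relative inline asm the kernel actually
generates; it is an illustration of the idea, not kernel code.

	#include <stdio.h>

	/* stand-in for struct x8664_pda; fields invented for the sketch */
	struct pda_model { int cpunumber; unsigned long kernelstack; };

	/* stand-in for the per-cpu __pda instance (%gs-relative in the kernel) */
	static struct pda_model __pda_model;

	/* stand-ins for x86_read_percpu()/x86_write_percpu() */
	#define model_read_percpu(var)		(var)
	#define model_write_percpu(var, val)	((var) = (val))

	/* the patch's point: pda ops become thin wrappers over percpu ops,
	 * instead of each carrying its own size-switched inline asm */
	#define model_read_pda(field)		model_read_percpu(__pda_model.field)
	#define model_write_pda(field, val)	model_write_percpu(__pda_model.field, val)

	int main(void)
	{
		model_write_pda(cpunumber, 3);
		printf("cpunumber = %d\n", model_read_pda(cpunumber));
		return 0;
	}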
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/pda.h	88
-rw-r--r--	arch/x86/include/asm/percpu.h	10
2 files changed, 16 insertions(+), 82 deletions(-)
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index 66ae1043393d..e3d3a081d798 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -45,91 +45,15 @@ extern void pda_init(int);
 
 #define cpu_pda(cpu)		(&per_cpu(__pda, cpu))
 
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val)					\
-do {									\
-	typedef typeof(_proxy_pda.field) T__;				\
-	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
-	switch (sizeof(_proxy_pda.field)) {				\
-	case 2:								\
-		asm(op "w %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	case 4:								\
-		asm(op "l %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i" (pda_offset(field)));				\
-		break;							\
-	case 8:								\
-		asm(op "q %1,%%gs:%c2":					\
-		    "+m" (_proxy_pda.field) :				\
-		    "r" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	default:							\
-		__bad_pda_field();					\
-	}								\
-} while (0)
-
-#define pda_from_op(op, field)						\
-({									\
-	typeof(_proxy_pda.field) ret__;					\
-	switch (sizeof(_proxy_pda.field)) {				\
-	case 2:								\
-		asm(op "w %%gs:%c1,%0" :				\
-		    "=r" (ret__) :					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	case 4:								\
-		asm(op "l %%gs:%c1,%0":					\
-		    "=r" (ret__):					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	case 8:								\
-		asm(op "q %%gs:%c1,%0":					\
-		    "=r" (ret__) :					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	default:							\
-		__bad_pda_field();					\
-	}								\
-	ret__;								\
-})
-
-#define read_pda(field)		pda_from_op("mov", field)
-#define write_pda(field, val)	pda_to_op("mov", field, val)
-#define add_pda(field, val)	pda_to_op("add", field, val)
-#define sub_pda(field, val)	pda_to_op("sub", field, val)
-#define or_pda(field, val)	pda_to_op("or", field, val)
+#define read_pda(field)		x86_read_percpu(__pda.field)
+#define write_pda(field, val)	x86_write_percpu(__pda.field, val)
+#define add_pda(field, val)	x86_add_percpu(__pda.field, val)
+#define sub_pda(field, val)	x86_sub_percpu(__pda.field, val)
+#define or_pda(field, val)	x86_or_percpu(__pda.field, val)
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define test_and_clear_bit_pda(bit, field)				\
-({									\
-	int old__;							\
-	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
-		     : "=r" (old__), "+m" (_proxy_pda.field)		\
-		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
-	old__;								\
-})
+	x86_test_and_clear_bit_percpu(bit, __pda.field)
 
 #endif
 
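An aside on the machinery deleted above: the odd-looking
"if (0) { T__ tmp__; tmp__ = (val); }" line in the old pda_to_op() was a
compile-time type check -- the dead assignment forces (val) to be
assignment-compatible with the field's type and is then optimized away.
A standalone model of the trick (struct and macro names invented for the
sketch; gcc's typeof() extension is required):

	/* userspace model of pda_to_op()'s "if (0)" type check */
	struct pda_like { unsigned long kernelstack; };

	#define checked_write(obj, field, val)					\
	do {									\
		typedef typeof((obj).field) T__;				\
		if (0) { T__ tmp__; tmp__ = (val); }	/* type check only */	\
		(obj).field = (val);						\
	} while (0)

	int main(void)
	{
		struct pda_like p;

		checked_write(p, kernelstack, 42UL);		/* compiles */
		/* checked_write(p, kernelstack, (void *)0);	   would not */
		return p.kernelstack != 42;
	}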
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 556f84b9ea96..328b31a429d7 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -121,6 +121,16 @@ do { \
 #define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
 #define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
 
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var)				\
+({									\
+	int old__;							\
+	asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0"	\
+		     : "=r" (old__)					\
+		     : "dIr" (bit), "i" (&per_cpu__##var) : "memory");	\
+	old__;								\
+})
+
 #ifdef CONFIG_X86_64
 extern void load_pda_offset(int cpu);
 #else
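The btr/sbbl pair in the new x86_test_and_clear_bit_percpu() is the standard
x86 idiom for test-and-clear: btr copies the selected bit into the carry flag
and clears it in memory, then "sbbl %0,%0" computes old - old - CF, i.e.
materializes the old bit value as 0 or -1. A standalone userspace sketch of
the same idiom (x86-only; the function name is invented, and unlike the real
macro it addresses an ordinary pointer rather than the per-cpu segment):

	#include <stdio.h>

	/* userspace model of the btr+sbbl idiom; not the kernel macro itself.
	 * Uses 32-bit btrl, so bit must be in the range 0..31 here. */
	static int test_and_clear_bit_model(int bit, unsigned int *word)
	{
		int old;

		/* btr: bit -> CF, then clear it; sbbl x,x: x = CF ? -1 : 0 */
		asm volatile("btr %2,%1\n\tsbbl %0,%0"
			     : "=r" (old), "+m" (*word)
			     : "Ir" (bit)
			     : "memory", "cc");
		return old;
	}

	int main(void)
	{
		unsigned int w = 1U << 3;

		printf("first:  %d\n", !!test_and_clear_bit_model(3, &w));	/* 1 */
		printf("second: %d\n", !!test_and_clear_bit_model(3, &w));	/* 0 */
		printf("word:   %#x\n", w);					/* 0 */
		return 0;
	}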