Diffstat (limited to 'include/asm-x86/msr.h')
-rw-r--r--	include/asm-x86/msr.h	247
1 file changed, 0 insertions, 247 deletions
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
deleted file mode 100644
index 530af1f6389e..000000000000
--- a/include/asm-x86/msr.h
+++ /dev/null
@@ -1,247 +0,0 @@
#ifndef ASM_X86__MSR_H
#define ASM_X86__MSR_H

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
# include <linux/types.h>
#endif

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}

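/*
 * Editor's note (not part of the original header): the .byte sequence
 * above is the hand-encoded opcode for RDTSCP, which returns the TSC
 * in EDX:EAX and the contents of the TSC_AUX MSR in ECX. Illustrative
 * use, reading the counter and the per-cpu signature in one step:
 *
 *	unsigned int aux;
 *	unsigned long long t = native_read_tscp(&aux);
 */
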
/*
 * The i386 calling convention returns 64-bit values in edx:eax, while
 * x86_64 returns them in rax. Also, the "A" constraint does not really
 * mean rdx:rax on x86_64, so we need specialized behaviour for each
 * architecture.
 */
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high)	unsigned low, high
#define EAX_EDX_VAL(val, low, high)	((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_ARGS(val, low, high)	"A" (val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif

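/*
 * Editor's sketch (not part of the original header): on 32-bit the
 * "=A" constraint binds the 64-bit result to edx:eax directly, so
 * native_read_msr() effectively expands to
 *
 *	unsigned long long val;
 *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 *
 * whereas on 64-bit "A" could hand the compiler either rax or rdx for
 * a 64-bit value, so the halves are read into two 32-bit variables
 * and recombined by EAX_EDX_VAL().
 */
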
static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EFAULT));
	return EAX_EDX_VAL(val, low, high);
}

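/*
 * Editor's note (not part of the original header): the _ASM_EXTABLE
 * entry makes a #GP raised by the rdmsr at label 2 resume at the
 * fixup at label 3, which stores -EFAULT in *err instead of letting
 * the fault propagate. Illustrative use:
 *
 *	int err;
 *	u64 val = native_read_msr_safe(msr, &err);
 *	if (err)
 *		... the read faulted, e.g. the MSR does not exist ...
 */
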
static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
							   int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
	return EAX_EDX_VAL(val, low, high);
}

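/*
 * Editor's note (assumption, not stated in the original header): the
 * magic constant loaded into %edi above appears to be a vendor key
 * that certain AMD MSRs require before they will read; the function
 * is otherwise identical to native_read_msr_safe().
 */
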
static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

static inline int native_write_msr_safe(unsigned int msr,
					unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EFAULT)
		     : "memory");
	return err;
}

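/*
 * Illustrative use (not part of the original header): the safe write
 * returns 0 on success and -EFAULT if the wrmsr faulted, so callers
 * can probe for a writable MSR without risking an oops:
 *
 *	if (native_write_msr_safe(msr, low, high))
 *		... the write faulted, fall back gracefully ...
 */
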
extern unsigned long long native_read_tsc(void);

static __always_inline unsigned long long __native_read_tsc(void)
{
	DECLARE_ARGS(val, low, high);

	rdtsc_barrier();
	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
	rdtsc_barrier();

	return EAX_EDX_VAL(val, low, high);
}

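/*
 * Editor's note (not part of the original header): rdtsc itself can
 * be reordered by the CPU relative to surrounding instructions; the
 * rdtsc_barrier() calls fence it on both sides so the timestamp is
 * taken where the code says it is.
 */
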
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better
 */

#define rdmsr(msr, val1, val2)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(val1) = (u32)__val;					\
	(val2) = (u32)(__val >> 32);				\
} while (0)

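/*
 * Illustrative use (editor's sketch; MSR_IA32_APICBASE comes from
 * msr-index.h): the macro assigns through its arguments, so plain
 * lvalues are passed rather than pointers:
 *
 *	u32 lo, hi;
 *	rdmsr(MSR_IA32_APICBASE, lo, hi);
 */
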
static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

#define wrmsrl(msr, val)						\
	native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))

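/*
 * Illustrative use (not part of the original header): the *l variants
 * deal in a single 64-bit value, splitting and recombining the 32-bit
 * halves internally:
 *
 *	u64 val;
 *	rdmsrl(MSR_IA32_APICBASE, val);
 *	wrmsrl(MSR_IA32_APICBASE, val);
 */
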
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, p1, p2)					\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*p1) = (u32)__val;					\
	(*p2) = (u32)(__val >> 32);				\
	__err;							\
})

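/*
 * Illustrative use (editor's sketch): unlike rdmsr(), the safe form
 * takes pointers and evaluates to the error code:
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(msr, &lo, &hi))
 *		... the MSR is not readable on this CPU ...
 */
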
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_amd_safe(msr, &err);
	return err;
}

#define rdtscl(low)						\
	((low) = (u32)native_read_tsc())

#define rdtscll(val)						\
	((val) = native_read_tsc())

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdtscp(low, high, aux)					\
do {								\
	unsigned long long _val = native_read_tscp(&(aux));	\
	(low) = (u32)_val;					\
	(high) = (u32)(_val >> 32);				\
} while (0)

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))

#endif /* !CONFIG_PARAVIRT */


#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),		\
					     (u32)((val) >> 32))

#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2))

#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)

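/*
 * Editor's note (not part of the original header): 0x10 is
 * MSR_IA32_TSC and 0xc0000103 is MSR_TSC_AUX, so write_rdtscp_aux()
 * stores the per-cpu value that rdtscp() later hands back through its
 * aux argument.
 */
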
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else  /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif  /* CONFIG_SMP */
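/*
 * Illustrative use (editor's sketch): on SMP kernels the *_on_cpu
 * helpers run the access on the named CPU; on UP builds they collapse
 * to the local forms above, which is why the cpu argument is ignored
 * there:
 *
 *	u32 lo, hi;
 *	rdmsr_on_cpu(1, MSR_IA32_APICBASE, &lo, &hi);
 */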
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */


#endif /* ASM_X86__MSR_H */