author    Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:20:03 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:20:03 -0400
commit    96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch)
tree      d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86/msr_64.h
parent    27bd0c955648646abf2a353a8371d28c37bcd982 (diff)
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fix up the header install make rules.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/msr_64.h')
-rw-r--r--  include/asm-x86/msr_64.h  187
1 file changed, 187 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86/msr_64.h b/include/asm-x86/msr_64.h
new file mode 100644
index 000000000000..d5c55b80da54
--- /dev/null
+++ b/include/asm-x86/msr_64.h
@@ -0,0 +1,187 @@
#ifndef X86_64_MSR_H
#define X86_64_MSR_H 1

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr,val1,val2) \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (val1), "=d" (val2) \
			     : "c" (msr))

#define rdmsrl(msr,val) do { unsigned long a__,b__; \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (a__), "=d" (b__) \
			     : "c" (msr)); \
	val = a__ | (b__<<32); \
} while(0)
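
/*
 * Illustrative usage sketch (not part of the original commit; any MSR
 * constant from <asm/msr-index.h> would do, MSR_IA32_APICBASE is just
 * an example). Both macros write their outputs directly, as described
 * in the comment above:
 *
 *	unsigned int lo, hi;
 *	unsigned long val;
 *	rdmsr(MSR_IA32_APICBASE, lo, hi);	low/high 32-bit halves
 *	rdmsrl(MSR_IA32_APICBASE, val);		full 64-bit value
 */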

#define wrmsr(msr,val1,val2) \
	__asm__ __volatile__("wrmsr" \
			     : /* no outputs */ \
			     : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)

/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__; \
	asm volatile("2: wrmsr ; xorl %0,%0\n" \
		     "1:\n\t" \
		     ".section .fixup,\"ax\"\n\t" \
		     "3: movl %4,%0 ; jmp 1b\n\t" \
		     ".previous\n\t" \
		     ".section __ex_table,\"a\"\n" \
		     " .align 8\n\t" \
		     " .quad 2b,3b\n\t" \
		     ".previous" \
		     : "=a" (ret__) \
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })

#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) \
	({ int ret__; \
	   asm volatile ("1: rdmsr\n" \
			 "2:\n" \
			 ".section .fixup,\"ax\"\n" \
			 "3: movl %4,%0\n" \
			 " jmp 2b\n" \
			 ".previous\n" \
			 ".section __ex_table,\"a\"\n" \
			 " .align 8\n" \
			 " .quad 1b,3b\n" \
			 ".previous" \
			 : "=&bDS" (ret__), "=a" (*(a)), "=d" (*(b)) \
			 : "c" (msr), "i" (-EIO), "0" (0)); \
	   ret__; })
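
/*
 * Illustrative usage sketch (not part of the original commit): the
 * *_safe variants return 0 on success and a negative errno (-EFAULT
 * for wrmsr_safe, -EIO for rdmsr_safe) if the access faults, so a
 * possibly-unimplemented MSR can be probed without an oops:
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_IA32_APICBASE, &lo, &hi) == 0)
 *		use_value(lo, hi);	(hypothetical consumer)
 */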

#define rdtsc(low,high) \
	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")

/* .byte 0x0f,0x01,0xf9 is the rdtscp opcode, for assemblers that don't know it */
#define rdtscp(low,high,aux) \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))

#define rdtscll(val) do { \
	unsigned int __a,__d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

#define rdtscpll(val, aux) do { \
	unsigned long __a, __d; \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
	(val) = (__d << 32) | __a; \
} while (0)
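
/*
 * Illustrative usage sketch (not part of the original commit): timing
 * a code region in TSC cycles (do_work() is hypothetical):
 *
 *	unsigned long t0, t1;
 *	rdtscll(t0);
 *	do_work();
 *	rdtscll(t1);
 *	printk(KERN_DEBUG "took %lu cycles\n", t1 - t0);
 */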

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)	/* 0x10 = MSR_IA32_TSC */

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)	/* 0xc0000103 = MSR_TSC_AUX */

#define rdpmc(counter,low,high) \
	__asm__ __volatile__("rdpmc" \
			     : "=a" (low), "=d" (high) \
			     : "c" (counter))

static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}
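
/*
 * Illustrative usage sketch (not part of the original commit): Intel's
 * deterministic cache parameters live in CPUID leaf 4, indexed by a
 * subleaf in ecx, which is what cpuid_count() provides:
 *
 *	int eax, ebx, ecx, edx;
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 *	(eax & 0x1f is the type of the first cache level, 0 = no more caches)
 */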

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx");
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx");
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
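
/*
 * Illustrative usage sketch (not part of the original commit): the
 * single-datum helpers suit feature checks, e.g. the standard feature
 * flags in leaf 1's edx word (bit 25 = SSE):
 *
 *	if (cpuid_edx(1) & (1 << 25))
 *		(CPU supports SSE)
 */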

#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else  /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif  /* CONFIG_SMP */
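
/*
 * Illustrative usage sketch (not part of the original commit): the
 * *_on_cpu helpers perform the MSR access on a specific CPU, e.g.
 * reading CPU 0's copy of an MSR from code that may run elsewhere:
 *
 *	u32 lo, hi;
 *	rdmsr_on_cpu(0, MSR_IA32_APICBASE, &lo, &hi);
 */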
#endif  /* __ASSEMBLY__ */
#endif  /* X86_64_MSR_H */