-rw-r--r--  arch/x86_64/kernel/Makefile |   1
-rw-r--r--  arch/x86_64/kernel/msr.c    | 279
-rw-r--r--  include/asm-x86_64/msr.h    |  39
3 files changed, 28 insertions, 291 deletions
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 1579bdd0adcd..bcdd0a805fe7 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -46,3 +46,4 @@ microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
 intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
 quirks-y += ../../i386/kernel/quirks.o
 i8237-y += ../../i386/kernel/i8237.o
+msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
diff --git a/arch/x86_64/kernel/msr.c b/arch/x86_64/kernel/msr.c
deleted file mode 100644
index 598953ab0154..000000000000
--- a/arch/x86_64/kernel/msr.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/* ----------------------------------------------------------------------- *
- *
- * Copyright 2000 H. Peter Anvin - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
- * USA; either version 2 of the License, or (at your option) any later
- * version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * msr.c
- *
- * x86 MSR access device
- *
- * This device is accessed by lseek() to the appropriate register number
- * and then read/write in chunks of 8 bytes.  A larger size means multiple
- * reads or writes of the same register.
- *
- * This driver uses /dev/cpu/%d/msr where %d is the minor number, and on
- * an SMP box will direct the access to CPU %d.
- */
-
-#include <linux/module.h>
-#include <linux/config.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/major.h>
-#include <linux/fs.h>
-
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-
-/* Note: "err" is handled in a funny way below. Otherwise one version
-   of gcc or another breaks. */
-
-static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
-{
-	int err;
-
-	asm volatile ("1:	wrmsr\n"
-		      "2:\n"
-		      ".section .fixup,\"ax\"\n"
-		      "3:	movl %4,%0\n"
-		      "	jmp 2b\n"
-		      ".previous\n"
-		      ".section __ex_table,\"a\"\n"
-		      " .align 8\n" " .quad 1b,3b\n" ".previous":"=&bDS" (err)
-		      :"a"(eax), "d"(edx), "c"(reg), "i"(-EIO), "0"(0));
-
-	return err;
-}
-
-static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
-{
-	int err;
-
-	asm volatile ("1:	rdmsr\n"
-		      "2:\n"
-		      ".section .fixup,\"ax\"\n"
-		      "3:	movl %4,%0\n"
-		      "	jmp 2b\n"
-		      ".previous\n"
-		      ".section __ex_table,\"a\"\n"
-		      " .align 8\n"
-		      " .quad 1b,3b\n"
-		      ".previous":"=&bDS" (err), "=a"(*eax), "=d"(*edx)
-		      :"c"(reg), "i"(-EIO), "0"(0));
-
-	return err;
-}
-
-#ifdef CONFIG_SMP
-
-struct msr_command {
-	int cpu;
-	int err;
-	u32 reg;
-	u32 data[2];
-};
-
-static void msr_smp_wrmsr(void *cmd_block)
-{
-	struct msr_command *cmd = (struct msr_command *)cmd_block;
-
-	if (cmd->cpu == smp_processor_id())
-		cmd->err = wrmsr_eio(cmd->reg, cmd->data[0], cmd->data[1]);
-}
-
-static void msr_smp_rdmsr(void *cmd_block)
-{
-	struct msr_command *cmd = (struct msr_command *)cmd_block;
-
-	if (cmd->cpu == smp_processor_id())
-		cmd->err = rdmsr_eio(cmd->reg, &cmd->data[0], &cmd->data[1]);
-}
-
-static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
-{
-	struct msr_command cmd;
-	int ret;
-
-	preempt_disable();
-	if (cpu == smp_processor_id()) {
-		ret = wrmsr_eio(reg, eax, edx);
-	} else {
-		cmd.cpu = cpu;
-		cmd.reg = reg;
-		cmd.data[0] = eax;
-		cmd.data[1] = edx;
-
-		smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
-		ret = cmd.err;
-	}
-	preempt_enable();
-	return ret;
-}
-
-static inline int do_rdmsr(int cpu, u32 reg, u32 * eax, u32 * edx)
-{
-	struct msr_command cmd;
-	int ret;
-
-	preempt_disable();
-	if (cpu == smp_processor_id()) {
-		ret = rdmsr_eio(reg, eax, edx);
-	} else {
-		cmd.cpu = cpu;
-		cmd.reg = reg;
-
-		smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);
-
-		*eax = cmd.data[0];
-		*edx = cmd.data[1];
-
-		ret = cmd.err;
-	}
-	preempt_enable();
-	return ret;
-}
-
-#else /* ! CONFIG_SMP */
-
-static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
-{
-	return wrmsr_eio(reg, eax, edx);
-}
-
-static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
-{
-	return rdmsr_eio(reg, eax, edx);
-}
-
-#endif /* ! CONFIG_SMP */
-
-static loff_t msr_seek(struct file *file, loff_t offset, int orig)
-{
-	loff_t ret = -EINVAL;
-
-	lock_kernel();
-	switch (orig) {
-	case 0:
-		file->f_pos = offset;
-		ret = file->f_pos;
-		break;
-	case 1:
-		file->f_pos += offset;
-		ret = file->f_pos;
-	}
-	unlock_kernel();
-	return ret;
-}
-
-static ssize_t msr_read(struct file *file, char __user * buf,
-			size_t count, loff_t * ppos)
-{
-	u32 __user *tmp = (u32 __user *) buf;
-	u32 data[2];
-	size_t rv;
-	u32 reg = *ppos;
-	int cpu = iminor(file->f_dentry->d_inode);
-	int err;
-
-	if (count % 8)
-		return -EINVAL;	/* Invalid chunk size */
-
-	for (rv = 0; count; count -= 8) {
-		err = do_rdmsr(cpu, reg, &data[0], &data[1]);
-		if (err)
-			return err;
-		if (copy_to_user(tmp, &data, 8))
-			return -EFAULT;
-		tmp += 2;
-	}
-
-	return ((char __user *)tmp) - buf;
-}
-
-static ssize_t msr_write(struct file *file, const char __user *buf,
-			 size_t count, loff_t *ppos)
-{
-	const u32 __user *tmp = (const u32 __user *)buf;
-	u32 data[2];
-	size_t rv;
-	u32 reg = *ppos;
-	int cpu = iminor(file->f_dentry->d_inode);
-	int err;
-
-	if (count % 8)
-		return -EINVAL;	/* Invalid chunk size */
-
-	for (rv = 0; count; count -= 8) {
-		if (copy_from_user(&data, tmp, 8))
-			return -EFAULT;
-		err = do_wrmsr(cpu, reg, data[0], data[1]);
-		if (err)
-			return err;
-		tmp += 2;
-	}
-
-	return ((char __user *)tmp) - buf;
-}
-
-static int msr_open(struct inode *inode, struct file *file)
-{
-	unsigned int cpu = iminor(file->f_dentry->d_inode);
-	struct cpuinfo_x86 *c = &(cpu_data)[cpu];
-
-	if (cpu >= NR_CPUS || !cpu_online(cpu))
-		return -ENXIO;	/* No such CPU */
-	if (!cpu_has(c, X86_FEATURE_MSR))
-		return -EIO;	/* MSR not supported */
-
-	return 0;
-}
-
-/*
- * File operations we support
- */
-static struct file_operations msr_fops = {
-	.owner = THIS_MODULE,
-	.llseek = msr_seek,
-	.read = msr_read,
-	.write = msr_write,
-	.open = msr_open,
-};
-
-static int __init msr_init(void)
-{
-	if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
-		printk(KERN_ERR "msr: unable to get major %d for msr\n",
-		       MSR_MAJOR);
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static void __exit msr_exit(void)
-{
-	unregister_chrdev(MSR_MAJOR, "cpu/msr");
-}
-
-module_init(msr_init);
-module_exit(msr_exit)
-
-MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
-MODULE_DESCRIPTION("x86 generic MSR driver");
-MODULE_LICENSE("GPL");
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 15a6ef7cf3d3..4d727f3f5550 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -29,22 +29,37 @@
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
 
 /* wrmsr with exception handling */
 #define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3: movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     " .align 8\n\t"				\
		     " .quad 2b,3b\n\t"				\
		     ".previous"				\
		     : "=a" (ret__)				\
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })
 
 #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
 
+#define rdmsr_safe(msr,a,b) \
+	({ int ret__;						\
+	  asm volatile ("1: rdmsr\n"				\
+		      "2:\n"					\
+		      ".section .fixup,\"ax\"\n"		\
+		      "3: movl %4,%0\n"				\
+		      " jmp 2b\n"				\
+		      ".previous\n"				\
+		      ".section __ex_table,\"a\"\n"		\
+		      " .align 8\n"				\
+		      " .quad 1b,3b\n"				\
+		      ".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\
+		      :"c"(msr), "i"(-EIO), "0"(0));		\
+	  ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 
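
Editor's note, not part of the patch: both wrmsr_safe() and the new rdmsr_safe() expand to inline asm with an __ex_table fixup entry, so a faulting WRMSR/RDMSR returns an error code (-EFAULT and -EIO respectively) instead of oopsing, and 0 on success. A hypothetical kernel-side caller, with MSR_EXAMPLE a made-up register number used only for illustration:

/* Sketch of a caller probing an MSR safely; not from this patch. */
#define MSR_EXAMPLE 0x0000001b

static int probe_example_msr(void)
{
	u32 lo, hi;

	if (rdmsr_safe(MSR_EXAMPLE, lo, hi))	/* nonzero => RDMSR faulted */
		return -EIO;

	/* write the value back unchanged; wrmsr_safe() reports a fault too */
	if (wrmsr_safe(MSR_EXAMPLE, lo, hi))
		return -EIO;

	return 0;
}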