path: root/arch/x86/lib
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-19 12:48:14 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-19 12:48:14 -0500
commit     3981e152864fcc1dbbb564e1f4c0ae11a09639d2 (patch)
tree       76c767a9b25e294c3cc8edd9870304b845cabdd9 /arch/x86/lib
parent     aac3d39693529ca538e37ebdb6ed5d6432a697c7 (diff)
parent     18374d89e5fe96772102f44f535efb1198d9be08 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, irq: Allow 0xff for /proc/irq/[n]/smp_affinity on an 8-cpu system
  Makefile: Unexport LC_ALL instead of clearing it
  x86: Fix objdump version check in arch/x86/tools/chkobjdump.awk
  x86: Reenable TSC sync check at boot, even with NONSTOP_TSC
  x86: Don't use POSIX character classes in gen-insn-attr-x86.awk
  Makefile: set LC_CTYPE, LC_COLLATE, LC_NUMERIC to C
  x86: Increase MAX_EARLY_RES; insufficient on 32-bit NUMA
  x86: Fix checking of SRAT when node 0 ram is not from 0
  x86, cpuid: Add "volatile" to asm in native_cpuid()
  x86, msr: msrs_alloc/free for CONFIG_SMP=n
  x86, amd: Get multi-node CPU info from NodeId MSR instead of PCI config space
  x86: Add IA32_TSC_AUX MSR and use it
  x86, msr/cpuid: Register enough minors for the MSR and CPUID drivers
  initramfs: add missing decompressor error check
  bzip2: Add missing checks for malloc returning NULL
  bzip2/lzma/gzip: pre-boot malloc doesn't return NULL on failure
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/Makefile      4
-rw-r--r--  arch/x86/lib/msr-smp.c   204
-rw-r--r--  arch/x86/lib/msr.c       213
3 files changed, 206 insertions, 215 deletions
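
Note on the msr part of this merge: msr.c now keeps only the per-CPU buffer allocator (msrs_alloc()/msrs_free()), while the IPI-based accessors move to the new msr-smp.c, so the allocator is available even with CONFIG_SMP=n. A minimal usage sketch follows; it is illustrative only and not part of this commit, and the MSR choice and printing are placeholders.

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/msr.h>

/* Illustrative only: read MSR_IA32_TSC on every online CPU into a
 * per-CPU buffer obtained from msrs_alloc(). */
static void example_dump_tsc(void)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();	/* per-CPU array of struct msr, or NULL */
	if (!msrs)
		return;

	/* One pass fills the entry for each CPU in the mask. */
	rdmsr_on_cpus(cpu_online_mask, MSR_IA32_TSC, msrs);

	for_each_online_cpu(cpu)
		pr_info("cpu%d: tsc lo=%#x hi=%#x\n", cpu,
			per_cpu_ptr(msrs, cpu)->l,
			per_cpu_ptr(msrs, cpu)->h);

	msrs_free(msrs);
}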
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 45b20e486c2..cffd754f303 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
 
 clean-files := inat-tables.c
 
-obj-$(CONFIG_SMP) := msr.o
+obj-$(CONFIG_SMP) += msr-smp.o
 
 lib-y := delay.o
 lib-y += thunk_$(BITS).o
@@ -22,7 +22,7 @@ lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_KPROBES) += insn.o inat.o
 
-obj-y += msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o
 
 ifeq ($(CONFIG_X86_32),y)
         obj-y += atomic64_32.o
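
The Makefile hunks build msr.o unconditionally and build the cross-call helpers in msr-smp.o only when CONFIG_SMP=y. On UP kernels the *_on_cpu() calls are expected to collapse to direct MSR accesses via static inlines in <asm/msr.h>; the sketch below is a rough paraphrase of that fallback shape (an assumption for illustration, not taken from this diff).

/* Rough sketch of the !CONFIG_SMP fallbacks (paraphrased, not from this
 * diff): with a single CPU there is nothing to IPI, so the accessors
 * degrade to plain rdmsr/wrmsr on the current CPU. */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}

static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}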
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
new file mode 100644
index 00000000000..a6b1b86d225
--- /dev/null
+++ b/arch/x86/lib/msr-smp.c
@@ -0,0 +1,204 @@
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/smp.h>
+#include <asm/msr.h>
+
+static void __rdmsr_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
+	else
+		reg = &rv->reg;
+
+	rdmsr(rv->msr_no, reg->l, reg->h);
+}
+
+static void __wrmsr_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
+	else
+		reg = &rv->reg;
+
+	wrmsr(rv->msr_no, reg->l, reg->h);
+}
+
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+	*l = rv.reg.l;
+	*h = rv.reg.h;
+
+	return err;
+}
+EXPORT_SYMBOL(rdmsr_on_cpu);
+
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.l = l;
+	rv.reg.h = h;
+	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+	return err;
+}
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
+			    struct msr *msrs,
+			    void (*msr_func) (void *info))
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msrs = msrs;
+	rv.msr_no = msr_no;
+
+	this_cpu = get_cpu();
+
+	if (cpumask_test_cpu(this_cpu, mask))
+		msr_func(&rv);
+
+	smp_call_function_many(mask, msr_func, &rv, 1);
+	put_cpu();
+}
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask: which CPUs
+ * @msr_no: which MSR
+ * @msrs: array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+{
+	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask: which CPUs
+ * @msr_no: which MSR
+ * @msrs: array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+{
+	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);
+
+/* These "safe" variants are slower and should be used when the target MSR
+   may not actually exist. */
+static void __rdmsr_safe_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+
+	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+}
+
+static void __wrmsr_safe_on_cpu(void *info)
+{
+	struct msr_info *rv = info;
+
+	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
+}
+
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+	*l = rv.reg.l;
+	*h = rv.reg.h;
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
+
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	int err;
+	struct msr_info rv;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.msr_no = msr_no;
+	rv.reg.l = l;
+	rv.reg.h = h;
+	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+
+/*
+ * These variants are significantly slower, but allows control over
+ * the entire 32-bit GPR set.
+ */
+static void __rdmsr_safe_regs_on_cpu(void *info)
+{
+	struct msr_regs_info *rv = info;
+
+	rv->err = rdmsr_safe_regs(rv->regs);
+}
+
+static void __wrmsr_safe_regs_on_cpu(void *info)
+{
+	struct msr_regs_info *rv = info;
+
+	rv->err = wrmsr_safe_regs(rv->regs);
+}
+
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+	int err;
+	struct msr_regs_info rv;
+
+	rv.regs = regs;
+	rv.err = -EIO;
+	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+	int err;
+	struct msr_regs_info rv;
+
+	rv.regs = regs;
+	rv.err = -EIO;
+	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
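
The new file groups the IPI-based accessors: the single-CPU rdmsr_on_cpu()/wrmsr_on_cpu() pair, the cpumask-wide rdmsr_on_cpus()/wrmsr_on_cpus() built on __rwmsr_on_cpus(), and the "safe" variants that report a fault instead of crashing. A hedged caller sketch for the safe path follows (illustrative only; the CPU and MSR number are placeholders):

#include <linux/types.h>
#include <asm/msr.h>

/* Illustrative only: probe an MSR that may not exist on the target CPU. */
static int example_probe_msr(unsigned int cpu, u32 msr_no, u64 *val)
{
	u32 lo, hi;
	int err;

	/* Returns the cross-call error if the IPI failed, otherwise the
	 * fault status collected by rdmsr_safe() on the remote CPU. */
	err = rdmsr_safe_on_cpu(cpu, msr_no, &lo, &hi);
	if (err)
		return err;

	*val = ((u64)hi << 32) | lo;
	return 0;
}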
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 87283417793..8f8eebdca7d 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -1,123 +1,7 @@
 #include <linux/module.h>
 #include <linux/preempt.h>
-#include <linux/smp.h>
 #include <asm/msr.h>
 
-struct msr_info {
-	u32 msr_no;
-	struct msr reg;
-	struct msr *msrs;
-	int err;
-};
-
-static void __rdmsr_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
-
-	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
-	else
-		reg = &rv->reg;
-
-	rdmsr(rv->msr_no, reg->l, reg->h);
-}
-
-static void __wrmsr_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
-
-	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
-	else
-		reg = &rv->reg;
-
-	wrmsr(rv->msr_no, reg->l, reg->h);
-}
-
-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
-	*l = rv.reg.l;
-	*h = rv.reg.h;
-
-	return err;
-}
-EXPORT_SYMBOL(rdmsr_on_cpu);
-
-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	rv.reg.l = l;
-	rv.reg.h = h;
-	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
-
-	return err;
-}
-EXPORT_SYMBOL(wrmsr_on_cpu);
-
-static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
-			    struct msr *msrs,
-			    void (*msr_func) (void *info))
-{
-	struct msr_info rv;
-	int this_cpu;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msrs = msrs;
-	rv.msr_no = msr_no;
-
-	this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask))
-		msr_func(&rv);
-
-	smp_call_function_many(mask, msr_func, &rv, 1);
-	put_cpu();
-}
-
-/* rdmsr on a bunch of CPUs
- *
- * @mask: which CPUs
- * @msr_no: which MSR
- * @msrs: array of MSR values
- *
- */
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
-{
-	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
-}
-EXPORT_SYMBOL(rdmsr_on_cpus);
-
-/*
- * wrmsr on a bunch of CPUs
- *
- * @mask: which CPUs
- * @msr_no: which MSR
- * @msrs: array of MSR values
- *
- */
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
-{
-	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
-}
-EXPORT_SYMBOL(wrmsr_on_cpus);
-
 struct msr *msrs_alloc(void)
 {
 	struct msr *msrs = NULL;
@@ -137,100 +21,3 @@ void msrs_free(struct msr *msrs)
 	free_percpu(msrs);
 }
 EXPORT_SYMBOL(msrs_free);
-
-/* These "safe" variants are slower and should be used when the target MSR
-   may not actually exist. */
-static void __rdmsr_safe_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-
-	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
-}
-
-static void __wrmsr_safe_on_cpu(void *info)
-{
-	struct msr_info *rv = info;
-
-	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
-}
-
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-	*l = rv.reg.l;
-	*h = rv.reg.h;
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
-
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	int err;
-	struct msr_info rv;
-
-	memset(&rv, 0, sizeof(rv));
-
-	rv.msr_no = msr_no;
-	rv.reg.l = l;
-	rv.reg.h = h;
-	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
-
-/*
- * These variants are significantly slower, but allows control over
- * the entire 32-bit GPR set.
- */
-struct msr_regs_info {
-	u32 *regs;
-	int err;
-};
-
-static void __rdmsr_safe_regs_on_cpu(void *info)
-{
-	struct msr_regs_info *rv = info;
-
-	rv->err = rdmsr_safe_regs(rv->regs);
-}
-
-static void __wrmsr_safe_regs_on_cpu(void *info)
-{
-	struct msr_regs_info *rv = info;
-
-	rv->err = wrmsr_safe_regs(rv->regs);
-}
-
-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
-	int err;
-	struct msr_regs_info rv;
-
-	rv.regs = regs;
-	rv.err = -EIO;
-	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
-
-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
-	int err;
-	struct msr_regs_info rv;
-
-	rv.regs = regs;
-	rv.err = -EIO;
-	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
-
-	return err ? err : rv.err;
-}
-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
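
After this split, msr.c is reduced to the per-CPU buffer allocator, which is why it can be built unconditionally. A typical read-modify-write over all online CPUs with the exported helpers might look like the sketch below (illustrative only; the MSR number and bit are placeholders):

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/msr.h>

/* Illustrative only: set one low-word bit in an MSR on every online CPU. */
static void example_set_msr_bit(u32 msr_no, unsigned int bit)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();
	if (!msrs)
		return;

	/* Snapshot the MSR on each CPU into its per-CPU slot ... */
	rdmsr_on_cpus(cpu_online_mask, msr_no, msrs);

	for_each_online_cpu(cpu)
		per_cpu_ptr(msrs, cpu)->l |= BIT(bit);

	/* ... and write each CPU's (possibly distinct) value back. */
	wrmsr_on_cpus(cpu_online_mask, msr_no, msrs);

	msrs_free(msrs);
}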