path: root/arch/x86/lib/msr.c
author     Borislav Petkov <borislav.petkov@amd.com>   2009-05-22 07:52:19 -0400
committer  Borislav Petkov <borislav.petkov@amd.com>   2009-06-10 06:18:43 -0400
commit     b034c19f9f61c8b6f2435aa2e77f52348ebde767 (patch)
tree       0dce206e1eaa998b7233a4679e366890881c1f7f /arch/x86/lib/msr.c
parent     6bc1096d7ab3621b3ffcf06616d1f4e0325d903d (diff)
x86: MSR: add methods for writing of an MSR on several CPUs
Provide for concurrent MSR writes on all the CPUs in the cpumask. Also, add a
temporary workaround for smp_call_function_many which skips the CPU we're
executing on.

Bart: zero out rv struct which is allocated on stack.

CC: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
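For context only, a minimal, hypothetical sketch of a caller using the read-side interface added by this series. The helper name example_rdmsr_all() and the choice of cpu_online_mask are illustrative assumptions, not part of the patch; the declarations are assumed to come from <asm/msr.h>, where the companion header change of this commit adds them. Results land at index (cpu - first CPU in the mask), matching the rv.off bookkeeping in the diff below.

/* Hypothetical usage sketch -- not part of this commit. */
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <asm/msr.h>

static void example_rdmsr_all(u32 msr_no)	/* illustrative helper name */
{
	struct msr *msrs;
	int cpu, first = cpumask_first(cpu_online_mask);

	/* nr_cpu_ids slots keep the (cpu - first) indexing in bounds */
	msrs = kcalloc(nr_cpu_ids, sizeof(*msrs), GFP_KERNEL);
	if (!msrs)
		return;

	/* read msr_no on every online CPU in one call */
	rdmsr_on_cpus(cpu_online_mask, msr_no, msrs);

	for_each_online_cpu(cpu)
		pr_info("MSR 0x%x on CPU %d: 0x%08x%08x\n", msr_no, cpu,
			msrs[cpu - first].h, msrs[cpu - first].l);

	kfree(msrs);
}

Sizing the array by nr_cpu_ids rather than the mask weight keeps the per-CPU slot lookup valid even if CPU numbers in the mask are not contiguous.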
Diffstat (limited to 'arch/x86/lib/msr.c')
-rw-r--r--  arch/x86/lib/msr.c  98
1 file changed, 92 insertions(+), 6 deletions(-)
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index cade714e57f9..1440b9c0547e 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -6,21 +6,37 @@
 struct msr_info {
 	u32 msr_no;
 	struct msr reg;
+	struct msr *msrs;
+	int off;
 	int err;
 };
 
 static void __rdmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
 
-	rdmsr(rv->msr_no, rv->reg.l, rv->reg.h);
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;
+
+	rdmsr(rv->msr_no, reg->l, reg->h);
 }
 
 static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;
 
-	wrmsr(rv->msr_no, rv->reg.l, rv->reg.h);
+	wrmsr(rv->msr_no, reg->l, reg->h);
 }
 
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
@@ -28,6 +44,8 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	*l = rv.reg.l;
@@ -35,12 +53,15 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 
 	return err;
 }
+EXPORT_SYMBOL(rdmsr_on_cpu);
 
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	rv.reg.l = l;
 	rv.reg.h = h;
@@ -48,6 +69,70 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 
 	return err;
 }
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask:       which CPUs
+ * @msr_no:     which MSR
+ * @msrs:       array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask:       which CPUs
+ * @msr_no:     which MSR
+ * @msrs:       array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);
 
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
@@ -70,6 +155,8 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
 	*l = rv.reg.l;
@@ -77,12 +164,15 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 
 	return err ? err : rv.err;
 }
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
 
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;
 
+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	rv.reg.l = l;
 	rv.reg.h = h;
@@ -90,8 +180,4 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 
 	return err ? err : rv.err;
 }
-
-EXPORT_SYMBOL(rdmsr_on_cpu);
-EXPORT_SYMBOL(wrmsr_on_cpu);
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
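For completeness, an equally hypothetical sketch of the write side, matching the commit's stated purpose of concurrent MSR writes on all the CPUs in a cpumask. The helper name example_wrmsr_all() and the use of cpu_online_mask are again assumptions, not part of the patch.

/* Hypothetical usage sketch -- not part of this commit. */
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <asm/msr.h>

static void example_wrmsr_all(u32 msr_no, u32 lo, u32 hi)	/* illustrative */
{
	struct msr *msrs;
	int cpu, first = cpumask_first(cpu_online_mask);

	msrs = kcalloc(nr_cpu_ids, sizeof(*msrs), GFP_KERNEL);
	if (!msrs)
		return;

	/* each CPU writes the value held in its own slot of the array */
	for_each_online_cpu(cpu) {
		msrs[cpu - first].l = lo;
		msrs[cpu - first].h = hi;
	}

	wrmsr_on_cpus(cpu_online_mask, msr_no, msrs);
	kfree(msrs);
}

Because __wrmsr_on_cpu() picks its slot via (this_cpu - off), each CPU could be handed a different value; this sketch simply stores the same value in every slot.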