author     Borislav Petkov <borislav.petkov@amd.com>  2009-12-11 12:14:40 -0500
committer  H. Peter Anvin <hpa@zytor.com>             2009-12-11 13:59:21 -0500
commit     505422517d3f126bb939439e9d15dece94e11d2c (patch)
tree       a2d58c0b3cdf2b1c6b66eee6d78a283224ae1ac3 /arch/x86
parent     5c6baba84e1ac6a79b266b40e17e692aab6604a1 (diff)
x86, msr: Add support for non-contiguous cpumasks
The current rd/wrmsr_on_cpus helpers assume that the supplied cpumasks
are contiguous. However, there are machines out there like some K8
multinode Opterons which have a non-contiguous core enumeration on each
node (e.g. cores 0,2 on node 0 instead of 0,1), see
http://www.gossamer-threads.com/lists/linux/kernel/1160268.

This patch fixes out-of-bounds writes (see URL above) by adding per-CPU
msr structs which are used on the respective cores.

Additionally, two helpers, msrs_{alloc,free}, are provided for use by
the callers of the MSR accessors.

Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Mauro Carvalho Chehab <mchehab@redhat.com>
Cc: Aristeu Rozanski <aris@redhat.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Doug Thompson <dougthompson@xmission.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <20091211171440.GD31998@aftab>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
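To make the out-of-bounds write concrete: before this patch, the callee indexed the caller-supplied array as msrs[this_cpu - cpumask_first(mask)], which is only correct when the mask is contiguous. A minimal userspace sketch (illustration only; the mask values match the K8 example above, and the array size follows the natural cpumask_weight() allocation a caller would make) shows how a mask of {0, 2} overruns a two-element array:

	/*
	 * Illustration only: the pre-patch indexing scheme, reduced to
	 * plain C. For a non-contiguous mask such as {0, 2},
	 * cpumask_weight() == 2, but the computed index for CPU 2 is
	 * (2 - 0) == 2 -- one past the end of a 2-element array.
	 */
	#include <stdio.h>

	int main(void)
	{
		int mask_cpus[] = { 0, 2 };	/* cores 0 and 2 online on this node */
		int nr = 2;			/* cpumask_weight(mask) */
		int off = mask_cpus[0];		/* cpumask_first(mask) == 0 */

		for (int i = 0; i < nr; i++) {
			int this_cpu = mask_cpus[i];
			int idx = this_cpu - off;	/* old: &msrs[this_cpu - off] */

			printf("CPU %d -> msrs[%d]%s\n", this_cpu, idx,
			       idx >= nr ? "  <-- out of bounds" : "");
		}
		return 0;
	}

Switching the storage to a per-CPU allocation sidesteps the problem entirely: per_cpu_ptr() is indexed by the CPU number itself, so holes in the enumeration cannot produce a bad index.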
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/msr.h |  3
-rw-r--r--  arch/x86/lib/msr.c         | 26
2 files changed, 25 insertions(+), 4 deletions(-)
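With this patch, a caller allocates one struct msr per CPU via msrs_alloc(), hands the result to the rd/wrmsr_on_cpus() accessors, and releases it with msrs_free(). A minimal caller sketch (the function name and the MSR number 0xc0010010 are illustrative assumptions, not part of this patch):

	#include <linux/cpumask.h>
	#include <linux/kernel.h>
	#include <linux/percpu.h>
	#include <asm/msr.h>

	/*
	 * Illustrative caller: read one MSR on every CPU in a possibly
	 * non-contiguous mask using the new per-CPU helpers.
	 */
	static int example_read_msr_on_mask(const struct cpumask *mask)
	{
		struct msr *msrs;
		unsigned int cpu;

		msrs = msrs_alloc();	/* per-CPU array, indexed by CPU number */
		if (!msrs)
			return -ENOMEM;

		rdmsr_on_cpus(mask, 0xc0010010, msrs);	/* MSR number is an example */

		for_each_cpu(cpu, mask)
			pr_info("CPU%u: 0x%08x%08x\n", cpu,
				per_cpu_ptr(msrs, cpu)->h,
				per_cpu_ptr(msrs, cpu)->l);

		msrs_free(msrs);
		return 0;
	}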
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 5bef931f8b14..2d228fc9b4b7 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -244,6 +244,9 @@ do {                                                    \
 
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
+
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 41628b104b9e..872834177937 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -7,7 +7,6 @@ struct msr_info {
 	u32 msr_no;
 	struct msr reg;
 	struct msr *msrs;
-	int off;
 	int err;
 };
 
@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
 	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = &rv->msrs[this_cpu - rv->off];
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
 	else
 		reg = &rv->reg;
 
@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
 	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = &rv->msrs[this_cpu - rv->off];
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
 	else
 		reg = &rv->reg;
 
@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
 
 	memset(&rv, 0, sizeof(rv));
 
-	rv.off = cpumask_first(mask);
 	rv.msrs = msrs;
 	rv.msr_no = msr_no;
 
@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);
 
+struct msr *msrs_alloc(void)
+{
+	struct msr *msrs = NULL;
+
+	msrs = alloc_percpu(struct msr);
+	if (!msrs) {
+		pr_warning("%s: error allocating msrs\n", __func__);
+		return NULL;
+	}
+
+	return msrs;
+}
+EXPORT_SYMBOL(msrs_alloc);
+
+void msrs_free(struct msr *msrs)
+{
+	free_percpu(msrs);
+}
+EXPORT_SYMBOL(msrs_free);
+
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
 static void __rdmsr_safe_on_cpu(void *info)