author      H. Peter Anvin <hpa@zytor.com>  2008-08-25 20:44:03 -0400
committer   H. Peter Anvin <hpa@zytor.com>  2008-08-26 01:45:50 -0400
commit      bdd314616f7218e325aa9637a46159ecba44cfeb
tree        de3cbbb4aab3bd8344d33c6c5cc262b8d59adfa0 /arch/x86
parent      94d4ac2f4a58c6e37876827c6688c61cef21290c
x86: msr-on-cpu: remove unnecessary level of abstraction
Remove an unnecessary level of abstraction in the msr-on-cpu library.
Although this duplicates some code, the duplicated code is smaller than
the abstraction it replaces, and the result should be faster.
Additionally, change the order of the functions to make the regular
structure of this file more obvious.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/lib/msr-on-cpu.c   78
1 file changed, 36 insertions(+), 42 deletions(-)
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 01b868ba82f8..321cf720dbb6 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -16,37 +16,46 @@ static void __rdmsr_on_cpu(void *info)
 	rdmsr(rv->msr_no, rv->l, rv->h);
 }
 
-static void __rdmsr_safe_on_cpu(void *info)
+static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 
-	rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
+	wrmsr(rv->msr_no, rv->l, rv->h);
 }
 
-static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
-	int err = 0;
+	int err;
 	struct msr_info rv;
 
 	rv.msr_no = msr_no;
-	if (safe) {
-		err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
-					       &rv, 1);
-		err = err ? err : rv.err;
-	} else {
-		err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
-	}
+	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	*l = rv.l;
 	*h = rv.h;
 
 	return err;
 }
 
-static void __wrmsr_on_cpu(void *info)
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	int err;
+	struct msr_info rv;
+
+	rv.msr_no = msr_no;
+	rv.l = l;
+	rv.h = h;
+	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+	return err;
+}
+
+/* These "safe" variants are slower and should be used when the target MSR
+   may not actually exist. */
+static void __rdmsr_safe_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 
-	wrmsr(rv->msr_no, rv->l, rv->h);
+	rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
 }
 
 static void __wrmsr_safe_on_cpu(void *info)
@@ -56,45 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info)
 	rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
 }
 
-static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
-	int err = 0;
+	int err;
 	struct msr_info rv;
 
 	rv.msr_no = msr_no;
-	rv.l = l;
-	rv.h = h;
-	if (safe) {
-		err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
-					       &rv, 1);
-		err = err ? err : rv.err;
-	} else {
-		err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
-	}
-
-	return err;
-}
+	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+	*l = rv.l;
+	*h = rv.h;
 
-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
+	return err ? err : rv.err;
 }
 
-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
-}
-
-/* These "safe" variants are slower and should be used when the target MSR
-   may not actually exist. */
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
-	return _wrmsr_on_cpu(cpu, msr_no, l, h, 1);
-}
+	int err;
+	struct msr_info rv;
 
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	return _rdmsr_on_cpu(cpu, msr_no, l, h, 1);
+	rv.msr_no = msr_no;
+	rv.l = l;
+	rv.h = h;
+	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+	return err ? err : rv.err;
 }
 
 EXPORT_SYMBOL(rdmsr_on_cpu);
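
For context, the sketch below shows how a caller might use the exported helpers whose internals this patch flattens. The function signatures are taken from the diff above; the module wrapper, the target CPU number, and the MSR index (0x10, the TSC) are illustrative assumptions only, not part of the patch.

/* Hypothetical test module; CPU number and MSR index are illustrative only. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/msr.h>

static int __init msr_on_cpu_demo_init(void)
{
	u32 lo, hi;
	int err;

	/* Plain variant: the return value is only the cross-CPU call result. */
	err = rdmsr_on_cpu(1, 0x10, &lo, &hi);
	if (err)
		return err;
	pr_info("CPU 1, MSR 0x10 = %#010x:%#010x\n", hi, lo);

	/* Safe variant: also reports rdmsr_safe() faults (err ? err : rv.err). */
	err = rdmsr_safe_on_cpu(1, 0x10, &lo, &hi);
	if (err)
		pr_info("MSR 0x10 not readable on CPU 1 (%d)\n", err);

	return 0;
}

static void __exit msr_on_cpu_demo_exit(void)
{
}

module_init(msr_on_cpu_demo_init);
module_exit(msr_on_cpu_demo_exit);
MODULE_LICENSE("GPL");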