aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2010-03-01 09:34:35 -0500
committerAvi Kivity <avi@redhat.com>2010-05-17 05:15:08 -0400
commit455716fa941ec7a03c04bd54e1b906698171b15c (patch)
tree0bd3cef05eaf333e6c9d9d51d369bb3fcdac723c /arch/x86/kvm/svm.c
parentd24778265ac9b2602889a5e99c6e7ba777a236df (diff)
KVM: SVM: Move msrpm offset calculation to separate function
The algorithm to find the offset in the msrpm for a given MSR is needed in other places too. Move that logic into its own function.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c53
1 files changed, 37 insertions, 16 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 07437ca12787..429a24435c6d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -117,6 +117,8 @@ struct vcpu_svm {
117 unsigned long int3_rip; 117 unsigned long int3_rip;
118}; 118};
119 119
120#define MSR_INVALID 0xffffffffU
121
120/* enable NPT for AMD64 and X86 with PAE */ 122/* enable NPT for AMD64 and X86 with PAE */
121#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 123#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
122static bool npt_enabled = true; 124static bool npt_enabled = true;
@@ -200,6 +202,27 @@ static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
200#define MSRS_RANGE_SIZE 2048 202#define MSRS_RANGE_SIZE 2048
201#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) 203#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
202 204
205static u32 svm_msrpm_offset(u32 msr)
206{
207 u32 offset;
208 int i;
209
210 for (i = 0; i < NUM_MSR_MAPS; i++) {
211 if (msr < msrpm_ranges[i] ||
212 msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
213 continue;
214
215 offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
216 offset += (i * MSRS_RANGE_SIZE); /* add range offset */
217
218 /* Now we have the u8 offset - but need the u32 offset */
219 return offset / 4;
220 }
221
222 /* MSR not in any range */
223 return MSR_INVALID;
224}
225
203#define MAX_INST_SIZE 15 226#define MAX_INST_SIZE 15
204 227
205static inline u32 svm_has(u32 feat) 228static inline u32 svm_has(u32 feat)
@@ -418,23 +441,21 @@ err_1:
418static void set_msr_interception(u32 *msrpm, unsigned msr, 441static void set_msr_interception(u32 *msrpm, unsigned msr,
419 int read, int write) 442 int read, int write)
420{ 443{
421 int i; 444 u8 bit_read, bit_write;
445 unsigned long tmp;
446 u32 offset;
422 447
423 for (i = 0; i < NUM_MSR_MAPS; i++) { 448 offset = svm_msrpm_offset(msr);
424 if (msr >= msrpm_ranges[i] && 449 bit_read = 2 * (msr & 0x0f);
425 msr < msrpm_ranges[i] + MSRS_IN_RANGE) { 450 bit_write = 2 * (msr & 0x0f) + 1;
426 u32 msr_offset = (i * MSRS_IN_RANGE + msr - 451 tmp = msrpm[offset];
427 msrpm_ranges[i]) * 2; 452
428 453 BUG_ON(offset == MSR_INVALID);
429 u32 *base = msrpm + (msr_offset / 32); 454
430 u32 msr_shift = msr_offset % 32; 455 read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
431 u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1); 456 write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
432 *base = (*base & ~(0x3 << msr_shift)) | 457
433 (mask << msr_shift); 458 msrpm[offset] = tmp;
434 return;
435 }
436 }
437 BUG();
438} 459}
439 460
440static void svm_vcpu_init_msrpm(u32 *msrpm) 461static void svm_vcpu_init_msrpm(u32 *msrpm)