author	Xiao Guangrong <guangrong.xiao@linux.intel.com>	2015-06-15 04:55:33 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-06-19 11:16:29 -0400
commit	f571c0973e4b8c888e049b6842e4b4f93b5c609c (patch)
tree	8675c45ba51256fbbc78c65d39d8bff78acf8263
parent	f7bfb57b3e89ff89c0da9f93dedab89f68d6ca27 (diff)
KVM: MTRR: introduce mtrr_for_each_mem_type
It walks all MTRRs and gets all the memory cache type settings for the
specified range; it also checks whether the range is fully covered by
MTRRs.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
[Adjust for range_size->range_shift change. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/mtrr.c | 188 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 188 insertions(+), 0 deletions(-)
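For orientation, here is a minimal sketch of how a caller might drive the
new iterator. It is illustrative only and not part of the patch:
example_range_type() is a hypothetical name, mtrr_default_type() stands in
for the file's existing default-type helper, and the single mismatch check
is a simplification of real MTRR type-conflict resolution.

    static u8 example_range_type(struct kvm_vcpu *vcpu, u64 start, u64 end)
    {
    	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
    	struct mtrr_iter iter;
    	int type = -1;

    	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
    		/* iter.mem_type is the type of the currently matched range. */
    		if (type == -1)
    			type = iter.mem_type;
    		else if (type != iter.mem_type)
    			return MTRR_TYPE_UNCACHABLE; /* simplistic conflict rule */
    	}

    	/* [start, end) not fully covered by MTRRs: use the default type. */
    	if (iter.partial_map || type == -1)
    		type = mtrr_default_type(mtrr_state);

    	return type;
    }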
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index b5346d20f458..4c6ac464063e 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -220,6 +220,15 @@ static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
 	return mtrr_seg->range_start + 8 * unit;
 }
 
+static int fixed_mtrr_seg_end_range_index(int seg)
+{
+	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+	int n;
+
+	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
+	return mtrr_seg->range_start + n - 1;
+}
+
 static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
 {
 	int seg, unit;
@@ -266,6 +275,14 @@ static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
 	return index;
 }
 
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+	int pos = index - mtrr_seg->range_start;
+
+	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
+}
+
 static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
 {
 	u64 mask;
@@ -409,6 +426,177 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
 	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
 }
 
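+/*
+ * Iterator over the memory types of a guest physical address range.
+ * It walks the fixed MTRRs first (when they are enabled and cover the
+ * start of the range) and then the variable MTRRs, producing one
+ * mem_type per matched range.
+ */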
+struct mtrr_iter {
+	/* input fields. */
+	struct kvm_mtrr *mtrr_state;
+	u64 start;
+	u64 end;
+
+	/* output fields. */
+	int mem_type;
+	/* is [start, end) not fully covered by MTRRs? */
+	bool partial_map;
+
+	/* private fields. */
+	union {
+		/* used for fixed MTRRs. */
+		struct {
+			int index;
+			int seg;
+		};
+
+		/* used for var MTRRs. */
+		struct {
+			struct kvm_mtrr_range *range;
+			/* max address that has been covered by var MTRRs. */
+			u64 start_max;
+		};
+	};
+
+	bool fixed;
+};
+
+static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
+{
+	int seg, index;
+
+	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
+		return false;
+
+	seg = fixed_mtrr_addr_to_seg(iter->start);
+	if (seg < 0)
+		return false;
+
+	iter->fixed = true;
+	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
+	iter->index = index;
+	iter->seg = seg;
+	return true;
+}
+
+static bool match_var_range(struct mtrr_iter *iter,
+			    struct kvm_mtrr_range *range)
+{
+	u64 start, end;
+
+	var_mtrr_range(range, &start, &end);
+	if (!(start >= iter->end || end <= iter->start)) {
+		iter->range = range;
+
+		/*
+		 * This function is called while walking kvm_mtrr.head;
+		 * the matched range has the minimum base address among
+		 * those overlapping [iter->start_max, iter->end).
+		 */
+		iter->partial_map |= iter->start_max < start;
+
+		/* update the max address that has been covered. */
+		iter->start_max = max(iter->start_max, end);
+		return true;
+	}
+
+	return false;
+}
+
+static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
+{
+	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
+
+	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
+		if (match_var_range(iter, iter->range))
+			return;
+
+	iter->range = NULL;
+	iter->partial_map |= iter->start_max < iter->end;
+}
+
+static void mtrr_lookup_var_start(struct mtrr_iter *iter)
+{
+	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
+
+	iter->fixed = false;
+	iter->start_max = iter->start;
+	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
+
+	__mtrr_lookup_var_next(iter);
+}
+
+static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
+{
+	/* terminate the lookup. */
+	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
+		iter->fixed = false;
+		iter->range = NULL;
+		return;
+	}
+
+	iter->index++;
+
+	/* have looked up all fixed MTRRs. */
+	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
+		return mtrr_lookup_var_start(iter);
+
+	/* switch to the next segment. */
+	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
+		iter->seg++;
+}
+
+static void mtrr_lookup_var_next(struct mtrr_iter *iter)
+{
+	__mtrr_lookup_var_next(iter);
+}
+
+static void mtrr_lookup_start(struct mtrr_iter *iter)
+{
+	if (!mtrr_is_enabled(iter->mtrr_state)) {
+		iter->partial_map = true;
+		return;
+	}
+
+	if (!mtrr_lookup_fixed_start(iter))
+		mtrr_lookup_var_start(iter);
+}
+
+static void mtrr_lookup_init(struct mtrr_iter *iter,
+			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+	iter->mtrr_state = mtrr_state;
+	iter->start = start;
+	iter->end = end;
+	iter->partial_map = false;
+	iter->fixed = false;
+	iter->range = NULL;
+
+	mtrr_lookup_start(iter);
+}
+
+static bool mtrr_lookup_okay(struct mtrr_iter *iter)
+{
+	if (iter->fixed) {
+		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
+		return true;
+	}
+
+	if (iter->range) {
+		iter->mem_type = iter->range->base & 0xff;
+		return true;
+	}
+
+	return false;
+}
+
+static void mtrr_lookup_next(struct mtrr_iter *iter)
+{
+	if (iter->fixed)
+		mtrr_lookup_fixed_next(iter);
+	else
+		mtrr_lookup_var_next(iter);
+}
+
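+/*
+ * Walk all memory types covering [_gpa_start_, _gpa_end_): on each
+ * iteration _iter_->mem_type holds the type of the current matched
+ * range; after the loop, _iter_->partial_map tells whether the range
+ * was fully covered by MTRRs.
+ */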
+#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
+	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
+	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
+
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;