author    Alexander Graf <agraf@suse.de>  2011-09-19 19:31:48 -0400
committer Avi Kivity <avi@redhat.com>     2012-03-05 07:52:24 -0500
commit    95325e6b190bb4ec3383aa1241d10675057bff45 (patch)
tree      2920d458a8e25539f6031a4e520afa81331d9a7b /arch/powerpc
parent    841741f23b91088810e657a535b8aa683136d870 (diff)
KVM: PPC: E500: Support hugetlbfs
With hugetlbfs support emerging on e500, we should also support KVM backing its guest memory by it.

This patch adds support for hugetlbfs into the e500 shadow mmu code.

Signed-off-by: Alexander Graf <agraf@suse.de>
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
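The size negotiation that the patch's new else-if branch performs can be sketched outside the kernel. Below is a minimal standalone C illustration, not the actual KVM code: pick_tsize() and ilog2_ul() are made-up helpers, PAGE_SHIFT is assumed to be 12 (4 KB host pages), BOOK3E_PAGESZ_4K is taken as 2 on the log2-of-KB TSIZE encoding implied by the patch's "__ilog2(psize) - 10" and "1 << (tsize + 10 - PAGE_SHIFT)" arithmetic, and the 16 MB / 256 MB figures are just example numbers.

/*
 * Standalone sketch of the page-size negotiation this patch performs
 * (illustration only, not kernel code).
 */
#include <stdio.h>

#define PAGE_SHIFT       12     /* 4 KB host pages (assumption) */
#define BOOK3E_PAGESZ_4K 2      /* 4 KB == 2^2 KB on the log2(KB) encoding */

/* log2 of an unsigned long, rounded down (stand-in for __ilog2) */
static int ilog2_ul(unsigned long v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

/*
 * Pick the largest TSIZE that both the guest mapping and the host
 * hugepage (host_psize bytes) can satisfy, with the e500 restriction
 * that only even TSIZE values (power-of-4 KB sizes) are implemented.
 */
static int pick_tsize(unsigned long host_psize, int guest_tsize)
{
	int tsize = guest_tsize;

	if (ilog2_ul(host_psize) - 10 < tsize)
		tsize = ilog2_ul(host_psize) - 10;

	tsize &= ~1;                    /* low TSIZE bit not implemented */
	if (tsize < BOOK3E_PAGESZ_4K)   /* no 1 KB / 2 KB pages either */
		tsize = BOOK3E_PAGESZ_4K;

	return tsize;
}

int main(void)
{
	/* Example: guest asks for a 256 MB mapping (TSIZE 18) backed by
	 * 16 MB hugetlbfs pages on the host. */
	unsigned long host_psize = 16UL << 20;
	int guest_tsize = 18;

	int tsize = pick_tsize(host_psize, guest_tsize);
	unsigned long tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);

	printf("chosen TSIZE %d -> %lu KB pages, %lu host pages each\n",
	       tsize, 1UL << tsize, tsize_pages);
	return 0;
}

Run as is, the sketch reports TSIZE 14, i.e. a 16 MB shadow mapping made of 4096 host pages, which is the largest size both the 16 MB hugepage and the 256 MB guest entry allow.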
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c  24
1 file changed, 24 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index ec17148392b..6fefb9144f2 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/rwsem.h>
 #include <linux/vmalloc.h>
+#include <linux/hugetlb.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_e500.h>
 
@@ -673,12 +674,31 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 				pfn &= ~(tsize_pages - 1);
 				break;
 			}
+		} else if (vma && hva >= vma->vm_start &&
+			   (vma->vm_flags & VM_HUGETLB)) {
+			unsigned long psize = vma_kernel_pagesize(vma);
+
+			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+				MAS1_TSIZE_SHIFT;
+
+			/*
+			 * Take the largest page size that satisfies both host
+			 * and guest mapping
+			 */
+			tsize = min(__ilog2(psize) - 10, tsize);
+
+			/*
+			 * e500 doesn't implement the lowest tsize bit,
+			 * or 1K pages.
+			 */
+			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
 		}
 
 		up_read(&current->mm->mmap_sem);
 	}
 
 	if (likely(!pfnmap)) {
+		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
 		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
 		if (is_error_pfn(pfn)) {
 			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
@@ -686,6 +706,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 			kvm_release_pfn_clean(pfn);
 			return;
 		}
+
+		/* Align guest and physical address to page map boundaries */
+		pfn &= ~(tsize_pages - 1);
+		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
 	/* Drop old ref and setup new one. */
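Continuing the same illustrative numbers (TSIZE 14, 4 KB host pages), the masking added in the last hunk can be checked with a few lines of standalone C; the pfn and gvaddr values below are arbitrary examples, not taken from the patch.

#include <stdio.h>

int main(void)
{
	/* Illustrative values: TSIZE 14 (16 MB pages), 4 KB host pages. */
	int tsize = 14, page_shift = 12;
	unsigned long tsize_pages = 1UL << (tsize + 10 - page_shift); /* 4096 */

	unsigned long pfn = 0x12345;          /* arbitrary example pfn */
	unsigned long gvaddr = 0xc1234567UL;  /* arbitrary example guest vaddr */

	/* Same masking as the patch: round both down to the mapping boundary. */
	pfn &= ~(tsize_pages - 1);
	gvaddr &= ~((tsize_pages << page_shift) - 1);

	printf("pfn -> 0x%lx, gvaddr -> 0x%lx\n", pfn, gvaddr);
	return 0;
}

With these inputs the masks yield pfn 0x12000 and gvaddr 0xc1000000, i.e. both addresses are rounded down to the 16 MB boundary before the shadow TLB entry is written.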