aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2012-01-16 10:50:10 -0500
committerAvi Kivity <avi@redhat.com>2012-03-05 07:57:25 -0500
commitb4e706111d501991c59d2af23a299ab52a06b03d (patch)
treeb341991a0653307f026a6be570b8de5c62b40ea1 /arch
parent9cf7c0e465197fa97972428e93162318e917f8ed (diff)
KVM: PPC: Convert RMA allocation into generic code
We have code to allocate big chunks of linear memory on bootup for later use. This code is currently used for RMA allocation, but can be useful beyond that extent. Make it generic so we can reuse it for other stuff later.

Signed-off-by: Alexander Graf <agraf@suse.de>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/include/asm/kvm_host.h7
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h8
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c175
5 files changed, 118 insertions, 82 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af438b1e8a3c..8221e717bbce 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -173,12 +173,13 @@ struct kvmppc_spapr_tce_table {
173 struct page *pages[0]; 173 struct page *pages[0];
174}; 174};
175 175
176struct kvmppc_rma_info { 176struct kvmppc_linear_info {
177 void *base_virt; 177 void *base_virt;
178 unsigned long base_pfn; 178 unsigned long base_pfn;
179 unsigned long npages; 179 unsigned long npages;
180 struct list_head list; 180 struct list_head list;
181 atomic_t use_count; 181 atomic_t use_count;
182 int type;
182}; 183};
183 184
184/* 185/*
@@ -224,7 +225,7 @@ struct kvm_arch {
224 int tlbie_lock; 225 int tlbie_lock;
225 unsigned long lpcr; 226 unsigned long lpcr;
226 unsigned long rmor; 227 unsigned long rmor;
227 struct kvmppc_rma_info *rma; 228 struct kvmppc_linear_info *rma;
228 unsigned long vrma_slb_v; 229 unsigned long vrma_slb_v;
229 int rma_setup_done; 230 int rma_setup_done;
230 int using_mmu_notifiers; 231 int using_mmu_notifiers;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a61b5b5047d6..1c37a2f8d0f4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -128,8 +128,8 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
128 struct kvm_create_spapr_tce *args); 128 struct kvm_create_spapr_tce *args);
129extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, 129extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
130 struct kvm_allocate_rma *rma); 130 struct kvm_allocate_rma *rma);
131extern struct kvmppc_rma_info *kvm_alloc_rma(void); 131extern struct kvmppc_linear_info *kvm_alloc_rma(void);
132extern void kvm_release_rma(struct kvmppc_rma_info *ri); 132extern void kvm_release_rma(struct kvmppc_linear_info *ri);
133extern int kvmppc_core_init_vm(struct kvm *kvm); 133extern int kvmppc_core_init_vm(struct kvm *kvm);
134extern void kvmppc_core_destroy_vm(struct kvm *kvm); 134extern void kvmppc_core_destroy_vm(struct kvm *kvm);
135extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, 135extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
@@ -187,13 +187,13 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
187 paca[cpu].kvm_hstate.xics_phys = addr; 187 paca[cpu].kvm_hstate.xics_phys = addr;
188} 188}
189 189
190extern void kvm_rma_init(void); 190extern void kvm_linear_init(void);
191 191
192#else 192#else
193static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) 193static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
194{} 194{}
195 195
196static inline void kvm_rma_init(void) 196static inline void kvm_linear_init(void)
197{} 197{}
198#endif 198#endif
199 199
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4cb8f1e9d044..4721b0c8d7b7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -598,7 +598,7 @@ void __init setup_arch(char **cmdline_p)
598 /* Initialize the MMU context management stuff */ 598 /* Initialize the MMU context management stuff */
599 mmu_context_init(); 599 mmu_context_init();
600 600
601 kvm_rma_init(); 601 kvm_linear_init();
602 602
603 ppc64_boot_msg(0x15, "Setup Done"); 603 ppc64_boot_msg(0x15, "Setup Done");
604} 604}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3580db8a2326..ce1cac765193 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1055,7 +1055,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
1055 1055
1056static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1056static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1057{ 1057{
1058 struct kvmppc_rma_info *ri = vma->vm_file->private_data; 1058 struct kvmppc_linear_info *ri = vma->vm_file->private_data;
1059 struct page *page; 1059 struct page *page;
1060 1060
1061 if (vmf->pgoff >= ri->npages) 1061 if (vmf->pgoff >= ri->npages)
@@ -1080,7 +1080,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
1080 1080
1081static int kvm_rma_release(struct inode *inode, struct file *filp) 1081static int kvm_rma_release(struct inode *inode, struct file *filp)
1082{ 1082{
1083 struct kvmppc_rma_info *ri = filp->private_data; 1083 struct kvmppc_linear_info *ri = filp->private_data;
1084 1084
1085 kvm_release_rma(ri); 1085 kvm_release_rma(ri);
1086 return 0; 1086 return 0;
@@ -1093,7 +1093,7 @@ static struct file_operations kvm_rma_fops = {
1093 1093
1094long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) 1094long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
1095{ 1095{
1096 struct kvmppc_rma_info *ri; 1096 struct kvmppc_linear_info *ri;
1097 long fd; 1097 long fd;
1098 1098
1099 ri = kvm_alloc_rma(); 1099 ri = kvm_alloc_rma();
@@ -1212,7 +1212,7 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
1212{ 1212{
1213 int err = 0; 1213 int err = 0;
1214 struct kvm *kvm = vcpu->kvm; 1214 struct kvm *kvm = vcpu->kvm;
1215 struct kvmppc_rma_info *ri = NULL; 1215 struct kvmppc_linear_info *ri = NULL;
1216 unsigned long hva; 1216 unsigned long hva;
1217 struct kvm_memory_slot *memslot; 1217 struct kvm_memory_slot *memslot;
1218 struct vm_area_struct *vma; 1218 struct vm_area_struct *vma;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index a795a13f4a70..1c7e6ab5f9de 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -18,6 +18,14 @@
18#include <asm/kvm_ppc.h> 18#include <asm/kvm_ppc.h>
19#include <asm/kvm_book3s.h> 19#include <asm/kvm_book3s.h>
20 20
21#define KVM_LINEAR_RMA 0
22
23static void __init kvm_linear_init_one(ulong size, int count, int type);
24static struct kvmppc_linear_info *kvm_alloc_linear(int type);
25static void kvm_release_linear(struct kvmppc_linear_info *ri);
26
27/*************** RMA *************/
28
21/* 29/*
22 * This maintains a list of RMAs (real mode areas) for KVM guests to use. 30 * This maintains a list of RMAs (real mode areas) for KVM guests to use.
23 * Each RMA has to be physically contiguous and of a size that the 31 * Each RMA has to be physically contiguous and of a size that the
@@ -29,32 +37,6 @@
29static unsigned long kvm_rma_size = 64 << 20; /* 64MB */ 37static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
30static unsigned long kvm_rma_count; 38static unsigned long kvm_rma_count;
31 39
32static int __init early_parse_rma_size(char *p)
33{
34 if (!p)
35 return 1;
36
37 kvm_rma_size = memparse(p, &p);
38
39 return 0;
40}
41early_param("kvm_rma_size", early_parse_rma_size);
42
43static int __init early_parse_rma_count(char *p)
44{
45 if (!p)
46 return 1;
47
48 kvm_rma_count = simple_strtoul(p, NULL, 0);
49
50 return 0;
51}
52early_param("kvm_rma_count", early_parse_rma_count);
53
54static struct kvmppc_rma_info *rma_info;
55static LIST_HEAD(free_rmas);
56static DEFINE_SPINLOCK(rma_lock);
57
58/* Work out RMLS (real mode limit selector) field value for a given RMA size. 40/* Work out RMLS (real mode limit selector) field value for a given RMA size.
59 Assumes POWER7 or PPC970. */ 41 Assumes POWER7 or PPC970. */
60static inline int lpcr_rmls(unsigned long rma_size) 42static inline int lpcr_rmls(unsigned long rma_size)
@@ -81,45 +63,73 @@ static inline int lpcr_rmls(unsigned long rma_size)
81 } 63 }
82} 64}
83 65
84/* 66static int __init early_parse_rma_size(char *p)
85 * Called at boot time while the bootmem allocator is active, 67{
86 * to allocate contiguous physical memory for the real memory 68 if (!p)
87 * areas for guests. 69 return 1;
88 */ 70
89void __init kvm_rma_init(void) 71 kvm_rma_size = memparse(p, &p);
72
73 return 0;
74}
75early_param("kvm_rma_size", early_parse_rma_size);
76
77static int __init early_parse_rma_count(char *p)
78{
79 if (!p)
80 return 1;
81
82 kvm_rma_count = simple_strtoul(p, NULL, 0);
83
84 return 0;
85}
86early_param("kvm_rma_count", early_parse_rma_count);
87
88struct kvmppc_linear_info *kvm_alloc_rma(void)
89{
90 return kvm_alloc_linear(KVM_LINEAR_RMA);
91}
92EXPORT_SYMBOL_GPL(kvm_alloc_rma);
93
94void kvm_release_rma(struct kvmppc_linear_info *ri)
95{
96 kvm_release_linear(ri);
97}
98EXPORT_SYMBOL_GPL(kvm_release_rma);
99
100/*************** generic *************/
101
102static LIST_HEAD(free_linears);
103static DEFINE_SPINLOCK(linear_lock);
104
105static void __init kvm_linear_init_one(ulong size, int count, int type)
90{ 106{
91 unsigned long i; 107 unsigned long i;
92 unsigned long j, npages; 108 unsigned long j, npages;
93 void *rma; 109 void *linear;
94 struct page *pg; 110 struct page *pg;
111 const char *typestr;
112 struct kvmppc_linear_info *linear_info;
95 113
96 /* Only do this on PPC970 in HV mode */ 114 if (!count)
97 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
98 !cpu_has_feature(CPU_FTR_ARCH_201))
99 return;
100
101 if (!kvm_rma_size || !kvm_rma_count)
102 return; 115 return;
103 116
104 /* Check that the requested size is one supported in hardware */ 117 typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "";
105 if (lpcr_rmls(kvm_rma_size) < 0) { 118
106 pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size); 119 npages = size >> PAGE_SHIFT;
107 return; 120 linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
108 } 121 for (i = 0; i < count; ++i) {
109 122 linear = alloc_bootmem_align(size, size);
110 npages = kvm_rma_size >> PAGE_SHIFT; 123 pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
111 rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info)); 124 size >> 20);
112 for (i = 0; i < kvm_rma_count; ++i) { 125 linear_info[i].base_virt = linear;
113 rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size); 126 linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
114 pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma, 127 linear_info[i].npages = npages;
115 kvm_rma_size >> 20); 128 linear_info[i].type = type;
116 rma_info[i].base_virt = rma; 129 list_add_tail(&linear_info[i].list, &free_linears);
117 rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT; 130 atomic_set(&linear_info[i].use_count, 0);
118 rma_info[i].npages = npages; 131
119 list_add_tail(&rma_info[i].list, &free_rmas); 132 pg = pfn_to_page(linear_info[i].base_pfn);
120 atomic_set(&rma_info[i].use_count, 0);
121
122 pg = pfn_to_page(rma_info[i].base_pfn);
123 for (j = 0; j < npages; ++j) { 133 for (j = 0; j < npages; ++j) {
124 atomic_inc(&pg->_count); 134 atomic_inc(&pg->_count);
125 ++pg; 135 ++pg;
@@ -127,30 +137,55 @@ void __init kvm_rma_init(void)
127 } 137 }
128} 138}
129 139
130struct kvmppc_rma_info *kvm_alloc_rma(void) 140static struct kvmppc_linear_info *kvm_alloc_linear(int type)
131{ 141{
132 struct kvmppc_rma_info *ri; 142 struct kvmppc_linear_info *ri;
133 143
134 ri = NULL; 144 ri = NULL;
135 spin_lock(&rma_lock); 145 spin_lock(&linear_lock);
136 if (!list_empty(&free_rmas)) { 146 list_for_each_entry(ri, &free_linears, list) {
137 ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list); 147 if (ri->type != type)
148 continue;
149
138 list_del(&ri->list); 150 list_del(&ri->list);
139 atomic_inc(&ri->use_count); 151 atomic_inc(&ri->use_count);
152 break;
140 } 153 }
141 spin_unlock(&rma_lock); 154 spin_unlock(&linear_lock);
142 return ri; 155 return ri;
143} 156}
144EXPORT_SYMBOL_GPL(kvm_alloc_rma);
145 157
146void kvm_release_rma(struct kvmppc_rma_info *ri) 158static void kvm_release_linear(struct kvmppc_linear_info *ri)
147{ 159{
148 if (atomic_dec_and_test(&ri->use_count)) { 160 if (atomic_dec_and_test(&ri->use_count)) {
149 spin_lock(&rma_lock); 161 spin_lock(&linear_lock);
150 list_add_tail(&ri->list, &free_rmas); 162 list_add_tail(&ri->list, &free_linears);
151 spin_unlock(&rma_lock); 163 spin_unlock(&linear_lock);
152 164
153 } 165 }
154} 166}
155EXPORT_SYMBOL_GPL(kvm_release_rma);
156 167
168/*
169 * Called at boot time while the bootmem allocator is active,
170 * to allocate contiguous physical memory for the hash page
171 * tables for guests.
172 */
173void __init kvm_linear_init(void)
174{
175 /* RMA */
176 /* Only do this on PPC970 in HV mode */
177 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
178 !cpu_has_feature(CPU_FTR_ARCH_201))
179 return;
180
181 if (!kvm_rma_size || !kvm_rma_count)
182 return;
183
184 /* Check that the requested size is one supported in hardware */
185 if (lpcr_rmls(kvm_rma_size) < 0) {
186 pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
187 return;
188 }
189
190 kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
191}