Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_builtin.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c  136
1 file changed, 34 insertions, 102 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 4fdc27c80f4c..3e43f815ac5d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -17,6 +17,7 @@
 #include <linux/memblock.h>
 #include <linux/sizes.h>
 #include <linux/cma.h>
+#include <linux/bitops.h>
 
 #include <asm/cputable.h>
 #include <asm/kvm_ppc.h>
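
The only change in this hunk is the new <linux/bitops.h> include, presumably for the set_bit()/clear_bit()/hweight32() calls in the kvmppc_rm_h_confer() code added further down. As a rough userspace sketch of the arithmetic that code performs (popcount32() and should_yield() are hypothetical names, not kernel helpers):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for the kernel's hweight32(): 32-bit popcount. */
static int popcount32(unsigned int x)
{
	int n = 0;

	for (; x; x &= x - 1)	/* clear the lowest set bit each pass */
		n++;
	return n;
}

/* The yield test the new kvmppc_rm_h_confer() applies (illustrative only):
 * give up once every running thread is either napping or also conferring. */
static bool should_yield(int running, unsigned int napping_mask,
			 unsigned int conferring_mask)
{
	return popcount32(napping_mask) + popcount32(conferring_mask) >= running;
}

int main(void)
{
	/* 4 threads in the guest; threads 0 and 2 napping, thread 1 conferring */
	printf("%d\n", should_yield(4, 0x5, 0x2));	/* 0: keep spinning */
	/* thread 3 starts conferring too */
	printf("%d\n", should_yield(4, 0x5, 0xa));	/* 1: yield (H_TOO_HARD) */
	return 0;
}
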
@@ -33,95 +34,9 @@
  * By default we reserve 5% of memory for hash pagetable allocation.
  */
 static unsigned long kvm_cma_resv_ratio = 5;
-/*
- * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
- * Each RMA has to be physically contiguous and of a size that the
- * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
- * and other larger sizes. Since we are unlikely to be allocate that
- * much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot using CMA.
- * should be power of 2.
- */
-unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
-EXPORT_SYMBOL_GPL(kvm_rma_pages);
 
 static struct cma *kvm_cma;
 
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
-	switch (rma_size) {
-	case 32ul << 20:	/* 32 MB */
-		if (cpu_has_feature(CPU_FTR_ARCH_206))
-			return 8;	/* only supported on POWER7 */
-		return -1;
-	case 64ul << 20:	/* 64 MB */
-		return 3;
-	case 128ul << 20:	/* 128 MB */
-		return 7;
-	case 256ul << 20:	/* 256 MB */
-		return 4;
-	case 1ul << 30:		/* 1 GB */
-		return 2;
-	case 16ul << 30:	/* 16 GB */
-		return 1;
-	case 256ul << 30:	/* 256 GB */
-		return 0;
-	default:
-		return -1;
-	}
-}
-
-static int __init early_parse_rma_size(char *p)
-{
-	unsigned long kvm_rma_size;
-
-	pr_debug("%s(%s)\n", __func__, p);
-	if (!p)
-		return -EINVAL;
-	kvm_rma_size = memparse(p, &p);
-	/*
-	 * Check that the requested size is one supported in hardware
-	 */
-	if (lpcr_rmls(kvm_rma_size) < 0) {
-		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
-		return -EINVAL;
-	}
-	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
-	return 0;
-}
-early_param("kvm_rma_size", early_parse_rma_size);
-
-struct kvm_rma_info *kvm_alloc_rma()
-{
-	struct page *page;
-	struct kvm_rma_info *ri;
-
-	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
-	if (!ri)
-		return NULL;
-	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
-	if (!page)
-		goto err_out;
-	atomic_set(&ri->use_count, 1);
-	ri->base_pfn = page_to_pfn(page);
-	return ri;
-err_out:
-	kfree(ri);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-
-void kvm_release_rma(struct kvm_rma_info *ri)
-{
-	if (atomic_dec_and_test(&ri->use_count)) {
-		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
-		kfree(ri);
-	}
-}
-EXPORT_SYMBOL_GPL(kvm_release_rma);
-
 static int __init early_parse_kvm_cma_resv(char *p)
 {
 	pr_debug("%s(%s)\n", __func__, p);
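
This hunk drops the RMA (real mode area) preallocation machinery: kvm_rma_pages, the lpcr_rmls() size-to-RMLS lookup, the kvm_rma_size= early parameter and the kvm_alloc_rma()/kvm_release_rma() pair. The removed early_param handler relied on memparse() to accept suffixed sizes such as 128M; a rough userspace analogue of that suffix handling (parse_size() is illustrative, not the kernel's memparse()):

#include <stdio.h>
#include <stdlib.h>

/* Very rough analogue of memparse(): parse a number with an
 * optional K/M/G suffix and return the value in bytes. */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 30; break;
	case 'M': case 'm': v <<= 20; break;
	case 'K': case 'k': v <<= 10; break;
	default: break;
	}
	return v;
}

int main(void)
{
	printf("%llu\n", parse_size("128M"));	/* 134217728 */
	return 0;
}
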
@@ -133,14 +48,9 @@ early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
 
 struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
-	unsigned long align_pages = HPT_ALIGN_PAGES;
-
 	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
-	/* Old CPUs require HPT aligned on a multiple of its size */
-	if (!cpu_has_feature(CPU_FTR_ARCH_206))
-		align_pages = nr_pages;
-	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
 
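
With the CPU_FTR_ARCH_206 special case gone, kvm_alloc_hpt() always requests CMA alignment of HPT_ALIGN_PAGES, expressed as an order via order_base_2(), which is effectively ceil(log2(n)). A minimal userspace illustration of that rounding (order_of() is a hypothetical stand-in, not the kernel macro):

#include <stdio.h>

/* ceil(log2(n)) for n >= 1 -- mirrors what order_base_2() computes. */
static unsigned int order_of(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	printf("%u %u %u\n", order_of(1), order_of(4096), order_of(4097));
	/* prints: 0 12 13 */
	return 0;
}
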
@@ -181,22 +91,44 @@ void __init kvm_cma_reserve(void)
 	if (selected_size) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
-		/*
-		 * Old CPUs require HPT aligned on a multiple of its size. So for them
-		 * make the alignment as max size we could request.
-		 */
-		if (!cpu_has_feature(CPU_FTR_ARCH_206))
-			align_size = __rounddown_pow_of_two(selected_size);
-		else
-			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
-
-		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
 		cma_declare_contiguous(0, selected_size, 0, align_size,
 			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
 	}
 }
 
 /*
+ * Real-mode H_CONFER implementation.
+ * We check if we are the only vcpu out of this virtual core
+ * still running in the guest and not ceded. If so, we pop up
+ * to the virtual-mode implementation; if not, just return to
+ * the guest.
+ */
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+			    unsigned int yield_count)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	int threads_running;
+	int threads_ceded;
+	int threads_conferring;
+	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
+	int rv = H_SUCCESS; /* => don't yield */
+
+	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
+	while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
+		threads_running = VCORE_ENTRY_COUNT(vc);
+		threads_ceded = hweight32(vc->napping_threads);
+		threads_conferring = hweight32(vc->conferring_threads);
+		if (threads_ceded + threads_conferring >= threads_running) {
+			rv = H_TOO_HARD; /* => do yield */
+			break;
+		}
+	}
+	clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+	return rv;
+}
+
+/*
  * When running HV mode KVM we need to block certain operations while KVM VMs
  * exist in the system. We use a counter of VMs to track this.
  *