Diffstat (limited to 'arch/x86/kernel/kvmclock.c')
-rw-r--r--   arch/x86/kernel/kvmclock.c   52
1 file changed, 49 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1e6764648af3..013fe3d21dbb 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -28,6 +28,7 @@
 #include <linux/sched/clock.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/set_memory.h>
 
 #include <asm/hypervisor.h>
 #include <asm/mem_encrypt.h>
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 	(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
 
 static struct pvclock_vsyscall_time_info
-	hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
-static struct pvclock_wall_clock wall_clock;
+	hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock __bss_decrypted;
 static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+static struct pvclock_vsyscall_time_info *hvclock_mem;
 
 static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
 {
@@ -236,6 +238,45 @@ static void kvm_shutdown(void)
 	native_machine_shutdown();
 }
 
+static void __init kvmclock_init_mem(void)
+{
+	unsigned long ncpus;
+	unsigned int order;
+	struct page *p;
+	int r;
+
+	if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
+		return;
+
+	ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
+	order = get_order(ncpus * sizeof(*hvclock_mem));
+
+	p = alloc_pages(GFP_KERNEL, order);
+	if (!p) {
+		pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
+		return;
+	}
+
+	hvclock_mem = page_address(p);
+
+	/*
+	 * hvclock is shared between the guest and the hypervisor, must
+	 * be mapped decrypted.
+	 */
+	if (sev_active()) {
+		r = set_memory_decrypted((unsigned long) hvclock_mem,
+					 1UL << order);
+		if (r) {
+			__free_pages(p, order);
+			hvclock_mem = NULL;
+			pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
+			return;
+		}
+	}
+
+	memset(hvclock_mem, 0, PAGE_SIZE << order);
+}
+
 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
 #ifdef CONFIG_X86_64
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
 
 	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
 #endif
+
+	kvmclock_init_mem();
+
 	return 0;
 }
 early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu)
 	/* Use the static page for the first CPUs, allocate otherwise */
 	if (cpu < HVC_BOOT_ARRAY_SIZE)
 		p = &hv_clock_boot[cpu];
+	else if (hvclock_mem)
+		p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
 	else
-		return -ENOMEM;
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
 
 	per_cpu(hv_clock_per_cpu, cpu) = p;
 	return p ? 0 : -ENOMEM;
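
Note on the sizing arithmetic in kvmclock_init_mem() above: the static hv_clock_boot[] page covers PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info) CPUs, and the patch allocates one higher-order page block for the remaining possible CPUs. The standalone C sketch below (not kernel code; the 4 KiB page size, the 32-byte entry size, and the 512-CPU count are assumed example values) mirrors that calculation with a get_order()-style helper.

#include <stdio.h>

#define PAGE_SIZE            4096UL
#define PVTI_SIZE            32UL   /* assumed sizeof(struct pvclock_vsyscall_time_info) */
#define HVC_BOOT_ARRAY_SIZE  (PAGE_SIZE / PVTI_SIZE)

/* Smallest order such that (PAGE_SIZE << order) >= size, like the kernel's get_order(). */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long possible_cpus = 512;      /* example vCPU count */
	unsigned long ncpus;
	unsigned int order;

	if (possible_cpus <= HVC_BOOT_ARRAY_SIZE) {
		printf("all CPUs fit in the static hv_clock_boot page\n");
		return 0;
	}

	ncpus = possible_cpus - HVC_BOOT_ARRAY_SIZE;
	order = order_for(ncpus * PVTI_SIZE);
	printf("%lu extra CPUs -> order %u (%lu pages)\n",
	       ncpus, order, 1UL << order);
	return 0;
}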