aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/hyperv/Makefile3
-rw-r--r--arch/x86/hyperv/hv_apic.c256
-rw-r--r--arch/x86/hyperv/hv_init.c32
-rw-r--r--arch/x86/hyperv/mmu.c75
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h30
-rw-r--r--arch/x86/include/asm/mshyperv.h44
6 files changed, 368 insertions, 72 deletions
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index 367a8203cfcf..b173d404e3df 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1 +1,2 @@
1obj-y := hv_init.o mmu.o 1obj-y := hv_init.o mmu.o
2obj-$(CONFIG_X86_64) += hv_apic.o
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
new file mode 100644
index 000000000000..f68855499391
--- /dev/null
+++ b/arch/x86/hyperv/hv_apic.c
@@ -0,0 +1,256 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Hyper-V specific APIC code.
5 *
6 * Copyright (C) 2018, Microsoft, Inc.
7 *
8 * Author : K. Y. Srinivasan <kys@microsoft.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
17 * NON INFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 */
21
22#include <linux/types.h>
23#include <linux/version.h>
24#include <linux/vmalloc.h>
25#include <linux/mm.h>
26#include <linux/clockchips.h>
27#include <linux/hyperv.h>
28#include <linux/slab.h>
29#include <linux/cpuhotplug.h>
30#include <asm/hypervisor.h>
31#include <asm/mshyperv.h>
32#include <asm/apic.h>
33
34static struct apic orig_apic;
35
36static u64 hv_apic_icr_read(void)
37{
38 u64 reg_val;
39
40 rdmsrl(HV_X64_MSR_ICR, reg_val);
41 return reg_val;
42}
43
44static void hv_apic_icr_write(u32 low, u32 id)
45{
46 u64 reg_val;
47
48 reg_val = SET_APIC_DEST_FIELD(id);
49 reg_val = reg_val << 32;
50 reg_val |= low;
51
52 wrmsrl(HV_X64_MSR_ICR, reg_val);
53}
54
55static u32 hv_apic_read(u32 reg)
56{
57 u32 reg_val, hi;
58
59 switch (reg) {
60 case APIC_EOI:
61 rdmsr(HV_X64_MSR_EOI, reg_val, hi);
62 return reg_val;
63 case APIC_TASKPRI:
64 rdmsr(HV_X64_MSR_TPR, reg_val, hi);
65 return reg_val;
66
67 default:
68 return native_apic_mem_read(reg);
69 }
70}
71
72static void hv_apic_write(u32 reg, u32 val)
73{
74 switch (reg) {
75 case APIC_EOI:
76 wrmsr(HV_X64_MSR_EOI, val, 0);
77 break;
78 case APIC_TASKPRI:
79 wrmsr(HV_X64_MSR_TPR, val, 0);
80 break;
81 default:
82 native_apic_mem_write(reg, val);
83 }
84}
85
86static void hv_apic_eoi_write(u32 reg, u32 val)
87{
88 wrmsr(HV_X64_MSR_EOI, val, 0);
89}
90
91/*
92 * IPI implementation on Hyper-V.
93 */
94static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
95{
96 struct ipi_arg_ex **arg;
97 struct ipi_arg_ex *ipi_arg;
98 unsigned long flags;
99 int nr_bank = 0;
100 int ret = 1;
101
102 local_irq_save(flags);
103 arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
104
105 ipi_arg = *arg;
106 if (unlikely(!ipi_arg))
107 goto ipi_mask_ex_done;
108
109 ipi_arg->vector = vector;
110 ipi_arg->reserved = 0;
111 ipi_arg->vp_set.valid_bank_mask = 0;
112
113 if (!cpumask_equal(mask, cpu_present_mask)) {
114 ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
115 nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
116 }
117 if (!nr_bank)
118 ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
119
120 ret = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
121 ipi_arg, NULL);
122
123ipi_mask_ex_done:
124 local_irq_restore(flags);
125 return ((ret == 0) ? true : false);
126}
127
128static bool __send_ipi_mask(const struct cpumask *mask, int vector)
129{
130 int cur_cpu, vcpu;
131 struct ipi_arg_non_ex **arg;
132 struct ipi_arg_non_ex *ipi_arg;
133 int ret = 1;
134 unsigned long flags;
135
136 if (cpumask_empty(mask))
137 return true;
138
139 if (!hv_hypercall_pg)
140 return false;
141
142 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
143 return false;
144
145 if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
146 return __send_ipi_mask_ex(mask, vector);
147
148 local_irq_save(flags);
149 arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
150
151 ipi_arg = *arg;
152 if (unlikely(!ipi_arg))
153 goto ipi_mask_done;
154
155 ipi_arg->vector = vector;
156 ipi_arg->reserved = 0;
157 ipi_arg->cpu_mask = 0;
158
159 for_each_cpu(cur_cpu, mask) {
160 vcpu = hv_cpu_number_to_vp_number(cur_cpu);
161 /*
162 * This particular version of the IPI hypercall can
 163		 * only target up to 64 CPUs.
164 */
165 if (vcpu >= 64)
166 goto ipi_mask_done;
167
168 __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
169 }
170
171 ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
172
173ipi_mask_done:
174 local_irq_restore(flags);
175 return ((ret == 0) ? true : false);
176}
177
178static bool __send_ipi_one(int cpu, int vector)
179{
180 struct cpumask mask = CPU_MASK_NONE;
181
182 cpumask_set_cpu(cpu, &mask);
183 return __send_ipi_mask(&mask, vector);
184}
185
186static void hv_send_ipi(int cpu, int vector)
187{
188 if (!__send_ipi_one(cpu, vector))
189 orig_apic.send_IPI(cpu, vector);
190}
191
192static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
193{
194 if (!__send_ipi_mask(mask, vector))
195 orig_apic.send_IPI_mask(mask, vector);
196}
197
198static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
199{
200 unsigned int this_cpu = smp_processor_id();
201 struct cpumask new_mask;
202 const struct cpumask *local_mask;
203
204 cpumask_copy(&new_mask, mask);
205 cpumask_clear_cpu(this_cpu, &new_mask);
206 local_mask = &new_mask;
207 if (!__send_ipi_mask(local_mask, vector))
208 orig_apic.send_IPI_mask_allbutself(mask, vector);
209}
210
211static void hv_send_ipi_allbutself(int vector)
212{
213 hv_send_ipi_mask_allbutself(cpu_online_mask, vector);
214}
215
216static void hv_send_ipi_all(int vector)
217{
218 if (!__send_ipi_mask(cpu_online_mask, vector))
219 orig_apic.send_IPI_all(vector);
220}
221
222static void hv_send_ipi_self(int vector)
223{
224 if (!__send_ipi_one(smp_processor_id(), vector))
225 orig_apic.send_IPI_self(vector);
226}
227
228void __init hv_apic_init(void)
229{
230 if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
231 if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
232 pr_info("Hyper-V: Using ext hypercalls for IPI\n");
233 else
234 pr_info("Hyper-V: Using IPI hypercalls\n");
235 /*
236 * Set the IPI entry points.
237 */
238 orig_apic = *apic;
239
240 apic->send_IPI = hv_send_ipi;
241 apic->send_IPI_mask = hv_send_ipi_mask;
242 apic->send_IPI_mask_allbutself = hv_send_ipi_mask_allbutself;
243 apic->send_IPI_allbutself = hv_send_ipi_allbutself;
244 apic->send_IPI_all = hv_send_ipi_all;
245 apic->send_IPI_self = hv_send_ipi_self;
246 }
247
248 if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
249 pr_info("Hyper-V: Using MSR based APIC access\n");
250 apic_set_eoi_write(hv_apic_eoi_write);
251 apic->read = hv_apic_read;
252 apic->write = hv_apic_write;
253 apic->icr_write = hv_apic_icr_write;
254 apic->icr_read = hv_apic_icr_read;
255 }
256}
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index cfecc2272f2d..4c431e1c1eff 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -91,12 +91,19 @@ EXPORT_SYMBOL_GPL(hv_vp_index);
91struct hv_vp_assist_page **hv_vp_assist_page; 91struct hv_vp_assist_page **hv_vp_assist_page;
92EXPORT_SYMBOL_GPL(hv_vp_assist_page); 92EXPORT_SYMBOL_GPL(hv_vp_assist_page);
93 93
94void __percpu **hyperv_pcpu_input_arg;
95EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);
96
94u32 hv_max_vp_index; 97u32 hv_max_vp_index;
95 98
96static int hv_cpu_init(unsigned int cpu) 99static int hv_cpu_init(unsigned int cpu)
97{ 100{
98 u64 msr_vp_index; 101 u64 msr_vp_index;
99 struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; 102 struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
103 void **input_arg;
104
105 input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
106 *input_arg = page_address(alloc_page(GFP_KERNEL));
100 107
101 hv_get_vp_index(msr_vp_index); 108 hv_get_vp_index(msr_vp_index);
102 109
@@ -217,6 +224,16 @@ static int hv_cpu_die(unsigned int cpu)
217{ 224{
218 struct hv_reenlightenment_control re_ctrl; 225 struct hv_reenlightenment_control re_ctrl;
219 unsigned int new_cpu; 226 unsigned int new_cpu;
227 unsigned long flags;
228 void **input_arg;
229 void *input_pg = NULL;
230
231 local_irq_save(flags);
232 input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
233 input_pg = *input_arg;
234 *input_arg = NULL;
235 local_irq_restore(flags);
236 free_page((unsigned long)input_pg);
220 237
221 if (hv_vp_assist_page && hv_vp_assist_page[cpu]) 238 if (hv_vp_assist_page && hv_vp_assist_page[cpu])
222 wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0); 239 wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0);
@@ -242,8 +259,9 @@ static int hv_cpu_die(unsigned int cpu)
242 * 259 *
243 * 1. Setup the hypercall page. 260 * 1. Setup the hypercall page.
244 * 2. Register Hyper-V specific clocksource. 261 * 2. Register Hyper-V specific clocksource.
262 * 3. Setup Hyper-V specific APIC entry points.
245 */ 263 */
246void hyperv_init(void) 264void __init hyperv_init(void)
247{ 265{
248 u64 guest_id, required_msrs; 266 u64 guest_id, required_msrs;
249 union hv_x64_msr_hypercall_contents hypercall_msr; 267 union hv_x64_msr_hypercall_contents hypercall_msr;
@@ -259,6 +277,16 @@ void hyperv_init(void)
259 if ((ms_hyperv.features & required_msrs) != required_msrs) 277 if ((ms_hyperv.features & required_msrs) != required_msrs)
260 return; 278 return;
261 279
280 /*
281 * Allocate the per-CPU state for the hypercall input arg.
 282	 * If this allocation fails, we will not be able to set up the
 283	 * (per-CPU) hypercall input page and thus this failure is
284 * fatal on Hyper-V.
285 */
286 hyperv_pcpu_input_arg = alloc_percpu(void *);
287
288 BUG_ON(hyperv_pcpu_input_arg == NULL);
289
262 /* Allocate percpu VP index */ 290 /* Allocate percpu VP index */
263 hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index), 291 hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
264 GFP_KERNEL); 292 GFP_KERNEL);
@@ -296,7 +324,7 @@ void hyperv_init(void)
296 hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); 324 hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
297 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 325 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
298 326
299 hyper_alloc_mmu(); 327 hv_apic_init();
300 328
301 /* 329 /*
302 * Register Hyper-V specific clocksource. 330 * Register Hyper-V specific clocksource.
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 56c9ebac946f..5f053d7d1bd9 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -25,20 +25,13 @@ struct hv_flush_pcpu {
25struct hv_flush_pcpu_ex { 25struct hv_flush_pcpu_ex {
26 u64 address_space; 26 u64 address_space;
27 u64 flags; 27 u64 flags;
28 struct { 28 struct hv_vpset hv_vp_set;
29 u64 format;
30 u64 valid_bank_mask;
31 u64 bank_contents[];
32 } hv_vp_set;
33 u64 gva_list[]; 29 u64 gva_list[];
34}; 30};
35 31
36/* Each gva in gva_list encodes up to 4096 pages to flush */ 32/* Each gva in gva_list encodes up to 4096 pages to flush */
37#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) 33#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
38 34
39static struct hv_flush_pcpu __percpu **pcpu_flush;
40
41static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
42 35
43/* 36/*
44 * Fills in gva_list starting from offset. Returns the number of items added. 37 * Fills in gva_list starting from offset. Returns the number of items added.
@@ -70,41 +63,6 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
70 return gva_n - offset; 63 return gva_n - offset;
71} 64}
72 65
73/* Return the number of banks in the resulting vp_set */
74static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
75 const struct cpumask *cpus)
76{
77 int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
78
79 /* valid_bank_mask can represent up to 64 banks */
80 if (hv_max_vp_index / 64 >= 64)
81 return 0;
82
83 /*
84 * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
85 * structs are not cleared between calls, we risk flushing unneeded
86 * vCPUs otherwise.
87 */
88 for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
89 flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
90
91 /*
92 * Some banks may end up being empty but this is acceptable.
93 */
94 for_each_cpu(cpu, cpus) {
95 vcpu = hv_cpu_number_to_vp_number(cpu);
96 vcpu_bank = vcpu / 64;
97 vcpu_offset = vcpu % 64;
98 __set_bit(vcpu_offset, (unsigned long *)
99 &flush->hv_vp_set.bank_contents[vcpu_bank]);
100 if (vcpu_bank >= nr_bank)
101 nr_bank = vcpu_bank + 1;
102 }
103 flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
104
105 return nr_bank;
106}
107
108static void hyperv_flush_tlb_others(const struct cpumask *cpus, 66static void hyperv_flush_tlb_others(const struct cpumask *cpus,
109 const struct flush_tlb_info *info) 67 const struct flush_tlb_info *info)
110{ 68{
@@ -116,7 +74,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
116 74
117 trace_hyperv_mmu_flush_tlb_others(cpus, info); 75 trace_hyperv_mmu_flush_tlb_others(cpus, info);
118 76
119 if (!pcpu_flush || !hv_hypercall_pg) 77 if (!hv_hypercall_pg)
120 goto do_native; 78 goto do_native;
121 79
122 if (cpumask_empty(cpus)) 80 if (cpumask_empty(cpus))
@@ -124,10 +82,8 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
124 82
125 local_irq_save(flags); 83 local_irq_save(flags);
126 84
127 flush_pcpu = this_cpu_ptr(pcpu_flush); 85 flush_pcpu = (struct hv_flush_pcpu **)
128 86 this_cpu_ptr(hyperv_pcpu_input_arg);
129 if (unlikely(!*flush_pcpu))
130 *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
131 87
132 flush = *flush_pcpu; 88 flush = *flush_pcpu;
133 89
@@ -203,7 +159,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
203 159
204 trace_hyperv_mmu_flush_tlb_others(cpus, info); 160 trace_hyperv_mmu_flush_tlb_others(cpus, info);
205 161
206 if (!pcpu_flush_ex || !hv_hypercall_pg) 162 if (!hv_hypercall_pg)
207 goto do_native; 163 goto do_native;
208 164
209 if (cpumask_empty(cpus)) 165 if (cpumask_empty(cpus))
@@ -211,10 +167,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
211 167
212 local_irq_save(flags); 168 local_irq_save(flags);
213 169
214 flush_pcpu = this_cpu_ptr(pcpu_flush_ex); 170 flush_pcpu = (struct hv_flush_pcpu_ex **)
215 171 this_cpu_ptr(hyperv_pcpu_input_arg);
216 if (unlikely(!*flush_pcpu))
217 *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
218 172
219 flush = *flush_pcpu; 173 flush = *flush_pcpu;
220 174
@@ -239,8 +193,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
239 flush->hv_vp_set.valid_bank_mask = 0; 193 flush->hv_vp_set.valid_bank_mask = 0;
240 194
241 if (!cpumask_equal(cpus, cpu_present_mask)) { 195 if (!cpumask_equal(cpus, cpu_present_mask)) {
242 flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K; 196 flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
243 nr_bank = cpumask_to_vp_set(flush, cpus); 197 nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
244 } 198 }
245 199
246 if (!nr_bank) { 200 if (!nr_bank) {
@@ -296,14 +250,3 @@ void hyperv_setup_mmu_ops(void)
296 pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex; 250 pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
297 } 251 }
298} 252}
299
300void hyper_alloc_mmu(void)
301{
302 if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
303 return;
304
305 if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
306 pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
307 else
308 pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
309}
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 416cb0e0c496..3bfa92c2793c 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -164,6 +164,11 @@
164 */ 164 */
165#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) 165#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
166 166
167/*
168 * Recommend using cluster IPI hypercalls.
169 */
170#define HV_X64_CLUSTER_IPI_RECOMMENDED (1 << 10)
171
167/* Recommend using the newer ExProcessorMasks interface */ 172/* Recommend using the newer ExProcessorMasks interface */
168#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) 173#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
169 174
@@ -329,12 +334,17 @@ struct hv_tsc_emulation_status {
329#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ 334#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
330 (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) 335 (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
331 336
337#define HV_IPI_LOW_VECTOR 0x10
338#define HV_IPI_HIGH_VECTOR 0xff
339
332/* Declare the various hypercall operations. */ 340/* Declare the various hypercall operations. */
333#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 341#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
334#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 342#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
335#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 343#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
344#define HVCALL_SEND_IPI 0x000b
336#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 345#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
337#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 346#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
347#define HVCALL_SEND_IPI_EX 0x0015
338#define HVCALL_POST_MESSAGE 0x005c 348#define HVCALL_POST_MESSAGE 0x005c
339#define HVCALL_SIGNAL_EVENT 0x005d 349#define HVCALL_SIGNAL_EVENT 0x005d
340 350
@@ -360,7 +370,7 @@ struct hv_tsc_emulation_status {
360#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) 370#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
361 371
362enum HV_GENERIC_SET_FORMAT { 372enum HV_GENERIC_SET_FORMAT {
363 HV_GENERIC_SET_SPARCE_4K, 373 HV_GENERIC_SET_SPARSE_4K,
364 HV_GENERIC_SET_ALL, 374 HV_GENERIC_SET_ALL,
365}; 375};
366 376
@@ -706,4 +716,22 @@ struct hv_enlightened_vmcs {
706#define HV_STIMER_AUTOENABLE (1ULL << 3) 716#define HV_STIMER_AUTOENABLE (1ULL << 3)
707#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) 717#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
708 718
719struct ipi_arg_non_ex {
720 u32 vector;
721 u32 reserved;
722 u64 cpu_mask;
723};
724
725struct hv_vpset {
726 u64 format;
727 u64 valid_bank_mask;
728 u64 bank_contents[];
729};
730
731struct ipi_arg_ex {
732 u32 vector;
733 u32 reserved;
734 struct hv_vpset vp_set;
735};
736
709#endif 737#endif
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index b90e79610cf7..997192131b7b 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -122,6 +122,7 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}
122#if IS_ENABLED(CONFIG_HYPERV) 122#if IS_ENABLED(CONFIG_HYPERV)
123extern struct clocksource *hyperv_cs; 123extern struct clocksource *hyperv_cs;
124extern void *hv_hypercall_pg; 124extern void *hv_hypercall_pg;
125extern void __percpu **hyperv_pcpu_input_arg;
125 126
126static inline u64 hv_do_hypercall(u64 control, void *input, void *output) 127static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
127{ 128{
@@ -258,9 +259,41 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
258 return hv_vp_index[cpu_number]; 259 return hv_vp_index[cpu_number];
259} 260}
260 261
261void hyperv_init(void); 262static inline int cpumask_to_vpset(struct hv_vpset *vpset,
263 const struct cpumask *cpus)
264{
265 int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
266
267 /* valid_bank_mask can represent up to 64 banks */
268 if (hv_max_vp_index / 64 >= 64)
269 return 0;
270
271 /*
 272	 * Clear all banks up to the maximum possible bank, as the vpset
 273	 * is not cleared between calls; otherwise we risk flushing
 274	 * unneeded vCPUs.
275 */
276 for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
277 vpset->bank_contents[vcpu_bank] = 0;
278
279 /*
280 * Some banks may end up being empty but this is acceptable.
281 */
282 for_each_cpu(cpu, cpus) {
283 vcpu = hv_cpu_number_to_vp_number(cpu);
284 vcpu_bank = vcpu / 64;
285 vcpu_offset = vcpu % 64;
286 __set_bit(vcpu_offset, (unsigned long *)
287 &vpset->bank_contents[vcpu_bank]);
288 if (vcpu_bank >= nr_bank)
289 nr_bank = vcpu_bank + 1;
290 }
291 vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
292 return nr_bank;
293}
294
295void __init hyperv_init(void);
262void hyperv_setup_mmu_ops(void); 296void hyperv_setup_mmu_ops(void);
263void hyper_alloc_mmu(void);
264void hyperv_report_panic(struct pt_regs *regs, long err); 297void hyperv_report_panic(struct pt_regs *regs, long err);
265bool hv_is_hyperv_initialized(void); 298bool hv_is_hyperv_initialized(void);
266void hyperv_cleanup(void); 299void hyperv_cleanup(void);
@@ -269,6 +302,13 @@ void hyperv_reenlightenment_intr(struct pt_regs *regs);
269void set_hv_tscchange_cb(void (*cb)(void)); 302void set_hv_tscchange_cb(void (*cb)(void));
270void clear_hv_tscchange_cb(void); 303void clear_hv_tscchange_cb(void);
271void hyperv_stop_tsc_emulation(void); 304void hyperv_stop_tsc_emulation(void);
305
306#ifdef CONFIG_X86_64
307void hv_apic_init(void);
308#else
309static inline void hv_apic_init(void) {}
310#endif
311
272#else /* CONFIG_HYPERV */ 312#else /* CONFIG_HYPERV */
273static inline void hyperv_init(void) {} 313static inline void hyperv_init(void) {}
274static inline bool hv_is_hyperv_initialized(void) { return false; } 314static inline bool hv_is_hyperv_initialized(void) { return false; }