Diffstat (limited to 'arch/x86/hyperv/mmu.c')
-rw-r--r--  arch/x86/hyperv/mmu.c  75
1 file changed, 9 insertions(+), 66 deletions(-)
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 56c9ebac946f..5f053d7d1bd9 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -25,20 +25,13 @@ struct hv_flush_pcpu {
 struct hv_flush_pcpu_ex {
         u64 address_space;
         u64 flags;
-        struct {
-                u64 format;
-                u64 valid_bank_mask;
-                u64 bank_contents[];
-        } hv_vp_set;
+        struct hv_vpset hv_vp_set;
         u64 gva_list[];
 };
 
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
 
-static struct hv_flush_pcpu __percpu **pcpu_flush;
-
-static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
 
 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
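(Aside: HV_TLB_FLUSH_UNIT above caps how much address range one gva_list entry can describe, so the number of entries a flush needs is just the range size divided by that unit, rounded up. A self-contained sketch of that arithmetic; PAGE_SIZE is hardcoded to the x86 value here and gva_entries() is an illustrative helper, not a kernel function.)

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL                    /* assumed x86 4K pages */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) /* one gva_list entry: up to 4096 pages */

/* Upper bound on gva_list entries needed to cover [start, end). */
static uint64_t gva_entries(uint64_t start, uint64_t end)
{
        uint64_t len = end - start;

        return (len + HV_TLB_FLUSH_UNIT - 1) / HV_TLB_FLUSH_UNIT;
}

int main(void)
{
        /* 4096 pages fit in one entry; one page more needs a second. */
        printf("%llu\n", (unsigned long long)gva_entries(0, 4096 * PAGE_SIZE)); /* 1 */
        printf("%llu\n", (unsigned long long)gva_entries(0, 4097 * PAGE_SIZE)); /* 2 */
        return 0;
}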
@@ -70,41 +63,6 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
         return gva_n - offset;
 }
 
-/* Return the number of banks in the resulting vp_set */
-static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
-                                    const struct cpumask *cpus)
-{
-        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
-
-        /* valid_bank_mask can represent up to 64 banks */
-        if (hv_max_vp_index / 64 >= 64)
-                return 0;
-
-        /*
-         * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
-         * structs are not cleared between calls, we risk flushing unneeded
-         * vCPUs otherwise.
-         */
-        for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
-                flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
-
-        /*
-         * Some banks may end up being empty but this is acceptable.
-         */
-        for_each_cpu(cpu, cpus) {
-                vcpu = hv_cpu_number_to_vp_number(cpu);
-                vcpu_bank = vcpu / 64;
-                vcpu_offset = vcpu % 64;
-                __set_bit(vcpu_offset, (unsigned long *)
-                          &flush->hv_vp_set.bank_contents[vcpu_bank]);
-                if (vcpu_bank >= nr_bank)
-                        nr_bank = vcpu_bank + 1;
-        }
-        flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
-
-        return nr_bank;
-}
-
 static void hyperv_flush_tlb_others(const struct cpumask *cpus,
                                     const struct flush_tlb_info *info)
 {
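The function removed above packed a cpumask into Hyper-V's sparse VP-set layout, one u64 bank per 64 virtual processors, and set valid_bank_mask to GENMASK_ULL(nr_bank - 1, 0); the patch replaces it with the generic cpumask_to_vpset() helper called later in this diff. A userspace sketch of the same encoding, with a plain array of VP numbers standing in for the kernel's cpumask and hv_vpset types:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_BANKS 64  /* valid_bank_mask is a single u64: at most 64 banks */

/* Mirror of GENMASK_ULL(h, 0): bits 0..h set. */
static uint64_t genmask_ull(int h)
{
        return (h >= 63) ? ~0ULL : ((1ULL << (h + 1)) - 1);
}

/*
 * Encode a list of VP (virtual processor) numbers into sparse banks,
 * as the removed cpumask_to_vp_set() did. Returns the number of banks.
 */
static int vps_to_vpset(uint64_t bank_contents[MAX_BANKS],
                        uint64_t *valid_bank_mask,
                        const int *vps, int n)
{
        int i, nr_bank = 1;

        /* Banks are not cleared between calls in the original, hence memset. */
        memset(bank_contents, 0, MAX_BANKS * sizeof(uint64_t));
        for (i = 0; i < n; i++) {
                int bank = vps[i] / 64;
                int bit  = vps[i] % 64;

                bank_contents[bank] |= 1ULL << bit;
                if (bank >= nr_bank)
                        nr_bank = bank + 1;
        }
        *valid_bank_mask = genmask_ull(nr_bank - 1);
        return nr_bank;
}

int main(void)
{
        uint64_t banks[MAX_BANKS], mask;
        int vps[] = { 1, 3, 70 };  /* VP 70 lands in bank 1, bit 6 */
        int nr = vps_to_vpset(banks, &mask, vps, 3);

        printf("nr_bank=%d mask=%#llx bank0=%#llx bank1=%#llx\n",
               nr, (unsigned long long)mask,
               (unsigned long long)banks[0], (unsigned long long)banks[1]);
        return 0;
}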
@@ -116,7 +74,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 
         trace_hyperv_mmu_flush_tlb_others(cpus, info);
 
-        if (!pcpu_flush || !hv_hypercall_pg)
+        if (!hv_hypercall_pg)
                 goto do_native;
 
         if (cpumask_empty(cpus))
@@ -124,10 +82,8 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 
         local_irq_save(flags);
 
-        flush_pcpu = this_cpu_ptr(pcpu_flush);
-
-        if (unlikely(!*flush_pcpu))
-                *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+        flush_pcpu = (struct hv_flush_pcpu **)
+                     this_cpu_ptr(hyperv_pcpu_input_arg);
 
         flush = *flush_pcpu;
 
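Both flush paths now cast hyperv_pcpu_input_arg, the per-CPU hypercall input page that the Hyper-V init code pre-allocates, to their own input layout, instead of lazily allocating a private page with GFP_ATOMIC under local_irq_save(). Because interrupts stay disabled while the page is in use, successive callers on a CPU can safely overlay different layouts on the same memory. A userspace sketch of that reuse pattern (the struct fields and the malloc'd arena are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for one CPU's pre-allocated hypercall input page. */
static void *pcpu_input_arg;

struct flush_input    { unsigned long address_space, flags; };
struct flush_input_ex { unsigned long address_space, flags, format; };

int main(void)
{
        pcpu_input_arg = malloc(4096);  /* done once at init, not per call */

        /* One caller overlays its layout on the shared page... */
        struct flush_input *flush = pcpu_input_arg;
        flush->flags = 1;

        /* ...and a later caller reuses the same page with another layout.
         * In the kernel, irqs are off around each use, so the two are
         * never live at the same time on one CPU. */
        struct flush_input_ex *flush_ex = pcpu_input_arg;
        flush_ex->format = 2;

        printf("same page: %p %p\n", (void *)flush, (void *)flush_ex);
        free(pcpu_input_arg);
        return 0;
}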
@@ -203,7 +159,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
         trace_hyperv_mmu_flush_tlb_others(cpus, info);
 
-        if (!pcpu_flush_ex || !hv_hypercall_pg)
+        if (!hv_hypercall_pg)
                 goto do_native;
 
         if (cpumask_empty(cpus))
@@ -211,10 +167,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
         local_irq_save(flags);
 
-        flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
-
-        if (unlikely(!*flush_pcpu))
-                *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+        flush_pcpu = (struct hv_flush_pcpu_ex **)
+                     this_cpu_ptr(hyperv_pcpu_input_arg);
 
         flush = *flush_pcpu;
 
@@ -239,8 +193,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
         flush->hv_vp_set.valid_bank_mask = 0;
 
         if (!cpumask_equal(cpus, cpu_present_mask)) {
-                flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K;
-                nr_bank = cpumask_to_vp_set(flush, cpus);
+                flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+                nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
         }
 
         if (!nr_bank) {
@@ -296,14 +250,3 @@ void hyperv_setup_mmu_ops(void)
                 pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
         }
 }
-
-void hyper_alloc_mmu(void)
-{
-        if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
-                return;
-
-        if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-                pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
-        else
-                pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
-}
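With the private per-cpu pointers gone, hyper_alloc_mmu() had nothing left to allocate, so it is deleted outright; hyperv_setup_mmu_ops() alone picks the flush routine from the hypervisor's feature hints, installing the _ex variant when extended processor masks are recommended. A minimal sketch of that selection, with the hint bits reduced to local constants:

#include <stdio.h>

/* Stand-ins for the ms_hyperv.hints feature bits. */
#define REMOTE_TLB_FLUSH_RECOMMENDED   (1u << 0)
#define EX_PROCESSOR_MASKS_RECOMMENDED (1u << 1)

static void flush_others(void)    { puts("hyperv_flush_tlb_others"); }
static void flush_others_ex(void) { puts("hyperv_flush_tlb_others_ex"); }

static void setup_mmu_ops(unsigned int hints, void (**flush_op)(void))
{
        if (!(hints & REMOTE_TLB_FLUSH_RECOMMENDED))
                return;  /* keep the native flush */

        /* Extended masks allow > 64 VPs via the sparse vp_set format. */
        if (hints & EX_PROCESSOR_MASKS_RECOMMENDED)
                *flush_op = flush_others_ex;
        else
                *flush_op = flush_others;
}

int main(void)
{
        void (*op)(void) = NULL;

        setup_mmu_ops(REMOTE_TLB_FLUSH_RECOMMENDED |
                      EX_PROCESSOR_MASKS_RECOMMENDED, &op);
        if (op)
                op();  /* prints hyperv_flush_tlb_others_ex */
        return 0;
}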