Diffstat (limited to 'arch/x86/hyperv/mmu.c')
-rw-r--r--	arch/x86/hyperv/mmu.c	57
1 file changed, 43 insertions(+), 14 deletions(-)
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 39e7f6e50919..9cc9e1c1e2db 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
 
-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;
 
-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
 
 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
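
The hunk above turns the per-cpu flush buffers into per-cpu pointers; the hunks further down supply the other half of the change, backing each pointer with a page from alloc_page(). A plausible motivation, going by how the hypercall path works rather than by anything stated in the hunk itself: hv_do_hypercall() feeds its input buffer through virt_to_phys(), which is only valid for linearly-mapped memory, while percpu chunks may be vmalloc-backed. A minimal sketch of the layout change, with hypothetical _old/_new names:

/* Before: the whole PAGE_SIZE buffer lived inside the percpu area,
 * so virt_to_phys() on it could be bogus on vmalloc-backed chunks.
 */
static struct hv_flush_pcpu __percpu *pcpu_flush_old;

/* After: the percpu area stores one pointer per CPU; the buffer it
 * points at is a real page in the linear mapping, allocated on first
 * use (see the hyperv_flush_tlb_others() hunks below).
 */
static struct hv_flush_pcpu __percpu **pcpu_flush_new;
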
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 {
 	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
 
+	/* valid_bank_mask can represent up to 64 banks */
+	if (hv_max_vp_index / 64 >= 64)
+		return 0;
+
+	/*
+	 * Clear all banks up to the maximum possible bank: hv_flush_pcpu_ex
+	 * structs are not cleared between calls, so we risk flushing
+	 * unneeded vCPUs otherwise.
+	 */
+	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+		flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
+
 	/*
 	 * Some banks may end up being empty but this is acceptable.
 	 */
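
The guard and the clearing loop above lean on the same arithmetic: each bank is one 64-bit word covering 64 vCPUs, and valid_bank_mask is itself a single 64-bit mask, so at most 64 banks (4096 vCPUs) can be represented; a hv_max_vp_index of 4096 or more (hv_max_vp_index / 64 >= 64) therefore cannot be encoded at all. A standalone demo of the bank/bit arithmetic (plain userspace C; the VP indices are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bank_contents[64] = { 0 };	/* max 64 banks of 64 vCPUs */
	unsigned int vcpus[] = { 0, 5, 63, 64, 130 };	/* hypothetical */

	for (unsigned int i = 0; i < sizeof(vcpus) / sizeof(vcpus[0]); i++) {
		unsigned int bank = vcpus[i] / 64;	/* which 64-bit word */
		unsigned int bit  = vcpus[i] % 64;	/* which bit inside it */

		bank_contents[bank] |= 1ULL << bit;
		printf("vcpu %3u -> bank %u, bit %2u\n", vcpus[i], bank, bit);
	}
	return 0;
}
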
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 		vcpu = hv_cpu_number_to_vp_number(cpu);
 		vcpu_bank = vcpu / 64;
 		vcpu_offset = vcpu % 64;
-
-		/* valid_bank_mask can represent up to 64 banks */
-		if (vcpu_bank >= 64)
-			return 0;
-
 		__set_bit(vcpu_offset, (unsigned long *)
 			  &flush->hv_vp_set.bank_contents[vcpu_bank]);
 		if (vcpu_bank >= nr_bank)
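
Hoisting the range check out of the loop (removed above, added before the loop in the previous hunk) preserves the old behaviour: every VP number hv_cpu_number_to_vp_number() can return is bounded by hv_max_vp_index, so vcpu / 64 <= hv_max_vp_index / 64 holds on every iteration and a single up-front check covers them all. The old placement also had a problem the move fixes: it could not protect the new clearing loop, which indexes bank_contents before the per-CPU loop runs.
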
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 				    const struct flush_tlb_info *info)
 {
 	int cpu, vcpu, gva_n, max_gvas;
+	struct hv_flush_pcpu **flush_pcpu;
 	struct hv_flush_pcpu *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 
 	local_irq_save(flags);
 
-	flush = this_cpu_ptr(pcpu_flush);
+	flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}
 
 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
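
The buffer behind this CPU's pointer is now allocated on first use. Interrupts are already off at this point (local_irq_save() above), so GFP_ATOMIC is the only workable allocation mode and failure must be survivable: the code degrades to the native, IPI-based flush via the do_native label. The same pattern in isolation, as a sketch assuming kernel context (the demo_* names are hypothetical, and the page check is spelled out explicitly here):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>

struct demo_buf { char data[PAGE_SIZE]; };

static struct demo_buf __percpu **demo_slot;	/* set up at init time */

/* Return this CPU's buffer, allocating it on first use. Call with
 * interrupts off; NULL means the caller must take its fallback path.
 */
static struct demo_buf *demo_get(void)
{
	struct demo_buf **slot = this_cpu_ptr(demo_slot);

	if (unlikely(!*slot)) {
		struct page *pg = alloc_page(GFP_ATOMIC);

		if (pg)
			*slot = page_address(pg);
	}
	return *slot;
}
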
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 				       const struct flush_tlb_info *info)
 {
 	int nr_bank = 0, max_gvas, gva_n;
+	struct hv_flush_pcpu_ex **flush_pcpu;
 	struct hv_flush_pcpu_ex *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
 	local_irq_save(flags);
 
-	flush = this_cpu_ptr(pcpu_flush_ex);
+	flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}
 
 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
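
The hunk above is the extended variant of the same first-use allocation change; only the buffer type (hv_flush_pcpu_ex) and the percpu variable differ from hyperv_flush_tlb_others().
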
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-			0, nr_bank + 2, flush, NULL);
+			0, nr_bank, flush, NULL);
 	} else if (info->end &&
 		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-			0, nr_bank + 2, flush, NULL);
+			0, nr_bank, flush, NULL);
 	} else {
 		gva_n = fill_gva_list(flush->gva_list, nr_bank,
 				      info->start, info->end);
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
-			gva_n, nr_bank + 2, flush, NULL);
+			gva_n, nr_bank, flush, NULL);
 	}
 
 	local_irq_restore(flags);
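
The size changes above are about how the hypercall control word encodes a variable-size header. As I read the Hyper-V TLFS, that field must count only the variable portion of the input, in 8-byte units; for the *_EX flush calls that is the bank_contents array, i.e. nr_bank qwords. The old '+ 2' also counted the two fixed qwords of hv_vp_set (format and valid_bank_mask), overstating the header size. A standalone demo of assembling such a control word (constants reproduced from my memory of hyperv-tlfs.h; treat them as assumptions, not a reference):

#include <stdio.h>
#include <stdint.h>

#define HV_HYPERCALL_VARHEAD_OFFSET	17	/* variable header, qwords */
#define HV_HYPERCALL_REP_COMP_OFFSET	32	/* rep count */

int main(void)
{
	uint64_t code = 0x13;	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX */
	uint64_t nr_bank = 2;	/* variable part only: two banks */
	uint64_t rep_count = 0;

	uint64_t control = code |
		(nr_bank << HV_HYPERCALL_VARHEAD_OFFSET) |
		(rep_count << HV_HYPERCALL_REP_COMP_OFFSET);

	printf("control = %#018llx\n", (unsigned long long)control);
	return 0;
}
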
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
 		return;
 
 	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-		pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
 	else
-		pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
 }
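
With pages now allocated lazily per CPU, boot time only needs pointer-sized percpu slots, which is what the alloc_percpu() calls above reserve. alloc_percpu(type) is the typed convenience wrapper around __alloc_percpu(), roughly as <linux/percpu.h> defines it:

/* Typed percpu allocation: size and alignment come from the type. */
#define alloc_percpu(type)						\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
						__alignof__(type))

So the percpu allocator now hands out one pointer per CPU instead of a page-sized, page-aligned chunk per CPU, and the pages themselves appear one at a time, only on CPUs that actually issue flush hypercalls.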