author      Vitaly Kuznetsov <vkuznets@redhat.com>    2018-06-22 13:06:23 -0400
committer   Thomas Gleixner <tglx@linutronix.de>      2018-07-03 03:00:33 -0400
commit      d8e6b232cfdd5d141c03e40a14c1c781480ea05e (patch)
tree        7da4e56bb4512352f4e8594012ec174d5000b43d
parent      53e52966901a5b14caa2a7c77428a693fe71f734 (diff)
x86/hyper-v: Use 'fast' hypercall for HVCALL_SEND_IPI
The current Hyper-V TLFS (v5.0b) claims that the HvCallSendSyntheticClusterIpi
hypercall can't be 'fast' (passing parameters through registers), but apparently
this is not true; Windows always uses the 'fast' version. We can do the same in
Linux too.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Cc: devel@linuxdriverproject.org
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tianyu Lan <Tianyu.Lan@microsoft.com>
Cc: "Michael Kelley (EOSG)" <Michael.H.Kelley@microsoft.com>
Link: https://lkml.kernel.org/r/20180622170625.30688-3-vkuznets@redhat.com
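For reference, a 'fast' hypercall keeps its input out of memory entirely: the
control word goes in RCX with HV_HYPERCALL_FAST_BIT set, the two 8-byte inputs
go in RDX and R8, and the status comes back in RAX. The real helper this patch
calls, hv_do_fast_hypercall16(), lives in arch/x86/include/asm/mshyperv.h and
was added earlier in this series; the snippet below is only a simplified 64-bit
sketch of that calling convention under a hypothetical name, without the
retpoline-safe indirect call the kernel actually uses.

/*
 * Simplified sketch (x86_64 only, hypothetical name) of how a two-input
 * "fast" hypercall passes its arguments in registers instead of via a
 * guest-physical input page.  Not the kernel's exact implementation.
 */
#include <linux/types.h>
#include <asm/mshyperv.h>	/* hv_hypercall_pg, HV_HYPERCALL_FAST_BIT */

static inline u64 hv_fast_hypercall16_sketch(u16 code, u64 input1, u64 input2)
{
	/* RCX: hypercall code with the FAST bit set -> no input page needed */
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;
	u64 hv_status;
	/* RDX and R8 carry the two 8-byte inputs directly */
	register u64 r8 asm("r8") = input2;

	asm volatile("call *%3"
		     : "=a" (hv_status),	/* RAX: hypercall status */
		       "+c" (control),		/* RCX: control word */
		       "+d" (input1)		/* RDX: first input */
		     : "m" (hv_hypercall_pg),	/* hypercall page pointer */
		       "r" (r8)			/* R8: second input */
		     : "cc", "memory", "r9", "r10", "r11");

	return hv_status;
}

Because the 16 bytes of input (the 32-bit vector, its implicitly zeroed
reserved field, and the 64-bit cpu_mask) now travel in registers,
__send_ipi_mask() no longer touches the per-CPU hyperv_pcpu_input_arg page,
which is why the patch can also drop the local_irq_save()/local_irq_restore()
pair and the NULL check on that page.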
-rw-r--r--	arch/x86/hyperv/hv_apic.c	22
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index f68855499391..90055f89223b 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -128,10 +128,8 @@ ipi_mask_ex_done:
 static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 {
 	int cur_cpu, vcpu;
-	struct ipi_arg_non_ex **arg;
-	struct ipi_arg_non_ex *ipi_arg;
+	struct ipi_arg_non_ex ipi_arg;
 	int ret = 1;
-	unsigned long flags;
 
 	if (cpumask_empty(mask))
 		return true;
@@ -145,16 +143,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
 		return __send_ipi_mask_ex(mask, vector);
 
-	local_irq_save(flags);
-	arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
-	ipi_arg = *arg;
-	if (unlikely(!ipi_arg))
-		goto ipi_mask_done;
-
-	ipi_arg->vector = vector;
-	ipi_arg->reserved = 0;
-	ipi_arg->cpu_mask = 0;
+	ipi_arg.vector = vector;
+	ipi_arg.cpu_mask = 0;
 
 	for_each_cpu(cur_cpu, mask) {
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
@@ -165,13 +155,13 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 		if (vcpu >= 64)
 			goto ipi_mask_done;
 
-		__set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+		__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
 	}
 
-	ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
+	ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+				     ipi_arg.cpu_mask);
 
 ipi_mask_done:
-	local_irq_restore(flags);
 	return ((ret == 0) ? true : false);
 }
 