| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-07 12:25:15 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-07 12:25:15 -0400 |
| commit | 57e88b43b81301d9b28f124a5576ac43a1cf9e8d (patch) | |
| tree | 4f8be8cab8afe07dbb4b8469bf6f873f4372b396 | |
| parent | 3b9f8ed25dbe5f858b1331588929f2a766aef55f (diff) | |
| parent | 773b79f7a7c7839fb9d09c0e206734173a8b0a6b (diff) | |
Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 platform updates from Ingo Molnar:
"The main changes include various Hyper-V optimizations such as faster
hypercalls and faster/better TLB flushes - and there's also some
Intel-MID cleanups"
* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
tracing/hyper-v: Trace hyperv_mmu_flush_tlb_others()
x86/hyper-v: Support extended CPU ranges for TLB flush hypercalls
x86/platform/intel-mid: Make several arrays static, to make code smaller
MAINTAINERS: Add missed file for Hyper-V
x86/hyper-v: Use hypercall for remote TLB flush
hyper-v: Globalize vp_index
x86/hyper-v: Implement rep hypercalls
hyper-v: Use fast hypercall for HVCALL_SIGNAL_EVENT
x86/hyper-v: Introduce fast hypercall implementation
x86/hyper-v: Make hv_do_hypercall() inline
x86/hyper-v: Include hyperv/ only when CONFIG_HYPERV is set
x86/platform/intel-mid: Make 'bt_sfi_data' const
x86/platform/intel-mid: Make IRQ allocation a bit more flexible
x86/platform/intel-mid: Group timers callbacks together
-rw-r--r-- | MAINTAINERS | 2
-rw-r--r-- | arch/x86/Kbuild | 2
-rw-r--r-- | arch/x86/hyperv/Makefile | 2
-rw-r--r-- | arch/x86/hyperv/hv_init.c | 90
-rw-r--r-- | arch/x86/hyperv/mmu.c | 272
-rw-r--r-- | arch/x86/include/asm/mshyperv.h | 147
-rw-r--r-- | arch/x86/include/asm/trace/hyperv.h | 40
-rw-r--r-- | arch/x86/include/uapi/asm/hyperv.h | 17
-rw-r--r-- | arch/x86/kernel/cpu/mshyperv.c | 1
-rw-r--r-- | arch/x86/pci/intel_mid_pci.c | 12
-rw-r--r-- | arch/x86/platform/intel-mid/device_libs/platform_bt.c | 2
-rw-r--r-- | arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c | 6
-rw-r--r-- | arch/x86/platform/intel-mid/intel-mid.c | 2
-rw-r--r-- | arch/x86/platform/intel-mid/pwr.c | 4
-rw-r--r-- | drivers/hv/Kconfig | 1
-rw-r--r-- | drivers/hv/channel_mgmt.c | 20
-rw-r--r-- | drivers/hv/connection.c | 7
-rw-r--r-- | drivers/hv/hv.c | 9
-rw-r--r-- | drivers/hv/hyperv_vmbus.h | 11
-rw-r--r-- | drivers/hv/vmbus_drv.c | 17
-rw-r--r-- | drivers/pci/host/pci-hyperv.c | 54
-rw-r--r-- | include/linux/hyperv.h | 17
22 files changed, 552 insertions, 183 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 11dde284a426..fb63e53ea3c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6312,6 +6312,7 @@ L: devel@linuxdriverproject.org
 S: Maintained
 F: Documentation/networking/netvsc.txt
 F: arch/x86/include/asm/mshyperv.h
+F: arch/x86/include/asm/trace/hyperv.h
 F: arch/x86/include/uapi/asm/hyperv.h
 F: arch/x86/kernel/cpu/mshyperv.c
 F: arch/x86/hyperv
@@ -6325,6 +6326,7 @@ F: drivers/uio/uio_hv_generic.c
 F: drivers/video/fbdev/hyperv_fb.c
 F: net/vmw_vsock/hyperv_transport.c
 F: include/linux/hyperv.h
+F: include/uapi/linux/hyperv.h
 F: tools/hv/
 F: Documentation/ABI/stable/sysfs-bus-vmbus
 
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index f65a804b86f0..0038a2d10a7a 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -8,7 +8,7 @@ obj-$(CONFIG_KVM) += kvm/
 obj-$(CONFIG_XEN) += xen/
 
 # Hyper-V paravirtualization support
-obj-$(CONFIG_HYPERVISOR_GUEST) += hyperv/
+obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/
 
 obj-y += realmode/
 obj-y += kernel/
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index 171ae09864d7..367a8203cfcf 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1 +1 @@
-obj-y := hv_init.o
+obj-y := hv_init.o mmu.o
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 5b882cc0c0e9..1a8eb550c40f 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -26,6 +26,8 @@
 #include <linux/mm.h>
 #include <linux/clockchips.h>
 #include <linux/hyperv.h>
+#include <linux/slab.h>
+#include <linux/cpuhotplug.h>
 
 #ifdef CONFIG_HYPERV_TSCPAGE
 
@@ -75,10 +77,25 @@ static struct clocksource hyperv_cs_msr = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void *hypercall_pg;
+void *hv_hypercall_pg;
+EXPORT_SYMBOL_GPL(hv_hypercall_pg);
 struct clocksource *hyperv_cs;
 EXPORT_SYMBOL_GPL(hyperv_cs);
 
+u32 *hv_vp_index;
+EXPORT_SYMBOL_GPL(hv_vp_index);
+
+static int hv_cpu_init(unsigned int cpu)
+{
+	u64 msr_vp_index;
+
+	hv_get_vp_index(msr_vp_index);
+
+	hv_vp_index[smp_processor_id()] = msr_vp_index;
+
+	return 0;
+}
+
 /*
  * This function is to be invoked early in the boot sequence after the
  * hypervisor has been detected.
@@ -94,6 +111,16 @@ void hyperv_init(void)
 	if (x86_hyper != &x86_hyper_ms_hyperv)
 		return;
 
+	/* Allocate percpu VP index */
+	hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
+				    GFP_KERNEL);
+	if (!hv_vp_index)
+		return;
+
+	if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
+			      hv_cpu_init, NULL) < 0)
+		goto free_vp_index;
+
 	/*
 	 * Setup the hypercall page and enable hypercalls.
 	 * 1. Register the guest ID
@@ -102,17 +129,19 @@
 	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
 	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 
-	hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
-	if (hypercall_pg == NULL) {
+	hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
+	if (hv_hypercall_pg == NULL) {
 		wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
-		return;
+		goto free_vp_index;
 	}
 
 	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 	hypercall_msr.enable = 1;
-	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hypercall_pg);
+	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
 	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
+	hyper_alloc_mmu();
+
 	/*
 	 * Register Hyper-V specific clocksource.
 	 */
@@ -148,6 +177,12 @@ register_msr_cs:
 	hyperv_cs = &hyperv_cs_msr;
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
 		clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+
+	return;
+
+free_vp_index:
+	kfree(hv_vp_index);
+	hv_vp_index = NULL;
 }
 
 /*
@@ -170,51 +205,6 @@ void hyperv_cleanup(void)
 }
 EXPORT_SYMBOL_GPL(hyperv_cleanup);
 
-/*
- * hv_do_hypercall- Invoke the specified hypercall
- */
-u64 hv_do_hypercall(u64 control, void *input, void *output)
-{
-	u64 input_address = (input) ? virt_to_phys(input) : 0;
-	u64 output_address = (output) ? virt_to_phys(output) : 0;
-#ifdef CONFIG_X86_64
-	u64 hv_status = 0;
-
-	if (!hypercall_pg)
-		return (u64)ULLONG_MAX;
-
-	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
-	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
-			     "c" (control), "d" (input_address),
-			     "m" (hypercall_pg));
-
-	return hv_status;
-
-#else
-
-	u32 control_hi = control >> 32;
-	u32 control_lo = control & 0xFFFFFFFF;
-	u32 hv_status_hi = 1;
-	u32 hv_status_lo = 1;
-	u32 input_address_hi = input_address >> 32;
-	u32 input_address_lo = input_address & 0xFFFFFFFF;
-	u32 output_address_hi = output_address >> 32;
-	u32 output_address_lo = output_address & 0xFFFFFFFF;
-
-	if (!hypercall_pg)
-		return (u64)ULLONG_MAX;
-
-	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
-			      "=a"(hv_status_lo) : "d" (control_hi),
-			      "a" (control_lo), "b" (input_address_hi),
-			      "c" (input_address_lo), "D"(output_address_hi),
-			      "S"(output_address_lo), "m" (hypercall_pg));
-
-	return hv_status_lo | ((u64)hv_status_hi << 32);
-#endif /* !x86_64 */
-}
-EXPORT_SYMBOL_GPL(hv_do_hypercall);
-
 void hyperv_report_panic(struct pt_regs *regs)
 {
 	static bool panic_reported;
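The hv_init.c change above follows a common kernel pattern: allocate the per-CPU VP-index array up front, register a dynamic CPU-hotplug callback so each CPU records its own index as it comes (or already is) online, and unwind the allocation if a later step fails. Below is a minimal, self-contained sketch of that pattern; it is not the Hyper-V code itself, and every "demo_" name is hypothetical.

```c
/*
 * Sketch of the allocate / register-hotplug-callback / unwind pattern used
 * by hyperv_init() above.  "demo_" names are invented for illustration.
 */
#include <linux/cpuhotplug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>

static u32 *demo_vp_index;
static int demo_hp_state;

static int demo_cpu_online(unsigned int cpu)
{
	/* The real callback reads the VP index MSR via hv_get_vp_index(). */
	demo_vp_index[cpu] = cpu;	/* placeholder value */
	return 0;
}

static int __init demo_init(void)
{
	demo_vp_index = kmalloc_array(num_possible_cpus(),
				      sizeof(*demo_vp_index), GFP_KERNEL);
	if (!demo_vp_index)
		return -ENOMEM;

	/* Runs demo_cpu_online() on every online CPU now and on later hotplug. */
	demo_hp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					  "demo/vp_index:online",
					  demo_cpu_online, NULL);
	if (demo_hp_state < 0) {
		kfree(demo_vp_index);
		demo_vp_index = NULL;
		return demo_hp_state;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_hp_state);
	kfree(demo_vp_index);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```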
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
new file mode 100644
index 000000000000..39e7f6e50919
--- /dev/null
+++ b/arch/x86/hyperv/mmu.c
@@ -0,0 +1,272 @@
+#define pr_fmt(fmt) "Hyper-V: " fmt
+
+#include <linux/hyperv.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/fpu/api.h>
+#include <asm/mshyperv.h>
+#include <asm/msr.h>
+#include <asm/tlbflush.h>
+
+#define CREATE_TRACE_POINTS
+#include <asm/trace/hyperv.h>
+
+/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
+struct hv_flush_pcpu {
+	u64 address_space;
+	u64 flags;
+	u64 processor_mask;
+	u64 gva_list[];
+};
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_flush_pcpu_ex {
+	u64 address_space;
+	u64 flags;
+	struct {
+		u64 format;
+		u64 valid_bank_mask;
+		u64 bank_contents[];
+	} hv_vp_set;
+	u64 gva_list[];
+};
+
+/* Each gva in gva_list encodes up to 4096 pages to flush */
+#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+
+static struct hv_flush_pcpu __percpu *pcpu_flush;
+
+static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+
+/*
+ * Fills in gva_list starting from offset. Returns the number of items added.
+ */
+static inline int fill_gva_list(u64 gva_list[], int offset,
+				unsigned long start, unsigned long end)
+{
+	int gva_n = offset;
+	unsigned long cur = start, diff;
+
+	do {
+		diff = end > cur ? end - cur : 0;
+
+		gva_list[gva_n] = cur & PAGE_MASK;
+		/*
+		 * Lower 12 bits encode the number of additional
+		 * pages to flush (in addition to the 'cur' page).
+		 */
+		if (diff >= HV_TLB_FLUSH_UNIT)
+			gva_list[gva_n] |= ~PAGE_MASK;
+		else if (diff)
+			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+
+		cur += HV_TLB_FLUSH_UNIT;
+		gva_n++;
+
+	} while (cur < end);
+
+	return gva_n - offset;
+}
+
+/* Return the number of banks in the resulting vp_set */
+static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
+				    const struct cpumask *cpus)
+{
+	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+
+	/*
+	 * Some banks may end up being empty but this is acceptable.
+	 */
+	for_each_cpu(cpu, cpus) {
+		vcpu = hv_cpu_number_to_vp_number(cpu);
+		vcpu_bank = vcpu / 64;
+		vcpu_offset = vcpu % 64;
+
+		/* valid_bank_mask can represent up to 64 banks */
+		if (vcpu_bank >= 64)
+			return 0;
+
+		__set_bit(vcpu_offset, (unsigned long *)
+			  &flush->hv_vp_set.bank_contents[vcpu_bank]);
+		if (vcpu_bank >= nr_bank)
+			nr_bank = vcpu_bank + 1;
+	}
+	flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
+
+	return nr_bank;
+}
+
+static void hyperv_flush_tlb_others(const struct cpumask *cpus,
+				    const struct flush_tlb_info *info)
+{
+	int cpu, vcpu, gva_n, max_gvas;
+	struct hv_flush_pcpu *flush;
+	u64 status = U64_MAX;
+	unsigned long flags;
+
+	trace_hyperv_mmu_flush_tlb_others(cpus, info);
+
+	if (!pcpu_flush || !hv_hypercall_pg)
+		goto do_native;
+
+	if (cpumask_empty(cpus))
+		return;
+
+	local_irq_save(flags);
+
+	flush = this_cpu_ptr(pcpu_flush);
+
+	if (info->mm) {
+		flush->address_space = virt_to_phys(info->mm->pgd);
+		flush->flags = 0;
+	} else {
+		flush->address_space = 0;
+		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+	}
+
+	flush->processor_mask = 0;
+	if (cpumask_equal(cpus, cpu_present_mask)) {
+		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
+	} else {
+		for_each_cpu(cpu, cpus) {
+			vcpu = hv_cpu_number_to_vp_number(cpu);
+			if (vcpu >= 64)
+				goto do_native;
+
+			__set_bit(vcpu, (unsigned long *)
+				  &flush->processor_mask);
+		}
+	}
+
+	/*
+	 * We can flush not more than max_gvas with one hypercall. Flush the
+	 * whole address space if we were asked to do more.
+	 */
+	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
+
+	if (info->end == TLB_FLUSH_ALL) {
+		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
+		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
+					 flush, NULL);
+	} else if (info->end &&
+		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
+		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
+					 flush, NULL);
+	} else {
+		gva_n = fill_gva_list(flush->gva_list, 0,
+				      info->start, info->end);
+		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
+					     gva_n, 0, flush, NULL);
+	}
+
+	local_irq_restore(flags);
+
+	if (!(status & HV_HYPERCALL_RESULT_MASK))
+		return;
+do_native:
+	native_flush_tlb_others(cpus, info);
+}
+
+static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+				       const struct flush_tlb_info *info)
+{
+	int nr_bank = 0, max_gvas, gva_n;
+	struct hv_flush_pcpu_ex *flush;
+	u64 status = U64_MAX;
+	unsigned long flags;
+
+	trace_hyperv_mmu_flush_tlb_others(cpus, info);
+
+	if (!pcpu_flush_ex || !hv_hypercall_pg)
+		goto do_native;
+
+	if (cpumask_empty(cpus))
+		return;
+
+	local_irq_save(flags);
+
+	flush = this_cpu_ptr(pcpu_flush_ex);
+
+	if (info->mm) {
+		flush->address_space = virt_to_phys(info->mm->pgd);
+		flush->flags = 0;
+	} else {
+		flush->address_space = 0;
+		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+	}
+
+	flush->hv_vp_set.valid_bank_mask = 0;
+
+	if (!cpumask_equal(cpus, cpu_present_mask)) {
+		flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K;
+		nr_bank = cpumask_to_vp_set(flush, cpus);
+	}
+
+	if (!nr_bank) {
+		flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
+		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
+	}
+
+	/*
+	 * We can flush not more than max_gvas with one hypercall. Flush the
+	 * whole address space if we were asked to do more.
+	 */
+	max_gvas =
+		(PAGE_SIZE - sizeof(*flush) - nr_bank *
+		 sizeof(flush->hv_vp_set.bank_contents[0])) /
+		sizeof(flush->gva_list[0]);
+
+	if (info->end == TLB_FLUSH_ALL) {
+		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
+		status = hv_do_rep_hypercall(
+			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
+			0, nr_bank + 2, flush, NULL);
+	} else if (info->end &&
+		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
+		status = hv_do_rep_hypercall(
+			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
+			0, nr_bank + 2, flush, NULL);
+	} else {
+		gva_n = fill_gva_list(flush->gva_list, nr_bank,
+				      info->start, info->end);
+		status = hv_do_rep_hypercall(
+			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
+			gva_n, nr_bank + 2, flush, NULL);
+	}
+
+	local_irq_restore(flags);
+
+	if (!(status & HV_HYPERCALL_RESULT_MASK))
+		return;
+do_native:
+	native_flush_tlb_others(cpus, info);
+}
+
+void hyperv_setup_mmu_ops(void)
+{
+	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
+		return;
+
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+
+	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
+		pr_info("Using hypercall for remote TLB flush\n");
+		pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
+	} else {
+		pr_info("Using ext hypercall for remote TLB flush\n");
+		pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
+	}
+}
+
+void hyper_alloc_mmu(void)
+{
+	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
+		return;
+
+	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+		pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+	else
+		pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+}
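The new mmu.c above batches flush requests into gva_list entries: bits 63:12 of each entry carry the page-aligned virtual address and the low 12 bits carry the number of additional pages the entry covers, so one entry can describe up to 4096 pages (HV_TLB_FLUSH_UNIT). The standalone userspace sketch below illustrates only that encoding, assuming 4 KiB pages; it mirrors fill_gva_list() but is not the kernel implementation.

```c
/*
 * Standalone sketch of the gva_list range encoding used by the TLB-flush
 * hypercalls above: bits 63:12 = page-aligned GVA, bits 11:0 = number of
 * *additional* pages covered by the entry.  Assumes 4 KiB pages.
 */
#include <inttypes.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))
#define DEMO_FLUSH_UNIT (4096 * DEMO_PAGE_SIZE)	/* pages covered per entry */

static int demo_fill_gva_list(uint64_t gva_list[], unsigned long start,
			      unsigned long end)
{
	int gva_n = 0;
	unsigned long cur = start, diff;

	do {
		diff = end > cur ? end - cur : 0;

		gva_list[gva_n] = cur & DEMO_PAGE_MASK;
		if (diff >= DEMO_FLUSH_UNIT)
			gva_list[gva_n] |= ~DEMO_PAGE_MASK;	/* 4095 extra pages */
		else if (diff)
			gva_list[gva_n] |= (diff - 1) >> DEMO_PAGE_SHIFT;

		cur += DEMO_FLUSH_UNIT;
		gva_n++;
	} while (cur < end);

	return gva_n;
}

int main(void)
{
	uint64_t list[4];
	/* A 3-page range: one entry, low bits = 2 (two pages beyond the first). */
	int n = demo_fill_gva_list(list, 0x7f0000001000UL, 0x7f0000004000UL);

	for (int i = 0; i < n; i++)
		printf("entry %d: 0x%016" PRIx64 "\n", i, list[i]);
	return 0;
}
```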
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 58b9291b46d8..63cc96f064dc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -3,6 +3,8 @@
 
 #include <linux/types.h>
 #include <linux/atomic.h>
+#include <linux/nmi.h>
+#include <asm/io.h>
 #include <asm/hyperv.h>
 
 /*
@@ -170,12 +172,155 @@ void hv_remove_crash_handler(void);
 
 #if IS_ENABLED(CONFIG_HYPERV)
 extern struct clocksource *hyperv_cs;
+extern void *hv_hypercall_pg;
+
+static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
+{
+	u64 input_address = input ? virt_to_phys(input) : 0;
+	u64 output_address = output ? virt_to_phys(output) : 0;
+	u64 hv_status;
+	register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_64
+	if (!hv_hypercall_pg)
+		return U64_MAX;
+
+	__asm__ __volatile__("mov %4, %%r8\n"
+			     "call *%5"
+			     : "=a" (hv_status), "+r" (__sp),
+			       "+c" (control), "+d" (input_address)
+			     : "r" (output_address), "m" (hv_hypercall_pg)
+			     : "cc", "memory", "r8", "r9", "r10", "r11");
+#else
+	u32 input_address_hi = upper_32_bits(input_address);
+	u32 input_address_lo = lower_32_bits(input_address);
+	u32 output_address_hi = upper_32_bits(output_address);
+	u32 output_address_lo = lower_32_bits(output_address);
+
+	if (!hv_hypercall_pg)
+		return U64_MAX;
+
+	__asm__ __volatile__("call *%7"
+			     : "=A" (hv_status),
+			       "+c" (input_address_lo), "+r" (__sp)
+			     : "A" (control),
+			       "b" (input_address_hi),
+			       "D"(output_address_hi), "S"(output_address_lo),
+			       "m" (hv_hypercall_pg)
+			     : "cc", "memory");
+#endif /* !x86_64 */
+	return hv_status;
+}
+
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+
+/* Fast hypercall with 8 bytes of input and no output */
+static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
+{
+	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
+	register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_64
+	{
+		__asm__ __volatile__("call *%4"
+				     : "=a" (hv_status), "+r" (__sp),
+				       "+c" (control), "+d" (input1)
+				     : "m" (hv_hypercall_pg)
+				     : "cc", "r8", "r9", "r10", "r11");
+	}
+#else
+	{
+		u32 input1_hi = upper_32_bits(input1);
+		u32 input1_lo = lower_32_bits(input1);
+
+		__asm__ __volatile__ ("call *%5"
+				      : "=A"(hv_status),
+					"+c"(input1_lo),
+					"+r"(__sp)
+				      : "A" (control),
+					"b" (input1_hi),
+					"m" (hv_hypercall_pg)
+				      : "cc", "edi", "esi");
+	}
+#endif
+	return hv_status;
+}
+
+/*
+ * Rep hypercalls. Callers of this functions are supposed to ensure that
+ * rep_count and varhead_size comply with Hyper-V hypercall definition.
+ */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+				      void *input, void *output)
+{
+	u64 control = code;
+	u64 status;
+	u16 rep_comp;
+
+	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+	do {
+		status = hv_do_hypercall(control, input, output);
+		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
+			return status;
+
+		/* Bits 32-43 of status have 'Reps completed' data. */
+		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
+			HV_HYPERCALL_REP_COMP_OFFSET;
+
+		control &= ~HV_HYPERCALL_REP_START_MASK;
+		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+		touch_nmi_watchdog();
+	} while (rep_comp < rep_count);
+
+	return status;
+}
+
+/*
+ * Hypervisor's notion of virtual processor ID is different from
+ * Linux' notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Setup a map for easy access
+ * to this information.
+ */
+extern u32 *hv_vp_index;
+
+/**
+ * hv_cpu_number_to_vp_number() - Map CPU to VP.
+ * @cpu_number: CPU number in Linux terms
+ *
+ * This function returns the mapping between the Linux processor
+ * number and the hypervisor's virtual processor number, useful
+ * in making hypercalls and such that talk about specific
+ * processors.
+ *
+ * Return: Virtual processor number in Hyper-V terms
+ */
+static inline int hv_cpu_number_to_vp_number(int cpu_number)
+{
+	return hv_vp_index[cpu_number];
+}
 
 void hyperv_init(void);
+void hyperv_setup_mmu_ops(void);
+void hyper_alloc_mmu(void);
 void hyperv_report_panic(struct pt_regs *regs);
 bool hv_is_hypercall_page_setup(void);
 void hyperv_cleanup(void);
-#endif
+#else /* CONFIG_HYPERV */
+static inline void hyperv_init(void) {}
+static inline bool hv_is_hypercall_page_setup(void) { return false; }
+static inline void hyperv_cleanup(void) {}
+static inline void hyperv_setup_mmu_ops(void) {}
+#endif /* CONFIG_HYPERV */
+
 #ifdef CONFIG_HYPERV_TSCPAGE
 struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
 static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
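The hv_do_rep_hypercall() helper added above packs the hypercall control word (call code in bits 15:0, variable-header size at bit 17, rep count in bits 43:32, rep start index in bits 59:48) and keeps reissuing the call, advancing the start index by the "reps completed" value the hypervisor returns in bits 43:32 of the status, until all reps are done. The userspace sketch below walks through only that bookkeeping; the hypercall is replaced by a stub that pretends to finish three reps per call, and the constants simply mirror the defines in the hunk above.

```c
/*
 * Sketch of the rep-hypercall control/status bit layout from the header
 * above, with the hypercall replaced by a stub.  Purely illustrative.
 */
#include <inttypes.h>
#include <stdio.h>

#define HV_HYPERCALL_RESULT_MASK      0xffffULL          /* bits 15:0  */
#define HV_HYPERCALL_VARHEAD_OFFSET   17
#define HV_HYPERCALL_REP_COMP_OFFSET  32                 /* bits 43:32 */
#define HV_HYPERCALL_REP_COMP_MASK    (0xfffULL << 32)
#define HV_HYPERCALL_REP_START_OFFSET 48                 /* bits 59:48 */
#define HV_HYPERCALL_REP_START_MASK   (0xfffULL << 48)
#define HV_STATUS_SUCCESS             0

/* Stub: pretend the hypervisor processes at most 3 reps per invocation. */
static uint64_t stub_hypercall(uint64_t control)
{
	uint64_t start = (control & HV_HYPERCALL_REP_START_MASK) >> HV_HYPERCALL_REP_START_OFFSET;
	uint64_t total = (control & HV_HYPERCALL_REP_COMP_MASK) >> HV_HYPERCALL_REP_COMP_OFFSET;
	uint64_t done = (start + 3 > total) ? total : start + 3;

	return HV_STATUS_SUCCESS | (done << HV_HYPERCALL_REP_COMP_OFFSET);
}

static uint64_t demo_rep_hypercall(uint16_t code, uint16_t rep_count, uint16_t varhead_size)
{
	uint64_t control = code;
	uint64_t status;
	uint16_t rep_comp;

	control |= (uint64_t)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (uint64_t)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = stub_hypercall(control);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 43:32 of the status report how many reps completed so far. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >> HV_HYPERCALL_REP_COMP_OFFSET;

		/* Resume the next call at the first unprocessed rep. */
		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (uint64_t)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
	} while (rep_comp < rep_count);

	return status;
}

int main(void)
{
	/* 10 reps finish after four stub calls (3 + 3 + 3 + 1). */
	printf("final status: 0x%016" PRIx64 "\n", demo_rep_hypercall(0x0003, 10, 0));
	return 0;
}
```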
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
new file mode 100644
index 000000000000..4253bca99989
--- /dev/null
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -0,0 +1,40 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_TRACE_HYPERV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HYPERV_H
+
+#include <linux/tracepoint.h>
+
+#if IS_ENABLED(CONFIG_HYPERV)
+
+TRACE_EVENT(hyperv_mmu_flush_tlb_others,
+	    TP_PROTO(const struct cpumask *cpus,
+		     const struct flush_tlb_info *info),
+	    TP_ARGS(cpus, info),
+	    TP_STRUCT__entry(
+		    __field(unsigned int, ncpus)
+		    __field(struct mm_struct *, mm)
+		    __field(unsigned long, addr)
+		    __field(unsigned long, end)
+		    ),
+	    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
+			   __entry->mm = info->mm;
+			   __entry->addr = info->start;
+			   __entry->end = info->end;
+		    ),
+	    TP_printk("ncpus %d mm %p addr %lx, end %lx",
+		      __entry->ncpus, __entry->mm,
+		      __entry->addr, __entry->end)
+	);
+
+#endif /* CONFIG_HYPERV */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/trace/
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hyperv
+#endif /* _TRACE_HYPERV_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 127ddadee1a5..7032f4d8dff3 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -149,6 +149,9 @@
  */
 #define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
 
+/* Recommend using the newer ExProcessorMasks interface */
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
+
 /*
  * HV_VP_SET available
  */
@@ -242,7 +245,11 @@
 	(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
 
 /* Declare the various hypercall operations. */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
 #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
 #define HVCALL_POST_MESSAGE 0x005c
 #define HVCALL_SIGNAL_EVENT 0x005d
 
@@ -259,6 +266,16 @@
 #define HV_PROCESSOR_POWER_STATE_C2 2
 #define HV_PROCESSOR_POWER_STATE_C3 3
 
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+enum HV_GENERIC_SET_FORMAT {
+	HV_GENERIC_SET_SPARCE_4K,
+	HV_GENERIC_SET_ALL,
+};
+
 /* hypercall status code */
 #define HV_STATUS_SUCCESS 0
 #define HV_STATUS_INVALID_HYPERCALL_CODE 2
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index fbafd24174af..3b3f713e15e5 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -250,6 +250,7 @@ static void __init ms_hyperv_init_platform(void)
 	 * Setup the hook to get control post apic initialization.
 	 */
 	x86_platform.apic_post_init = hyperv_init;
+	hyperv_setup_mmu_ops();
 #endif
 }
 
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 5a18aedcb341..b901ece278dd 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,16 +215,23 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 	struct irq_alloc_info info;
 	int polarity;
 	int ret;
+	u8 gsi;
 
 	if (dev->irq_managed && dev->irq > 0)
 		return 0;
 
+	ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+	if (ret < 0) {
+		dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
+		return ret;
+	}
+
 	switch (intel_mid_identify_cpu()) {
 	case INTEL_MID_CPU_CHIP_TANGIER:
 		polarity = IOAPIC_POL_HIGH;
 
 		/* Special treatment for IRQ0 */
-		if (dev->irq == 0) {
+		if (gsi == 0) {
 			/*
 			 * Skip HS UART common registers device since it has
 			 * IRQ0 assigned and not used by the kernel.
@@ -253,10 +260,11 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 	 * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
 	 * IOAPIC RTE entries, so we just enable RTE for the device.
 	 */
-	ret = mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info);
+	ret = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
 	if (ret < 0)
 		return ret;
 
+	dev->irq = ret;
 	dev->irq_managed = 1;
 
 	return 0;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index 5a0483e7bf66..dc036e511f48 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
 	return 0;
 }
 
-static struct bt_sfi_data tng_bt_sfi_data __initdata = {
+static const struct bt_sfi_data tng_bt_sfi_data __initdata = {
 	.setup = tng_bt_sfi_setup,
 };
 
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 9e304e2ea4f5..4f5fa65a1011 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -30,13 +30,13 @@ static int tangier_probe(struct platform_device *pdev)
 {
 	struct irq_alloc_info info;
 	struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
-	int gsi, irq;
+	int gsi = TANGIER_EXT_TIMER0_MSI;
+	int irq;
 
 	if (!pdata)
 		return -EINVAL;
 
 	/* IOAPIC builds identity mapping between GSI and IRQ on MID */
-	gsi = pdata->irq;
 	ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
 	irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
 	if (irq < 0) {
@@ -44,11 +44,11 @@ static int tangier_probe(struct platform_device *pdev)
 		return irq;
 	}
 
+	pdata->irq = irq;
 	return 0;
 }
 
 static struct intel_mid_wdt_pdata tangier_pdata = {
-	.irq = TANGIER_EXT_TIMER0_MSI,
 	.probe = tangier_probe,
 };
 
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 12a272582cdc..86676cec99a1 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -183,6 +183,7 @@ void __init x86_intel_mid_early_setup(void)
 
 	x86_init.timers.timer_init = intel_mid_time_init;
 	x86_init.timers.setup_percpu_clockev = x86_init_noop;
+	x86_init.timers.wallclock_init = intel_mid_rtc_init;
 
 	x86_init.irqs.pre_vector_init = x86_init_noop;
 
@@ -191,7 +192,6 @@ void __init x86_intel_mid_early_setup(void)
 	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
 
 	x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
-	x86_init.timers.wallclock_init = intel_mid_rtc_init;
 	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
 
 	x86_init.pci.init = intel_mid_pci_init;
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index ef03852ea6e8..49ec5b94c71f 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -444,7 +444,7 @@ static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states)
 static int pnw_set_initial_state(struct mid_pwr *pwr)
 {
 	/* On Penwell SRAM must stay powered on */
-	const u32 states[] = {
+	static const u32 states[] = {
 		0xf00fffff,	/* PM_SSC(0) */
 		0xffffffff,	/* PM_SSC(1) */
 		0xffffffff,	/* PM_SSC(2) */
@@ -455,7 +455,7 @@ static int pnw_set_initial_state(struct mid_pwr *pwr)
 
 static int tng_set_initial_state(struct mid_pwr *pwr)
 {
-	const u32 states[] = {
+	static const u32 states[] = {
 		0xffffffff,	/* PM_SSC(0) */
 		0xffffffff,	/* PM_SSC(1) */
 		0xffffffff,	/* PM_SSC(2) */
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index c29cd5387a35..50b89ea0e60f 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -3,6 +3,7 @@ menu "Microsoft Hyper-V guest support"
 config HYPERV
 	tristate "Microsoft Hyper-V client drivers"
 	depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+	select PARAVIRT
 	help
 	  Select this option to run Linux as a Hyper-V client operating
 	  system.
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 968af173c4c1..060df71c2e8b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -603,7 +603,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 		 */
 		channel->numa_node = 0;
 		channel->target_cpu = 0;
-		channel->target_vp = hv_context.vp_index[0];
+		channel->target_vp = hv_cpu_number_to_vp_number(0);
 		return;
 	}
 
@@ -687,7 +687,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 	}
 
 	channel->target_cpu = cur_cpu;
-	channel->target_vp = hv_context.vp_index[cur_cpu];
+	channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
 }
 
 static void vmbus_wait_for_unload(void)
@@ -809,21 +809,12 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 	/*
 	 * Setup state for signalling the host.
 	 */
-	newchannel->sig_event = (struct hv_input_signal_event *)
-		(ALIGN((unsigned long)
-		&newchannel->sig_buf,
-		HV_HYPERCALL_PARAM_ALIGN));
-
-	newchannel->sig_event->connectionid.asu32 = 0;
-	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
-	newchannel->sig_event->flag_number = 0;
-	newchannel->sig_event->rsvdz = 0;
+	newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;
 
 	if (vmbus_proto_version != VERSION_WS2008) {
 		newchannel->is_dedicated_interrupt =
 			(offer->is_dedicated_interrupt != 0);
-		newchannel->sig_event->connectionid.u.id =
-			offer->connection_id;
+		newchannel->sig_event = offer->connection_id;
 	}
 
 	memcpy(&newchannel->offermsg, offer,
@@ -1251,8 +1242,7 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
 		return outgoing_channel;
 	}
 
-	cur_cpu = hv_context.vp_index[get_cpu()];
-	put_cpu();
+	cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
 	list_for_each_safe(cur, tmp, &primary->sc_list) {
 		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
 		if (cur_channel->state != CHANNEL_OPENED_STATE)
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 59c11ff90d12..f41901f80b64 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -32,6 +32,8 @@
 #include <linux/hyperv.h>
 #include <linux/export.h>
 #include <asm/hyperv.h>
+#include <asm/mshyperv.h>
+
 #include "hyperv_vmbus.h"
 
 
@@ -94,7 +96,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
 	 * the CPU attempting to connect may not be CPU 0.
 	 */
 	if (version >= VERSION_WIN8_1) {
-		msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+		msg->target_vcpu =
+			hv_cpu_number_to_vp_number(smp_processor_id());
 		vmbus_connection.connect_cpu = smp_processor_id();
 	} else {
 		msg->target_vcpu = 0;
@@ -406,6 +409,6 @@ void vmbus_set_event(struct vmbus_channel *channel)
 	if (!channel->is_dedicated_interrupt)
 		vmbus_send_interrupt(child_relid);
 
-	hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
+	hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
 }
 EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 2ea12207caa0..8267439dd1ee 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -234,7 +234,6 @@ int hv_synic_init(unsigned int cpu)
 	union hv_synic_siefp siefp;
 	union hv_synic_sint shared_sint;
 	union hv_synic_scontrol sctrl;
-	u64 vp_index;
 
 	/* Setup the Synic's message page */
 	hv_get_simp(simp.as_uint64);
@@ -276,14 +275,6 @@ int hv_synic_init(unsigned int cpu)
 	hv_context.synic_initialized = true;
 
 	/*
-	 * Setup the mapping between Hyper-V's notion
-	 * of cpuid and Linux' notion of cpuid.
-	 * This array will be indexed using Linux cpuid.
-	 */
-	hv_get_vp_index(vp_index);
-	hv_context.vp_index[cpu] = (u32)vp_index;
-
-	/*
 	 * Register the per-cpu clockevent source.
 	 */
 	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 1b6a5e0dfa75..49569f8fe038 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -229,17 +229,6 @@ struct hv_context {
 	struct hv_per_cpu_context __percpu *cpu_context;
 
 	/*
-	 * Hypervisor's notion of virtual processor ID is different from
-	 * Linux' notion of CPU ID. This information can only be retrieved
-	 * in the context of the calling CPU. Setup a map for easy access
-	 * to this information:
-	 *
-	 * vp_index[a] is the Hyper-V's processor ID corresponding to
-	 * Linux cpuid 'a'.
-	 */
-	u32 vp_index[NR_CPUS];
-
-	/*
 	 * To manage allocations in a NUMA node.
 	 * Array indexed by numa node ID.
 	 */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 43160a2eafe0..a9d49f6f6501 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1454,23 +1454,6 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
 }
 EXPORT_SYMBOL_GPL(vmbus_free_mmio);
 
-/**
- * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
- * @cpu_number: CPU number in Linux terms
- *
- * This function returns the mapping between the Linux processor
- * number and the hypervisor's virtual processor number, useful
- * in making hypercalls and such that talk about specific
- * processors.
- *
- * Return: Virtual processor number in Hyper-V terms
- */
-int vmbus_cpu_number_to_vp_number(int cpu_number)
-{
-	return hv_context.vp_index[cpu_number];
-}
-EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);
-
 static int vmbus_acpi_add(struct acpi_device *device)
 {
 	acpi_status result;
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 415dcc69a502..aba041438566 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -562,52 +562,6 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 
-
-/*
- * Temporary CPU to vCPU mapping to address transitioning
- * vmbus_cpu_number_to_vp_number() being migrated to
- * hv_cpu_number_to_vp_number() in a separate patch. Once that patch
- * has been picked up in the main line, remove this code here and use
- * the official code.
- */
-static struct hv_tmpcpumap
-{
-	bool initialized;
-	u32 vp_index[NR_CPUS];
-} hv_tmpcpumap;
-
-static void hv_tmpcpumap_init_cpu(void *_unused)
-{
-	int cpu = smp_processor_id();
-	u64 vp_index;
-
-	hv_get_vp_index(vp_index);
-
-	hv_tmpcpumap.vp_index[cpu] = vp_index;
-}
-
-static void hv_tmpcpumap_init(void)
-{
-	if (hv_tmpcpumap.initialized)
-		return;
-
-	memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index));
-	on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true);
-	hv_tmpcpumap.initialized = true;
-}
-
-/**
- * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr
- *
- * Remove once vmbus_cpu_number_to_vp_number() has been converted to
- * hv_cpu_number_to_vp_number() and replace callers appropriately.
- */
-static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu)
-{
-	return hv_tmpcpumap.vp_index[cpu];
-}
-
-
 /**
  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
  * @devfn: The Linux representation of PCI slot
@@ -971,7 +925,7 @@ static void hv_irq_unmask(struct irq_data *data)
 		var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
 
 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
-			cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
+			cpu_vmbus = hv_cpu_number_to_vp_number(cpu);
 
 			if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
 				dev_err(&hbus->hdev->device,
@@ -986,7 +940,7 @@ static void hv_irq_unmask(struct irq_data *data)
 	} else {
 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
 			params->int_target.vp_mask |=
-				(1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+				(1ULL << hv_cpu_number_to_vp_number(cpu));
 		}
 	}
 
@@ -1063,7 +1017,7 @@ static u32 hv_compose_msi_req_v2(
 	 */
 	cpu = cpumask_first_and(affinity, cpu_online_mask);
 	int_pkt->int_desc.processor_array[0] =
-		hv_tmp_cpu_nr_to_vp_nr(cpu);
+		hv_cpu_number_to_vp_number(cpu);
 	int_pkt->int_desc.processor_count = 1;
 
 	return sizeof(*int_pkt);
@@ -2490,8 +2444,6 @@ static int hv_pci_probe(struct hv_device *hdev,
 		return -ENOMEM;
 	hbus->state = hv_pcibus_init;
 
-	hv_tmpcpumap_init();
-
 	/*
 	 * The PCI bus "domain" is what is called "segment" in ACPI and
 	 * other specs. Pull it from the instance ID, to get something
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index e4bbf7dc9932..c458d7b7ad19 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -661,18 +661,6 @@ union hv_connection_id {
 	} u;
 };
 
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
-	union hv_connection_id connectionid;
-	u16 flag_number;
-	u16 rsvdz;
-};
-
-struct hv_input_signal_event_buffer {
-	u64 align8;
-	struct hv_input_signal_event event;
-};
-
 enum hv_numa_policy {
 	HV_BALANCED = 0,
 	HV_LOCALIZED,
@@ -754,8 +742,7 @@ struct vmbus_channel {
 	} callback_mode;
 
 	bool is_dedicated_interrupt;
-	struct hv_input_signal_event_buffer sig_buf;
-	struct hv_input_signal_event *sig_event;
+	u64 sig_event;
 
 	/*
 	 * Starting with win8, this field will be used to specify
@@ -1151,8 +1138,6 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
 			resource_size_t size, resource_size_t align,
 			bool fb_overlap_ok);
 void vmbus_free_mmio(resource_size_t start, resource_size_t size);
-int vmbus_cpu_number_to_vp_number(int cpu_number);
-u64 hv_do_hypercall(u64 control, void *input, void *output);
 
 /*
  * GUID definitions of various offer types - services offered to the guest.