-rw-r--r--  Documentation/devicetree/bindings/arm/xen.txt | 35
-rw-r--r--  arch/arm/include/asm/xen/hypercall.h | 1
-rw-r--r--  arch/arm/include/asm/xen/xen-ops.h | 6
-rw-r--r--  arch/arm/kernel/setup.c | 2
-rw-r--r--  arch/arm/xen/Makefile | 1
-rw-r--r--  arch/arm/xen/efi.c | 40
-rw-r--r--  arch/arm/xen/enlighten.c | 157
-rw-r--r--  arch/arm/xen/hypercall.S | 1
-rw-r--r--  arch/arm64/include/asm/xen/xen-ops.h | 6
-rw-r--r--  arch/arm64/kernel/setup.c | 3
-rw-r--r--  arch/arm64/xen/Makefile | 1
-rw-r--r--  arch/arm64/xen/hypercall.S | 1
-rw-r--r--  arch/x86/include/asm/cpu.h | 1
-rw-r--r--  arch/x86/include/asm/smp.h | 2
-rw-r--r--  arch/x86/include/asm/xen/cpuid.h | 5
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 16
-rw-r--r--  arch/x86/kernel/apic/apic.c | 2
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 3
-rw-r--r--  arch/x86/xen/efi.c | 111
-rw-r--r--  arch/x86/xen/enlighten.c | 49
-rw-r--r--  arch/x86/xen/grant-table.c | 57
-rw-r--r--  arch/x86/xen/irq.c | 3
-rw-r--r--  arch/x86/xen/pmu.c | 2
-rw-r--r--  arch/x86/xen/smp.c | 18
-rw-r--r--  arch/x86/xen/time.c | 63
-rw-r--r--  arch/x86/xen/xen-ops.h | 1
-rw-r--r--  drivers/acpi/scan.c | 74
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 20
-rw-r--r--  drivers/block/xen-blkfront.c | 43
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 5
-rw-r--r--  drivers/firmware/efi/efi.c | 81
-rw-r--r--  drivers/of/fdt.c | 13
-rw-r--r--  drivers/xen/Kconfig | 2
-rw-r--r--  drivers/xen/Makefile | 1
-rw-r--r--  drivers/xen/arm-device.c | 196
-rw-r--r--  drivers/xen/efi.c | 173
-rw-r--r--  drivers/xen/events/events_base.c | 13
-rw-r--r--  drivers/xen/events/events_fifo.c | 2
-rw-r--r--  drivers/xen/evtchn.c | 43
-rw-r--r--  drivers/xen/gntalloc.c | 2
-rw-r--r--  drivers/xen/gntdev.c | 2
-rw-r--r--  drivers/xen/privcmd.c | 2
-rw-r--r--  drivers/xen/time.c | 50
-rw-r--r--  drivers/xen/xen-pciback/conf_space.c | 22
-rw-r--r--  drivers/xen/xen-pciback/conf_space_header.c | 57
-rw-r--r--  drivers/xen/xen-pciback/pciback.h | 1
-rw-r--r--  drivers/xen/xen-pciback/pciback_ops.c | 2
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c | 10
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 15
-rw-r--r--  drivers/xen/xlate_mmu.c | 77
-rw-r--r--  include/linux/kernel_stat.h | 1
-rw-r--r--  include/linux/of_fdt.h | 2
-rw-r--r--  include/uapi/xen/evtchn.h | 15
-rw-r--r--  include/xen/interface/hvm/params.h | 40
-rw-r--r--  include/xen/interface/memory.h | 1
-rw-r--r--  include/xen/interface/vcpu.h | 24
-rw-r--r--  include/xen/interface/xen.h | 17
-rw-r--r--  include/xen/xen-ops.h | 40
-rw-r--r--  kernel/sched/cputime.c | 10
59 files changed, 1150 insertions, 493 deletions
diff --git a/Documentation/devicetree/bindings/arm/xen.txt b/Documentation/devicetree/bindings/arm/xen.txt
index 0f7b9c2109f8..c9b9321434ea 100644
--- a/Documentation/devicetree/bindings/arm/xen.txt
+++ b/Documentation/devicetree/bindings/arm/xen.txt
@@ -11,10 +11,32 @@ the following properties:
   memory where the grant table should be mapped to, using an
   HYPERVISOR_memory_op hypercall. The memory region is large enough to map
   the whole grant table (it is larger or equal to gnttab_max_grant_frames()).
+  This property is unnecessary when booting Dom0 using ACPI.
 
 - interrupts: the interrupt used by Xen to inject event notifications.
   A GIC node is also required.
+  This property is unnecessary when booting Dom0 using ACPI.
 
+To support UEFI on Xen ARM virtual platforms, Xen populates the FDT "uefi" node
+under /hypervisor with following parameters:
+
+________________________________________________________________________________
+Name                      | Size   | Description
+================================================================================
+xen,uefi-system-table     | 64-bit | Guest physical address of the UEFI System
+                          |        | Table.
+--------------------------------------------------------------------------------
+xen,uefi-mmap-start       | 64-bit | Guest physical address of the UEFI memory
+                          |        | map.
+--------------------------------------------------------------------------------
+xen,uefi-mmap-size        | 32-bit | Size in bytes of the UEFI memory map
+                          |        | pointed to in previous entry.
+--------------------------------------------------------------------------------
+xen,uefi-mmap-desc-size   | 32-bit | Size in bytes of each entry in the UEFI
+                          |        | memory map.
+--------------------------------------------------------------------------------
+xen,uefi-mmap-desc-ver    | 32-bit | Version of the mmap descriptor format.
+--------------------------------------------------------------------------------
 
 Example (assuming #address-cells = <2> and #size-cells = <2>):
 
@@ -22,4 +44,17 @@ hypervisor {
 	compatible = "xen,xen-4.3", "xen,xen";
 	reg = <0 0xb0000000 0 0x20000>;
 	interrupts = <1 15 0xf08>;
+	uefi {
+		xen,uefi-system-table = <0xXXXXXXXX>;
+		xen,uefi-mmap-start = <0xXXXXXXXX>;
+		xen,uefi-mmap-size = <0xXXXXXXXX>;
+		xen,uefi-mmap-desc-size = <0xXXXXXXXX>;
+		xen,uefi-mmap-desc-ver = <0xXXXXXXXX>;
+	};
 };
+
+The format and meaning of the "xen,uefi-*" parameters are similar to those in
+Documentation/arm/uefi.txt, which are provided by the regular UEFI stub. However
+they differ because they are provided by the Xen hypervisor, together with a set
+of UEFI runtime services implemented via hypercalls, see
+http://xenbits.xen.org/docs/unstable/hypercall/x86_64/include,public,platform.h.html.
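
The "xen,uefi-*" cells above are consumed very early, before the regular EFI
initialisation runs, by scanning the flattened device tree (this series extends
drivers/firmware/efi/efi.c and drivers/of/fdt.c for that, per the diffstat). A
minimal sketch of how such a consumer could read the parameters with the
flat-DT helpers; the function name and error handling are illustrative
assumptions, not code from the series:

#include <linux/types.h>
#include <linux/of_fdt.h>
#include <linux/printk.h>

static int __init example_read_xen_uefi_params(unsigned long uefi_node)
{
	const __be64 *systab;
	const __be32 *mmap_size;
	int len;

	/* Property names come from the binding documented above. */
	systab = of_get_flat_dt_prop(uefi_node, "xen,uefi-system-table", &len);
	if (!systab || len != sizeof(u64))
		return -ENODEV;

	mmap_size = of_get_flat_dt_prop(uefi_node, "xen,uefi-mmap-size", &len);
	if (!mmap_size || len != sizeof(u32))
		return -ENODEV;

	pr_info("Xen: UEFI system table at 0x%llx, memory map %u bytes\n",
		(unsigned long long)be64_to_cpup(systab),
		be32_to_cpup(mmap_size));
	return 0;
}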
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index b6b962d70db9..9d874db13c0e 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -52,6 +52,7 @@ int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
52int HYPERVISOR_physdev_op(int cmd, void *arg); 52int HYPERVISOR_physdev_op(int cmd, void *arg);
53int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); 53int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
54int HYPERVISOR_tmem_op(void *arg); 54int HYPERVISOR_tmem_op(void *arg);
55int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
55int HYPERVISOR_platform_op_raw(void *arg); 56int HYPERVISOR_platform_op_raw(void *arg);
56static inline int HYPERVISOR_platform_op(struct xen_platform_op *op) 57static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
57{ 58{
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
new file mode 100644
index 000000000000..ec154e719b11
--- /dev/null
+++ b/arch/arm/include/asm/xen/xen-ops.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_XEN_OPS_H
+#define _ASM_XEN_OPS_H
+
+void xen_efi_runtime_setup(void);
+
+#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7b5350060612..261dae6f3fec 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1064,6 +1064,7 @@ void __init setup_arch(char **cmdline_p)
1064 early_paging_init(mdesc); 1064 early_paging_init(mdesc);
1065#endif 1065#endif
1066 setup_dma_zone(mdesc); 1066 setup_dma_zone(mdesc);
1067 xen_early_init();
1067 efi_init(); 1068 efi_init();
1068 sanity_check_meminfo(); 1069 sanity_check_meminfo();
1069 arm_memblock_init(mdesc); 1070 arm_memblock_init(mdesc);
@@ -1080,7 +1081,6 @@ void __init setup_arch(char **cmdline_p)
1080 1081
1081 arm_dt_init_cpu_maps(); 1082 arm_dt_init_cpu_maps();
1082 psci_dt_init(); 1083 psci_dt_init();
1083 xen_early_init();
1084#ifdef CONFIG_SMP 1084#ifdef CONFIG_SMP
1085 if (is_smp()) { 1085 if (is_smp()) {
1086 if (!mdesc->smp_init || !mdesc->smp_init()) { 1086 if (!mdesc->smp_init || !mdesc->smp_init()) {
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..227952103b0b 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1,2 @@
 obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
new file mode 100644
index 000000000000..16db419f9e90
--- /dev/null
+++ b/arch/arm/xen/efi.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015, Linaro Limited, Shannon Zhao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/efi.h>
+#include <xen/xen-ops.h>
+#include <asm/xen/xen-ops.h>
+
+/* Set XEN EFI runtime services function pointers. Other fields of struct efi,
+ * e.g. efi.systab, will be set like normal EFI.
+ */
+void __init xen_efi_runtime_setup(void)
+{
+	efi.get_time = xen_efi_get_time;
+	efi.set_time = xen_efi_set_time;
+	efi.get_wakeup_time = xen_efi_get_wakeup_time;
+	efi.set_wakeup_time = xen_efi_set_wakeup_time;
+	efi.get_variable = xen_efi_get_variable;
+	efi.get_next_variable = xen_efi_get_next_variable;
+	efi.set_variable = xen_efi_set_variable;
+	efi.query_variable_info = xen_efi_query_variable_info;
+	efi.update_capsule = xen_efi_update_capsule;
+	efi.query_capsule_caps = xen_efi_query_capsule_caps;
+	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+	efi.reset_system = NULL; /* Functionality provided by Xen. */
+}
+EXPORT_SYMBOL_GPL(xen_efi_runtime_setup);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 75cd7345c654..0bea3d271f6e 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -12,14 +12,16 @@
12#include <xen/page.h> 12#include <xen/page.h>
13#include <xen/interface/sched.h> 13#include <xen/interface/sched.h>
14#include <xen/xen-ops.h> 14#include <xen/xen-ops.h>
15#include <asm/paravirt.h>
16#include <asm/xen/hypervisor.h> 15#include <asm/xen/hypervisor.h>
17#include <asm/xen/hypercall.h> 16#include <asm/xen/hypercall.h>
17#include <asm/xen/xen-ops.h>
18#include <asm/system_misc.h> 18#include <asm/system_misc.h>
19#include <asm/efi.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
20#include <linux/irqreturn.h> 21#include <linux/irqreturn.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/of.h> 23#include <linux/of.h>
24#include <linux/of_fdt.h>
23#include <linux/of_irq.h> 25#include <linux/of_irq.h>
24#include <linux/of_address.h> 26#include <linux/of_address.h>
25#include <linux/cpuidle.h> 27#include <linux/cpuidle.h>
@@ -30,6 +32,7 @@
30#include <linux/time64.h> 32#include <linux/time64.h>
31#include <linux/timekeeping.h> 33#include <linux/timekeeping.h>
32#include <linux/timekeeper_internal.h> 34#include <linux/timekeeper_internal.h>
35#include <linux/acpi.h>
33 36
34#include <linux/mm.h> 37#include <linux/mm.h>
35 38
@@ -46,14 +49,16 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
46DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 49DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
47static struct vcpu_info __percpu *xen_vcpu_info; 50static struct vcpu_info __percpu *xen_vcpu_info;
48 51
52/* Linux <-> Xen vCPU id mapping */
53DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
54EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
55
49/* These are unused until we support booting "pre-ballooned" */ 56/* These are unused until we support booting "pre-ballooned" */
50unsigned long xen_released_pages; 57unsigned long xen_released_pages;
51struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; 58struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
52 59
53static __read_mostly unsigned int xen_events_irq; 60static __read_mostly unsigned int xen_events_irq;
54 61
55static __initdata struct device_node *xen_node;
56
57int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 62int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
58 unsigned long addr, 63 unsigned long addr,
59 xen_pfn_t *gfn, int nr, 64 xen_pfn_t *gfn, int nr,
@@ -84,19 +89,6 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
84} 89}
85EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range); 90EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
86 91
87static unsigned long long xen_stolen_accounting(int cpu)
88{
89 struct vcpu_runstate_info state;
90
91 BUG_ON(cpu != smp_processor_id());
92
93 xen_get_runstate_snapshot(&state);
94
95 WARN_ON(state.state != RUNSTATE_running);
96
97 return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
98}
99
100static void xen_read_wallclock(struct timespec64 *ts) 92static void xen_read_wallclock(struct timespec64 *ts)
101{ 93{
102 u32 version; 94 u32 version;
@@ -179,10 +171,14 @@ static void xen_percpu_init(void)
179 pr_info("Xen: initializing cpu%d\n", cpu); 171 pr_info("Xen: initializing cpu%d\n", cpu);
180 vcpup = per_cpu_ptr(xen_vcpu_info, cpu); 172 vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
181 173
174 /* Direct vCPU id mapping for ARM guests. */
175 per_cpu(xen_vcpu_id, cpu) = cpu;
176
182 info.mfn = virt_to_gfn(vcpup); 177 info.mfn = virt_to_gfn(vcpup);
183 info.offset = xen_offset_in_page(vcpup); 178 info.offset = xen_offset_in_page(vcpup);
184 179
185 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); 180 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
181 &info);
186 BUG_ON(err); 182 BUG_ON(err);
187 per_cpu(xen_vcpu, cpu) = vcpup; 183 per_cpu(xen_vcpu, cpu) = vcpup;
188 184
@@ -237,6 +233,46 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
237 return IRQ_HANDLED; 233 return IRQ_HANDLED;
238} 234}
239 235
236static __initdata struct {
237 const char *compat;
238 const char *prefix;
239 const char *version;
240 bool found;
241} hyper_node = {"xen,xen", "xen,xen-", NULL, false};
242
243static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
244 int depth, void *data)
245{
246 const void *s = NULL;
247 int len;
248
249 if (depth != 1 || strcmp(uname, "hypervisor") != 0)
250 return 0;
251
252 if (of_flat_dt_is_compatible(node, hyper_node.compat))
253 hyper_node.found = true;
254
255 s = of_get_flat_dt_prop(node, "compatible", &len);
256 if (strlen(hyper_node.prefix) + 3 < len &&
257 !strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix)))
258 hyper_node.version = s + strlen(hyper_node.prefix);
259
260 /*
261 * Check if Xen supports EFI by checking whether there is the
262 * "/hypervisor/uefi" node in DT. If so, runtime services are available
263 * through proxy functions (e.g. in case of Xen dom0 EFI implementation
264 * they call special hypercall which executes relevant EFI functions)
265 * and that is why they are always enabled.
266 */
267 if (IS_ENABLED(CONFIG_XEN_EFI)) {
268 if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
269 !efi_runtime_disabled())
270 set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
271 }
272
273 return 0;
274}
275
240/* 276/*
241 * see Documentation/devicetree/bindings/arm/xen.txt for the 277 * see Documentation/devicetree/bindings/arm/xen.txt for the
242 * documentation of the Xen Device Tree format. 278 * documentation of the Xen Device Tree format.
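
fdt_find_hyper_node() above relies on a new flat-DT helper,
of_get_flat_dt_subnode_by_name(), whose implementation belongs to the
drivers/of/fdt.c and include/linux/of_fdt.h entries in the diffstat and is not
shown in this section. Assuming it is a thin wrapper around libfdt, it would
look roughly like this:

/* Return the libfdt offset of the named subnode of @node in the flattened
 * device tree, or a negative libfdt error code if it does not exist.
 * Sketch only; the real helper is added elsewhere in this series. */
int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
{
	return fdt_subnode_offset(initial_boot_params, node, uname);
}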
@@ -244,26 +280,18 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
244#define GRANT_TABLE_PHYSADDR 0 280#define GRANT_TABLE_PHYSADDR 0
245void __init xen_early_init(void) 281void __init xen_early_init(void)
246{ 282{
247 int len; 283 of_scan_flat_dt(fdt_find_hyper_node, NULL);
248 const char *s = NULL; 284 if (!hyper_node.found) {
249 const char *version = NULL;
250 const char *xen_prefix = "xen,xen-";
251
252 xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
253 if (!xen_node) {
254 pr_debug("No Xen support\n"); 285 pr_debug("No Xen support\n");
255 return; 286 return;
256 } 287 }
257 s = of_get_property(xen_node, "compatible", &len); 288
258 if (strlen(xen_prefix) + 3 < len && 289 if (hyper_node.version == NULL) {
259 !strncmp(xen_prefix, s, strlen(xen_prefix)))
260 version = s + strlen(xen_prefix);
261 if (version == NULL) {
262 pr_debug("Xen version not found\n"); 290 pr_debug("Xen version not found\n");
263 return; 291 return;
264 } 292 }
265 293
266 pr_info("Xen %s support found\n", version); 294 pr_info("Xen %s support found\n", hyper_node.version);
267 295
268 xen_domain_type = XEN_HVM_DOMAIN; 296 xen_domain_type = XEN_HVM_DOMAIN;
269 297
@@ -278,28 +306,68 @@ void __init xen_early_init(void)
278 add_preferred_console("hvc", 0, NULL); 306 add_preferred_console("hvc", 0, NULL);
279} 307}
280 308
309static void __init xen_acpi_guest_init(void)
310{
311#ifdef CONFIG_ACPI
312 struct xen_hvm_param a;
313 int interrupt, trigger, polarity;
314
315 a.domid = DOMID_SELF;
316 a.index = HVM_PARAM_CALLBACK_IRQ;
317
318 if (HYPERVISOR_hvm_op(HVMOP_get_param, &a)
319 || (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) {
320 xen_events_irq = 0;
321 return;
322 }
323
324 interrupt = a.value & 0xff;
325 trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE
326 : ACPI_LEVEL_SENSITIVE;
327 polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW
328 : ACPI_ACTIVE_HIGH;
329 xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity);
330#endif
331}
332
333static void __init xen_dt_guest_init(void)
334{
335 struct device_node *xen_node;
336
337 xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
338 if (!xen_node) {
339 pr_err("Xen support was detected before, but it has disappeared\n");
340 return;
341 }
342
343 xen_events_irq = irq_of_parse_and_map(xen_node, 0);
344}
345
281static int __init xen_guest_init(void) 346static int __init xen_guest_init(void)
282{ 347{
283 struct xen_add_to_physmap xatp; 348 struct xen_add_to_physmap xatp;
284 struct shared_info *shared_info_page = NULL; 349 struct shared_info *shared_info_page = NULL;
285 struct resource res;
286 phys_addr_t grant_frames;
287 350
288 if (!xen_domain()) 351 if (!xen_domain())
289 return 0; 352 return 0;
290 353
291 if (of_address_to_resource(xen_node, GRANT_TABLE_PHYSADDR, &res)) { 354 if (!acpi_disabled)
292 pr_err("Xen grant table base address not found\n"); 355 xen_acpi_guest_init();
293 return -ENODEV; 356 else
294 } 357 xen_dt_guest_init();
295 grant_frames = res.start;
296 358
297 xen_events_irq = irq_of_parse_and_map(xen_node, 0);
298 if (!xen_events_irq) { 359 if (!xen_events_irq) {
299 pr_err("Xen event channel interrupt not found\n"); 360 pr_err("Xen event channel interrupt not found\n");
300 return -ENODEV; 361 return -ENODEV;
301 } 362 }
302 363
364 /*
365 * The fdt parsing codes have set EFI_RUNTIME_SERVICES if Xen EFI
366 * parameters are found. Force enable runtime services.
367 */
368 if (efi_enabled(EFI_RUNTIME_SERVICES))
369 xen_efi_runtime_setup();
370
303 shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL); 371 shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);
304 372
305 if (!shared_info_page) { 373 if (!shared_info_page) {
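
For reference, the HVM_PARAM_CALLBACK_IRQ value decoded by xen_acpi_guest_init()
above carries the delivery type in bits 63:56 (HVM_PARAM_CALLBACK_TYPE_PPI for
ARM guests) and, for that type, the PPI number in bits 7:0 plus trigger and
polarity flags in bits 9:8. A small standalone decoder mirroring that bit
manipulation; the type value 2 and the example interrupt number are
illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

static void decode_callback_ppi(uint64_t val)
{
	unsigned int type = val >> 56;        /* HVM_PARAM_CALLBACK_TYPE_* */
	unsigned int ppi  = val & 0xff;       /* interrupt number */
	int edge          = (val >> 8) & 0x1; /* 1 = edge, 0 = level */
	int active_low    = (val >> 8) & 0x2; /* non-zero = active low */

	printf("type=%u ppi=%u %s-triggered active-%s\n", type, ppi,
	       edge ? "edge" : "level", active_low ? "low" : "high");
}

int main(void)
{
	/* Assumed example: PPI type (2), interrupt 31, level, active low. */
	decode_callback_ppi((2ULL << 56) | (0x2 << 8) | 31);
	return 0;
}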
@@ -328,7 +396,13 @@ static int __init xen_guest_init(void)
328 if (xen_vcpu_info == NULL) 396 if (xen_vcpu_info == NULL)
329 return -ENOMEM; 397 return -ENOMEM;
330 398
331 if (gnttab_setup_auto_xlat_frames(grant_frames)) { 399 /* Direct vCPU id mapping for ARM guests. */
400 per_cpu(xen_vcpu_id, 0) = 0;
401
402 xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
403 if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
404 &xen_auto_xlat_grant_frames.vaddr,
405 xen_auto_xlat_grant_frames.count)) {
332 free_percpu(xen_vcpu_info); 406 free_percpu(xen_vcpu_info);
333 return -ENOMEM; 407 return -ENOMEM;
334 } 408 }
@@ -355,8 +429,8 @@ static int __init xen_guest_init(void)
355 429
356 register_cpu_notifier(&xen_cpu_notifier); 430 register_cpu_notifier(&xen_cpu_notifier);
357 431
358 pv_time_ops.steal_clock = xen_stolen_accounting; 432 xen_time_setup_guest();
359 static_key_slow_inc(&paravirt_steal_enabled); 433
360 if (xen_initial_domain()) 434 if (xen_initial_domain())
361 pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); 435 pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
362 436
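
The open-coded steal-time accounting removed above (and its x86 counterpart in
arch/x86/xen/time.c below) is replaced by a single call to
xen_time_setup_guest(), which lives in the common drivers/xen/time.c listed in
the diffstat but not shown in this section. A rough sketch of what that helper
is expected to do, based on the code it replaces; the newly wired
HYPERVISOR_vm_assist hypercall is presumably used there as well, which this
sketch omits:

#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/paravirt.h>
#include <xen/xen-ops.h>	/* assumed to declare xen_steal_clock() */

void __init xen_time_setup_guest(void)
{
	/* Report runstate-based stolen time through the paravirt hook,
	 * as the removed per-architecture code did. */
	pv_time_ops.steal_clock = xen_steal_clock;
	static_key_slow_inc(&paravirt_steal_enabled);
}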
@@ -403,4 +477,5 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
403EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op); 477EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
404EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op); 478EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op);
405EXPORT_SYMBOL_GPL(HYPERVISOR_multicall); 479EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
480EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
406EXPORT_SYMBOL_GPL(privcmd_call); 481EXPORT_SYMBOL_GPL(privcmd_call);
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index 9a36f4f49c10..a648dfc3be30 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -91,6 +91,7 @@ HYPERCALL3(vcpu_op);
 HYPERCALL1(tmem_op);
 HYPERCALL1(platform_op_raw);
 HYPERCALL2(multicall);
+HYPERCALL2(vm_assist);
 
 ENTRY(privcmd_call)
 	stmdb sp!, {r4}
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
new file mode 100644
index 000000000000..ec154e719b11
--- /dev/null
+++ b/arch/arm64/include/asm/xen/xen-ops.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_XEN_OPS_H
+#define _ASM_XEN_OPS_H
+
+void xen_efi_runtime_setup(void);
+
+#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 5b8256770e22..2981f1bdd073 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -257,6 +257,7 @@ void __init setup_arch(char **cmdline_p)
257 */ 257 */
258 cpu_uninstall_idmap(); 258 cpu_uninstall_idmap();
259 259
260 xen_early_init();
260 efi_init(); 261 efi_init();
261 arm64_memblock_init(); 262 arm64_memblock_init();
262 263
@@ -283,8 +284,6 @@ void __init setup_arch(char **cmdline_p)
283 else 284 else
284 psci_acpi_init(); 285 psci_acpi_init();
285 286
286 xen_early_init();
287
288 cpu_read_bootcpu_ops(); 287 cpu_read_bootcpu_ops();
289 smp_init_cpus(); 288 smp_init_cpus();
290 smp_build_mpidr_hash(); 289 smp_build_mpidr_hash();
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index 74a8d87e542b..8ff8aa9c6228 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,3 @@
 xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y		:= xen-arm.o hypercall.o
+obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 70df80e8da2c..329c8027b0a9 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -82,6 +82,7 @@ HYPERCALL3(vcpu_op);
 HYPERCALL1(tmem_op);
 HYPERCALL1(platform_op_raw);
 HYPERCALL2(multicall);
+HYPERCALL2(vm_assist);
 
 ENTRY(privcmd_call)
 	mov x16, x0
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 59d34c521d96..9b7fa6313f1a 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -16,6 +16,7 @@ extern void prefill_possible_map(void);
16static inline void prefill_possible_map(void) {} 16static inline void prefill_possible_map(void) {}
17 17
18#define cpu_physical_id(cpu) boot_cpu_physical_apicid 18#define cpu_physical_id(cpu) boot_cpu_physical_apicid
19#define cpu_acpi_id(cpu) 0
19#define safe_smp_processor_id() 0 20#define safe_smp_processor_id() 0
20 21
21#endif /* CONFIG_SMP */ 22#endif /* CONFIG_SMP */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index c9734dc76257..ebd0c164cd4e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -33,6 +33,7 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
33} 33}
34 34
35DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); 35DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
36DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
36DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid); 37DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
37#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 38#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
38DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid); 39DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
@@ -148,6 +149,7 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
148void smp_store_boot_cpu_info(void); 149void smp_store_boot_cpu_info(void);
149void smp_store_cpu_info(int id); 150void smp_store_cpu_info(int id);
150#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) 151#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
152#define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
151 153
152#else /* !CONFIG_SMP */ 154#else /* !CONFIG_SMP */
153#define wbinvd_on_cpu(cpu) wbinvd() 155#define wbinvd_on_cpu(cpu) wbinvd()
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
index 0d809e9fc975..3bdd10d71223 100644
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -76,15 +76,18 @@
76/* 76/*
77 * Leaf 5 (0x40000x04) 77 * Leaf 5 (0x40000x04)
78 * HVM-specific features 78 * HVM-specific features
79 * EAX: Features
80 * EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
79 */ 81 */
80 82
81/* EAX Features */
82/* Virtualized APIC registers */ 83/* Virtualized APIC registers */
83#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) 84#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
84/* Virtualized x2APIC accesses */ 85/* Virtualized x2APIC accesses */
85#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) 86#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1)
86/* Memory mapped from other domains has valid IOMMU entries */ 87/* Memory mapped from other domains has valid IOMMU entries */
87#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2) 88#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
89/* vcpu id is present in EBX */
90#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3)
88 91
89#define XEN_CPUID_MAX_NUM_LEAVES 4 92#define XEN_CPUID_MAX_NUM_LEAVES 4
90 93
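
The EBX half of the leaf documented above is how HVM guests learn their Xen
vCPU id (see the init_hvm_pv_info() hunk further down). A compact illustration
of reading it; xen_cpuid_base() and cpuid() are existing kernel helpers, while
the wrapper itself is made up for this example:

#include <asm/processor.h>
#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>

static int example_read_xen_vcpu_id(void)
{
	uint32_t eax, ebx, ecx, edx;
	uint32_t base = xen_cpuid_base();	/* 0x40000000 + 0x100 * n */

	cpuid(base + 4, &eax, &ebx, &ecx, &edx);	/* leaf 5 (0x40000x04) */
	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
		return ebx;	/* Xen's id for this vCPU */
	return -1;		/* caller falls back to smp_processor_id() */
}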
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 9414f84584e4..6738e5c82cca 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -161,13 +161,15 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
161/** 161/**
162 * acpi_register_lapic - register a local apic and generates a logic cpu number 162 * acpi_register_lapic - register a local apic and generates a logic cpu number
163 * @id: local apic id to register 163 * @id: local apic id to register
164 * @acpiid: ACPI id to register
164 * @enabled: this cpu is enabled or not 165 * @enabled: this cpu is enabled or not
165 * 166 *
166 * Returns the logic cpu number which maps to the local apic 167 * Returns the logic cpu number which maps to the local apic
167 */ 168 */
168static int acpi_register_lapic(int id, u8 enabled) 169static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
169{ 170{
170 unsigned int ver = 0; 171 unsigned int ver = 0;
172 int cpu;
171 173
172 if (id >= MAX_LOCAL_APIC) { 174 if (id >= MAX_LOCAL_APIC) {
173 printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); 175 printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
@@ -182,7 +184,11 @@ static int acpi_register_lapic(int id, u8 enabled)
182 if (boot_cpu_physical_apicid != -1U) 184 if (boot_cpu_physical_apicid != -1U)
183 ver = apic_version[boot_cpu_physical_apicid]; 185 ver = apic_version[boot_cpu_physical_apicid];
184 186
185 return generic_processor_info(id, ver); 187 cpu = generic_processor_info(id, ver);
188 if (cpu >= 0)
189 early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
190
191 return cpu;
186} 192}
187 193
188static int __init 194static int __init
@@ -212,7 +218,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
212 if (!apic->apic_id_valid(apic_id) && enabled) 218 if (!apic->apic_id_valid(apic_id) && enabled)
213 printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); 219 printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
214 else 220 else
215 acpi_register_lapic(apic_id, enabled); 221 acpi_register_lapic(apic_id, processor->uid, enabled);
216#else 222#else
217 printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); 223 printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
218#endif 224#endif
@@ -240,6 +246,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
240 * when we use CPU hotplug. 246 * when we use CPU hotplug.
241 */ 247 */
242 acpi_register_lapic(processor->id, /* APIC ID */ 248 acpi_register_lapic(processor->id, /* APIC ID */
249 processor->processor_id, /* ACPI ID */
243 processor->lapic_flags & ACPI_MADT_ENABLED); 250 processor->lapic_flags & ACPI_MADT_ENABLED);
244 251
245 return 0; 252 return 0;
@@ -258,6 +265,7 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
258 acpi_table_print_madt_entry(header); 265 acpi_table_print_madt_entry(header);
259 266
260 acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ 267 acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
268 processor->processor_id, /* ACPI ID */
261 processor->lapic_flags & ACPI_MADT_ENABLED); 269 processor->lapic_flags & ACPI_MADT_ENABLED);
262 270
263 return 0; 271 return 0;
@@ -714,7 +722,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
714{ 722{
715 int cpu; 723 int cpu;
716 724
717 cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED); 725 cpu = acpi_register_lapic(physid, U32_MAX, ACPI_MADT_ENABLED);
718 if (cpu < 0) { 726 if (cpu < 0) {
719 pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); 727 pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
720 return cpu; 728 return cpu;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f943d2f453a4..ac8d8ad8b009 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -92,8 +92,10 @@ static int apic_extnmi = APIC_EXTNMI_BSP;
92 */ 92 */
93DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID); 93DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
94DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID); 94DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
95DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
95EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); 96EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
96EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); 97EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
98EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
97 99
98#ifdef CONFIG_X86_32 100#ifdef CONFIG_X86_32
99 101
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e4fcb87ba7a6..7a40e068302d 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -236,6 +236,8 @@ void __init setup_per_cpu_areas(void)
236 early_per_cpu_map(x86_cpu_to_apicid, cpu); 236 early_per_cpu_map(x86_cpu_to_apicid, cpu);
237 per_cpu(x86_bios_cpu_apicid, cpu) = 237 per_cpu(x86_bios_cpu_apicid, cpu) =
238 early_per_cpu_map(x86_bios_cpu_apicid, cpu); 238 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
239 per_cpu(x86_cpu_to_acpiid, cpu) =
240 early_per_cpu_map(x86_cpu_to_acpiid, cpu);
239#endif 241#endif
240#ifdef CONFIG_X86_32 242#ifdef CONFIG_X86_32
241 per_cpu(x86_cpu_to_logical_apicid, cpu) = 243 per_cpu(x86_cpu_to_logical_apicid, cpu) =
@@ -271,6 +273,7 @@ void __init setup_per_cpu_areas(void)
271#ifdef CONFIG_X86_LOCAL_APIC 273#ifdef CONFIG_X86_LOCAL_APIC
272 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; 274 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
273 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; 275 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
276 early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
274#endif 277#endif
275#ifdef CONFIG_X86_32 278#ifdef CONFIG_X86_32
276 early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL; 279 early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index be14cc3e48d5..3be012115853 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -20,10 +20,121 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/string.h> 21#include <linux/string.h>
22 22
23#include <xen/xen.h>
23#include <xen/xen-ops.h> 24#include <xen/xen-ops.h>
25#include <xen/interface/platform.h>
24 26
25#include <asm/page.h> 27#include <asm/page.h>
26#include <asm/setup.h> 28#include <asm/setup.h>
29#include <asm/xen/hypercall.h>
30
31static efi_char16_t vendor[100] __initdata;
32
33static efi_system_table_t efi_systab_xen __initdata = {
34 .hdr = {
35 .signature = EFI_SYSTEM_TABLE_SIGNATURE,
36 .revision = 0, /* Initialized later. */
37 .headersize = 0, /* Ignored by Linux Kernel. */
38 .crc32 = 0, /* Ignored by Linux Kernel. */
39 .reserved = 0
40 },
41 .fw_vendor = EFI_INVALID_TABLE_ADDR, /* Initialized later. */
42 .fw_revision = 0, /* Initialized later. */
43 .con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
44 .con_in = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
45 .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
46 .con_out = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
47 .stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
48 .stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
49 .runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
50 /* Not used under Xen. */
51 .boottime = (efi_boot_services_t *)EFI_INVALID_TABLE_ADDR,
52 /* Not used under Xen. */
53 .nr_tables = 0, /* Initialized later. */
54 .tables = EFI_INVALID_TABLE_ADDR /* Initialized later. */
55};
56
57static const struct efi efi_xen __initconst = {
58 .systab = NULL, /* Initialized later. */
59 .runtime_version = 0, /* Initialized later. */
60 .mps = EFI_INVALID_TABLE_ADDR,
61 .acpi = EFI_INVALID_TABLE_ADDR,
62 .acpi20 = EFI_INVALID_TABLE_ADDR,
63 .smbios = EFI_INVALID_TABLE_ADDR,
64 .smbios3 = EFI_INVALID_TABLE_ADDR,
65 .sal_systab = EFI_INVALID_TABLE_ADDR,
66 .boot_info = EFI_INVALID_TABLE_ADDR,
67 .hcdp = EFI_INVALID_TABLE_ADDR,
68 .uga = EFI_INVALID_TABLE_ADDR,
69 .uv_systab = EFI_INVALID_TABLE_ADDR,
70 .fw_vendor = EFI_INVALID_TABLE_ADDR,
71 .runtime = EFI_INVALID_TABLE_ADDR,
72 .config_table = EFI_INVALID_TABLE_ADDR,
73 .get_time = xen_efi_get_time,
74 .set_time = xen_efi_set_time,
75 .get_wakeup_time = xen_efi_get_wakeup_time,
76 .set_wakeup_time = xen_efi_set_wakeup_time,
77 .get_variable = xen_efi_get_variable,
78 .get_next_variable = xen_efi_get_next_variable,
79 .set_variable = xen_efi_set_variable,
80 .query_variable_info = xen_efi_query_variable_info,
81 .update_capsule = xen_efi_update_capsule,
82 .query_capsule_caps = xen_efi_query_capsule_caps,
83 .get_next_high_mono_count = xen_efi_get_next_high_mono_count,
84 .reset_system = NULL, /* Functionality provided by Xen. */
85 .set_virtual_address_map = NULL, /* Not used under Xen. */
86 .flags = 0 /* Initialized later. */
87};
88
89static efi_system_table_t __init *xen_efi_probe(void)
90{
91 struct xen_platform_op op = {
92 .cmd = XENPF_firmware_info,
93 .u.firmware_info = {
94 .type = XEN_FW_EFI_INFO,
95 .index = XEN_FW_EFI_CONFIG_TABLE
96 }
97 };
98 union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;
99
100 if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
101 return NULL;
102
103 /* Here we know that Xen runs on EFI platform. */
104
105 efi = efi_xen;
106
107 efi_systab_xen.tables = info->cfg.addr;
108 efi_systab_xen.nr_tables = info->cfg.nent;
109
110 op.cmd = XENPF_firmware_info;
111 op.u.firmware_info.type = XEN_FW_EFI_INFO;
112 op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
113 info->vendor.bufsz = sizeof(vendor);
114 set_xen_guest_handle(info->vendor.name, vendor);
115
116 if (HYPERVISOR_platform_op(&op) == 0) {
117 efi_systab_xen.fw_vendor = __pa_symbol(vendor);
118 efi_systab_xen.fw_revision = info->vendor.revision;
119 } else
120 efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");
121
122 op.cmd = XENPF_firmware_info;
123 op.u.firmware_info.type = XEN_FW_EFI_INFO;
124 op.u.firmware_info.index = XEN_FW_EFI_VERSION;
125
126 if (HYPERVISOR_platform_op(&op) == 0)
127 efi_systab_xen.hdr.revision = info->version;
128
129 op.cmd = XENPF_firmware_info;
130 op.u.firmware_info.type = XEN_FW_EFI_INFO;
131 op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;
132
133 if (HYPERVISOR_platform_op(&op) == 0)
134 efi.runtime_version = info->version;
135
136 return &efi_systab_xen;
137}
27 138
28void __init xen_efi_init(void) 139void __init xen_efi_init(void)
29{ 140{
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0f87db2cc6a8..69b4b6d29738 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -59,6 +59,7 @@
59#include <asm/xen/pci.h> 59#include <asm/xen/pci.h>
60#include <asm/xen/hypercall.h> 60#include <asm/xen/hypercall.h>
61#include <asm/xen/hypervisor.h> 61#include <asm/xen/hypervisor.h>
62#include <asm/xen/cpuid.h>
62#include <asm/fixmap.h> 63#include <asm/fixmap.h>
63#include <asm/processor.h> 64#include <asm/processor.h>
64#include <asm/proto.h> 65#include <asm/proto.h>
@@ -118,6 +119,10 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
118 */ 119 */
119DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); 120DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
120 121
122/* Linux <-> Xen vCPU id mapping */
123DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
124EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
125
121enum xen_domain_type xen_domain_type = XEN_NATIVE; 126enum xen_domain_type xen_domain_type = XEN_NATIVE;
122EXPORT_SYMBOL_GPL(xen_domain_type); 127EXPORT_SYMBOL_GPL(xen_domain_type);
123 128
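
All of the VCPUOP_* hypercalls in the remaining hunks switch from the Linux CPU
number to xen_vcpu_nr(cpu), backed by the per-cpu xen_vcpu_id defined above.
The helper itself is added to include/xen/xen-ops.h (in the diffstat, not shown
here); it is presumably just a per-cpu lookup, along these lines:

#include <linux/percpu.h>

DECLARE_PER_CPU(int, xen_vcpu_id);

/* Sketch of the assumed definition: translate a Linux CPU number into the
 * id Xen uses for the corresponding vCPU. */
static inline int xen_vcpu_nr(int cpu)
{
	return per_cpu(xen_vcpu_id, cpu);
}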
@@ -179,7 +184,7 @@ static void clamp_max_cpus(void)
179#endif 184#endif
180} 185}
181 186
182static void xen_vcpu_setup(int cpu) 187void xen_vcpu_setup(int cpu)
183{ 188{
184 struct vcpu_register_vcpu_info info; 189 struct vcpu_register_vcpu_info info;
185 int err; 190 int err;
@@ -202,8 +207,9 @@ static void xen_vcpu_setup(int cpu)
202 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) 207 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
203 return; 208 return;
204 } 209 }
205 if (cpu < MAX_VIRT_CPUS) 210 if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
206 per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 211 per_cpu(xen_vcpu, cpu) =
212 &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
207 213
208 if (!have_vcpu_info_placement) { 214 if (!have_vcpu_info_placement) {
209 if (cpu >= MAX_VIRT_CPUS) 215 if (cpu >= MAX_VIRT_CPUS)
@@ -223,7 +229,8 @@ static void xen_vcpu_setup(int cpu)
223 hypervisor has no unregister variant and this hypercall does not 229 hypervisor has no unregister variant and this hypercall does not
224 allow to over-write info.mfn and info.offset. 230 allow to over-write info.mfn and info.offset.
225 */ 231 */
226 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); 232 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
233 &info);
227 234
228 if (err) { 235 if (err) {
229 printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err); 236 printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
@@ -247,10 +254,11 @@ void xen_vcpu_restore(void)
247 254
248 for_each_possible_cpu(cpu) { 255 for_each_possible_cpu(cpu) {
249 bool other_cpu = (cpu != smp_processor_id()); 256 bool other_cpu = (cpu != smp_processor_id());
250 bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL); 257 bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
258 NULL);
251 259
252 if (other_cpu && is_up && 260 if (other_cpu && is_up &&
253 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) 261 HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
254 BUG(); 262 BUG();
255 263
256 xen_setup_runstate_info(cpu); 264 xen_setup_runstate_info(cpu);
@@ -259,7 +267,7 @@ void xen_vcpu_restore(void)
259 xen_vcpu_setup(cpu); 267 xen_vcpu_setup(cpu);
260 268
261 if (other_cpu && is_up && 269 if (other_cpu && is_up &&
262 HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) 270 HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
263 BUG(); 271 BUG();
264 } 272 }
265} 273}
@@ -588,7 +596,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
588{ 596{
589 unsigned long va = dtr->address; 597 unsigned long va = dtr->address;
590 unsigned int size = dtr->size + 1; 598 unsigned int size = dtr->size + 1;
591 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 599 unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
592 unsigned long frames[pages]; 600 unsigned long frames[pages];
593 int f; 601 int f;
594 602
@@ -637,7 +645,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
637{ 645{
638 unsigned long va = dtr->address; 646 unsigned long va = dtr->address;
639 unsigned int size = dtr->size + 1; 647 unsigned int size = dtr->size + 1;
640 unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 648 unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
641 unsigned long frames[pages]; 649 unsigned long frames[pages];
642 int f; 650 int f;
643 651
@@ -1135,8 +1143,11 @@ void xen_setup_vcpu_info_placement(void)
1135{ 1143{
1136 int cpu; 1144 int cpu;
1137 1145
1138 for_each_possible_cpu(cpu) 1146 for_each_possible_cpu(cpu) {
1147 /* Set up direct vCPU id mapping for PV guests. */
1148 per_cpu(xen_vcpu_id, cpu) = cpu;
1139 xen_vcpu_setup(cpu); 1149 xen_vcpu_setup(cpu);
1150 }
1140 1151
1141 /* xen_vcpu_setup managed to place the vcpu_info within the 1152 /* xen_vcpu_setup managed to place the vcpu_info within the
1142 * percpu area for all cpus, so make use of it. Note that for 1153 * percpu area for all cpus, so make use of it. Note that for
@@ -1727,6 +1738,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
1727#endif 1738#endif
1728 xen_raw_console_write("about to get started...\n"); 1739 xen_raw_console_write("about to get started...\n");
1729 1740
1741 /* Let's presume PV guests always boot on vCPU with id 0. */
1742 per_cpu(xen_vcpu_id, 0) = 0;
1743
1730 xen_setup_runstate_info(0); 1744 xen_setup_runstate_info(0);
1731 1745
1732 xen_efi_init(); 1746 xen_efi_init();
@@ -1768,9 +1782,10 @@ void __ref xen_hvm_init_shared_info(void)
1768 * in that case multiple vcpus might be online. */ 1782 * in that case multiple vcpus might be online. */
1769 for_each_online_cpu(cpu) { 1783 for_each_online_cpu(cpu) {
1770 /* Leave it to be NULL. */ 1784 /* Leave it to be NULL. */
1771 if (cpu >= MAX_VIRT_CPUS) 1785 if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
1772 continue; 1786 continue;
1773 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1787 per_cpu(xen_vcpu, cpu) =
1788 &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
1774 } 1789 }
1775} 1790}
1776 1791
@@ -1795,6 +1810,12 @@ static void __init init_hvm_pv_info(void)
1795 1810
1796 xen_setup_features(); 1811 xen_setup_features();
1797 1812
1813 cpuid(base + 4, &eax, &ebx, &ecx, &edx);
1814 if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
1815 this_cpu_write(xen_vcpu_id, ebx);
1816 else
1817 this_cpu_write(xen_vcpu_id, smp_processor_id());
1818
1798 pv_info.name = "Xen HVM"; 1819 pv_info.name = "Xen HVM";
1799 1820
1800 xen_domain_type = XEN_HVM_DOMAIN; 1821 xen_domain_type = XEN_HVM_DOMAIN;
@@ -1806,6 +1827,10 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
1806 int cpu = (long)hcpu; 1827 int cpu = (long)hcpu;
1807 switch (action) { 1828 switch (action) {
1808 case CPU_UP_PREPARE: 1829 case CPU_UP_PREPARE:
1830 if (cpu_acpi_id(cpu) != U32_MAX)
1831 per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
1832 else
1833 per_cpu(xen_vcpu_id, cpu) = cpu;
1809 xen_vcpu_setup(cpu); 1834 xen_vcpu_setup(cpu);
1810 if (xen_have_vector_callback) { 1835 if (xen_have_vector_callback) {
1811 if (xen_feature(XENFEAT_hvm_safe_pvclock)) 1836 if (xen_feature(XENFEAT_hvm_safe_pvclock))
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index e079500b17f3..de4144c24f1c 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -111,63 +111,18 @@ int arch_gnttab_init(unsigned long nr_shared)
111} 111}
112 112
113#ifdef CONFIG_XEN_PVH 113#ifdef CONFIG_XEN_PVH
114#include <xen/balloon.h>
115#include <xen/events.h> 114#include <xen/events.h>
116#include <linux/slab.h> 115#include <xen/xen-ops.h>
117static int __init xlated_setup_gnttab_pages(void)
118{
119 struct page **pages;
120 xen_pfn_t *pfns;
121 void *vaddr;
122 int rc;
123 unsigned int i;
124 unsigned long nr_grant_frames = gnttab_max_grant_frames();
125
126 BUG_ON(nr_grant_frames == 0);
127 pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL);
128 if (!pages)
129 return -ENOMEM;
130
131 pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
132 if (!pfns) {
133 kfree(pages);
134 return -ENOMEM;
135 }
136 rc = alloc_xenballooned_pages(nr_grant_frames, pages);
137 if (rc) {
138 pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__,
139 nr_grant_frames, rc);
140 kfree(pages);
141 kfree(pfns);
142 return rc;
143 }
144 for (i = 0; i < nr_grant_frames; i++)
145 pfns[i] = page_to_pfn(pages[i]);
146
147 vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL);
148 if (!vaddr) {
149 pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
150 nr_grant_frames, rc);
151 free_xenballooned_pages(nr_grant_frames, pages);
152 kfree(pages);
153 kfree(pfns);
154 return -ENOMEM;
155 }
156 kfree(pages);
157
158 xen_auto_xlat_grant_frames.pfn = pfns;
159 xen_auto_xlat_grant_frames.count = nr_grant_frames;
160 xen_auto_xlat_grant_frames.vaddr = vaddr;
161
162 return 0;
163}
164
165static int __init xen_pvh_gnttab_setup(void) 116static int __init xen_pvh_gnttab_setup(void)
166{ 117{
167 if (!xen_pvh_domain()) 118 if (!xen_pvh_domain())
168 return -ENODEV; 119 return -ENODEV;
169 120
170 return xlated_setup_gnttab_pages(); 121 xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
122
123 return xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
124 &xen_auto_xlat_grant_frames.vaddr,
125 xen_auto_xlat_grant_frames.count);
171} 126}
172/* Call it _before_ __gnttab_init as we need to initialize the 127/* Call it _before_ __gnttab_init as we need to initialize the
173 * xen_auto_xlat_grant_frames first. */ 128 * xen_auto_xlat_grant_frames first. */
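
The ~50 lines of open-coded balloon/vmap logic removed above are folded into
xen_xlate_map_ballooned_pages(), shared with the ARM side (see the
xen_guest_init() hunk earlier) and implemented in drivers/xen/xlate_mmu.c,
which is part of this series but not shown in this section. Its contract, as
suggested by both call sites (the exact prototype is an assumption):

/* Balloon out @nr_grant_frames pages, map them into a contiguous kernel
 * virtual range, and hand back the frame list and the mapping address. */
int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
				  unsigned long nr_grant_frames);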
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index a1207cb6472a..33e92955e09d 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -109,7 +109,8 @@ static void xen_safe_halt(void)
109static void xen_halt(void) 109static void xen_halt(void)
110{ 110{
111 if (irqs_disabled()) 111 if (irqs_disabled())
112 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 112 HYPERVISOR_vcpu_op(VCPUOP_down,
113 xen_vcpu_nr(smp_processor_id()), NULL);
113 else 114 else
114 xen_safe_halt(); 115 xen_safe_halt();
115} 116}
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 9466354d3e49..32bdc2c90297 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -547,7 +547,7 @@ void xen_pmu_init(int cpu)
547 return; 547 return;
548 548
549fail: 549fail:
550 pr_warn_once("Could not initialize VPMU for cpu %d, error %d\n", 550 pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
551 cpu, err); 551 cpu, err);
552 free_pages((unsigned long)xenpmu_data, 0); 552 free_pages((unsigned long)xenpmu_data, 0);
553} 553}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 719cf291dcdf..0b4d04c8ab4d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -322,6 +322,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
322 xen_filter_cpu_maps(); 322 xen_filter_cpu_maps();
323 xen_setup_vcpu_info_placement(); 323 xen_setup_vcpu_info_placement();
324 } 324 }
325
326 /*
327 * Setup vcpu_info for boot CPU.
328 */
329 if (xen_hvm_domain())
330 xen_vcpu_setup(0);
331
325 /* 332 /*
326 * The alternative logic (which patches the unlock/lock) runs before 333 * The alternative logic (which patches the unlock/lock) runs before
327 * the smp bootup up code is activated. Hence we need to set this up 334 * the smp bootup up code is activated. Hence we need to set this up
@@ -454,7 +461,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
454#endif 461#endif
455 ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); 462 ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
456 ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); 463 ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
457 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) 464 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
458 BUG(); 465 BUG();
459 466
460 kfree(ctxt); 467 kfree(ctxt);
@@ -492,7 +499,7 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
492 if (rc) 499 if (rc)
493 return rc; 500 return rc;
494 501
495 rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL); 502 rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
496 BUG_ON(rc); 503 BUG_ON(rc);
497 504
498 while (cpu_report_state(cpu) != CPU_ONLINE) 505 while (cpu_report_state(cpu) != CPU_ONLINE)
@@ -520,7 +527,8 @@ static int xen_cpu_disable(void)
520 527
521static void xen_cpu_die(unsigned int cpu) 528static void xen_cpu_die(unsigned int cpu)
522{ 529{
523 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { 530 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up,
531 xen_vcpu_nr(cpu), NULL)) {
524 __set_current_state(TASK_UNINTERRUPTIBLE); 532 __set_current_state(TASK_UNINTERRUPTIBLE);
525 schedule_timeout(HZ/10); 533 schedule_timeout(HZ/10);
526 } 534 }
@@ -536,7 +544,7 @@ static void xen_cpu_die(unsigned int cpu)
536static void xen_play_dead(void) /* used only with HOTPLUG_CPU */ 544static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
537{ 545{
538 play_dead_common(); 546 play_dead_common();
539 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 547 HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
540 cpu_bringup(); 548 cpu_bringup();
541 /* 549 /*
542 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down) 550 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
@@ -576,7 +584,7 @@ static void stop_self(void *v)
576 584
577 set_cpu_online(cpu, false); 585 set_cpu_online(cpu, false);
578 586
579 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL); 587 HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
580 BUG(); 588 BUG();
581} 589}
582 590
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 6deba5bc7e34..67356d29d74d 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -11,8 +11,6 @@
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/clocksource.h> 12#include <linux/clocksource.h>
13#include <linux/clockchips.h> 13#include <linux/clockchips.h>
14#include <linux/kernel_stat.h>
15#include <linux/math64.h>
16#include <linux/gfp.h> 14#include <linux/gfp.h>
17#include <linux/slab.h> 15#include <linux/slab.h>
18#include <linux/pvclock_gtod.h> 16#include <linux/pvclock_gtod.h>
@@ -31,44 +29,6 @@
31 29
32/* Xen may fire a timer up to this many ns early */ 30/* Xen may fire a timer up to this many ns early */
33#define TIMER_SLOP 100000 31#define TIMER_SLOP 100000
34#define NS_PER_TICK (1000000000LL / HZ)
35
36/* snapshots of runstate info */
37static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
38
39/* unused ns of stolen time */
40static DEFINE_PER_CPU(u64, xen_residual_stolen);
41
42static void do_stolen_accounting(void)
43{
44 struct vcpu_runstate_info state;
45 struct vcpu_runstate_info *snap;
46 s64 runnable, offline, stolen;
47 cputime_t ticks;
48
49 xen_get_runstate_snapshot(&state);
50
51 WARN_ON(state.state != RUNSTATE_running);
52
53 snap = this_cpu_ptr(&xen_runstate_snapshot);
54
55 /* work out how much time the VCPU has not been runn*ing* */
56 runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
57 offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
58
59 *snap = state;
60
61 /* Add the appropriate number of ticks of stolen time,
62 including any left-overs from last time. */
63 stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
64
65 if (stolen < 0)
66 stolen = 0;
67
68 ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
69 __this_cpu_write(xen_residual_stolen, stolen);
70 account_steal_ticks(ticks);
71}
72 32
73/* Get the TSC speed from Xen */ 33/* Get the TSC speed from Xen */
74static unsigned long xen_tsc_khz(void) 34static unsigned long xen_tsc_khz(void)
@@ -263,8 +223,10 @@ static int xen_vcpuop_shutdown(struct clock_event_device *evt)
263{ 223{
264 int cpu = smp_processor_id(); 224 int cpu = smp_processor_id();
265 225
266 if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) || 226 if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
267 HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) 227 NULL) ||
228 HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
229 NULL))
268 BUG(); 230 BUG();
269 231
270 return 0; 232 return 0;
@@ -274,7 +236,8 @@ static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
274{ 236{
275 int cpu = smp_processor_id(); 237 int cpu = smp_processor_id();
276 238
277 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) 239 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
240 NULL))
278 BUG(); 241 BUG();
279 242
280 return 0; 243 return 0;
@@ -293,7 +256,8 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
293 /* Get an event anyway, even if the timeout is already expired */ 256 /* Get an event anyway, even if the timeout is already expired */
294 single.flags = 0; 257 single.flags = 0;
295 258
296 ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single); 259 ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
260 &single);
297 BUG_ON(ret != 0); 261 BUG_ON(ret != 0);
298 262
299 return ret; 263 return ret;
@@ -335,8 +299,6 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
335 ret = IRQ_HANDLED; 299 ret = IRQ_HANDLED;
336 } 300 }
337 301
338 do_stolen_accounting();
339
340 return ret; 302 return ret;
341} 303}
342 304
@@ -394,13 +356,15 @@ void xen_timer_resume(void)
394 return; 356 return;
395 357
396 for_each_online_cpu(cpu) { 358 for_each_online_cpu(cpu) {
397 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) 359 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
360 xen_vcpu_nr(cpu), NULL))
398 BUG(); 361 BUG();
399 } 362 }
400} 363}
401 364
402static const struct pv_time_ops xen_time_ops __initconst = { 365static const struct pv_time_ops xen_time_ops __initconst = {
403 .sched_clock = xen_clocksource_read, 366 .sched_clock = xen_clocksource_read,
367 .steal_clock = xen_steal_clock,
404}; 368};
405 369
406static void __init xen_time_init(void) 370static void __init xen_time_init(void)
@@ -414,7 +378,8 @@ static void __init xen_time_init(void)
414 378
415 clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); 379 clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
416 380
417 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) { 381 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
382 NULL) == 0) {
418 /* Successfully turned off 100Hz tick, so we have the 383 /* Successfully turned off 100Hz tick, so we have the
419 vcpuop-based timer interface */ 384 vcpuop-based timer interface */
420 printk(KERN_DEBUG "Xen: using vcpuop timer interface\n"); 385 printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
@@ -431,6 +396,8 @@ static void __init xen_time_init(void)
431 xen_setup_timer(cpu); 396 xen_setup_timer(cpu);
432 xen_setup_cpu_clockevents(); 397 xen_setup_cpu_clockevents();
433 398
399 xen_time_setup_guest();
400
434 if (xen_initial_domain()) 401 if (xen_initial_domain())
435 pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); 402 pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
436} 403}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 4140b070f2e9..3cbce3b085e7 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -76,6 +76,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
76 76
77bool xen_vcpu_stolen(int vcpu); 77bool xen_vcpu_stolen(int vcpu);
78 78
79void xen_vcpu_setup(int cpu);
79void xen_setup_vcpu_info_placement(void); 80void xen_setup_vcpu_info_placement(void);
80 81
81#ifdef CONFIG_SMP 82#ifdef CONFIG_SMP
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 405056b95b05..ad9fc84a8601 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -46,6 +46,13 @@ DEFINE_MUTEX(acpi_device_lock);
46LIST_HEAD(acpi_wakeup_device_list); 46LIST_HEAD(acpi_wakeup_device_list);
47static DEFINE_MUTEX(acpi_hp_context_lock); 47static DEFINE_MUTEX(acpi_hp_context_lock);
48 48
49/*
50 * The UART device described by the SPCR table is the only object which needs
51 * special-casing. Everything else is covered by ACPI namespace paths in STAO
52 * table.
53 */
54static u64 spcr_uart_addr;
55
49struct acpi_dep_data { 56struct acpi_dep_data {
50 struct list_head node; 57 struct list_head node;
51 acpi_handle master; 58 acpi_handle master;
@@ -1458,6 +1465,41 @@ static int acpi_add_single_object(struct acpi_device **child,
1458 return 0; 1465 return 0;
1459} 1466}
1460 1467
1468static acpi_status acpi_get_resource_memory(struct acpi_resource *ares,
1469 void *context)
1470{
1471 struct resource *res = context;
1472
1473 if (acpi_dev_resource_memory(ares, res))
1474 return AE_CTRL_TERMINATE;
1475
1476 return AE_OK;
1477}
1478
1479static bool acpi_device_should_be_hidden(acpi_handle handle)
1480{
1481 acpi_status status;
1482 struct resource res;
1483
1484 /* Check if it should ignore the UART device */
1485 if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS)))
1486 return false;
1487
1488 /*
1489 * The UART device described in SPCR table is assumed to have only one
1490 * memory resource present. So we only look for the first one here.
1491 */
1492 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
1493 acpi_get_resource_memory, &res);
1494 if (ACPI_FAILURE(status) || res.start != spcr_uart_addr)
1495 return false;
1496
1497 acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n",
1498 &res.start);
1499
1500 return true;
1501}
1502
1461static int acpi_bus_type_and_status(acpi_handle handle, int *type, 1503static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1462 unsigned long long *sta) 1504 unsigned long long *sta)
1463{ 1505{
@@ -1471,6 +1513,9 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1471 switch (acpi_type) { 1513 switch (acpi_type) {
1472 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ 1514 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
1473 case ACPI_TYPE_DEVICE: 1515 case ACPI_TYPE_DEVICE:
1516 if (acpi_device_should_be_hidden(handle))
1517 return -ENODEV;
1518
1474 *type = ACPI_BUS_TYPE_DEVICE; 1519 *type = ACPI_BUS_TYPE_DEVICE;
1475 status = acpi_bus_get_status_handle(handle, sta); 1520 status = acpi_bus_get_status_handle(handle, sta);
1476 if (ACPI_FAILURE(status)) 1521 if (ACPI_FAILURE(status))
@@ -1925,11 +1970,26 @@ static int acpi_bus_scan_fixed(void)
1925 return result < 0 ? result : 0; 1970 return result < 0 ? result : 0;
1926} 1971}
1927 1972
1973static void __init acpi_get_spcr_uart_addr(void)
1974{
1975 acpi_status status;
1976 struct acpi_table_spcr *spcr_ptr;
1977
1978 status = acpi_get_table(ACPI_SIG_SPCR, 0,
1979 (struct acpi_table_header **)&spcr_ptr);
1980 if (ACPI_SUCCESS(status))
1981 spcr_uart_addr = spcr_ptr->serial_port.address;
1982 else
1983 printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n");
1984}
1985
1928static bool acpi_scan_initialized; 1986static bool acpi_scan_initialized;
1929 1987
1930int __init acpi_scan_init(void) 1988int __init acpi_scan_init(void)
1931{ 1989{
1932 int result; 1990 int result;
1991 acpi_status status;
1992 struct acpi_table_stao *stao_ptr;
1933 1993
1934 acpi_pci_root_init(); 1994 acpi_pci_root_init();
1935 acpi_pci_link_init(); 1995 acpi_pci_link_init();
@@ -1945,6 +2005,20 @@ int __init acpi_scan_init(void)
1945 2005
1946 acpi_scan_add_handler(&generic_device_handler); 2006 acpi_scan_add_handler(&generic_device_handler);
1947 2007
2008 /*
2009 * If there is STAO table, check whether it needs to ignore the UART
2010 * device in SPCR table.
2011 */
2012 status = acpi_get_table(ACPI_SIG_STAO, 0,
2013 (struct acpi_table_header **)&stao_ptr);
2014 if (ACPI_SUCCESS(status)) {
2015 if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
2016 printk(KERN_INFO PREFIX "STAO Name List not yet supported.");
2017
2018 if (stao_ptr->ignore_uart)
2019 acpi_get_spcr_uart_addr();
2020 }
2021
1948 mutex_lock(&acpi_scan_lock); 2022 mutex_lock(&acpi_scan_lock);
1949 /* 2023 /*
1950 * Enumerate devices in the ACPI namespace. 2024 * Enumerate devices in the ACPI namespace.
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 2994cfa44c8a..3cc6d1d86f1e 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -379,7 +379,7 @@ static struct attribute *xen_vbdstat_attrs[] = {
379 NULL 379 NULL
380}; 380};
381 381
382static struct attribute_group xen_vbdstat_group = { 382static const struct attribute_group xen_vbdstat_group = {
383 .name = "statistics", 383 .name = "statistics",
384 .attrs = xen_vbdstat_attrs, 384 .attrs = xen_vbdstat_attrs,
385}; 385};
@@ -715,8 +715,11 @@ static void backend_changed(struct xenbus_watch *watch,
715 715
716 /* Front end dir is a number, which is used as the handle. */ 716 /* Front end dir is a number, which is used as the handle. */
717 err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); 717 err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
718 if (err) 718 if (err) {
719 kfree(be->mode);
720 be->mode = NULL;
719 return; 721 return;
722 }
720 723
721 be->major = major; 724 be->major = major;
722 be->minor = minor; 725 be->minor = minor;
@@ -1022,9 +1025,9 @@ static int connect_ring(struct backend_info *be)
1022 pr_debug("%s %s\n", __func__, dev->otherend); 1025 pr_debug("%s %s\n", __func__, dev->otherend);
1023 1026
1024 be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT; 1027 be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
1025 err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", 1028 err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
1026 "%63s", protocol, NULL); 1029 "%63s", protocol);
1027 if (err) 1030 if (err <= 0)
1028 strcpy(protocol, "unspecified, assuming default"); 1031 strcpy(protocol, "unspecified, assuming default");
1029 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) 1032 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
1030 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; 1033 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
@@ -1036,10 +1039,9 @@ static int connect_ring(struct backend_info *be)
1036 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); 1039 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
1037 return -ENOSYS; 1040 return -ENOSYS;
1038 } 1041 }
1039 err = xenbus_gather(XBT_NIL, dev->otherend, 1042 err = xenbus_scanf(XBT_NIL, dev->otherend,
1040 "feature-persistent", "%u", 1043 "feature-persistent", "%u", &pers_grants);
1041 &pers_grants, NULL); 1044 if (err <= 0)
1042 if (err)
1043 pers_grants = 0; 1045 pers_grants = 0;
1044 1046
1045 be->blkif->vbd.feature_gnt_persistent = pers_grants; 1047 be->blkif->vbd.feature_gnt_persistent = pers_grants;
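
The connect_ring() conversion above (and the matching blkfront hunks below) relies on xenbus_scanf() behaving like scanf(): it returns the number of values parsed, so anything <= 0 -- a negative errno or zero matches -- means the node is absent or malformed and the default must be used. A minimal sketch of that convention, with a hypothetical node name:

#include <xen/xenbus.h>

/* Read an optional "%u" node from the other end, falling back to a default. */
static unsigned int demo_read_feature(struct xenbus_device *dev,
                                      const char *node, unsigned int def)
{
        unsigned int val;

        if (xenbus_scanf(XBT_NIL, dev->otherend, node, "%u", &val) <= 0)
                return def;     /* absent or unparsable */

        return val;
}

For example, demo_read_feature(dev, "feature-persistent", 0) mirrors the feature-persistent read above.
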
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0b6682a33e3b..be4fea6a5dd3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2197,10 +2197,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
2197 info->discard_granularity = discard_granularity; 2197 info->discard_granularity = discard_granularity;
2198 info->discard_alignment = discard_alignment; 2198 info->discard_alignment = discard_alignment;
2199 } 2199 }
2200 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2200 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2201 "discard-secure", "%d", &discard_secure, 2201 "discard-secure", "%u", &discard_secure);
2202 NULL); 2202 if (err > 0)
2203 if (!err)
2204 info->feature_secdiscard = !!discard_secure; 2203 info->feature_secdiscard = !!discard_secure;
2205} 2204}
2206 2205
@@ -2300,9 +2299,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2300 info->feature_flush = 0; 2299 info->feature_flush = 0;
2301 info->feature_fua = 0; 2300 info->feature_fua = 0;
2302 2301
2303 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2302 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2304 "feature-barrier", "%d", &barrier, 2303 "feature-barrier", "%d", &barrier);
2305 NULL);
2306 2304
2307 /* 2305 /*
2308 * If there's no "feature-barrier" defined, then it means 2306 * If there's no "feature-barrier" defined, then it means
@@ -2311,7 +2309,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2311 * 2309 *
2312 * If there are barriers, then we use flush. 2310 * If there are barriers, then we use flush.
2313 */ 2311 */
2314 if (!err && barrier) { 2312 if (err > 0 && barrier) {
2315 info->feature_flush = 1; 2313 info->feature_flush = 1;
2316 info->feature_fua = 1; 2314 info->feature_fua = 1;
2317 } 2315 }
@@ -2320,34 +2318,31 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2320 * And if there is "feature-flush-cache" use that above 2318 * And if there is "feature-flush-cache" use that above
2321 * barriers. 2319 * barriers.
2322 */ 2320 */
2323 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2321 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2324 "feature-flush-cache", "%d", &flush, 2322 "feature-flush-cache", "%d", &flush);
2325 NULL);
2326 2323
2327 if (!err && flush) { 2324 if (err > 0 && flush) {
2328 info->feature_flush = 1; 2325 info->feature_flush = 1;
2329 info->feature_fua = 0; 2326 info->feature_fua = 0;
2330 } 2327 }
2331 2328
2332 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2329 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2333 "feature-discard", "%d", &discard, 2330 "feature-discard", "%d", &discard);
2334 NULL);
2335 2331
2336 if (!err && discard) 2332 if (err > 0 && discard)
2337 blkfront_setup_discard(info); 2333 blkfront_setup_discard(info);
2338 2334
2339 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2335 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2340 "feature-persistent", "%u", &persistent, 2336 "feature-persistent", "%d", &persistent);
2341 NULL); 2337 if (err <= 0)
2342 if (err)
2343 info->feature_persistent = 0; 2338 info->feature_persistent = 0;
2344 else 2339 else
2345 info->feature_persistent = persistent; 2340 info->feature_persistent = persistent;
2346 2341
2347 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2342 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2348 "feature-max-indirect-segments", "%u", &indirect_segments, 2343 "feature-max-indirect-segments", "%u",
2349 NULL); 2344 &indirect_segments);
2350 if (err) 2345 if (err <= 0)
2351 info->max_indirect_segments = 0; 2346 info->max_indirect_segments = 0;
2352 else 2347 else
2353 info->max_indirect_segments = min(indirect_segments, 2348 info->max_indirect_segments = min(indirect_segments,
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 17ccf0a8787a..c394b81fe452 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -107,6 +107,11 @@ static int __init arm_enable_runtime_services(void)
107 return 0; 107 return 0;
108 } 108 }
109 109
110 if (efi_enabled(EFI_RUNTIME_SERVICES)) {
111 pr_info("EFI runtime services access via paravirt.\n");
112 return 0;
113 }
114
110 pr_info("Remapping and enabling EFI services.\n"); 115 pr_info("Remapping and enabling EFI services.\n");
111 116
112 mapsize = efi.memmap.map_end - efi.memmap.map; 117 mapsize = efi.memmap.map_end - efi.memmap.map;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8730fd475bf3..5a2631af7410 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -568,12 +568,14 @@ device_initcall(efi_load_efivars);
568 FIELD_SIZEOF(struct efi_fdt_params, field) \ 568 FIELD_SIZEOF(struct efi_fdt_params, field) \
569 } 569 }
570 570
571static __initdata struct { 571struct params {
572 const char name[32]; 572 const char name[32];
573 const char propname[32]; 573 const char propname[32];
574 int offset; 574 int offset;
575 int size; 575 int size;
576} dt_params[] = { 576};
577
578static __initdata struct params fdt_params[] = {
577 UEFI_PARAM("System Table", "linux,uefi-system-table", system_table), 579 UEFI_PARAM("System Table", "linux,uefi-system-table", system_table),
578 UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap), 580 UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap),
579 UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size), 581 UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size),
@@ -581,44 +583,91 @@ static __initdata struct {
581 UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver) 583 UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver)
582}; 584};
583 585
586static __initdata struct params xen_fdt_params[] = {
587 UEFI_PARAM("System Table", "xen,uefi-system-table", system_table),
588 UEFI_PARAM("MemMap Address", "xen,uefi-mmap-start", mmap),
589 UEFI_PARAM("MemMap Size", "xen,uefi-mmap-size", mmap_size),
590 UEFI_PARAM("MemMap Desc. Size", "xen,uefi-mmap-desc-size", desc_size),
591 UEFI_PARAM("MemMap Desc. Version", "xen,uefi-mmap-desc-ver", desc_ver)
592};
593
594#define EFI_FDT_PARAMS_SIZE ARRAY_SIZE(fdt_params)
595
596static __initdata struct {
597 const char *uname;
598 const char *subnode;
599 struct params *params;
600} dt_params[] = {
601 { "hypervisor", "uefi", xen_fdt_params },
602 { "chosen", NULL, fdt_params },
603};
604
584struct param_info { 605struct param_info {
585 int found; 606 int found;
586 void *params; 607 void *params;
608 const char *missing;
587}; 609};
588 610
589static int __init fdt_find_uefi_params(unsigned long node, const char *uname, 611static int __init __find_uefi_params(unsigned long node,
590 int depth, void *data) 612 struct param_info *info,
613 struct params *params)
591{ 614{
592 struct param_info *info = data;
593 const void *prop; 615 const void *prop;
594 void *dest; 616 void *dest;
595 u64 val; 617 u64 val;
596 int i, len; 618 int i, len;
597 619
598 if (depth != 1 || strcmp(uname, "chosen") != 0) 620 for (i = 0; i < EFI_FDT_PARAMS_SIZE; i++) {
599 return 0; 621 prop = of_get_flat_dt_prop(node, params[i].propname, &len);
600 622 if (!prop) {
601 for (i = 0; i < ARRAY_SIZE(dt_params); i++) { 623 info->missing = params[i].name;
602 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
603 if (!prop)
604 return 0; 624 return 0;
605 dest = info->params + dt_params[i].offset; 625 }
626
627 dest = info->params + params[i].offset;
606 info->found++; 628 info->found++;
607 629
608 val = of_read_number(prop, len / sizeof(u32)); 630 val = of_read_number(prop, len / sizeof(u32));
609 631
610 if (dt_params[i].size == sizeof(u32)) 632 if (params[i].size == sizeof(u32))
611 *(u32 *)dest = val; 633 *(u32 *)dest = val;
612 else 634 else
613 *(u64 *)dest = val; 635 *(u64 *)dest = val;
614 636
615 if (efi_enabled(EFI_DBG)) 637 if (efi_enabled(EFI_DBG))
616 pr_info(" %s: 0x%0*llx\n", dt_params[i].name, 638 pr_info(" %s: 0x%0*llx\n", params[i].name,
617 dt_params[i].size * 2, val); 639 params[i].size * 2, val);
618 } 640 }
641
619 return 1; 642 return 1;
620} 643}
621 644
645static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
646 int depth, void *data)
647{
648 struct param_info *info = data;
649 int i;
650
651 for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
652 const char *subnode = dt_params[i].subnode;
653
654 if (depth != 1 || strcmp(uname, dt_params[i].uname) != 0) {
655 info->missing = dt_params[i].params[0].name;
656 continue;
657 }
658
659 if (subnode) {
660 node = of_get_flat_dt_subnode_by_name(node, subnode);
661 if (node < 0)
662 return 0;
663 }
664
665 return __find_uefi_params(node, info, dt_params[i].params);
666 }
667
668 return 0;
669}
670
622int __init efi_get_fdt_params(struct efi_fdt_params *params) 671int __init efi_get_fdt_params(struct efi_fdt_params *params)
623{ 672{
624 struct param_info info; 673 struct param_info info;
@@ -634,7 +683,7 @@ int __init efi_get_fdt_params(struct efi_fdt_params *params)
634 pr_info("UEFI not found.\n"); 683 pr_info("UEFI not found.\n");
635 else if (!ret) 684 else if (!ret)
636 pr_err("Can't find '%s' in device tree!\n", 685 pr_err("Can't find '%s' in device tree!\n",
637 dt_params[info.found].name); 686 info.missing);
638 687
639 return ret; 688 return ret;
640} 689}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 33daffc4392c..0e02947a8a7a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -744,6 +744,19 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
744} 744}
745 745
746/** 746/**
747 * of_get_flat_dt_subnode_by_name - get the subnode by given name
748 *
749 * @node: the parent node
750 * @uname: the name of subnode
751 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
752 */
753
754int of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
755{
756 return fdt_subnode_offset(initial_boot_params, node, uname);
757}
758
759/**
747 * of_get_flat_dt_root - find the root node in the flat blob 760 * of_get_flat_dt_root - find the root node in the flat blob
748 */ 761 */
749unsigned long __init of_get_flat_dt_root(void) 762unsigned long __init of_get_flat_dt_root(void)
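
of_get_flat_dt_subnode_by_name() is what lets the EFI code above descend from the depth-1 "hypervisor" node into its "uefi" child while still walking the flattened tree. A sketch of the intended usage from an of_scan_flat_dt() callback; the node and property names are the Xen UEFI ones used above, the rest is illustrative only:

#include <linux/of_fdt.h>
#include <linux/string.h>

static int __init demo_find_xen_uefi(unsigned long node, const char *uname,
                                     int depth, void *data)
{
        int subnode, len;

        if (depth != 1 || strcmp(uname, "hypervisor") != 0)
                return 0;

        subnode = of_get_flat_dt_subnode_by_name(node, "uefi");
        if (subnode < 0)
                return 0;       /* no "uefi" child: not a Xen UEFI boot */

        /* Properties are then read from the subnode offset as usual. */
        return of_get_flat_dt_prop(subnode, "xen,uefi-system-table",
                                   &len) != NULL;
}

It would be driven by of_scan_flat_dt(demo_find_xen_uefi, NULL); a non-zero return stops the scan.
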
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 979a8317204f..f15bb3b789d5 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -275,7 +275,7 @@ config XEN_HAVE_PVMMU
275 275
276config XEN_EFI 276config XEN_EFI
277 def_bool y 277 def_bool y
278 depends on X86_64 && EFI 278 depends on (ARM || ARM64 || X86_64) && EFI
279 279
280config XEN_AUTO_XLATE 280config XEN_AUTO_XLATE
281 def_bool y 281 def_bool y
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 030e91b38e32..8feab810aed9 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -10,6 +10,7 @@ CFLAGS_features.o := $(nostackp)
10CFLAGS_efi.o += -fshort-wchar 10CFLAGS_efi.o += -fshort-wchar
11LDFLAGS += $(call ld-option, --no-wchar-size-warning) 11LDFLAGS += $(call ld-option, --no-wchar-size-warning)
12 12
13dom0-$(CONFIG_ARM64) += arm-device.o
13dom0-$(CONFIG_PCI) += pci.o 14dom0-$(CONFIG_PCI) += pci.o
14dom0-$(CONFIG_USB_SUPPORT) += dbgp.o 15dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
15dom0-$(CONFIG_XEN_ACPI) += acpi.o $(xen-pad-y) 16dom0-$(CONFIG_XEN_ACPI) += acpi.o $(xen-pad-y)
diff --git a/drivers/xen/arm-device.c b/drivers/xen/arm-device.c
new file mode 100644
index 000000000000..778acf80aacb
--- /dev/null
+++ b/drivers/xen/arm-device.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright (c) 2015, Linaro Limited, Shannon Zhao
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/platform_device.h>
18#include <linux/acpi.h>
19#include <xen/xen.h>
20#include <xen/page.h>
21#include <xen/interface/memory.h>
22#include <asm/xen/hypervisor.h>
23#include <asm/xen/hypercall.h>
24
25static int xen_unmap_device_mmio(const struct resource *resources,
26 unsigned int count)
27{
28 unsigned int i, j, nr;
29 int rc = 0;
30 const struct resource *r;
31 struct xen_remove_from_physmap xrp;
32
33 for (i = 0; i < count; i++) {
34 r = &resources[i];
35 nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
36 if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
37 continue;
38
39 for (j = 0; j < nr; j++) {
40 xrp.domid = DOMID_SELF;
41 xrp.gpfn = XEN_PFN_DOWN(r->start) + j;
42 rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap,
43 &xrp);
44 if (rc)
45 return rc;
46 }
47 }
48
49 return rc;
50}
51
52static int xen_map_device_mmio(const struct resource *resources,
53 unsigned int count)
54{
55 unsigned int i, j, nr;
56 int rc = 0;
57 const struct resource *r;
58 xen_pfn_t *gpfns;
59 xen_ulong_t *idxs;
60 int *errs;
61 struct xen_add_to_physmap_range xatp;
62
63 for (i = 0; i < count; i++) {
64 r = &resources[i];
65 nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
66 if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
67 continue;
68
69 gpfns = kzalloc(sizeof(xen_pfn_t) * nr, GFP_KERNEL);
70 idxs = kzalloc(sizeof(xen_ulong_t) * nr, GFP_KERNEL);
71 errs = kzalloc(sizeof(int) * nr, GFP_KERNEL);
72 if (!gpfns || !idxs || !errs) {
73 kfree(gpfns);
74 kfree(idxs);
75 kfree(errs);
76 rc = -ENOMEM;
77 goto unmap;
78 }
79
80 for (j = 0; j < nr; j++) {
81 /*
82 * The regions are always mapped 1:1 to DOM0 and this is
83 * fine because the memory map for DOM0 is the same as
84 * the host (except for the RAM).
85 */
86 gpfns[j] = XEN_PFN_DOWN(r->start) + j;
87 idxs[j] = XEN_PFN_DOWN(r->start) + j;
88 }
89
90 xatp.domid = DOMID_SELF;
91 xatp.size = nr;
92 xatp.space = XENMAPSPACE_dev_mmio;
93
94 set_xen_guest_handle(xatp.gpfns, gpfns);
95 set_xen_guest_handle(xatp.idxs, idxs);
96 set_xen_guest_handle(xatp.errs, errs);
97
98 rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
99 kfree(gpfns);
100 kfree(idxs);
101 kfree(errs);
102 if (rc)
103 goto unmap;
104 }
105
106 return rc;
107
108unmap:
109 xen_unmap_device_mmio(resources, i);
110 return rc;
111}
112
113static int xen_platform_notifier(struct notifier_block *nb,
114 unsigned long action, void *data)
115{
116 struct platform_device *pdev = to_platform_device(data);
117 int r = 0;
118
119 if (pdev->num_resources == 0 || pdev->resource == NULL)
120 return NOTIFY_OK;
121
122 switch (action) {
123 case BUS_NOTIFY_ADD_DEVICE:
124 r = xen_map_device_mmio(pdev->resource, pdev->num_resources);
125 break;
126 case BUS_NOTIFY_DEL_DEVICE:
127 r = xen_unmap_device_mmio(pdev->resource, pdev->num_resources);
128 break;
129 default:
130 return NOTIFY_DONE;
131 }
132 if (r)
133 dev_err(&pdev->dev, "Platform: Failed to %s device %s MMIO!\n",
134 action == BUS_NOTIFY_ADD_DEVICE ? "map" :
135 (action == BUS_NOTIFY_DEL_DEVICE ? "unmap" : "?"),
136 pdev->name);
137
138 return NOTIFY_OK;
139}
140
141static struct notifier_block platform_device_nb = {
142 .notifier_call = xen_platform_notifier,
143};
144
145static int __init register_xen_platform_notifier(void)
146{
147 if (!xen_initial_domain() || acpi_disabled)
148 return 0;
149
150 return bus_register_notifier(&platform_bus_type, &platform_device_nb);
151}
152
153arch_initcall(register_xen_platform_notifier);
154
155#ifdef CONFIG_ARM_AMBA
156#include <linux/amba/bus.h>
157
158static int xen_amba_notifier(struct notifier_block *nb,
159 unsigned long action, void *data)
160{
161 struct amba_device *adev = to_amba_device(data);
162 int r = 0;
163
164 switch (action) {
165 case BUS_NOTIFY_ADD_DEVICE:
166 r = xen_map_device_mmio(&adev->res, 1);
167 break;
168 case BUS_NOTIFY_DEL_DEVICE:
169 r = xen_unmap_device_mmio(&adev->res, 1);
170 break;
171 default:
172 return NOTIFY_DONE;
173 }
174 if (r)
175 dev_err(&adev->dev, "AMBA: Failed to %s device %s MMIO!\n",
176 action == BUS_NOTIFY_ADD_DEVICE ? "map" :
177 (action == BUS_NOTIFY_DEL_DEVICE ? "unmap" : "?"),
178 adev->dev.init_name);
179
180 return NOTIFY_OK;
181}
182
183static struct notifier_block amba_device_nb = {
184 .notifier_call = xen_amba_notifier,
185};
186
187static int __init register_xen_amba_notifier(void)
188{
189 if (!xen_initial_domain() || acpi_disabled)
190 return 0;
191
192 return bus_register_notifier(&amba_bustype, &amba_device_nb);
193}
194
195arch_initcall(register_xen_amba_notifier);
196#endif
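
Nothing calls xen_map_device_mmio() directly: the two bus notifiers fire when Dom0 adds a platform or AMBA device, so the MMIO ranges are already in the guest physmap by the time a driver probes. A sketch of the trigger path, with a made-up device name and address:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/err.h>

static struct resource demo_res = {
        .start = 0x1c090000,            /* hypothetical MMIO base */
        .end   = 0x1c090fff,
        .flags = IORESOURCE_MEM,
};

static int __init demo_add_device(void)
{
        struct platform_device *pdev;

        /* device_add() emits BUS_NOTIFY_ADD_DEVICE, which maps demo_res. */
        pdev = platform_device_register_simple("demo-dev", -1, &demo_res, 1);
        return PTR_ERR_OR_ZERO(pdev);
}
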
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index e9d2135445c1..22f71ffd3406 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -38,7 +38,7 @@
38 38
39#define efi_data(op) (op.u.efi_runtime_call) 39#define efi_data(op) (op.u.efi_runtime_call)
40 40
41static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) 41efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
42{ 42{
43 struct xen_platform_op op = INIT_EFI_OP(get_time); 43 struct xen_platform_op op = INIT_EFI_OP(get_time);
44 44
@@ -59,8 +59,9 @@ static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
59 59
60 return efi_data(op).status; 60 return efi_data(op).status;
61} 61}
62EXPORT_SYMBOL_GPL(xen_efi_get_time);
62 63
63static efi_status_t xen_efi_set_time(efi_time_t *tm) 64efi_status_t xen_efi_set_time(efi_time_t *tm)
64{ 65{
65 struct xen_platform_op op = INIT_EFI_OP(set_time); 66 struct xen_platform_op op = INIT_EFI_OP(set_time);
66 67
@@ -72,10 +73,10 @@ static efi_status_t xen_efi_set_time(efi_time_t *tm)
72 73
73 return efi_data(op).status; 74 return efi_data(op).status;
74} 75}
76EXPORT_SYMBOL_GPL(xen_efi_set_time);
75 77
76static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, 78efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
77 efi_bool_t *pending, 79 efi_time_t *tm)
78 efi_time_t *tm)
79{ 80{
80 struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time); 81 struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
81 82
@@ -95,8 +96,9 @@ static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
95 96
96 return efi_data(op).status; 97 return efi_data(op).status;
97} 98}
99EXPORT_SYMBOL_GPL(xen_efi_get_wakeup_time);
98 100
99static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) 101efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
100{ 102{
101 struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time); 103 struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
102 104
@@ -113,12 +115,11 @@ static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
113 115
114 return efi_data(op).status; 116 return efi_data(op).status;
115} 117}
118EXPORT_SYMBOL_GPL(xen_efi_set_wakeup_time);
116 119
117static efi_status_t xen_efi_get_variable(efi_char16_t *name, 120efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
118 efi_guid_t *vendor, 121 u32 *attr, unsigned long *data_size,
119 u32 *attr, 122 void *data)
120 unsigned long *data_size,
121 void *data)
122{ 123{
123 struct xen_platform_op op = INIT_EFI_OP(get_variable); 124 struct xen_platform_op op = INIT_EFI_OP(get_variable);
124 125
@@ -138,10 +139,11 @@ static efi_status_t xen_efi_get_variable(efi_char16_t *name,
138 139
139 return efi_data(op).status; 140 return efi_data(op).status;
140} 141}
142EXPORT_SYMBOL_GPL(xen_efi_get_variable);
141 143
142static efi_status_t xen_efi_get_next_variable(unsigned long *name_size, 144efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
143 efi_char16_t *name, 145 efi_char16_t *name,
144 efi_guid_t *vendor) 146 efi_guid_t *vendor)
145{ 147{
146 struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name); 148 struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
147 149
@@ -161,12 +163,11 @@ static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
161 163
162 return efi_data(op).status; 164 return efi_data(op).status;
163} 165}
166EXPORT_SYMBOL_GPL(xen_efi_get_next_variable);
164 167
165static efi_status_t xen_efi_set_variable(efi_char16_t *name, 168efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
166 efi_guid_t *vendor, 169 u32 attr, unsigned long data_size,
167 u32 attr, 170 void *data)
168 unsigned long data_size,
169 void *data)
170{ 171{
171 struct xen_platform_op op = INIT_EFI_OP(set_variable); 172 struct xen_platform_op op = INIT_EFI_OP(set_variable);
172 173
@@ -183,11 +184,11 @@ static efi_status_t xen_efi_set_variable(efi_char16_t *name,
183 184
184 return efi_data(op).status; 185 return efi_data(op).status;
185} 186}
187EXPORT_SYMBOL_GPL(xen_efi_set_variable);
186 188
187static efi_status_t xen_efi_query_variable_info(u32 attr, 189efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
188 u64 *storage_space, 190 u64 *remaining_space,
189 u64 *remaining_space, 191 u64 *max_variable_size)
190 u64 *max_variable_size)
191{ 192{
192 struct xen_platform_op op = INIT_EFI_OP(query_variable_info); 193 struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
193 194
@@ -205,8 +206,9 @@ static efi_status_t xen_efi_query_variable_info(u32 attr,
205 206
206 return efi_data(op).status; 207 return efi_data(op).status;
207} 208}
209EXPORT_SYMBOL_GPL(xen_efi_query_variable_info);
208 210
209static efi_status_t xen_efi_get_next_high_mono_count(u32 *count) 211efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
210{ 212{
211 struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count); 213 struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
212 214
@@ -217,10 +219,10 @@ static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
217 219
218 return efi_data(op).status; 220 return efi_data(op).status;
219} 221}
222EXPORT_SYMBOL_GPL(xen_efi_get_next_high_mono_count);
220 223
221static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules, 224efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
222 unsigned long count, 225 unsigned long count, unsigned long sg_list)
223 unsigned long sg_list)
224{ 226{
225 struct xen_platform_op op = INIT_EFI_OP(update_capsule); 227 struct xen_platform_op op = INIT_EFI_OP(update_capsule);
226 228
@@ -237,11 +239,11 @@ static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
237 239
238 return efi_data(op).status; 240 return efi_data(op).status;
239} 241}
242EXPORT_SYMBOL_GPL(xen_efi_update_capsule);
240 243
241static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules, 244efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
242 unsigned long count, 245 unsigned long count, u64 *max_size,
243 u64 *max_size, 246 int *reset_type)
244 int *reset_type)
245{ 247{
246 struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities); 248 struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
247 249
@@ -260,111 +262,4 @@ static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
260 262
261 return efi_data(op).status; 263 return efi_data(op).status;
262} 264}
263 265EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
264static efi_char16_t vendor[100] __initdata;
265
266static efi_system_table_t efi_systab_xen __initdata = {
267 .hdr = {
268 .signature = EFI_SYSTEM_TABLE_SIGNATURE,
269 .revision = 0, /* Initialized later. */
270 .headersize = 0, /* Ignored by Linux Kernel. */
271 .crc32 = 0, /* Ignored by Linux Kernel. */
272 .reserved = 0
273 },
274 .fw_vendor = EFI_INVALID_TABLE_ADDR, /* Initialized later. */
275 .fw_revision = 0, /* Initialized later. */
276 .con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
277 .con_in = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
278 .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
279 .con_out = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
280 .stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
281 .stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
282 .runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
283 /* Not used under Xen. */
284 .boottime = (efi_boot_services_t *)EFI_INVALID_TABLE_ADDR,
285 /* Not used under Xen. */
286 .nr_tables = 0, /* Initialized later. */
287 .tables = EFI_INVALID_TABLE_ADDR /* Initialized later. */
288};
289
290static const struct efi efi_xen __initconst = {
291 .systab = NULL, /* Initialized later. */
292 .runtime_version = 0, /* Initialized later. */
293 .mps = EFI_INVALID_TABLE_ADDR,
294 .acpi = EFI_INVALID_TABLE_ADDR,
295 .acpi20 = EFI_INVALID_TABLE_ADDR,
296 .smbios = EFI_INVALID_TABLE_ADDR,
297 .smbios3 = EFI_INVALID_TABLE_ADDR,
298 .sal_systab = EFI_INVALID_TABLE_ADDR,
299 .boot_info = EFI_INVALID_TABLE_ADDR,
300 .hcdp = EFI_INVALID_TABLE_ADDR,
301 .uga = EFI_INVALID_TABLE_ADDR,
302 .uv_systab = EFI_INVALID_TABLE_ADDR,
303 .fw_vendor = EFI_INVALID_TABLE_ADDR,
304 .runtime = EFI_INVALID_TABLE_ADDR,
305 .config_table = EFI_INVALID_TABLE_ADDR,
306 .get_time = xen_efi_get_time,
307 .set_time = xen_efi_set_time,
308 .get_wakeup_time = xen_efi_get_wakeup_time,
309 .set_wakeup_time = xen_efi_set_wakeup_time,
310 .get_variable = xen_efi_get_variable,
311 .get_next_variable = xen_efi_get_next_variable,
312 .set_variable = xen_efi_set_variable,
313 .query_variable_info = xen_efi_query_variable_info,
314 .update_capsule = xen_efi_update_capsule,
315 .query_capsule_caps = xen_efi_query_capsule_caps,
316 .get_next_high_mono_count = xen_efi_get_next_high_mono_count,
317 .reset_system = NULL, /* Functionality provided by Xen. */
318 .set_virtual_address_map = NULL, /* Not used under Xen. */
319 .flags = 0 /* Initialized later. */
320};
321
322efi_system_table_t __init *xen_efi_probe(void)
323{
324 struct xen_platform_op op = {
325 .cmd = XENPF_firmware_info,
326 .u.firmware_info = {
327 .type = XEN_FW_EFI_INFO,
328 .index = XEN_FW_EFI_CONFIG_TABLE
329 }
330 };
331 union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;
332
333 if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
334 return NULL;
335
336 /* Here we know that Xen runs on EFI platform. */
337
338 efi = efi_xen;
339
340 efi_systab_xen.tables = info->cfg.addr;
341 efi_systab_xen.nr_tables = info->cfg.nent;
342
343 op.cmd = XENPF_firmware_info;
344 op.u.firmware_info.type = XEN_FW_EFI_INFO;
345 op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
346 info->vendor.bufsz = sizeof(vendor);
347 set_xen_guest_handle(info->vendor.name, vendor);
348
349 if (HYPERVISOR_platform_op(&op) == 0) {
350 efi_systab_xen.fw_vendor = __pa_symbol(vendor);
351 efi_systab_xen.fw_revision = info->vendor.revision;
352 } else
353 efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");
354
355 op.cmd = XENPF_firmware_info;
356 op.u.firmware_info.type = XEN_FW_EFI_INFO;
357 op.u.firmware_info.index = XEN_FW_EFI_VERSION;
358
359 if (HYPERVISOR_platform_op(&op) == 0)
360 efi_systab_xen.hdr.revision = info->version;
361
362 op.cmd = XENPF_firmware_info;
363 op.u.firmware_info.type = XEN_FW_EFI_INFO;
364 op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;
365
366 if (HYPERVISOR_platform_op(&op) == 0)
367 efi.runtime_version = info->version;
368
369 return &efi_systab_xen;
370}
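
With the static qualifiers dropped and the symbols exported, the struct efi wiring that used to sit at the bottom of this file becomes the arch code's job (the deleted xen_efi_probe()/efi_systab_xen block moves into the arch-level xen/efi.c code this series touches). A minimal sketch of what that arch-side setup amounts to; the function name is hypothetical, and the prototypes are assumed to come from <xen/xen-ops.h> as added by this series:

#include <linux/efi.h>
#include <xen/xen-ops.h>

static void __init demo_xen_efi_runtime_setup(void)
{
        efi.get_time             = xen_efi_get_time;
        efi.set_time             = xen_efi_set_time;
        efi.get_wakeup_time      = xen_efi_get_wakeup_time;
        efi.set_wakeup_time      = xen_efi_set_wakeup_time;
        efi.get_variable         = xen_efi_get_variable;
        efi.get_next_variable    = xen_efi_get_next_variable;
        efi.set_variable         = xen_efi_set_variable;
        efi.query_variable_info  = xen_efi_query_variable_info;
        efi.update_capsule       = xen_efi_update_capsule;
        efi.query_capsule_caps   = xen_efi_query_capsule_caps;
        efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
}
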
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 71d49a95f8c0..d5dbdb9d24d8 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -895,7 +895,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
895 irq_set_chip_and_handler_name(irq, &xen_percpu_chip, 895 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
896 handle_percpu_irq, "ipi"); 896 handle_percpu_irq, "ipi");
897 897
898 bind_ipi.vcpu = cpu; 898 bind_ipi.vcpu = xen_vcpu_nr(cpu);
899 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 899 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
900 &bind_ipi) != 0) 900 &bind_ipi) != 0)
901 BUG(); 901 BUG();
@@ -991,7 +991,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
991 handle_edge_irq, "virq"); 991 handle_edge_irq, "virq");
992 992
993 bind_virq.virq = virq; 993 bind_virq.virq = virq;
994 bind_virq.vcpu = cpu; 994 bind_virq.vcpu = xen_vcpu_nr(cpu);
995 ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 995 ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
996 &bind_virq); 996 &bind_virq);
997 if (ret == 0) 997 if (ret == 0)
@@ -1211,7 +1211,8 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1211 1211
1212#ifdef CONFIG_X86 1212#ifdef CONFIG_X86
1213 if (unlikely(vector == XEN_NMI_VECTOR)) { 1213 if (unlikely(vector == XEN_NMI_VECTOR)) {
1214 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL); 1214 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
1215 NULL);
1215 if (rc < 0) 1216 if (rc < 0)
1216 printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc); 1217 printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1217 return; 1218 return;
@@ -1318,7 +1319,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1318 1319
1319 /* Send future instances of this interrupt to other vcpu. */ 1320 /* Send future instances of this interrupt to other vcpu. */
1320 bind_vcpu.port = evtchn; 1321 bind_vcpu.port = evtchn;
1321 bind_vcpu.vcpu = tcpu; 1322 bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
1322 1323
1323 /* 1324 /*
1324 * Mask the event while changing the VCPU binding to prevent 1325 * Mask the event while changing the VCPU binding to prevent
@@ -1458,7 +1459,7 @@ static void restore_cpu_virqs(unsigned int cpu)
1458 1459
1459 /* Get a new binding from Xen. */ 1460 /* Get a new binding from Xen. */
1460 bind_virq.virq = virq; 1461 bind_virq.virq = virq;
1461 bind_virq.vcpu = cpu; 1462 bind_virq.vcpu = xen_vcpu_nr(cpu);
1462 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 1463 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1463 &bind_virq) != 0) 1464 &bind_virq) != 0)
1464 BUG(); 1465 BUG();
@@ -1482,7 +1483,7 @@ static void restore_cpu_ipis(unsigned int cpu)
1482 BUG_ON(ipi_from_irq(irq) != ipi); 1483 BUG_ON(ipi_from_irq(irq) != ipi);
1483 1484
1484 /* Get a new binding from Xen. */ 1485 /* Get a new binding from Xen. */
1485 bind_ipi.vcpu = cpu; 1486 bind_ipi.vcpu = xen_vcpu_nr(cpu);
1486 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 1487 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1487 &bind_ipi) != 0) 1488 &bind_ipi) != 0)
1488 BUG(); 1489 BUG();
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 9289a17712e2..266c2c733039 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -113,7 +113,7 @@ static int init_control_block(int cpu,
113 113
114 init_control.control_gfn = virt_to_gfn(control_block); 114 init_control.control_gfn = virt_to_gfn(control_block);
115 init_control.offset = 0; 115 init_control.offset = 0;
116 init_control.vcpu = cpu; 116 init_control.vcpu = xen_vcpu_nr(cpu);
117 117
118 return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control); 118 return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
119} 119}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index f4edd6df3df2..e8c7f09d01be 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -55,6 +55,7 @@
55#include <xen/xen.h> 55#include <xen/xen.h>
56#include <xen/events.h> 56#include <xen/events.h>
57#include <xen/evtchn.h> 57#include <xen/evtchn.h>
58#include <xen/xen-ops.h>
58#include <asm/xen/hypervisor.h> 59#include <asm/xen/hypervisor.h>
59 60
60struct per_user_data { 61struct per_user_data {
@@ -73,8 +74,12 @@ struct per_user_data {
73 wait_queue_head_t evtchn_wait; 74 wait_queue_head_t evtchn_wait;
74 struct fasync_struct *evtchn_async_queue; 75 struct fasync_struct *evtchn_async_queue;
75 const char *name; 76 const char *name;
77
78 domid_t restrict_domid;
76}; 79};
77 80
81#define UNRESTRICTED_DOMID ((domid_t)-1)
82
78struct user_evtchn { 83struct user_evtchn {
79 struct rb_node node; 84 struct rb_node node;
80 struct per_user_data *user; 85 struct per_user_data *user;
@@ -443,12 +448,16 @@ static long evtchn_ioctl(struct file *file,
443 struct ioctl_evtchn_bind_virq bind; 448 struct ioctl_evtchn_bind_virq bind;
444 struct evtchn_bind_virq bind_virq; 449 struct evtchn_bind_virq bind_virq;
445 450
451 rc = -EACCES;
452 if (u->restrict_domid != UNRESTRICTED_DOMID)
453 break;
454
446 rc = -EFAULT; 455 rc = -EFAULT;
447 if (copy_from_user(&bind, uarg, sizeof(bind))) 456 if (copy_from_user(&bind, uarg, sizeof(bind)))
448 break; 457 break;
449 458
450 bind_virq.virq = bind.virq; 459 bind_virq.virq = bind.virq;
451 bind_virq.vcpu = 0; 460 bind_virq.vcpu = xen_vcpu_nr(0);
452 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 461 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
453 &bind_virq); 462 &bind_virq);
454 if (rc != 0) 463 if (rc != 0)
@@ -468,6 +477,11 @@ static long evtchn_ioctl(struct file *file,
468 if (copy_from_user(&bind, uarg, sizeof(bind))) 477 if (copy_from_user(&bind, uarg, sizeof(bind)))
469 break; 478 break;
470 479
480 rc = -EACCES;
481 if (u->restrict_domid != UNRESTRICTED_DOMID &&
482 u->restrict_domid != bind.remote_domain)
483 break;
484
471 bind_interdomain.remote_dom = bind.remote_domain; 485 bind_interdomain.remote_dom = bind.remote_domain;
472 bind_interdomain.remote_port = bind.remote_port; 486 bind_interdomain.remote_port = bind.remote_port;
473 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, 487 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
@@ -485,6 +499,10 @@ static long evtchn_ioctl(struct file *file,
485 struct ioctl_evtchn_bind_unbound_port bind; 499 struct ioctl_evtchn_bind_unbound_port bind;
486 struct evtchn_alloc_unbound alloc_unbound; 500 struct evtchn_alloc_unbound alloc_unbound;
487 501
502 rc = -EACCES;
503 if (u->restrict_domid != UNRESTRICTED_DOMID)
504 break;
505
488 rc = -EFAULT; 506 rc = -EFAULT;
489 if (copy_from_user(&bind, uarg, sizeof(bind))) 507 if (copy_from_user(&bind, uarg, sizeof(bind)))
490 break; 508 break;
@@ -553,6 +571,27 @@ static long evtchn_ioctl(struct file *file,
553 break; 571 break;
554 } 572 }
555 573
574 case IOCTL_EVTCHN_RESTRICT_DOMID: {
575 struct ioctl_evtchn_restrict_domid ierd;
576
577 rc = -EACCES;
578 if (u->restrict_domid != UNRESTRICTED_DOMID)
579 break;
580
581 rc = -EFAULT;
582 if (copy_from_user(&ierd, uarg, sizeof(ierd)))
583 break;
584
585 rc = -EINVAL;
586 if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
587 break;
588
589 u->restrict_domid = ierd.domid;
590 rc = 0;
591
592 break;
593 }
594
556 default: 595 default:
557 rc = -ENOSYS; 596 rc = -ENOSYS;
558 break; 597 break;
@@ -601,6 +640,8 @@ static int evtchn_open(struct inode *inode, struct file *filp)
601 mutex_init(&u->ring_cons_mutex); 640 mutex_init(&u->ring_cons_mutex);
602 spin_lock_init(&u->ring_prod_lock); 641 spin_lock_init(&u->ring_prod_lock);
603 642
643 u->restrict_domid = UNRESTRICTED_DOMID;
644
604 filp->private_data = u; 645 filp->private_data = u;
605 646
606 return nonseekable_open(inode, filp); 647 return nonseekable_open(inode, filp);
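
From userspace, IOCTL_EVTCHN_RESTRICT_DOMID is a one-way switch: once it succeeds, the handle can only bind interdomain event channels to the named domain, and the virq/unbound-port binds above return -EACCES. A minimal sketch; the header path and error handling are simplified, and the ioctl and structure come from the kernel's uapi xen/evtchn.h additions in this series:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/evtchn.h>         /* kernel uapi header */

/* Open /dev/xen/evtchn and restrict the handle to one domain id. */
static int open_restricted_evtchn(unsigned int domid)
{
        struct ioctl_evtchn_restrict_domid r = { .domid = domid };
        int fd = open("/dev/xen/evtchn", O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, IOCTL_EVTCHN_RESTRICT_DOMID, &r) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* further binds limited to 'domid' */
}

Note the kernel side rejects domid 0 and reserved domids, and the restriction cannot be changed again on the same handle.
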
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 4547a91bca67..7a47c4c9fb1b 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -504,7 +504,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
504 struct gntalloc_file_private_data *priv = filp->private_data; 504 struct gntalloc_file_private_data *priv = filp->private_data;
505 struct gntalloc_vma_private_data *vm_priv; 505 struct gntalloc_vma_private_data *vm_priv;
506 struct gntalloc_gref *gref; 506 struct gntalloc_gref *gref;
507 int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 507 int count = vma_pages(vma);
508 int rv, i; 508 int rv, i;
509 509
510 if (!(vma->vm_flags & VM_SHARED)) { 510 if (!(vma->vm_flags & VM_SHARED)) {
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67939578cd6d..bb952121ea94 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -982,7 +982,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
982{ 982{
983 struct gntdev_priv *priv = flip->private_data; 983 struct gntdev_priv *priv = flip->private_data;
984 int index = vma->vm_pgoff; 984 int index = vma->vm_pgoff;
985 int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 985 int count = vma_pages(vma);
986 struct grant_map *map; 986 struct grant_map *map;
987 int i, err = -EINVAL; 987 int i, err = -EINVAL;
988 988
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index df2e6f783318..702040fe2001 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -582,7 +582,7 @@ static long privcmd_ioctl(struct file *file,
582static void privcmd_close(struct vm_area_struct *vma) 582static void privcmd_close(struct vm_area_struct *vma)
583{ 583{
584 struct page **pages = vma->vm_private_data; 584 struct page **pages = vma->vm_private_data;
585 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 585 int numpgs = vma_pages(vma);
586 int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT; 586 int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
587 int rc; 587 int rc;
588 588
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index 71078425c9ea..ac5f23fcafc2 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -6,6 +6,7 @@
6#include <linux/math64.h> 6#include <linux/math64.h>
7#include <linux/gfp.h> 7#include <linux/gfp.h>
8 8
9#include <asm/paravirt.h>
9#include <asm/xen/hypervisor.h> 10#include <asm/xen/hypervisor.h>
10#include <asm/xen/hypercall.h> 11#include <asm/xen/hypercall.h>
11 12
@@ -46,27 +47,31 @@ static u64 get64(const u64 *p)
46 return ret; 47 return ret;
47} 48}
48 49
49/* 50static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
50 * Runstate accounting 51 unsigned int cpu)
51 */
52void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
53{ 52{
54 u64 state_time; 53 u64 state_time;
55 struct vcpu_runstate_info *state; 54 struct vcpu_runstate_info *state;
56 55
57 BUG_ON(preemptible()); 56 BUG_ON(preemptible());
58 57
59 state = this_cpu_ptr(&xen_runstate); 58 state = per_cpu_ptr(&xen_runstate, cpu);
60 59
61 /*
62 * The runstate info is always updated by the hypervisor on
63 * the current CPU, so there's no need to use anything
64 * stronger than a compiler barrier when fetching it.
65 */
66 do { 60 do {
67 state_time = get64(&state->state_entry_time); 61 state_time = get64(&state->state_entry_time);
62 rmb(); /* Hypervisor might update data. */
68 *res = READ_ONCE(*state); 63 *res = READ_ONCE(*state);
69 } while (get64(&state->state_entry_time) != state_time); 64 rmb(); /* Hypervisor might update data. */
65 } while (get64(&state->state_entry_time) != state_time ||
66 (state_time & XEN_RUNSTATE_UPDATE));
67}
68
69/*
70 * Runstate accounting
71 */
72void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
73{
74 xen_get_runstate_snapshot_cpu(res, smp_processor_id());
70} 75}
71 76
72/* return true when a vcpu could run but has no real cpu to run on */ 77/* return true when a vcpu could run but has no real cpu to run on */
@@ -75,6 +80,14 @@ bool xen_vcpu_stolen(int vcpu)
75 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; 80 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
76} 81}
77 82
83u64 xen_steal_clock(int cpu)
84{
85 struct vcpu_runstate_info state;
86
87 xen_get_runstate_snapshot_cpu(&state, cpu);
88 return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
89}
90
78void xen_setup_runstate_info(int cpu) 91void xen_setup_runstate_info(int cpu)
79{ 92{
80 struct vcpu_register_runstate_memory_area area; 93 struct vcpu_register_runstate_memory_area area;
@@ -82,7 +95,20 @@ void xen_setup_runstate_info(int cpu)
82 area.addr.v = &per_cpu(xen_runstate, cpu); 95 area.addr.v = &per_cpu(xen_runstate, cpu);
83 96
84 if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, 97 if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
85 cpu, &area)) 98 xen_vcpu_nr(cpu), &area))
86 BUG(); 99 BUG();
87} 100}
88 101
102void __init xen_time_setup_guest(void)
103{
104 bool xen_runstate_remote;
105
106 xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
107 VMASST_TYPE_runstate_update_flag);
108
109 pv_time_ops.steal_clock = xen_steal_clock;
110
111 static_key_slow_inc(&paravirt_steal_enabled);
112 if (xen_runstate_remote)
113 static_key_slow_inc(&paravirt_steal_rq_enabled);
114}
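
xen_time_setup_guest() is the new common entry point: it points pv_time_ops.steal_clock at xen_steal_clock() and flips the paravirt steal-time static keys, enabling the stronger paravirt_steal_rq_enabled only when Xen accepts VMASST_TYPE_runstate_update_flag and the runstate of other CPUs can therefore be sampled consistently. Consumers then read stolen time through the paravirt hook rather than calling Xen directly -- roughly what kernel/sched/cputime.c does. A sketch, assuming a CONFIG_PARAVIRT build:

#include <linux/types.h>
#include <asm/paravirt.h>

/* Stolen time for a CPU in ns: runnable + offline runstate time on Xen. */
static u64 demo_stolen_ns(int cpu)
{
        return paravirt_steal_clock(cpu);
}
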
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 6a25533da237..9e9286d0872e 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -148,7 +148,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
148 struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev); 148 struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
149 const struct config_field_entry *cfg_entry; 149 const struct config_field_entry *cfg_entry;
150 const struct config_field *field; 150 const struct config_field *field;
151 int req_start, req_end, field_start, field_end; 151 int field_start, field_end;
152 /* if read fails for any reason, return 0 152 /* if read fails for any reason, return 0
153 * (as if device didn't respond) */ 153 * (as if device didn't respond) */
154 u32 value = 0, tmp_val; 154 u32 value = 0, tmp_val;
@@ -178,12 +178,10 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
178 list_for_each_entry(cfg_entry, &dev_data->config_fields, list) { 178 list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
179 field = cfg_entry->field; 179 field = cfg_entry->field;
180 180
181 req_start = offset;
182 req_end = offset + size;
183 field_start = OFFSET(cfg_entry); 181 field_start = OFFSET(cfg_entry);
184 field_end = OFFSET(cfg_entry) + field->size; 182 field_end = OFFSET(cfg_entry) + field->size;
185 183
186 if (req_end > field_start && field_end > req_start) { 184 if (offset + size > field_start && field_end > offset) {
187 err = conf_space_read(dev, cfg_entry, field_start, 185 err = conf_space_read(dev, cfg_entry, field_start,
188 &tmp_val); 186 &tmp_val);
189 if (err) 187 if (err)
@@ -191,7 +189,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
191 189
192 value = merge_value(value, tmp_val, 190 value = merge_value(value, tmp_val,
193 get_mask(field->size), 191 get_mask(field->size),
194 field_start - req_start); 192 field_start - offset);
195 } 193 }
196 } 194 }
197 195
@@ -211,7 +209,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
211 const struct config_field_entry *cfg_entry; 209 const struct config_field_entry *cfg_entry;
212 const struct config_field *field; 210 const struct config_field *field;
213 u32 tmp_val; 211 u32 tmp_val;
214 int req_start, req_end, field_start, field_end; 212 int field_start, field_end;
215 213
216 if (unlikely(verbose_request)) 214 if (unlikely(verbose_request))
217 printk(KERN_DEBUG 215 printk(KERN_DEBUG
@@ -224,21 +222,17 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
224 list_for_each_entry(cfg_entry, &dev_data->config_fields, list) { 222 list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
225 field = cfg_entry->field; 223 field = cfg_entry->field;
226 224
227 req_start = offset;
228 req_end = offset + size;
229 field_start = OFFSET(cfg_entry); 225 field_start = OFFSET(cfg_entry);
230 field_end = OFFSET(cfg_entry) + field->size; 226 field_end = OFFSET(cfg_entry) + field->size;
231 227
232 if (req_end > field_start && field_end > req_start) { 228 if (offset + size > field_start && field_end > offset) {
233 tmp_val = 0; 229 err = conf_space_read(dev, cfg_entry, field_start,
234 230 &tmp_val);
235 err = xen_pcibk_config_read(dev, field_start,
236 field->size, &tmp_val);
237 if (err) 231 if (err)
238 break; 232 break;
239 233
240 tmp_val = merge_value(tmp_val, value, get_mask(size), 234 tmp_val = merge_value(tmp_val, value, get_mask(size),
241 req_start - field_start); 235 offset - field_start);
242 236
243 err = conf_space_write(dev, cfg_entry, field_start, 237 err = conf_space_write(dev, cfg_entry, field_start,
244 tmp_val); 238 tmp_val);
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 9ead1c2ff1dd..5fbfd9cfb6d6 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -209,58 +209,35 @@ static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
209 return 0; 209 return 0;
210} 210}
211 211
212static inline void read_dev_bar(struct pci_dev *dev, 212static void *bar_init(struct pci_dev *dev, int offset)
213 struct pci_bar_info *bar_info, int offset,
214 u32 len_mask)
215{ 213{
216 int pos; 214 unsigned int pos;
217 struct resource *res = dev->resource; 215 const struct resource *res = dev->resource;
216 struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
217
218 if (!bar)
219 return ERR_PTR(-ENOMEM);
218 220
219 if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1) 221 if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
220 pos = PCI_ROM_RESOURCE; 222 pos = PCI_ROM_RESOURCE;
221 else { 223 else {
222 pos = (offset - PCI_BASE_ADDRESS_0) / 4; 224 pos = (offset - PCI_BASE_ADDRESS_0) / 4;
223 if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE | 225 if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64)) {
224 PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == 226 bar->val = res[pos - 1].start >> 32;
225 (PCI_BASE_ADDRESS_SPACE_MEMORY | 227 bar->len_val = -resource_size(&res[pos - 1]) >> 32;
226 PCI_BASE_ADDRESS_MEM_TYPE_64))) { 228 return bar;
227 bar_info->val = res[pos - 1].start >> 32;
228 bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
229 return;
230 } 229 }
231 } 230 }
232 231
233 if (!res[pos].flags || 232 if (!res[pos].flags ||
234 (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | 233 (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
235 IORESOURCE_BUSY))) 234 IORESOURCE_BUSY)))
236 return; 235 return bar;
237
238 bar_info->val = res[pos].start |
239 (res[pos].flags & PCI_REGION_FLAG_MASK);
240 bar_info->len_val = -resource_size(&res[pos]) |
241 (res[pos].flags & PCI_REGION_FLAG_MASK);
242}
243 236
244static void *bar_init(struct pci_dev *dev, int offset) 237 bar->val = res[pos].start |
245{ 238 (res[pos].flags & PCI_REGION_FLAG_MASK);
246 struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL); 239 bar->len_val = -resource_size(&res[pos]) |
247 240 (res[pos].flags & PCI_REGION_FLAG_MASK);
248 if (!bar)
249 return ERR_PTR(-ENOMEM);
250
251 read_dev_bar(dev, bar, offset, ~0);
252
253 return bar;
254}
255
256static void *rom_init(struct pci_dev *dev, int offset)
257{
258 struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
259
260 if (!bar)
261 return ERR_PTR(-ENOMEM);
262
263 read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
264 241
265 return bar; 242 return bar;
266} 243}
@@ -383,7 +360,7 @@ static const struct config_field header_common[] = {
383 { \ 360 { \
384 .offset = reg_offset, \ 361 .offset = reg_offset, \
385 .size = 4, \ 362 .size = 4, \
386 .init = rom_init, \ 363 .init = bar_init, \
387 .reset = bar_reset, \ 364 .reset = bar_reset, \
388 .release = bar_release, \ 365 .release = bar_release, \
389 .u.dw.read = bar_read, \ 366 .u.dw.read = bar_read, \
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 4d529f3e40df..7af369b6aaa2 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -55,7 +55,6 @@ struct xen_pcibk_dev_data {
55 55
56/* Used by XenBus and xen_pcibk_ops.c */ 56/* Used by XenBus and xen_pcibk_ops.c */
57extern wait_queue_head_t xen_pcibk_aer_wait_queue; 57extern wait_queue_head_t xen_pcibk_aer_wait_queue;
58extern struct workqueue_struct *xen_pcibk_wq;
59/* Used by pcistub.c and conf_space_quirks.c */ 58/* Used by pcistub.c and conf_space_quirks.c */
60extern struct list_head xen_pcibk_quirks; 59extern struct list_head xen_pcibk_quirks;
61 60
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 2f19dd7553e6..f8c77751f330 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -310,7 +310,7 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
310 * already processing a request */ 310 * already processing a request */
311 if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags) 311 if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
312 && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) { 312 && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
313 queue_work(xen_pcibk_wq, &pdev->op_work); 313 schedule_work(&pdev->op_work);
314 } 314 }
315 /*_XEN_PCIB_active should have been cleared by pcifront. And also make 315 /*_XEN_PCIB_active should have been cleared by pcifront. And also make
316 sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/ 316 sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index c252eb3f0176..5ce878c51d03 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -17,7 +17,6 @@
17#include "pciback.h" 17#include "pciback.h"
18 18
19#define INVALID_EVTCHN_IRQ (-1) 19#define INVALID_EVTCHN_IRQ (-1)
20struct workqueue_struct *xen_pcibk_wq;
21 20
22static bool __read_mostly passthrough; 21static bool __read_mostly passthrough;
23module_param(passthrough, bool, S_IRUGO); 22module_param(passthrough, bool, S_IRUGO);
@@ -76,8 +75,7 @@ static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
76 /* If the driver domain started an op, make sure we complete it 75 /* If the driver domain started an op, make sure we complete it
77 * before releasing the shared memory */ 76 * before releasing the shared memory */
78 77
79 /* Note, the workqueue does not use spinlocks at all.*/ 78 flush_work(&pdev->op_work);
80 flush_workqueue(xen_pcibk_wq);
81 79
82 if (pdev->sh_info != NULL) { 80 if (pdev->sh_info != NULL) {
83 xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info); 81 xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
@@ -733,11 +731,6 @@ const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
733 731
734int __init xen_pcibk_xenbus_register(void) 732int __init xen_pcibk_xenbus_register(void)
735{ 733{
736 xen_pcibk_wq = create_workqueue("xen_pciback_workqueue");
737 if (!xen_pcibk_wq) {
738 pr_err("%s: create xen_pciback_workqueue failed\n", __func__);
739 return -EFAULT;
740 }
741 xen_pcibk_backend = &xen_pcibk_vpci_backend; 734 xen_pcibk_backend = &xen_pcibk_vpci_backend;
742 if (passthrough) 735 if (passthrough)
743 xen_pcibk_backend = &xen_pcibk_passthrough_backend; 736 xen_pcibk_backend = &xen_pcibk_passthrough_backend;
@@ -747,6 +740,5 @@ int __init xen_pcibk_xenbus_register(void)
747 740
748void __exit xen_pcibk_xenbus_unregister(void) 741void __exit xen_pcibk_xenbus_unregister(void)
749{ 742{
750 destroy_workqueue(xen_pcibk_wq);
751 xenbus_unregister_driver(&xen_pcibk_driver); 743 xenbus_unregister_driver(&xen_pcibk_driver);
752} 744}
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index bcb53bdc469c..611a23119675 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -31,7 +31,6 @@
31#include "xenbus_probe.h" 31#include "xenbus_probe.h"
32 32
33 33
34static struct workqueue_struct *xenbus_frontend_wq;
35 34
36/* device/<type>/<id> => <type>-<id> */ 35/* device/<type>/<id> => <type>-<id> */
37static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) 36static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
@@ -109,13 +108,7 @@ static int xenbus_frontend_dev_resume(struct device *dev)
109 if (xen_store_domain_type == XS_LOCAL) { 108 if (xen_store_domain_type == XS_LOCAL) {
110 struct xenbus_device *xdev = to_xenbus_device(dev); 109 struct xenbus_device *xdev = to_xenbus_device(dev);
111 110
112 if (!xenbus_frontend_wq) { 111 schedule_work(&xdev->work);
113 pr_err("%s: no workqueue to process delayed resume\n",
114 xdev->nodename);
115 return -EFAULT;
116 }
117
118 queue_work(xenbus_frontend_wq, &xdev->work);
119 112
120 return 0; 113 return 0;
121 } 114 }
@@ -485,12 +478,6 @@ static int __init xenbus_probe_frontend_init(void)
485 478
486 register_xenstore_notifier(&xenstore_notifier); 479 register_xenstore_notifier(&xenstore_notifier);
487 480
488 if (xen_store_domain_type == XS_LOCAL) {
489 xenbus_frontend_wq = create_workqueue("xenbus_frontend");
490 if (!xenbus_frontend_wq)
491 pr_warn("create xenbus frontend workqueue failed, S3 resume is likely to fail\n");
492 }
493
494 return 0; 481 return 0;
495} 482}
496subsys_initcall(xenbus_probe_frontend_init); 483subsys_initcall(xenbus_probe_frontend_init);
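The xen-pciback and xenbus-frontend hunks above make the same substitution: a driver-private workqueue is dropped and the single pending work item is handed to the system workqueue, with flush_work() replacing flush_workqueue() on teardown. A minimal sketch of that pattern, with invented driver names rather than the ones in the patch:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_device {
        struct work_struct op_work;     /* previously queued on a private wq */
};

static void demo_op_handler(struct work_struct *work)
{
        struct demo_device *dev = container_of(work, struct demo_device, op_work);

        /* process the pending request for @dev */
        (void)dev;
}

static void demo_init(struct demo_device *dev)
{
        INIT_WORK(&dev->op_work, demo_op_handler);
}

static void demo_kick(struct demo_device *dev)
{
        /* hand the item to the system workqueue instead of a private one */
        schedule_work(&dev->op_work);
}

static void demo_disconnect(struct demo_device *dev)
{
        /* wait only for this item instead of flush_workqueue(private_wq) */
        flush_work(&dev->op_work);
}

Because schedule_work() targets the always-present system workqueue, the create_workqueue()/destroy_workqueue() lifecycle and its error paths (the -EFAULT returns removed above) disappear entirely.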
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 5063c5e796b7..23f1387b3ef7 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -29,6 +29,8 @@
29 */ 29 */
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/slab.h>
33#include <linux/vmalloc.h>
32 34
33#include <asm/xen/hypercall.h> 35#include <asm/xen/hypercall.h>
34#include <asm/xen/hypervisor.h> 36#include <asm/xen/hypervisor.h>
@@ -37,6 +39,7 @@
37#include <xen/page.h> 39#include <xen/page.h>
38#include <xen/interface/xen.h> 40#include <xen/interface/xen.h>
39#include <xen/interface/memory.h> 41#include <xen/interface/memory.h>
42#include <xen/balloon.h>
40 43
41typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data); 44typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
42 45
@@ -185,3 +188,77 @@ int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
185 return 0; 188 return 0;
186} 189}
187EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range); 190EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
191
192struct map_balloon_pages {
193 xen_pfn_t *pfns;
194 unsigned int idx;
195};
196
197static void setup_balloon_gfn(unsigned long gfn, void *data)
198{
199 struct map_balloon_pages *info = data;
200
201 info->pfns[info->idx++] = gfn;
202}
203
204/**
205 * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
206 * @gfns: returns the array of corresponding GFNs
207 * @virt: returns the virtual address of the mapped region
208 * @nr_grant_frames: number of GFNs
209 * @return 0 on success, error otherwise
210 *
211 * This allocates a set of ballooned pages and maps them into the
212 * kernel's address space.
213 */
214int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
215 unsigned long nr_grant_frames)
216{
217 struct page **pages;
218 xen_pfn_t *pfns;
219 void *vaddr;
220 struct map_balloon_pages data;
221 int rc;
222 unsigned long nr_pages;
223
224 BUG_ON(nr_grant_frames == 0);
225 nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
226 pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
227 if (!pages)
228 return -ENOMEM;
229
230 pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
231 if (!pfns) {
232 kfree(pages);
233 return -ENOMEM;
234 }
235 rc = alloc_xenballooned_pages(nr_pages, pages);
236 if (rc) {
237 pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
238 nr_pages, rc);
239 kfree(pages);
240 kfree(pfns);
241 return rc;
242 }
243
244 data.pfns = pfns;
245 data.idx = 0;
246 xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);
247
248 vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
249 if (!vaddr) {
250 pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
251 nr_pages, rc);
252 free_xenballooned_pages(nr_pages, pages);
253 kfree(pages);
254 kfree(pfns);
255 return -ENOMEM;
256 }
257 kfree(pages);
258
259 *gfns = pfns;
260 *virt = vaddr;
261
262 return 0;
263}
264EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
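Seen from a caller, the helper hands back one GFN per grant frame plus a contiguous kernel mapping of the ballooned pages. The fragment below is a hypothetical consumer (names invented here), relying only on the declaration this patch adds to xen/xen-ops.h:

#include <linux/init.h>
#include <linux/printk.h>
#include <xen/interface/xen.h>
#include <xen/xen-ops.h>

static xen_pfn_t *demo_gfns;    /* one GFN per grant frame */
static void *demo_vaddr;        /* kernel mapping of the ballooned pages */

static int __init demo_map_grant_frames(unsigned long nr_grant_frames)
{
        int rc;

        rc = xen_xlate_map_ballooned_pages(&demo_gfns, &demo_vaddr,
                                           nr_grant_frames);
        if (rc) {
                pr_warn("demo: mapping %lu grant frames failed: %d\n",
                        nr_grant_frames, rc);
                return rc;
        }

        /*
         * demo_gfns[0..nr_grant_frames-1] can now be handed to the
         * hypervisor (e.g. for grant-table setup), while demo_vaddr gives
         * the kernel a contiguous view of the same frames.
         */
        return 0;
}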
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 25a822f6f000..44fda64ad434 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -92,7 +92,6 @@ static inline void account_process_tick(struct task_struct *tsk, int user)
92extern void account_process_tick(struct task_struct *, int user); 92extern void account_process_tick(struct task_struct *, int user);
93#endif 93#endif
94 94
95extern void account_steal_ticks(unsigned long ticks);
96extern void account_idle_ticks(unsigned long ticks); 95extern void account_idle_ticks(unsigned long ticks);
97 96
98#endif /* _LINUX_KERNEL_STAT_H */ 97#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 901ec01c9fba..26c3302ae58f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -53,6 +53,8 @@ extern char __dtb_end[];
53extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, 53extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
54 int depth, void *data), 54 int depth, void *data),
55 void *data); 55 void *data);
56extern int of_get_flat_dt_subnode_by_name(unsigned long node,
57 const char *uname);
56extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, 58extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
57 int *size); 59 int *size);
58extern int of_flat_dt_is_compatible(unsigned long node, const char *name); 60extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
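The new accessor complements of_scan_flat_dt()/of_get_flat_dt_prop() when the parent node offset is already known. A hedged sketch of an early-boot lookup of the "uefi" subnode that Xen advertises (the property name used here is illustrative):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of_fdt.h>

/* Returns the offset of the "uefi" subnode, or a negative value if absent. */
static int __init demo_find_uefi_subnode(unsigned long hypervisor_node)
{
        const void *prop;
        int uefi_node, len;

        uefi_node = of_get_flat_dt_subnode_by_name(hypervisor_node, "uefi");
        if (uefi_node < 0)
                return uefi_node;       /* no UEFI parameters exposed */

        /* Individual parameters are then read with the existing helper. */
        prop = of_get_flat_dt_prop(uefi_node, "xen,uefi-system-table", &len);
        if (!prop)
                return -ENOENT;

        return uefi_node;
}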
diff --git a/include/uapi/xen/evtchn.h b/include/uapi/xen/evtchn.h
index 14e833ee4e0b..cb4aa4bb905e 100644
--- a/include/uapi/xen/evtchn.h
+++ b/include/uapi/xen/evtchn.h
@@ -85,4 +85,19 @@ struct ioctl_evtchn_notify {
85#define IOCTL_EVTCHN_RESET \ 85#define IOCTL_EVTCHN_RESET \
86 _IOC(_IOC_NONE, 'E', 5, 0) 86 _IOC(_IOC_NONE, 'E', 5, 0)
87 87
88/*
89 * Restrict this file descriptor so that it can only be used to bind
90 * new interdomain events from one domain.
91 *
92 * Once a file descriptor has been restricted it cannot be
93 * de-restricted, and must be closed and re-opened. Event channels
94 * which were bound before restricting remain bound afterwards, and
95 * can be notified as usual.
96 */
97#define IOCTL_EVTCHN_RESTRICT_DOMID \
98 _IOC(_IOC_NONE, 'E', 6, sizeof(struct ioctl_evtchn_restrict_domid))
99struct ioctl_evtchn_restrict_domid {
100 domid_t domid;
101};
102
88#endif /* __LINUX_PUBLIC_EVTCHN_H__ */ 103#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
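From user space the new ioctl is a one-way lockdown applied to an already opened /dev/xen/evtchn descriptor; a small sketch, assuming the UAPI header above is installed as <xen/evtchn.h>:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/evtchn.h>

/* Open an event-channel fd that may only bind ports offered by @domid. */
static int open_evtchn_restricted(uint16_t domid)
{
        struct ioctl_evtchn_restrict_domid restrict_domid = { .domid = domid };
        int fd = open("/dev/xen/evtchn", O_RDWR | O_CLOEXEC);

        if (fd < 0)
                return -1;

        if (ioctl(fd, IOCTL_EVTCHN_RESTRICT_DOMID, &restrict_domid) < 0) {
                perror("IOCTL_EVTCHN_RESTRICT_DOMID");  /* old kernel or bad domid */
                close(fd);
                return -1;
        }

        /*
         * Ports bound before the ioctl would stay usable; from here on,
         * new interdomain binds are limited to @domid until close(fd).
         */
        return fd;
}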
diff --git a/include/xen/interface/hvm/params.h b/include/xen/interface/hvm/params.h
index a6c79911e729..4d61fc58d99d 100644
--- a/include/xen/interface/hvm/params.h
+++ b/include/xen/interface/hvm/params.h
@@ -27,16 +27,44 @@
27 * Parameter space for HVMOP_{set,get}_param. 27 * Parameter space for HVMOP_{set,get}_param.
28 */ 28 */
29 29
30#define HVM_PARAM_CALLBACK_IRQ 0
30/* 31/*
31 * How should CPU0 event-channel notifications be delivered? 32 * How should CPU0 event-channel notifications be delivered?
32 * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
 33 *
33 * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
34 * Domain = val[47:32], Bus = val[31:16],
35 * DevFn = val[15: 8], IntX = val[ 1: 0]
36 * val[63:56] == 2: val[7:0] is a vector number.
37 * If val == 0 then CPU0 event-channel notifications are not delivered. 34 * If val == 0 then CPU0 event-channel notifications are not delivered.
35 * If val != 0, val[63:56] encodes the type, as follows:
38 */ 36 */
39#define HVM_PARAM_CALLBACK_IRQ 0
 37
38#define HVM_PARAM_CALLBACK_TYPE_GSI 0
39/*
40 * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
41 * and disables all notifications.
42 */
43
44#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
45/*
46 * val[55:0] is a delivery PCI INTx line:
47 * Domain = val[47:32], Bus = val[31:16] DevFn = val[15:8], IntX = val[1:0]
48 */
49
50#if defined(__i386__) || defined(__x86_64__)
51#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
52/*
53 * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
54 * if this delivery method is available.
55 */
56#elif defined(__arm__) || defined(__aarch64__)
57#define HVM_PARAM_CALLBACK_TYPE_PPI 2
58/*
59 * val[55:16] needs to be zero.
60 * val[15:8] is interrupt flag of the PPI used by event-channel:
61 * bit 8: the PPI is edge(1) or level(0) triggered
62 * bit 9: the PPI is active low(1) or high(0)
63 * val[7:0] is a PPI number used by event-channel.
64 * This is only used by ARM/ARM64 and masking/eoi the interrupt associated to
65 * the notification is handled by the interrupt controller.
66 */
67#endif
40 68
41#define HVM_PARAM_STORE_PFN 1 69#define HVM_PARAM_STORE_PFN 1
42#define HVM_PARAM_STORE_EVTCHN 2 70#define HVM_PARAM_STORE_EVTCHN 2
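All delivery types share the same layout: the type in val[63:56] and a type-specific payload below it. A sketch of composing the value for the ARM PPI case described above (the shift constant is defined locally for the sketch and is not part of this header; an ARM build is assumed):

#include <linux/types.h>
#include <xen/interface/hvm/params.h>

#define DEMO_CALLBACK_TYPE_SHIFT        56

/* Build the HVM_PARAM_CALLBACK_IRQ value for a PPI-delivered callback. */
static inline u64 demo_callback_via_ppi(unsigned int ppi, bool edge, bool active_low)
{
        u64 via = (u64)HVM_PARAM_CALLBACK_TYPE_PPI << DEMO_CALLBACK_TYPE_SHIFT;

        via |= (edge ? 1ULL : 0ULL) << 8;       /* bit 8: edge(1) / level(0) */
        via |= (active_low ? 1ULL : 0ULL) << 9; /* bit 9: active low(1) / high(0) */
        via |= ppi & 0xffULL;                   /* val[7:0]: PPI number */

        return via;     /* handed to the hypervisor via HVM_PARAM_CALLBACK_IRQ */
}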
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 2ecfe4f700d9..9aa8988cb340 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -160,6 +160,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t);
160#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom, 160#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
161 * XENMEM_add_to_physmap_range only. 161 * XENMEM_add_to_physmap_range only.
162 */ 162 */
163#define XENMAPSPACE_dev_mmio 5 /* device mmio region */
163 164
164/* 165/*
165 * Sets the GPFN at which a particular page appears in the specified guest's 166 * Sets the GPFN at which a particular page appears in the specified guest's
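The new map space is used through XENMEM_add_to_physmap_range like the existing ones. The sketch below maps a single device MMIO frame 1:1 into a guest, roughly what a Dom0 passthrough path would do; it is not code from this patch:

#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>

/* Map one device MMIO frame into @domid at the same GFN. */
static int demo_map_dev_mmio(domid_t domid, xen_pfn_t gfn)
{
        xen_ulong_t idx = gfn;
        xen_pfn_t gpfn = gfn;
        int err = 0;
        struct xen_add_to_physmap_range xatp = {
                .domid = domid,
                .space = XENMAPSPACE_dev_mmio,
                .size = 1,
        };

        set_xen_guest_handle(xatp.idxs, &idx);
        set_xen_guest_handle(xatp.gpfns, &gpfn);
        set_xen_guest_handle(xatp.errs, &err);

        return HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
}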
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index b05288ce3991..98188c87f5c1 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -75,15 +75,21 @@
75 */ 75 */
76#define VCPUOP_get_runstate_info 4 76#define VCPUOP_get_runstate_info 4
77struct vcpu_runstate_info { 77struct vcpu_runstate_info {
78 /* VCPU's current state (RUNSTATE_*). */ 78 /* VCPU's current state (RUNSTATE_*). */
79 int state; 79 int state;
80 /* When was current state entered (system time, ns)? */ 80 /* When was current state entered (system time, ns)? */
81 uint64_t state_entry_time; 81 uint64_t state_entry_time;
82 /* 82 /*
 83	 * Time spent in each RUNSTATE_* (ns). The sum of these times is
 84	 * guaranteed not to drift from system time.
 85	 */
 86	uint64_t time[4];
 83	 * Update indicator set in state_entry_time:
 84	 * When activated via VMASST_TYPE_runstate_update_flag, set during
 85	 * updates in guest memory mapped copy of vcpu_runstate_info.
 86	 */
87#define XEN_RUNSTATE_UPDATE (1ULL << 63)
88 /*
89 * Time spent in each RUNSTATE_* (ns). The sum of these times is
90 * guaranteed not to drift from system time.
91 */
92 uint64_t time[4];
87}; 93};
88DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info); 94DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
89 95
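The flag is only useful if readers of a guest-memory mapped runstate area retry around it. A hedged sketch of such a reader, assuming the per-vCPU mapping was registered elsewhere:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <xen/interface/vcpu.h>

/* Copy a self-consistent snapshot of a live, hypervisor-updated runstate. */
static void demo_runstate_snapshot(const struct vcpu_runstate_info *runstate,
                                   struct vcpu_runstate_info *res)
{
        u64 entry;

        do {
                entry = READ_ONCE(runstate->state_entry_time);
                rmb();          /* read the entry time before the payload */
                *res = *runstate;
                rmb();          /* read the payload before re-checking */
        } while ((entry & XEN_RUNSTATE_UPDATE) ||
                 entry != READ_ONCE(runstate->state_entry_time));

        /* Callers usually want the flag stripped from the reported value. */
        res->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
}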
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index d1331121c0bd..1b0d189cd3d3 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -413,7 +413,22 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
413/* x86/PAE guests: support PDPTs above 4GB. */ 413/* x86/PAE guests: support PDPTs above 4GB. */
414#define VMASST_TYPE_pae_extended_cr3 3 414#define VMASST_TYPE_pae_extended_cr3 3
415 415
416#define MAX_VMASST_TYPE 3
 416/*
417 * x86 guests: Sane behaviour for virtual iopl
418 * - virtual iopl updated from do_iret() hypercalls.
419 * - virtual iopl reported in bounce frames.
420 * - guest kernels assumed to be level 0 for the purpose of iopl checks.
421 */
422#define VMASST_TYPE_architectural_iopl 4
423
424/*
425 * All guests: activate update indicator in vcpu_runstate_info
426 * Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped
427 * vcpu_runstate_info during updates of the runstate information.
428 */
429#define VMASST_TYPE_runstate_update_flag 5
430
431#define MAX_VMASST_TYPE 5
417 432
418#ifndef __ASSEMBLY__ 433#ifndef __ASSEMBLY__
419 434
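A guest has to opt in via VM_ASSIST before the hypervisor starts setting the flag. A short sketch using the x86 hypercall wrapper name; a hypervisor that predates the assist simply fails the call:

#include <linux/printk.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>

static void demo_enable_runstate_update_flag(void)
{
        /* Older hypervisors reject unknown VMASST types; treat that as soft. */
        if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                 VMASST_TYPE_runstate_update_flag))
                pr_warn_once("Xen: runstate update flag not supported\n");
}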
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 86abe07b20ec..9a37c541822f 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -9,6 +9,12 @@
9 9
10DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); 10DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
11 11
12DECLARE_PER_CPU(int, xen_vcpu_id);
13static inline int xen_vcpu_nr(int cpu)
14{
15 return per_cpu(xen_vcpu_id, cpu);
16}
17
12void xen_arch_pre_suspend(void); 18void xen_arch_pre_suspend(void);
13void xen_arch_post_suspend(int suspend_cancelled); 19void xen_arch_post_suspend(int suspend_cancelled);
14 20
@@ -21,7 +27,9 @@ void xen_resume_notifier_unregister(struct notifier_block *nb);
21 27
22bool xen_vcpu_stolen(int vcpu); 28bool xen_vcpu_stolen(int vcpu);
23void xen_setup_runstate_info(int cpu); 29void xen_setup_runstate_info(int cpu);
30void xen_time_setup_guest(void);
24void xen_get_runstate_snapshot(struct vcpu_runstate_info *res); 31void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
32u64 xen_steal_clock(int cpu);
25 33
26int xen_setup_shutdown_event(void); 34int xen_setup_shutdown_event(void);
27 35
@@ -85,17 +93,33 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
85 struct page **pages); 93 struct page **pages);
86int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, 94int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
87 int nr, struct page **pages); 95 int nr, struct page **pages);
96int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
97 unsigned long nr_grant_frames);
88 98
89bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); 99bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
90 100
 91#ifdef CONFIG_XEN_EFI
 92extern efi_system_table_t *xen_efi_probe(void);
 93#else
 94static inline efi_system_table_t __init *xen_efi_probe(void)
 95{
 96	return NULL;
 97}
 98#endif
101efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc);
102efi_status_t xen_efi_set_time(efi_time_t *tm);
103efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
104				     efi_time_t *tm);
105efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm);
106efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
107				  u32 *attr, unsigned long *data_size,
108				  void *data);
109efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
110 efi_char16_t *name, efi_guid_t *vendor);
111efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
112 u32 attr, unsigned long data_size,
113 void *data);
114efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
115 u64 *remaining_space,
116 u64 *max_variable_size);
117efi_status_t xen_efi_get_next_high_mono_count(u32 *count);
118efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
119 unsigned long count, unsigned long sg_list);
120efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
121 unsigned long count, u64 *max_size,
122 int *reset_type);
99 123
100#ifdef CONFIG_PREEMPT 124#ifdef CONFIG_PREEMPT
101 125
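xen_vcpu_nr() exists because the Linux CPU number and Xen's vCPU id may differ, so it is interposed on VCPUOP-style hypercalls. A sketch of the pattern, registering a per-CPU runstate area (the per-CPU variable is invented for the example):

#include <linux/percpu.h>
#include <linux/printk.h>
#include <xen/interface/vcpu.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypercall.h>

static DEFINE_PER_CPU(struct vcpu_runstate_info, demo_runstate);

static void demo_register_runstate(int cpu)
{
        struct vcpu_register_runstate_memory_area area;

        area.addr.v = &per_cpu(demo_runstate, cpu);

        /* Translate the Linux CPU number into Xen's vCPU id for the hypercall. */
        if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                               xen_vcpu_nr(cpu), &area))
                pr_warn("Xen: runstate registration failed for CPU%d\n", cpu);
}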
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ea0f6f31a244..1934f658c036 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -503,16 +503,6 @@ void account_process_tick(struct task_struct *p, int user_tick)
503} 503}
504 504
505/* 505/*
506 * Account multiple ticks of steal time.
507 * @p: the process from which the cpu time has been stolen
508 * @ticks: number of stolen ticks
509 */
510void account_steal_ticks(unsigned long ticks)
511{
512 account_steal_time(jiffies_to_cputime(ticks));
513}
514
515/*
516 * Account multiple ticks of idle time. 506 * Account multiple ticks of idle time.
517 * @ticks: number of stolen ticks 507 * @ticks: number of stolen ticks
518 */ 508 */