aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-12-13 19:07:55 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-13 19:07:55 -0500
commitaa3ecf388adc90bde90776bba71a7f2d278fc4e3 (patch)
treeeab9a54822048f3cb4a280ab70d3eaea3c1c4e14
parentb5cab0da75c292ffa0fbd68dd2c820066b2842de (diff)
parent709613ad2b3c9eaeb2a3e24284b7c8feffc17326 (diff)
Merge tag 'for-linus-4.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross: "Xen features and fixes for 4.10 These are some fixes, a move of some arm related headers to share them between arm and arm64 and a series introducing a helper to make code more readable. The most notable change is David stepping down as maintainer of the Xen hypervisor interface. This results in me sending you the pull requests for Xen related code from now on" * tag 'for-linus-4.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (29 commits) xen/balloon: Only mark a page as managed when it is released xenbus: fix deadlock on writes to /proc/xen/xenbus xen/scsifront: don't request a slot on the ring until request is ready xen/x86: Increase xen_e820_map to E820_X_MAX possible entries x86: Make E820_X_MAX unconditionally larger than E820MAX xen/pci: Bubble up error and fix description. xen: xenbus: set error code on failure xen: set error code on failures arm/xen: Use alloc_percpu rather than __alloc_percpu arm/arm64: xen: Move shared architecture headers to include/xen/arm xen/events: use xen_vcpu_id mapping for EVTCHNOP_status xen/gntdev: Use VM_MIXEDMAP instead of VM_IO to avoid NUMA balancing xen-scsifront: Add a missing call to kfree MAINTAINERS: update XEN HYPERVISOR INTERFACE xenfs: Use proc_create_mount_point() to create /proc/xen xen-platform: use builtin_pci_driver xen-netback: fix error handling output xen: make use of xenbus_read_unsigned() in xenbus xen: make use of xenbus_read_unsigned() in xen-pciback xen: make use of xenbus_read_unsigned() in xen-fbfront ...
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/arm/include/asm/xen/hypercall.h88
-rw-r--r--arch/arm/include/asm/xen/hypervisor.h40
-rw-r--r--arch/arm/include/asm/xen/interface.h86
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h99
-rw-r--r--arch/arm/include/asm/xen/page.h123
-rw-r--r--arch/arm/xen/enlighten.c3
-rw-r--r--arch/arm64/include/asm/xen/hypercall.h2
-rw-r--r--arch/arm64/include/asm/xen/hypervisor.h2
-rw-r--r--arch/arm64/include/asm/xen/interface.h2
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h2
-rw-r--r--arch/arm64/include/asm/xen/page.h2
-rw-r--r--arch/x86/include/asm/e820.h12
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--drivers/block/xen-blkback/xenbus.c36
-rw-r--r--drivers/block/xen-blkfront.c81
-rw-r--r--drivers/char/tpm/xen-tpmfront.c8
-rw-r--r--drivers/input/misc/xen-kbdfront.c13
-rw-r--r--drivers/net/xen-netback/xenbus.c52
-rw-r--r--drivers/net/xen-netfront.c67
-rw-r--r--drivers/pci/xen-pcifront.c6
-rw-r--r--drivers/scsi/xen-scsifront.c193
-rw-r--r--drivers/video/fbdev/xen-fbfront.c13
-rw-r--r--drivers/xen/balloon.c6
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/gntalloc.c9
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/platform-pci.c6
-rw-r--r--drivers/xen/xen-pciback/xenbus.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c22
-rw-r--r--fs/proc/generic.c1
-rw-r--r--fs/proc/internal.h1
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/xen/arm/hypercall.h87
-rw-r--r--include/xen/arm/hypervisor.h39
-rw-r--r--include/xen/arm/interface.h85
-rw-r--r--include/xen/arm/page-coherent.h98
-rw-r--r--include/xen/arm/page.h122
-rw-r--r--include/xen/xenbus.h4
43 files changed, 669 insertions, 780 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index bbb445050b04..3d3ed12b2c29 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13354,7 +13354,6 @@ F: drivers/media/tuners/tuner-xc2028.*
13354 13354
13355XEN HYPERVISOR INTERFACE 13355XEN HYPERVISOR INTERFACE
13356M: Boris Ostrovsky <boris.ostrovsky@oracle.com> 13356M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
13357M: David Vrabel <david.vrabel@citrix.com>
13358M: Juergen Gross <jgross@suse.com> 13357M: Juergen Gross <jgross@suse.com>
13359L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 13358L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
13360T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git 13359T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 9d874db13c0e..3522cbaed316 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1,87 +1 @@
1/****************************************************************************** #include <xen/arm/hypercall.h>
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_ARM_XEN_HYPERCALL_H
34#define _ASM_ARM_XEN_HYPERCALL_H
35
36#include <linux/bug.h>
37
38#include <xen/interface/xen.h>
39#include <xen/interface/sched.h>
40#include <xen/interface/platform.h>
41
42long privcmd_call(unsigned call, unsigned long a1,
43 unsigned long a2, unsigned long a3,
44 unsigned long a4, unsigned long a5);
45int HYPERVISOR_xen_version(int cmd, void *arg);
46int HYPERVISOR_console_io(int cmd, int count, char *str);
47int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
48int HYPERVISOR_sched_op(int cmd, void *arg);
49int HYPERVISOR_event_channel_op(int cmd, void *arg);
50unsigned long HYPERVISOR_hvm_op(int op, void *arg);
51int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
52int HYPERVISOR_physdev_op(int cmd, void *arg);
53int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
54int HYPERVISOR_tmem_op(void *arg);
55int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
56int HYPERVISOR_platform_op_raw(void *arg);
57static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
58{
59 op->interface_version = XENPF_INTERFACE_VERSION;
60 return HYPERVISOR_platform_op_raw(op);
61}
62int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
63
64static inline int
65HYPERVISOR_suspend(unsigned long start_info_mfn)
66{
67 struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
68
69 /* start_info_mfn is unused on ARM */
70 return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
71}
72
73static inline void
74MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
75 unsigned int new_val, unsigned long flags)
76{
77 BUG();
78}
79
80static inline void
81MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
82 int count, int *success_count, domid_t domid)
83{
84 BUG();
85}
86
87#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 95251512e2c4..d6e7709d0688 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,39 +1 @@
1#ifndef _ASM_ARM_XEN_HYPERVISOR_H #include <xen/arm/hypervisor.h>
2#define _ASM_ARM_XEN_HYPERVISOR_H
3
4#include <linux/init.h>
5
6extern struct shared_info *HYPERVISOR_shared_info;
7extern struct start_info *xen_start_info;
8
9/* Lazy mode for batching updates / context switch */
10enum paravirt_lazy_mode {
11 PARAVIRT_LAZY_NONE,
12 PARAVIRT_LAZY_MMU,
13 PARAVIRT_LAZY_CPU,
14};
15
16static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
17{
18 return PARAVIRT_LAZY_NONE;
19}
20
21extern struct dma_map_ops *xen_dma_ops;
22
23#ifdef CONFIG_XEN
24void __init xen_early_init(void);
25#else
26static inline void xen_early_init(void) { return; }
27#endif
28
29#ifdef CONFIG_HOTPLUG_CPU
30static inline void xen_arch_register_cpu(int num)
31{
32}
33
34static inline void xen_arch_unregister_cpu(int num)
35{
36}
37#endif
38
39#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 75d596862892..88c0d75da190 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -1,85 +1 @@
1/****************************************************************************** #include <xen/arm/interface.h>
2 * Guest OS interface to ARM Xen.
3 *
4 * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
5 */
6
7#ifndef _ASM_ARM_XEN_INTERFACE_H
8#define _ASM_ARM_XEN_INTERFACE_H
9
10#include <linux/types.h>
11
12#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
13
14#define __DEFINE_GUEST_HANDLE(name, type) \
15 typedef struct { union { type *p; uint64_aligned_t q; }; } \
16 __guest_handle_ ## name
17
18#define DEFINE_GUEST_HANDLE_STRUCT(name) \
19 __DEFINE_GUEST_HANDLE(name, struct name)
20#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
21#define GUEST_HANDLE(name) __guest_handle_ ## name
22
23#define set_xen_guest_handle(hnd, val) \
24 do { \
25 if (sizeof(hnd) == 8) \
26 *(uint64_t *)&(hnd) = 0; \
27 (hnd).p = val; \
28 } while (0)
29
30#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
31
32#ifndef __ASSEMBLY__
33/* Explicitly size integers that represent pfns in the interface with
34 * Xen so that we can have one ABI that works for 32 and 64 bit guests.
35 * Note that this means that the xen_pfn_t type may be capable of
36 * representing pfn's which the guest cannot represent in its own pfn
37 * type. However since pfn space is controlled by the guest this is
38 * fine since it simply wouldn't be able to create any sure pfns in
39 * the first place.
40 */
41typedef uint64_t xen_pfn_t;
42#define PRI_xen_pfn "llx"
43typedef uint64_t xen_ulong_t;
44#define PRI_xen_ulong "llx"
45typedef int64_t xen_long_t;
46#define PRI_xen_long "llx"
47/* Guest handles for primitive C types. */
48__DEFINE_GUEST_HANDLE(uchar, unsigned char);
49__DEFINE_GUEST_HANDLE(uint, unsigned int);
50DEFINE_GUEST_HANDLE(char);
51DEFINE_GUEST_HANDLE(int);
52DEFINE_GUEST_HANDLE(void);
53DEFINE_GUEST_HANDLE(uint64_t);
54DEFINE_GUEST_HANDLE(uint32_t);
55DEFINE_GUEST_HANDLE(xen_pfn_t);
56DEFINE_GUEST_HANDLE(xen_ulong_t);
57
58/* Maximum number of virtual CPUs in multi-processor guests. */
59#define MAX_VIRT_CPUS 1
60
61struct arch_vcpu_info { };
62struct arch_shared_info { };
63
64/* TODO: Move pvclock definitions some place arch independent */
65struct pvclock_vcpu_time_info {
66 u32 version;
67 u32 pad0;
68 u64 tsc_timestamp;
69 u64 system_time;
70 u32 tsc_to_system_mul;
71 s8 tsc_shift;
72 u8 flags;
73 u8 pad[2];
74} __attribute__((__packed__)); /* 32 bytes */
75
76/* It is OK to have a 12 bytes struct with no padding because it is packed */
77struct pvclock_wall_clock {
78 u32 version;
79 u32 sec;
80 u32 nsec;
81 u32 sec_hi;
82} __attribute__((__packed__));
83#endif
84
85#endif /* _ASM_ARM_XEN_INTERFACE_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 95ce6ac3a971..b3ef061d8b74 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,98 +1 @@
1#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H #include <xen/arm/page-coherent.h>
2#define _ASM_ARM_XEN_PAGE_COHERENT_H
3
4#include <asm/page.h>
5#include <linux/dma-mapping.h>
6
7void __xen_dma_map_page(struct device *hwdev, struct page *page,
8 dma_addr_t dev_addr, unsigned long offset, size_t size,
9 enum dma_data_direction dir, unsigned long attrs);
10void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
11 size_t size, enum dma_data_direction dir,
12 unsigned long attrs);
13void __xen_dma_sync_single_for_cpu(struct device *hwdev,
14 dma_addr_t handle, size_t size, enum dma_data_direction dir);
15
16void __xen_dma_sync_single_for_device(struct device *hwdev,
17 dma_addr_t handle, size_t size, enum dma_data_direction dir);
18
19static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
20 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
21{
22 return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
23}
24
25static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
26 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
27{
28 __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
29}
30
31static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
32 dma_addr_t dev_addr, unsigned long offset, size_t size,
33 enum dma_data_direction dir, unsigned long attrs)
34{
35 unsigned long page_pfn = page_to_xen_pfn(page);
36 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
37 unsigned long compound_pages =
38 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
39 bool local = (page_pfn <= dev_pfn) &&
40 (dev_pfn - page_pfn < compound_pages);
41
42 /*
43 * Dom0 is mapped 1:1, while the Linux page can span across
44 * multiple Xen pages, it's not possible for it to contain a
45 * mix of local and foreign Xen pages. So if the first xen_pfn
46 * == mfn the page is local otherwise it's a foreign page
47 * grant-mapped in dom0. If the page is local we can safely
48 * call the native dma_ops function, otherwise we call the xen
49 * specific function.
50 */
51 if (local)
52 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
53 else
54 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
55}
56
57static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
58 size_t size, enum dma_data_direction dir, unsigned long attrs)
59{
60 unsigned long pfn = PFN_DOWN(handle);
61 /*
62 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
63 * multiple Xen page, it's not possible to have a mix of local and
64 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
65 * foreign mfn will always return false. If the page is local we can
66 * safely call the native dma_ops function, otherwise we call the xen
67 * specific function.
68 */
69 if (pfn_valid(pfn)) {
70 if (__generic_dma_ops(hwdev)->unmap_page)
71 __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
72 } else
73 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
74}
75
76static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
77 dma_addr_t handle, size_t size, enum dma_data_direction dir)
78{
79 unsigned long pfn = PFN_DOWN(handle);
80 if (pfn_valid(pfn)) {
81 if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
82 __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
83 } else
84 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
85}
86
87static inline void xen_dma_sync_single_for_device(struct device *hwdev,
88 dma_addr_t handle, size_t size, enum dma_data_direction dir)
89{
90 unsigned long pfn = PFN_DOWN(handle);
91 if (pfn_valid(pfn)) {
92 if (__generic_dma_ops(hwdev)->sync_single_for_device)
93 __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
94 } else
95 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
96}
97
98#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 415dbc6e43fd..31bbc803cecb 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,122 +1 @@
1#ifndef _ASM_ARM_XEN_PAGE_H #include <xen/arm/page.h>
2#define _ASM_ARM_XEN_PAGE_H
3
4#include <asm/page.h>
5#include <asm/pgtable.h>
6
7#include <linux/pfn.h>
8#include <linux/types.h>
9#include <linux/dma-mapping.h>
10
11#include <xen/xen.h>
12#include <xen/interface/grant_table.h>
13
14#define phys_to_machine_mapping_valid(pfn) (1)
15
16/* Xen machine address */
17typedef struct xmaddr {
18 phys_addr_t maddr;
19} xmaddr_t;
20
21/* Xen pseudo-physical address */
22typedef struct xpaddr {
23 phys_addr_t paddr;
24} xpaddr_t;
25
26#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
27#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
28
29#define INVALID_P2M_ENTRY (~0UL)
30
31/*
32 * The pseudo-physical frame (pfn) used in all the helpers is always based
33 * on Xen page granularity (i.e 4KB).
34 *
35 * A Linux page may be split across multiple non-contiguous Xen page so we
36 * have to keep track with frame based on 4KB page granularity.
37 *
38 * PV drivers should never make a direct usage of those helpers (particularly
39 * pfn_to_gfn and gfn_to_pfn).
40 */
41
42unsigned long __pfn_to_mfn(unsigned long pfn);
43extern struct rb_root phys_to_mach;
44
45/* Pseudo-physical <-> Guest conversion */
46static inline unsigned long pfn_to_gfn(unsigned long pfn)
47{
48 return pfn;
49}
50
51static inline unsigned long gfn_to_pfn(unsigned long gfn)
52{
53 return gfn;
54}
55
56/* Pseudo-physical <-> BUS conversion */
57static inline unsigned long pfn_to_bfn(unsigned long pfn)
58{
59 unsigned long mfn;
60
61 if (phys_to_mach.rb_node != NULL) {
62 mfn = __pfn_to_mfn(pfn);
63 if (mfn != INVALID_P2M_ENTRY)
64 return mfn;
65 }
66
67 return pfn;
68}
69
70static inline unsigned long bfn_to_pfn(unsigned long bfn)
71{
72 return bfn;
73}
74
75#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
76
77/* VIRT <-> GUEST conversion */
78#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
79#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
80
81/* Only used in PV code. But ARM guests are always HVM. */
82static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
83{
84 BUG();
85}
86
87/* TODO: this shouldn't be here but it is because the frontend drivers
88 * are using it (its rolled in headers) even though we won't hit the code path.
89 * So for right now just punt with this.
90 */
91static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
92{
93 BUG();
94 return NULL;
95}
96
97extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
98 struct gnttab_map_grant_ref *kmap_ops,
99 struct page **pages, unsigned int count);
100
101extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
102 struct gnttab_unmap_grant_ref *kunmap_ops,
103 struct page **pages, unsigned int count);
104
105bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
106bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
107 unsigned long nr_pages);
108
109static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
110{
111 return __set_phys_to_machine(pfn, mfn);
112}
113
114#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
115#define xen_unmap(cookie) iounmap((cookie))
116
117bool xen_arch_need_swiotlb(struct device *dev,
118 phys_addr_t phys,
119 dma_addr_t dev_addr);
120unsigned long xen_get_swiotlb_free_pages(unsigned int order);
121
122#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f193414d0f6f..4986dc0c1dff 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -372,8 +372,7 @@ static int __init xen_guest_init(void)
372 * for secondary CPUs as they are brought up. 372 * for secondary CPUs as they are brought up.
373 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. 373 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
374 */ 374 */
375 xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info), 375 xen_vcpu_info = alloc_percpu(struct vcpu_info);
376 sizeof(struct vcpu_info));
377 if (xen_vcpu_info == NULL) 376 if (xen_vcpu_info == NULL)
378 return -ENOMEM; 377 return -ENOMEM;
379 378
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
index 74b0c423ff5b..3522cbaed316 100644
--- a/arch/arm64/include/asm/xen/hypercall.h
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -1 +1 @@
#include <../../arm/include/asm/xen/hypercall.h> #include <xen/arm/hypercall.h>
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h
index f263da8e8769..d6e7709d0688 100644
--- a/arch/arm64/include/asm/xen/hypervisor.h
+++ b/arch/arm64/include/asm/xen/hypervisor.h
@@ -1 +1 @@
#include <../../arm/include/asm/xen/hypervisor.h> #include <xen/arm/hypervisor.h>
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h
index 44457aebeed4..88c0d75da190 100644
--- a/arch/arm64/include/asm/xen/interface.h
+++ b/arch/arm64/include/asm/xen/interface.h
@@ -1 +1 @@
#include <../../arm/include/asm/xen/interface.h> #include <xen/arm/interface.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2052102b4e02..b3ef061d8b74 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1 @@
#include <../../arm/include/asm/xen/page-coherent.h> #include <xen/arm/page-coherent.h>
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h
index bed87ec36780..31bbc803cecb 100644
--- a/arch/arm64/include/asm/xen/page.h
+++ b/arch/arm64/include/asm/xen/page.h
@@ -1 +1 @@
#include <../../arm/include/asm/xen/page.h> #include <xen/arm/page.h>
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 476b574de99e..ec23d8e1297c 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,13 +1,17 @@
1#ifndef _ASM_X86_E820_H 1#ifndef _ASM_X86_E820_H
2#define _ASM_X86_E820_H 2#define _ASM_X86_E820_H
3 3
4#ifdef CONFIG_EFI 4/*
5 * E820_X_MAX is the maximum size of the extended E820 table. The extended
6 * table may contain up to 3 extra E820 entries per possible NUMA node, so we
7 * make room for 3 * MAX_NUMNODES possible entries, beyond the standard 128.
8 * Also note that E820_X_MAX *must* be defined before we include uapi/asm/e820.h.
9 */
5#include <linux/numa.h> 10#include <linux/numa.h>
6#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES) 11#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
7#else /* ! CONFIG_EFI */ 12
8#define E820_X_MAX E820MAX
9#endif
10#include <uapi/asm/e820.h> 13#include <uapi/asm/e820.h>
14
11#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
12/* see comment in arch/x86/kernel/e820.c */ 16/* see comment in arch/x86/kernel/e820.c */
13extern struct e820map *e820; 17extern struct e820map *e820;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab98077a..e1fb269c87af 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -264,8 +264,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
264 return 0; 264 return 0;
265 265
266error: 266error:
267 dev_err(&dev->dev, 267 dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
268 "Xen PCI frontend has not registered MSI/MSI-X support!\n"); 268 type == PCI_CAP_ID_MSI ? "" : "-X", irq);
269 return irq; 269 return irq;
270} 270}
271 271
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8960fca0827..8c394e30e5fe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -41,7 +41,7 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
41unsigned long xen_released_pages; 41unsigned long xen_released_pages;
42 42
43/* E820 map used during setting up memory. */ 43/* E820 map used during setting up memory. */
44static struct e820entry xen_e820_map[E820MAX] __initdata; 44static struct e820entry xen_e820_map[E820_X_MAX] __initdata;
45static u32 xen_e820_map_entries __initdata; 45static u32 xen_e820_map_entries __initdata;
46 46
47/* 47/*
@@ -750,7 +750,7 @@ char * __init xen_memory_setup(void)
750 max_pfn = min(max_pfn, xen_start_info->nr_pages); 750 max_pfn = min(max_pfn, xen_start_info->nr_pages);
751 mem_end = PFN_PHYS(max_pfn); 751 mem_end = PFN_PHYS(max_pfn);
752 752
753 memmap.nr_entries = E820MAX; 753 memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
754 set_xen_guest_handle(memmap.buffer, xen_e820_map); 754 set_xen_guest_handle(memmap.buffer, xen_e820_map);
755 755
756 op = xen_initial_domain() ? 756 op = xen_initial_domain() ?
@@ -923,7 +923,7 @@ char * __init xen_auto_xlated_memory_setup(void)
923 int i; 923 int i;
924 int rc; 924 int rc;
925 925
926 memmap.nr_entries = E820MAX; 926 memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
927 set_xen_guest_handle(memmap.buffer, xen_e820_map); 927 set_xen_guest_handle(memmap.buffer, xen_e820_map);
928 928
929 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); 929 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d86f1e..415e79b69d34 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -533,13 +533,11 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
533 struct xenbus_device *dev = be->dev; 533 struct xenbus_device *dev = be->dev;
534 struct xen_blkif *blkif = be->blkif; 534 struct xen_blkif *blkif = be->blkif;
535 int err; 535 int err;
536 int state = 0, discard_enable; 536 int state = 0;
537 struct block_device *bdev = be->blkif->vbd.bdev; 537 struct block_device *bdev = be->blkif->vbd.bdev;
538 struct request_queue *q = bdev_get_queue(bdev); 538 struct request_queue *q = bdev_get_queue(bdev);
539 539
540 err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d", 540 if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
541 &discard_enable);
542 if (err == 1 && !discard_enable)
543 return; 541 return;
544 542
545 if (blk_queue_discard(q)) { 543 if (blk_queue_discard(q)) {
@@ -1039,30 +1037,24 @@ static int connect_ring(struct backend_info *be)
1039 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); 1037 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
1040 return -ENOSYS; 1038 return -ENOSYS;
1041 } 1039 }
1042 err = xenbus_scanf(XBT_NIL, dev->otherend, 1040 pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
1043 "feature-persistent", "%u", &pers_grants); 1041 0);
1044 if (err <= 0)
1045 pers_grants = 0;
1046
1047 be->blkif->vbd.feature_gnt_persistent = pers_grants; 1042 be->blkif->vbd.feature_gnt_persistent = pers_grants;
1048 be->blkif->vbd.overflow_max_grants = 0; 1043 be->blkif->vbd.overflow_max_grants = 0;
1049 1044
1050 /* 1045 /*
1051 * Read the number of hardware queues from frontend. 1046 * Read the number of hardware queues from frontend.
1052 */ 1047 */
1053 err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues", 1048 requested_num_queues = xenbus_read_unsigned(dev->otherend,
1054 "%u", &requested_num_queues); 1049 "multi-queue-num-queues",
1055 if (err < 0) { 1050 1);
1056 requested_num_queues = 1; 1051 if (requested_num_queues > xenblk_max_queues
1057 } else { 1052 || requested_num_queues == 0) {
1058 if (requested_num_queues > xenblk_max_queues 1053 /* Buggy or malicious guest. */
1059 || requested_num_queues == 0) { 1054 xenbus_dev_fatal(dev, err,
1060 /* Buggy or malicious guest. */ 1055 "guest requested %u queues, exceeding the maximum of %u.",
1061 xenbus_dev_fatal(dev, err, 1056 requested_num_queues, xenblk_max_queues);
1062 "guest requested %u queues, exceeding the maximum of %u.", 1057 return -ENOSYS;
1063 requested_num_queues, xenblk_max_queues);
1064 return -ENOSYS;
1065 }
1066 } 1058 }
1067 be->blkif->nr_rings = requested_num_queues; 1059 be->blkif->nr_rings = requested_num_queues;
1068 if (xen_blkif_alloc_rings(be->blkif)) 1060 if (xen_blkif_alloc_rings(be->blkif))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c000fdf048b2..b2bdfa81f929 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1758,17 +1758,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
1758 const char *message = NULL; 1758 const char *message = NULL;
1759 struct xenbus_transaction xbt; 1759 struct xenbus_transaction xbt;
1760 int err; 1760 int err;
1761 unsigned int i, max_page_order = 0; 1761 unsigned int i, max_page_order;
1762 unsigned int ring_page_order = 0; 1762 unsigned int ring_page_order;
1763 1763
1764 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1764 max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1765 "max-ring-page-order", "%u", &max_page_order); 1765 "max-ring-page-order", 0);
1766 if (err != 1) 1766 ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1767 info->nr_ring_pages = 1; 1767 info->nr_ring_pages = 1 << ring_page_order;
1768 else {
1769 ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1770 info->nr_ring_pages = 1 << ring_page_order;
1771 }
1772 1768
1773 for (i = 0; i < info->nr_rings; i++) { 1769 for (i = 0; i < info->nr_rings; i++) {
1774 struct blkfront_ring_info *rinfo = &info->rinfo[i]; 1770 struct blkfront_ring_info *rinfo = &info->rinfo[i];
@@ -1877,18 +1873,14 @@ again:
1877 1873
1878static int negotiate_mq(struct blkfront_info *info) 1874static int negotiate_mq(struct blkfront_info *info)
1879{ 1875{
1880 unsigned int backend_max_queues = 0; 1876 unsigned int backend_max_queues;
1881 int err;
1882 unsigned int i; 1877 unsigned int i;
1883 1878
1884 BUG_ON(info->nr_rings); 1879 BUG_ON(info->nr_rings);
1885 1880
1886 /* Check if backend supports multiple queues. */ 1881 /* Check if backend supports multiple queues. */
1887 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1882 backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1888 "multi-queue-max-queues", "%u", &backend_max_queues); 1883 "multi-queue-max-queues", 1);
1889 if (err < 0)
1890 backend_max_queues = 1;
1891
1892 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues); 1884 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1893 /* We need at least one ring. */ 1885 /* We need at least one ring. */
1894 if (!info->nr_rings) 1886 if (!info->nr_rings)
@@ -2196,7 +2188,6 @@ static void blkfront_setup_discard(struct blkfront_info *info)
2196 int err; 2188 int err;
2197 unsigned int discard_granularity; 2189 unsigned int discard_granularity;
2198 unsigned int discard_alignment; 2190 unsigned int discard_alignment;
2199 unsigned int discard_secure;
2200 2191
2201 info->feature_discard = 1; 2192 info->feature_discard = 1;
2202 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 2193 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
@@ -2207,10 +2198,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
2207 info->discard_granularity = discard_granularity; 2198 info->discard_granularity = discard_granularity;
2208 info->discard_alignment = discard_alignment; 2199 info->discard_alignment = discard_alignment;
2209 } 2200 }
2210 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2201 info->feature_secdiscard =
2211 "discard-secure", "%u", &discard_secure); 2202 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
2212 if (err > 0) 2203 0);
2213 info->feature_secdiscard = !!discard_secure;
2214} 2204}
2215 2205
2216static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) 2206static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
@@ -2302,16 +2292,11 @@ out_of_memory:
2302 */ 2292 */
2303static void blkfront_gather_backend_features(struct blkfront_info *info) 2293static void blkfront_gather_backend_features(struct blkfront_info *info)
2304{ 2294{
2305 int err;
2306 int barrier, flush, discard, persistent;
2307 unsigned int indirect_segments; 2295 unsigned int indirect_segments;
2308 2296
2309 info->feature_flush = 0; 2297 info->feature_flush = 0;
2310 info->feature_fua = 0; 2298 info->feature_fua = 0;
2311 2299
2312 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2313 "feature-barrier", "%d", &barrier);
2314
2315 /* 2300 /*
2316 * If there's no "feature-barrier" defined, then it means 2301 * If there's no "feature-barrier" defined, then it means
2317 * we're dealing with a very old backend which writes 2302 * we're dealing with a very old backend which writes
@@ -2319,7 +2304,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2319 * 2304 *
2320 * If there are barriers, then we use flush. 2305 * If there are barriers, then we use flush.
2321 */ 2306 */
2322 if (err > 0 && barrier) { 2307 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
2323 info->feature_flush = 1; 2308 info->feature_flush = 1;
2324 info->feature_fua = 1; 2309 info->feature_fua = 1;
2325 } 2310 }
@@ -2328,35 +2313,23 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2328 * And if there is "feature-flush-cache" use that above 2313 * And if there is "feature-flush-cache" use that above
2329 * barriers. 2314 * barriers.
2330 */ 2315 */
2331 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2316 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
2332 "feature-flush-cache", "%d", &flush); 2317 0)) {
2333
2334 if (err > 0 && flush) {
2335 info->feature_flush = 1; 2318 info->feature_flush = 1;
2336 info->feature_fua = 0; 2319 info->feature_fua = 0;
2337 } 2320 }
2338 2321
2339 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2322 if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
2340 "feature-discard", "%d", &discard);
2341
2342 if (err > 0 && discard)
2343 blkfront_setup_discard(info); 2323 blkfront_setup_discard(info);
2344 2324
2345 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2325 info->feature_persistent =
2346 "feature-persistent", "%d", &persistent); 2326 xenbus_read_unsigned(info->xbdev->otherend,
2347 if (err <= 0) 2327 "feature-persistent", 0);
2348 info->feature_persistent = 0;
2349 else
2350 info->feature_persistent = persistent;
2351 2328
2352 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2329 indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
2353 "feature-max-indirect-segments", "%u", 2330 "feature-max-indirect-segments", 0);
2354 &indirect_segments); 2331 info->max_indirect_segments = min(indirect_segments,
2355 if (err <= 0) 2332 xen_blkif_max_segments);
2356 info->max_indirect_segments = 0;
2357 else
2358 info->max_indirect_segments = min(indirect_segments,
2359 xen_blkif_max_segments);
2360} 2333}
2361 2334
2362/* 2335/*
@@ -2421,11 +2394,9 @@ static void blkfront_connect(struct blkfront_info *info)
2421 * provide this. Assume physical sector size to be the same as 2394 * provide this. Assume physical sector size to be the same as
2422 * sector_size in that case. 2395 * sector_size in that case.
2423 */ 2396 */
2424 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2397 physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
2425 "physical-sector-size", "%u", &physical_sector_size); 2398 "physical-sector-size",
2426 if (err != 1) 2399 sector_size);
2427 physical_sector_size = sector_size;
2428
2429 blkfront_gather_backend_features(info); 2400 blkfront_gather_backend_features(info);
2430 for (i = 0; i < info->nr_rings; i++) { 2401 for (i = 0; i < info->nr_rings; i++) {
2431 err = blkfront_setup_indirect(&info->rinfo[i]); 2402 err = blkfront_setup_indirect(&info->rinfo[i]);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 62028f483bba..50072cc4fe5c 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -337,18 +337,14 @@ static int tpmfront_resume(struct xenbus_device *dev)
337static void backend_changed(struct xenbus_device *dev, 337static void backend_changed(struct xenbus_device *dev,
338 enum xenbus_state backend_state) 338 enum xenbus_state backend_state)
339{ 339{
340 int val;
341
342 switch (backend_state) { 340 switch (backend_state) {
343 case XenbusStateInitialised: 341 case XenbusStateInitialised:
344 case XenbusStateConnected: 342 case XenbusStateConnected:
345 if (dev->state == XenbusStateConnected) 343 if (dev->state == XenbusStateConnected)
346 break; 344 break;
347 345
348 if (xenbus_scanf(XBT_NIL, dev->otherend, 346 if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
349 "feature-protocol-v2", "%d", &val) < 0) 347 0)) {
350 val = 0;
351 if (!val) {
352 xenbus_dev_fatal(dev, -EINVAL, 348 xenbus_dev_fatal(dev, -EINVAL,
353 "vTPM protocol 2 required"); 349 "vTPM protocol 2 required");
354 return; 350 return;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 227fbd2dbb71..3900875dec10 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -108,7 +108,8 @@ static irqreturn_t input_handler(int rq, void *dev_id)
108static int xenkbd_probe(struct xenbus_device *dev, 108static int xenkbd_probe(struct xenbus_device *dev,
109 const struct xenbus_device_id *id) 109 const struct xenbus_device_id *id)
110{ 110{
111 int ret, i, abs; 111 int ret, i;
112 unsigned int abs;
112 struct xenkbd_info *info; 113 struct xenkbd_info *info;
113 struct input_dev *kbd, *ptr; 114 struct input_dev *kbd, *ptr;
114 115
@@ -127,8 +128,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
127 if (!info->page) 128 if (!info->page)
128 goto error_nomem; 129 goto error_nomem;
129 130
130 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) 131 abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
131 abs = 0;
132 if (abs) { 132 if (abs) {
133 ret = xenbus_write(XBT_NIL, dev->nodename, 133 ret = xenbus_write(XBT_NIL, dev->nodename,
134 "request-abs-pointer", "1"); 134 "request-abs-pointer", "1");
@@ -322,11 +322,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
322 322
323 case XenbusStateInitWait: 323 case XenbusStateInitWait:
324InitWait: 324InitWait:
325 ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 325 if (xenbus_read_unsigned(info->xbdev->otherend,
326 "feature-abs-pointer", "%d", &val); 326 "feature-abs-pointer", 0)) {
327 if (ret < 0)
328 val = 0;
329 if (val) {
330 ret = xenbus_write(XBT_NIL, info->xbdev->nodename, 327 ret = xenbus_write(XBT_NIL, info->xbdev->nodename,
331 "request-abs-pointer", "1"); 328 "request-abs-pointer", "1");
332 if (ret) 329 if (ret)
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 55a4488633e4..3124eaec9427 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -785,12 +785,9 @@ static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
785 struct xenvif *vif = container_of(watch, struct xenvif, 785 struct xenvif *vif = container_of(watch, struct xenvif,
786 mcast_ctrl_watch); 786 mcast_ctrl_watch);
787 struct xenbus_device *dev = xenvif_to_xenbus_device(vif); 787 struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
788 int val;
789 788
790 if (xenbus_scanf(XBT_NIL, dev->otherend, 789 vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
791 "request-multicast-control", "%d", &val) < 0) 790 "request-multicast-control", 0);
792 val = 0;
793 vif->multicast_control = !!val;
794} 791}
795 792
796static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, 793static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
@@ -934,14 +931,11 @@ static void connect(struct backend_info *be)
934 /* Check whether the frontend requested multiple queues 931 /* Check whether the frontend requested multiple queues
935 * and read the number requested. 932 * and read the number requested.
936 */ 933 */
937 err = xenbus_scanf(XBT_NIL, dev->otherend, 934 requested_num_queues = xenbus_read_unsigned(dev->otherend,
938 "multi-queue-num-queues", 935 "multi-queue-num-queues", 1);
939 "%u", &requested_num_queues); 936 if (requested_num_queues > xenvif_max_queues) {
940 if (err < 0) {
941 requested_num_queues = 1; /* Fall back to single queue */
942 } else if (requested_num_queues > xenvif_max_queues) {
943 /* buggy or malicious guest */ 937 /* buggy or malicious guest */
944 xenbus_dev_fatal(dev, err, 938 xenbus_dev_fatal(dev, -EINVAL,
945 "guest requested %u queues, exceeding the maximum of %u.", 939 "guest requested %u queues, exceeding the maximum of %u.",
946 requested_num_queues, xenvif_max_queues); 940 requested_num_queues, xenvif_max_queues);
947 return; 941 return;
@@ -1134,7 +1128,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
1134 struct xenvif *vif = be->vif; 1128 struct xenvif *vif = be->vif;
1135 struct xenbus_device *dev = be->dev; 1129 struct xenbus_device *dev = be->dev;
1136 unsigned int rx_copy; 1130 unsigned int rx_copy;
1137 int err, val; 1131 int err;
1138 1132
1139 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 1133 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
1140 &rx_copy); 1134 &rx_copy);
@@ -1150,10 +1144,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
1150 if (!rx_copy) 1144 if (!rx_copy)
1151 return -EOPNOTSUPP; 1145 return -EOPNOTSUPP;
1152 1146
1153 if (xenbus_scanf(XBT_NIL, dev->otherend, 1147 if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
1154 "feature-rx-notify", "%d", &val) < 0)
1155 val = 0;
1156 if (!val) {
1157 /* - Reduce drain timeout to poll more frequently for 1148 /* - Reduce drain timeout to poll more frequently for
1158 * Rx requests. 1149 * Rx requests.
1159 * - Disable Rx stall detection. 1150 * - Disable Rx stall detection.
@@ -1162,34 +1153,21 @@ static int read_xenbus_vif_flags(struct backend_info *be)
1162 be->vif->stall_timeout = 0; 1153 be->vif->stall_timeout = 0;
1163 } 1154 }
1164 1155
1165 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", 1156 vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);
1166 "%d", &val) < 0)
1167 val = 0;
1168 vif->can_sg = !!val;
1169 1157
1170 vif->gso_mask = 0; 1158 vif->gso_mask = 0;
1171 1159
1172 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", 1160 if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
1173 "%d", &val) < 0)
1174 val = 0;
1175 if (val)
1176 vif->gso_mask |= GSO_BIT(TCPV4); 1161 vif->gso_mask |= GSO_BIT(TCPV4);
1177 1162
1178 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", 1163 if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
1179 "%d", &val) < 0)
1180 val = 0;
1181 if (val)
1182 vif->gso_mask |= GSO_BIT(TCPV6); 1164 vif->gso_mask |= GSO_BIT(TCPV6);
1183 1165
1184 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", 1166 vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
1185 "%d", &val) < 0) 1167 "feature-no-csum-offload", 0);
1186 val = 0;
1187 vif->ip_csum = !val;
1188 1168
1189 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload", 1169 vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
1190 "%d", &val) < 0) 1170 "feature-ipv6-csum-offload", 0);
1191 val = 0;
1192 vif->ipv6_csum = !!val;
1193 1171
1194 return 0; 1172 return 0;
1195} 1173}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e085c8c31cfe..a479cd99911d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1169,43 +1169,23 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
1169 netdev_features_t features) 1169 netdev_features_t features)
1170{ 1170{
1171 struct netfront_info *np = netdev_priv(dev); 1171 struct netfront_info *np = netdev_priv(dev);
1172 int val;
1173 1172
1174 if (features & NETIF_F_SG) { 1173 if (features & NETIF_F_SG &&
1175 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", 1174 !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1176 "%d", &val) < 0) 1175 features &= ~NETIF_F_SG;
1177 val = 0;
1178 1176
1179 if (!val) 1177 if (features & NETIF_F_IPV6_CSUM &&
1180 features &= ~NETIF_F_SG; 1178 !xenbus_read_unsigned(np->xbdev->otherend,
1181 } 1179 "feature-ipv6-csum-offload", 0))
1182 1180 features &= ~NETIF_F_IPV6_CSUM;
1183 if (features & NETIF_F_IPV6_CSUM) {
1184 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1185 "feature-ipv6-csum-offload", "%d", &val) < 0)
1186 val = 0;
1187
1188 if (!val)
1189 features &= ~NETIF_F_IPV6_CSUM;
1190 }
1191
1192 if (features & NETIF_F_TSO) {
1193 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1194 "feature-gso-tcpv4", "%d", &val) < 0)
1195 val = 0;
1196 1181
1197 if (!val) 1182 if (features & NETIF_F_TSO &&
1198 features &= ~NETIF_F_TSO; 1183 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1199 } 1184 features &= ~NETIF_F_TSO;
1200 1185
1201 if (features & NETIF_F_TSO6) { 1186 if (features & NETIF_F_TSO6 &&
1202 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, 1187 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1203 "feature-gso-tcpv6", "%d", &val) < 0) 1188 features &= ~NETIF_F_TSO6;
1204 val = 0;
1205
1206 if (!val)
1207 features &= ~NETIF_F_TSO6;
1208 }
1209 1189
1210 return features; 1190 return features;
1211} 1191}
@@ -1823,18 +1803,13 @@ static int talk_to_netback(struct xenbus_device *dev,
1823 info->netdev->irq = 0; 1803 info->netdev->irq = 0;
1824 1804
1825 /* Check if backend supports multiple queues */ 1805 /* Check if backend supports multiple queues */
1826 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1806 max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1827 "multi-queue-max-queues", "%u", &max_queues); 1807 "multi-queue-max-queues", 1);
1828 if (err < 0)
1829 max_queues = 1;
1830 num_queues = min(max_queues, xennet_max_queues); 1808 num_queues = min(max_queues, xennet_max_queues);
1831 1809
1832 /* Check feature-split-event-channels */ 1810 /* Check feature-split-event-channels */
1833 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1811 feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
1834 "feature-split-event-channels", "%u", 1812 "feature-split-event-channels", 0);
1835 &feature_split_evtchn);
1836 if (err < 0)
1837 feature_split_evtchn = 0;
1838 1813
1839 /* Read mac addr. */ 1814 /* Read mac addr. */
1840 err = xen_net_read_mac(dev, info->netdev->dev_addr); 1815 err = xen_net_read_mac(dev, info->netdev->dev_addr);
@@ -1968,16 +1943,10 @@ static int xennet_connect(struct net_device *dev)
1968 struct netfront_info *np = netdev_priv(dev); 1943 struct netfront_info *np = netdev_priv(dev);
1969 unsigned int num_queues = 0; 1944 unsigned int num_queues = 0;
1970 int err; 1945 int err;
1971 unsigned int feature_rx_copy;
1972 unsigned int j = 0; 1946 unsigned int j = 0;
1973 struct netfront_queue *queue = NULL; 1947 struct netfront_queue *queue = NULL;
1974 1948
1975 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, 1949 if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
1976 "feature-rx-copy", "%u", &feature_rx_copy);
1977 if (err != 1)
1978 feature_rx_copy = 0;
1979
1980 if (!feature_rx_copy) {
1981 dev_info(&dev->dev, 1950 dev_info(&dev->dev,
1982 "backend does not support copying receive path\n"); 1951 "backend does not support copying receive path\n");
1983 return -ENODEV; 1952 return -ENODEV;
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d6ff5e82377d..8fc2e9532575 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
1038 err = -ENOMEM; 1038 err = -ENOMEM;
1039 goto out; 1039 goto out;
1040 } 1040 }
1041 err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d", 1041 state = xenbus_read_unsigned(pdev->xdev->otherend, str,
1042 &state); 1042 XenbusStateUnknown);
1043 if (err != 1)
1044 state = XenbusStateUnknown;
1045 1043
1046 if (state != XenbusStateClosing) 1044 if (state != XenbusStateClosing)
1047 continue; 1045 continue;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687bf048..9aa1fe1fc939 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
79struct vscsifrnt_shadow { 79struct vscsifrnt_shadow {
80 /* command between backend and frontend */ 80 /* command between backend and frontend */
81 unsigned char act; 81 unsigned char act;
82 uint8_t nr_segments;
82 uint16_t rqid; 83 uint16_t rqid;
84 uint16_t ref_rqid;
83 85
84 unsigned int nr_grants; /* number of grants in gref[] */ 86 unsigned int nr_grants; /* number of grants in gref[] */
85 struct scsiif_request_segment *sg; /* scatter/gather elements */ 87 struct scsiif_request_segment *sg; /* scatter/gather elements */
88 struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
86 89
87 /* Do reset or abort function. */ 90 /* Do reset or abort function. */
88 wait_queue_head_t wq_reset; /* reset work queue */ 91 wait_queue_head_t wq_reset; /* reset work queue */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
172 scsifront_wake_up(info); 175 scsifront_wake_up(info);
173} 176}
174 177
175static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info) 178static int scsifront_do_request(struct vscsifrnt_info *info,
179 struct vscsifrnt_shadow *shadow)
176{ 180{
177 struct vscsiif_front_ring *ring = &(info->ring); 181 struct vscsiif_front_ring *ring = &(info->ring);
178 struct vscsiif_request *ring_req; 182 struct vscsiif_request *ring_req;
183 struct scsi_cmnd *sc = shadow->sc;
179 uint32_t id; 184 uint32_t id;
185 int i, notify;
186
187 if (RING_FULL(&info->ring))
188 return -EBUSY;
180 189
181 id = scsifront_get_rqid(info); /* use id in response */ 190 id = scsifront_get_rqid(info); /* use id in response */
182 if (id >= VSCSIIF_MAX_REQS) 191 if (id >= VSCSIIF_MAX_REQS)
183 return NULL; 192 return -EBUSY;
184 193
185 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); 194 info->shadow[id] = shadow;
195 shadow->rqid = id;
186 196
197 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
187 ring->req_prod_pvt++; 198 ring->req_prod_pvt++;
188 199
189 ring_req->rqid = (uint16_t)id; 200 ring_req->rqid = id;
201 ring_req->act = shadow->act;
202 ring_req->ref_rqid = shadow->ref_rqid;
203 ring_req->nr_segments = shadow->nr_segments;
190 204
191 return ring_req; 205 ring_req->id = sc->device->id;
192} 206 ring_req->lun = sc->device->lun;
207 ring_req->channel = sc->device->channel;
208 ring_req->cmd_len = sc->cmd_len;
193 209
194static void scsifront_do_request(struct vscsifrnt_info *info) 210 BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
195{ 211
196 struct vscsiif_front_ring *ring = &(info->ring); 212 memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
197 int notify; 213
214 ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
215 ring_req->timeout_per_command = sc->request->timeout / HZ;
216
217 for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
218 ring_req->seg[i] = shadow->seg[i];
198 219
199 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); 220 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
200 if (notify) 221 if (notify)
201 notify_remote_via_irq(info->irq); 222 notify_remote_via_irq(info->irq);
223
224 return 0;
202} 225}
203 226
204static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id) 227static void scsifront_gnttab_done(struct vscsifrnt_info *info,
228 struct vscsifrnt_shadow *shadow)
205{ 229{
206 struct vscsifrnt_shadow *s = info->shadow[id];
207 int i; 230 int i;
208 231
209 if (s->sc->sc_data_direction == DMA_NONE) 232 if (shadow->sc->sc_data_direction == DMA_NONE)
210 return; 233 return;
211 234
212 for (i = 0; i < s->nr_grants; i++) { 235 for (i = 0; i < shadow->nr_grants; i++) {
213 if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) { 236 if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
214 shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME 237 shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
215 "grant still in use by backend\n"); 238 "grant still in use by backend\n");
216 BUG(); 239 BUG();
217 } 240 }
218 gnttab_end_foreign_access(s->gref[i], 0, 0UL); 241 gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
219 } 242 }
220 243
221 kfree(s->sg); 244 kfree(shadow->sg);
222} 245}
223 246
224static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, 247static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
225 struct vscsiif_response *ring_rsp) 248 struct vscsiif_response *ring_rsp)
226{ 249{
250 struct vscsifrnt_shadow *shadow;
227 struct scsi_cmnd *sc; 251 struct scsi_cmnd *sc;
228 uint32_t id; 252 uint32_t id;
229 uint8_t sense_len; 253 uint8_t sense_len;
230 254
231 id = ring_rsp->rqid; 255 id = ring_rsp->rqid;
232 sc = info->shadow[id]->sc; 256 shadow = info->shadow[id];
257 sc = shadow->sc;
233 258
234 BUG_ON(sc == NULL); 259 BUG_ON(sc == NULL);
235 260
236 scsifront_gnttab_done(info, id); 261 scsifront_gnttab_done(info, shadow);
237 scsifront_put_rqid(info, id); 262 scsifront_put_rqid(info, id);
238 263
239 sc->result = ring_rsp->rslt; 264 sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
366 391
367static int map_data_for_request(struct vscsifrnt_info *info, 392static int map_data_for_request(struct vscsifrnt_info *info,
368 struct scsi_cmnd *sc, 393 struct scsi_cmnd *sc,
369 struct vscsiif_request *ring_req,
370 struct vscsifrnt_shadow *shadow) 394 struct vscsifrnt_shadow *shadow)
371{ 395{
372 grant_ref_t gref_head; 396 grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
379 struct scatterlist *sg; 403 struct scatterlist *sg;
380 struct scsiif_request_segment *seg; 404 struct scsiif_request_segment *seg;
381 405
382 ring_req->nr_segments = 0;
383 if (sc->sc_data_direction == DMA_NONE || !data_len) 406 if (sc->sc_data_direction == DMA_NONE || !data_len)
384 return 0; 407 return 0;
385 408
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
398 if (!shadow->sg) 421 if (!shadow->sg)
399 return -ENOMEM; 422 return -ENOMEM;
400 } 423 }
401 seg = shadow->sg ? : ring_req->seg; 424 seg = shadow->sg ? : shadow->seg;
402 425
403 err = gnttab_alloc_grant_references(seg_grants + data_grants, 426 err = gnttab_alloc_grant_references(seg_grants + data_grants,
404 &gref_head); 427 &gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
423 info->dev->otherend_id, 446 info->dev->otherend_id,
424 xen_page_to_gfn(page), 1); 447 xen_page_to_gfn(page), 1);
425 shadow->gref[ref_cnt] = ref; 448 shadow->gref[ref_cnt] = ref;
426 ring_req->seg[ref_cnt].gref = ref; 449 shadow->seg[ref_cnt].gref = ref;
427 ring_req->seg[ref_cnt].offset = (uint16_t)off; 450 shadow->seg[ref_cnt].offset = (uint16_t)off;
428 ring_req->seg[ref_cnt].length = (uint16_t)bytes; 451 shadow->seg[ref_cnt].length = (uint16_t)bytes;
429 452
430 page++; 453 page++;
431 len -= bytes; 454 len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
473 } 496 }
474 497
475 if (seg_grants) 498 if (seg_grants)
476 ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants; 499 shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
477 else 500 else
478 ring_req->nr_segments = (uint8_t)ref_cnt; 501 shadow->nr_segments = (uint8_t)ref_cnt;
479 shadow->nr_grants = ref_cnt; 502 shadow->nr_grants = ref_cnt;
480 503
481 return 0; 504 return 0;
482} 505}
483 506
484static struct vscsiif_request *scsifront_command2ring(
485 struct vscsifrnt_info *info, struct scsi_cmnd *sc,
486 struct vscsifrnt_shadow *shadow)
487{
488 struct vscsiif_request *ring_req;
489
490 memset(shadow, 0, sizeof(*shadow));
491
492 ring_req = scsifront_pre_req(info);
493 if (!ring_req)
494 return NULL;
495
496 info->shadow[ring_req->rqid] = shadow;
497 shadow->rqid = ring_req->rqid;
498
499 ring_req->id = sc->device->id;
500 ring_req->lun = sc->device->lun;
501 ring_req->channel = sc->device->channel;
502 ring_req->cmd_len = sc->cmd_len;
503
504 BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
505
506 memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
507
508 ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
509 ring_req->timeout_per_command = sc->request->timeout / HZ;
510
511 return ring_req;
512}
513
514static int scsifront_enter(struct vscsifrnt_info *info) 507static int scsifront_enter(struct vscsifrnt_info *info)
515{ 508{
516 if (info->pause) 509 if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
536 struct scsi_cmnd *sc) 529 struct scsi_cmnd *sc)
537{ 530{
538 struct vscsifrnt_info *info = shost_priv(shost); 531 struct vscsifrnt_info *info = shost_priv(shost);
539 struct vscsiif_request *ring_req;
540 struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc); 532 struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
541 unsigned long flags; 533 unsigned long flags;
542 int err; 534 int err;
543 uint16_t rqid; 535
536 sc->result = 0;
537 memset(shadow, 0, sizeof(*shadow));
538
539 shadow->sc = sc;
540 shadow->act = VSCSIIF_ACT_SCSI_CDB;
544 541
545 spin_lock_irqsave(shost->host_lock, flags); 542 spin_lock_irqsave(shost->host_lock, flags);
546 if (scsifront_enter(info)) { 543 if (scsifront_enter(info)) {
547 spin_unlock_irqrestore(shost->host_lock, flags); 544 spin_unlock_irqrestore(shost->host_lock, flags);
548 return SCSI_MLQUEUE_HOST_BUSY; 545 return SCSI_MLQUEUE_HOST_BUSY;
549 } 546 }
550 if (RING_FULL(&info->ring))
551 goto busy;
552
553 ring_req = scsifront_command2ring(info, sc, shadow);
554 if (!ring_req)
555 goto busy;
556
557 sc->result = 0;
558
559 rqid = ring_req->rqid;
560 ring_req->act = VSCSIIF_ACT_SCSI_CDB;
561 547
562 shadow->sc = sc; 548 err = map_data_for_request(info, sc, shadow);
563 shadow->act = VSCSIIF_ACT_SCSI_CDB;
564
565 err = map_data_for_request(info, sc, ring_req, shadow);
566 if (err < 0) { 549 if (err < 0) {
567 pr_debug("%s: err %d\n", __func__, err); 550 pr_debug("%s: err %d\n", __func__, err);
568 scsifront_put_rqid(info, rqid);
569 scsifront_return(info); 551 scsifront_return(info);
570 spin_unlock_irqrestore(shost->host_lock, flags); 552 spin_unlock_irqrestore(shost->host_lock, flags);
571 if (err == -ENOMEM) 553 if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
575 return 0; 557 return 0;
576 } 558 }
577 559
578 scsifront_do_request(info); 560 if (scsifront_do_request(info, shadow)) {
561 scsifront_gnttab_done(info, shadow);
562 goto busy;
563 }
564
579 scsifront_return(info); 565 scsifront_return(info);
580 spin_unlock_irqrestore(shost->host_lock, flags); 566 spin_unlock_irqrestore(shost->host_lock, flags);
581 567
@@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
598 struct Scsi_Host *host = sc->device->host; 584 struct Scsi_Host *host = sc->device->host;
599 struct vscsifrnt_info *info = shost_priv(host); 585 struct vscsifrnt_info *info = shost_priv(host);
600 struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); 586 struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
601 struct vscsiif_request *ring_req;
602 int err = 0; 587 int err = 0;
603 588
604 shadow = kmalloc(sizeof(*shadow), GFP_NOIO); 589 shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
605 if (!shadow) 590 if (!shadow)
606 return FAILED; 591 return FAILED;
607 592
593 shadow->act = act;
594 shadow->rslt_reset = RSLT_RESET_WAITING;
595 shadow->sc = sc;
596 shadow->ref_rqid = s->rqid;
597 init_waitqueue_head(&shadow->wq_reset);
598
608 spin_lock_irq(host->host_lock); 599 spin_lock_irq(host->host_lock);
609 600
610 for (;;) { 601 for (;;) {
611 if (!RING_FULL(&info->ring)) { 602 if (scsifront_enter(info))
612 ring_req = scsifront_command2ring(info, sc, shadow); 603 goto fail;
613 if (ring_req) 604
614 break; 605 if (!scsifront_do_request(info, shadow))
615 } 606 break;
616 if (err || info->pause) { 607
617 spin_unlock_irq(host->host_lock); 608 scsifront_return(info);
618 kfree(shadow); 609 if (err)
619 return FAILED; 610 goto fail;
620 }
621 info->wait_ring_available = 1; 611 info->wait_ring_available = 1;
622 spin_unlock_irq(host->host_lock); 612 spin_unlock_irq(host->host_lock);
623 err = wait_event_interruptible(info->wq_sync, 613 err = wait_event_interruptible(info->wq_sync,
@@ -625,22 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
625 spin_lock_irq(host->host_lock); 615 spin_lock_irq(host->host_lock);
626 } 616 }
627 617
628 if (scsifront_enter(info)) {
629 spin_unlock_irq(host->host_lock);
630 return FAILED;
631 }
632
633 ring_req->act = act;
634 ring_req->ref_rqid = s->rqid;
635
636 shadow->act = act;
637 shadow->rslt_reset = RSLT_RESET_WAITING;
638 init_waitqueue_head(&shadow->wq_reset);
639
640 ring_req->nr_segments = 0;
641
642 scsifront_do_request(info);
643
644 spin_unlock_irq(host->host_lock); 618 spin_unlock_irq(host->host_lock);
645 err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset); 619 err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
646 spin_lock_irq(host->host_lock); 620 spin_lock_irq(host->host_lock);
@@ -659,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
659 scsifront_return(info); 633 scsifront_return(info);
660 spin_unlock_irq(host->host_lock); 634 spin_unlock_irq(host->host_lock);
661 return err; 635 return err;
636
637fail:
638 spin_unlock_irq(host->host_lock);
639 kfree(shadow);
640 return FAILED;
662} 641}
663 642
664static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) 643static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
@@ -1060,13 +1039,9 @@ static void scsifront_read_backend_params(struct xenbus_device *dev,
1060 struct vscsifrnt_info *info) 1039 struct vscsifrnt_info *info)
1061{ 1040{
1062 unsigned int sg_grant, nr_segs; 1041 unsigned int sg_grant, nr_segs;
1063 int ret;
1064 struct Scsi_Host *host = info->host; 1042 struct Scsi_Host *host = info->host;
1065 1043
1066 ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u", 1044 sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
1067 &sg_grant);
1068 if (ret != 1)
1069 sg_grant = 0;
1070 nr_segs = min_t(unsigned int, sg_grant, SG_ALL); 1045 nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
1071 nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE); 1046 nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
1072 nr_segs = min_t(unsigned int, nr_segs, 1047 nr_segs = min_t(unsigned int, nr_segs,
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d517eed3..d0115a7af0a9 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -633,7 +633,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
633 enum xenbus_state backend_state) 633 enum xenbus_state backend_state)
634{ 634{
635 struct xenfb_info *info = dev_get_drvdata(&dev->dev); 635 struct xenfb_info *info = dev_get_drvdata(&dev->dev);
636 int val;
637 636
638 switch (backend_state) { 637 switch (backend_state) {
639 case XenbusStateInitialising: 638 case XenbusStateInitialising:
@@ -657,16 +656,12 @@ InitWait:
657 if (dev->state != XenbusStateConnected) 656 if (dev->state != XenbusStateConnected)
658 goto InitWait; /* no InitWait seen yet, fudge it */ 657 goto InitWait; /* no InitWait seen yet, fudge it */
659 658
660 if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, 659 if (xenbus_read_unsigned(info->xbdev->otherend,
661 "request-update", "%d", &val) < 0) 660 "request-update", 0))
662 val = 0;
663 if (val)
664 info->update_wanted = 1; 661 info->update_wanted = 1;
665 662
666 if (xenbus_scanf(XBT_NIL, dev->otherend, 663 info->feature_resize = xenbus_read_unsigned(dev->otherend,
667 "feature-resize", "%d", &val) < 0) 664 "feature-resize", 0);
668 val = 0;
669 info->feature_resize = val;
670 break; 665 break;
671 666
672 case XenbusStateClosed: 667 case XenbusStateClosed:
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e4db19e88ab1..db107fa50ca1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -180,7 +180,6 @@ static void __balloon_append(struct page *page)
180static void balloon_append(struct page *page) 180static void balloon_append(struct page *page)
181{ 181{
182 __balloon_append(page); 182 __balloon_append(page);
183 adjust_managed_page_count(page, -1);
184} 183}
185 184
186/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ 185/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -201,8 +200,6 @@ static struct page *balloon_retrieve(bool require_lowmem)
201 else 200 else
202 balloon_stats.balloon_low--; 201 balloon_stats.balloon_low--;
203 202
204 adjust_managed_page_count(page, 1);
205
206 return page; 203 return page;
207} 204}
208 205
@@ -478,7 +475,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
478#endif 475#endif
479 476
480 /* Relinquish the page back to the allocator. */ 477 /* Relinquish the page back to the allocator. */
481 __free_reserved_page(page); 478 free_reserved_page(page);
482 } 479 }
483 480
484 balloon_stats.current_pages += rc; 481 balloon_stats.current_pages += rc;
@@ -509,6 +506,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
509 state = BP_EAGAIN; 506 state = BP_EAGAIN;
510 break; 507 break;
511 } 508 }
509 adjust_managed_page_count(page, -1);
512 scrub_page(page); 510 scrub_page(page);
513 list_add(&page->lru, &pages); 511 list_add(&page->lru, &pages);
514 } 512 }
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index adc19ce3cc66..fd8e872d2943 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -947,7 +947,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
947 continue; 947 continue;
948 if (status.status != EVTCHNSTAT_virq) 948 if (status.status != EVTCHNSTAT_virq)
949 continue; 949 continue;
950 if (status.u.virq == virq && status.vcpu == cpu) { 950 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
951 rc = port; 951 rc = port;
952 break; 952 break;
953 } 953 }
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 7a47c4c9fb1b..1bf55a32a4b3 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -127,18 +127,21 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
127 struct gntalloc_gref *gref, *next; 127 struct gntalloc_gref *gref, *next;
128 128
129 readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE); 129 readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
130 rc = -ENOMEM;
131 for (i = 0; i < op->count; i++) { 130 for (i = 0; i < op->count; i++) {
132 gref = kzalloc(sizeof(*gref), GFP_KERNEL); 131 gref = kzalloc(sizeof(*gref), GFP_KERNEL);
133 if (!gref) 132 if (!gref) {
133 rc = -ENOMEM;
134 goto undo; 134 goto undo;
135 }
135 list_add_tail(&gref->next_gref, &queue_gref); 136 list_add_tail(&gref->next_gref, &queue_gref);
136 list_add_tail(&gref->next_file, &queue_file); 137 list_add_tail(&gref->next_file, &queue_file);
137 gref->users = 1; 138 gref->users = 1;
138 gref->file_index = op->index + i * PAGE_SIZE; 139 gref->file_index = op->index + i * PAGE_SIZE;
139 gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO); 140 gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
140 if (!gref->page) 141 if (!gref->page) {
142 rc = -ENOMEM;
141 goto undo; 143 goto undo;
144 }
142 145
143 /* Grant foreign access to the page. */ 146 /* Grant foreign access to the page. */
144 rc = gnttab_grant_foreign_access(op->domid, 147 rc = gnttab_grant_foreign_access(op->domid,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bb952121ea94..2ef2b61b69df 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1007,7 +1007,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1007 1007
1008 vma->vm_ops = &gntdev_vmops; 1008 vma->vm_ops = &gntdev_vmops;
1009 1009
1010 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; 1010 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
1011 1011
1012 if (use_ptemod) 1012 if (use_ptemod)
1013 vma->vm_flags |= VM_DONTCOPY; 1013 vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index b59c9455aae1..112ce422dc22 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -125,8 +125,4 @@ static struct pci_driver platform_driver = {
125 .id_table = platform_pci_tbl, 125 .id_table = platform_pci_tbl,
126}; 126};
127 127
128static int __init platform_pci_init(void) 128builtin_pci_driver(platform_driver);
129{
130 return pci_register_driver(&platform_driver);
131}
132device_initcall(platform_pci_init);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5ce878c51d03..3f0aee0a068b 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -362,7 +362,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
362 int err = 0; 362 int err = 0;
363 int num_devs; 363 int num_devs;
364 int domain, bus, slot, func; 364 int domain, bus, slot, func;
365 int substate; 365 unsigned int substate;
366 int i, len; 366 int i, len;
367 char state_str[64]; 367 char state_str[64];
368 char dev_str[64]; 368 char dev_str[64];
@@ -395,10 +395,8 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
395 "configuration"); 395 "configuration");
396 goto out; 396 goto out;
397 } 397 }
398 err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str, 398 substate = xenbus_read_unsigned(pdev->xdev->nodename, state_str,
399 "%d", &substate); 399 XenbusStateUnknown);
400 if (err != 1)
401 substate = XenbusStateUnknown;
402 400
403 switch (substate) { 401 switch (substate) {
404 case XenbusStateInitialising: 402 case XenbusStateInitialising:
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1e8be12ebb55..6c0ead4be784 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -538,6 +538,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
538 538
539 nonseekable_open(inode, filp); 539 nonseekable_open(inode, filp);
540 540
541 filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
542
541 u = kzalloc(sizeof(*u), GFP_KERNEL); 543 u = kzalloc(sizeof(*u), GFP_KERNEL);
542 if (u == NULL) 544 if (u == NULL)
543 return -ENOMEM; 545 return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cfef55d..4bdf654041e9 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -702,7 +702,7 @@ device_initcall(xenbus_probe_initcall);
702 */ 702 */
703static int __init xenstored_local_init(void) 703static int __init xenstored_local_init(void)
704{ 704{
705 int err = 0; 705 int err = -ENOMEM;
706 unsigned long page = 0; 706 unsigned long page = 0;
707 struct evtchn_alloc_unbound alloc_unbound; 707 struct evtchn_alloc_unbound alloc_unbound;
708 708
@@ -826,7 +826,7 @@ static int __init xenbus_init(void)
826 * Create xenfs mountpoint in /proc for compatibility with 826 * Create xenfs mountpoint in /proc for compatibility with
827 * utilities that expect to find "xenbus" under "/proc/xen". 827 * utilities that expect to find "xenbus" under "/proc/xen".
828 */ 828 */
829 proc_mkdir("xen", NULL); 829 proc_create_mount_point("xen");
830#endif 830#endif
831 831
832out_error: 832out_error:
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 04f7f85a5edf..37929df829a3 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -224,13 +224,7 @@ static int read_frontend_details(struct xenbus_device *xendev)
224 224
225int xenbus_dev_is_online(struct xenbus_device *dev) 225int xenbus_dev_is_online(struct xenbus_device *dev)
226{ 226{
227 int rc, val; 227 return !!xenbus_read_unsigned(dev->nodename, "online", 0);
228
229 rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
230 if (rc != 1)
231 val = 0; /* no online node present */
232
233 return val;
234} 228}
235EXPORT_SYMBOL_GPL(xenbus_dev_is_online); 229EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
236 230
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 22f7cd711c57..6afb993c5809 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -559,6 +559,21 @@ int xenbus_scanf(struct xenbus_transaction t,
559} 559}
560EXPORT_SYMBOL_GPL(xenbus_scanf); 560EXPORT_SYMBOL_GPL(xenbus_scanf);
561 561
562/* Read an (optional) unsigned value. */
563unsigned int xenbus_read_unsigned(const char *dir, const char *node,
564 unsigned int default_val)
565{
566 unsigned int val;
567 int ret;
568
569 ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
570 if (ret <= 0)
571 val = default_val;
572
573 return val;
574}
575EXPORT_SYMBOL_GPL(xenbus_read_unsigned);
576
562/* Single printf and write: returns -errno or 0. */ 577/* Single printf and write: returns -errno or 0. */
563int xenbus_printf(struct xenbus_transaction t, 578int xenbus_printf(struct xenbus_transaction t,
564 const char *dir, const char *node, const char *fmt, ...) 579 const char *dir, const char *node, const char *fmt, ...)
@@ -672,7 +687,7 @@ static bool xen_strict_xenbus_quirk(void)
672} 687}
673static void xs_reset_watches(void) 688static void xs_reset_watches(void)
674{ 689{
675 int err, supported = 0; 690 int err;
676 691
677 if (!xen_hvm_domain() || xen_initial_domain()) 692 if (!xen_hvm_domain() || xen_initial_domain())
678 return; 693 return;
@@ -680,9 +695,8 @@ static void xs_reset_watches(void)
680 if (xen_strict_xenbus_quirk()) 695 if (xen_strict_xenbus_quirk())
681 return; 696 return;
682 697
683 err = xenbus_scanf(XBT_NIL, "control", 698 if (!xenbus_read_unsigned("control",
684 "platform-feature-xs_reset_watches", "%d", &supported); 699 "platform-feature-xs_reset_watches", 0))
685 if (err != 1 || !supported)
686 return; 700 return;
687 701
688 err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); 702 err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5f2dc2032c79..7eb3cefcf2a3 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -479,6 +479,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
479 } 479 }
480 return ent; 480 return ent;
481} 481}
482EXPORT_SYMBOL(proc_create_mount_point);
482 483
483struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, 484struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
484 struct proc_dir_entry *parent, 485 struct proc_dir_entry *parent,
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index bbba5d22aada..109876a24d2c 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -195,7 +195,6 @@ static inline bool is_empty_pde(const struct proc_dir_entry *pde)
195{ 195{
196 return S_ISDIR(pde->mode) && !pde->proc_iops; 196 return S_ISDIR(pde->mode) && !pde->proc_iops;
197} 197}
198struct proc_dir_entry *proc_create_mount_point(const char *name);
199 198
200/* 199/*
201 * inode.c 200 * inode.c
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 368c7ad06ae5..2d2bf592d9db 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
21 struct proc_dir_entry *, void *); 21 struct proc_dir_entry *, void *);
22extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, 22extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
23 struct proc_dir_entry *); 23 struct proc_dir_entry *);
24struct proc_dir_entry *proc_create_mount_point(const char *name);
24 25
25extern struct proc_dir_entry *proc_create_data(const char *, umode_t, 26extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
26 struct proc_dir_entry *, 27 struct proc_dir_entry *,
@@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
56 struct proc_dir_entry *parent,const char *dest) { return NULL;} 57 struct proc_dir_entry *parent,const char *dest) { return NULL;}
57static inline struct proc_dir_entry *proc_mkdir(const char *name, 58static inline struct proc_dir_entry *proc_mkdir(const char *name,
58 struct proc_dir_entry *parent) {return NULL;} 59 struct proc_dir_entry *parent) {return NULL;}
60static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
59static inline struct proc_dir_entry *proc_mkdir_data(const char *name, 61static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
60 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } 62 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
61static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, 63static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
new file mode 100644
index 000000000000..9d874db13c0e
--- /dev/null
+++ b/include/xen/arm/hypercall.h
@@ -0,0 +1,87 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_ARM_XEN_HYPERCALL_H
34#define _ASM_ARM_XEN_HYPERCALL_H
35
36#include <linux/bug.h>
37
38#include <xen/interface/xen.h>
39#include <xen/interface/sched.h>
40#include <xen/interface/platform.h>
41
42long privcmd_call(unsigned call, unsigned long a1,
43 unsigned long a2, unsigned long a3,
44 unsigned long a4, unsigned long a5);
45int HYPERVISOR_xen_version(int cmd, void *arg);
46int HYPERVISOR_console_io(int cmd, int count, char *str);
47int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
48int HYPERVISOR_sched_op(int cmd, void *arg);
49int HYPERVISOR_event_channel_op(int cmd, void *arg);
50unsigned long HYPERVISOR_hvm_op(int op, void *arg);
51int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
52int HYPERVISOR_physdev_op(int cmd, void *arg);
53int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
54int HYPERVISOR_tmem_op(void *arg);
55int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
56int HYPERVISOR_platform_op_raw(void *arg);
57static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
58{
59 op->interface_version = XENPF_INTERFACE_VERSION;
60 return HYPERVISOR_platform_op_raw(op);
61}
62int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
63
64static inline int
65HYPERVISOR_suspend(unsigned long start_info_mfn)
66{
67 struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
68
69 /* start_info_mfn is unused on ARM */
70 return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
71}
72
/* x86 PV multicall helper; page tables are not PV-managed on ARM. */
static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			unsigned int new_val, unsigned long flags)
{
	BUG();
}
79
80static inline void
81MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
82 int count, int *success_count, domid_t domid)
83{
84 BUG();
85}
86
87#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
new file mode 100644
index 000000000000..95251512e2c4
--- /dev/null
+++ b/include/xen/arm/hypervisor.h
@@ -0,0 +1,39 @@
1#ifndef _ASM_ARM_XEN_HYPERVISOR_H
2#define _ASM_ARM_XEN_HYPERVISOR_H
3
4#include <linux/init.h>
5
6extern struct shared_info *HYPERVISOR_shared_info;
7extern struct start_info *xen_start_info;
8
9/* Lazy mode for batching updates / context switch */
10enum paravirt_lazy_mode {
11 PARAVIRT_LAZY_NONE,
12 PARAVIRT_LAZY_MMU,
13 PARAVIRT_LAZY_CPU,
14};
15
16static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
17{
18 return PARAVIRT_LAZY_NONE;
19}
20
21extern struct dma_map_ops *xen_dma_ops;
22
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
/* No-op when the kernel is built without Xen guest support. */
static inline void xen_early_init(void) { }
#endif
28
#ifdef CONFIG_HOTPLUG_CPU
/* CPU hotplug needs no Xen-specific bookkeeping on ARM. */
static inline void xen_arch_register_cpu(int num)
{
}

static inline void xen_arch_unregister_cpu(int num)
{
}
#endif
38
39#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/include/xen/arm/interface.h b/include/xen/arm/interface.h
new file mode 100644
index 000000000000..75d596862892
--- /dev/null
+++ b/include/xen/arm/interface.h
@@ -0,0 +1,85 @@
1/******************************************************************************
2 * Guest OS interface to ARM Xen.
3 *
4 * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
5 */
6
7#ifndef _ASM_ARM_XEN_INTERFACE_H
8#define _ASM_ARM_XEN_INTERFACE_H
9
10#include <linux/types.h>
11
12#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
13
14#define __DEFINE_GUEST_HANDLE(name, type) \
15 typedef struct { union { type *p; uint64_aligned_t q; }; } \
16 __guest_handle_ ## name
17
18#define DEFINE_GUEST_HANDLE_STRUCT(name) \
19 __DEFINE_GUEST_HANDLE(name, struct name)
20#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
21#define GUEST_HANDLE(name) __guest_handle_ ## name
22
23#define set_xen_guest_handle(hnd, val) \
24 do { \
25 if (sizeof(hnd) == 8) \
26 *(uint64_t *)&(hnd) = 0; \
27 (hnd).p = val; \
28 } while (0)
29
30#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
31
32#ifndef __ASSEMBLY__
33/* Explicitly size integers that represent pfns in the interface with
34 * Xen so that we can have one ABI that works for 32 and 64 bit guests.
35 * Note that this means that the xen_pfn_t type may be capable of
36 * representing pfn's which the guest cannot represent in its own pfn
37 * type. However since pfn space is controlled by the guest this is
 38 * fine since it simply wouldn't be able to create any such pfns in
39 * the first place.
40 */
41typedef uint64_t xen_pfn_t;
42#define PRI_xen_pfn "llx"
43typedef uint64_t xen_ulong_t;
44#define PRI_xen_ulong "llx"
45typedef int64_t xen_long_t;
46#define PRI_xen_long "llx"
47/* Guest handles for primitive C types. */
48__DEFINE_GUEST_HANDLE(uchar, unsigned char);
49__DEFINE_GUEST_HANDLE(uint, unsigned int);
50DEFINE_GUEST_HANDLE(char);
51DEFINE_GUEST_HANDLE(int);
52DEFINE_GUEST_HANDLE(void);
53DEFINE_GUEST_HANDLE(uint64_t);
54DEFINE_GUEST_HANDLE(uint32_t);
55DEFINE_GUEST_HANDLE(xen_pfn_t);
56DEFINE_GUEST_HANDLE(xen_ulong_t);
57
58/* Maximum number of virtual CPUs in multi-processor guests. */
59#define MAX_VIRT_CPUS 1
60
61struct arch_vcpu_info { };
62struct arch_shared_info { };
63
64/* TODO: Move pvclock definitions some place arch independent */
65struct pvclock_vcpu_time_info {
66 u32 version;
67 u32 pad0;
68 u64 tsc_timestamp;
69 u64 system_time;
70 u32 tsc_to_system_mul;
71 s8 tsc_shift;
72 u8 flags;
73 u8 pad[2];
74} __attribute__((__packed__)); /* 32 bytes */
75
76/* It is OK to have a 12 bytes struct with no padding because it is packed */
77struct pvclock_wall_clock {
78 u32 version;
79 u32 sec;
80 u32 nsec;
81 u32 sec_hi;
82} __attribute__((__packed__));
83#endif
84
85#endif /* _ASM_ARM_XEN_INTERFACE_H */
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
new file mode 100644
index 000000000000..95ce6ac3a971
--- /dev/null
+++ b/include/xen/arm/page-coherent.h
@@ -0,0 +1,98 @@
1#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
2#define _ASM_ARM_XEN_PAGE_COHERENT_H
3
4#include <asm/page.h>
5#include <linux/dma-mapping.h>
6
7void __xen_dma_map_page(struct device *hwdev, struct page *page,
8 dma_addr_t dev_addr, unsigned long offset, size_t size,
9 enum dma_data_direction dir, unsigned long attrs);
10void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
11 size_t size, enum dma_data_direction dir,
12 unsigned long attrs);
13void __xen_dma_sync_single_for_cpu(struct device *hwdev,
14 dma_addr_t handle, size_t size, enum dma_data_direction dir);
15
16void __xen_dma_sync_single_for_device(struct device *hwdev,
17 dma_addr_t handle, size_t size, enum dma_data_direction dir);
18
19static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
20 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
21{
22 return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
23}
24
25static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
26 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
27{
28 __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
29}
30
31static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
32 dma_addr_t dev_addr, unsigned long offset, size_t size,
33 enum dma_data_direction dir, unsigned long attrs)
34{
35 unsigned long page_pfn = page_to_xen_pfn(page);
36 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
37 unsigned long compound_pages =
38 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
39 bool local = (page_pfn <= dev_pfn) &&
40 (dev_pfn - page_pfn < compound_pages);
41
42 /*
43 * Dom0 is mapped 1:1, while the Linux page can span across
44 * multiple Xen pages, it's not possible for it to contain a
45 * mix of local and foreign Xen pages. So if the first xen_pfn
46 * == mfn the page is local otherwise it's a foreign page
47 * grant-mapped in dom0. If the page is local we can safely
48 * call the native dma_ops function, otherwise we call the xen
49 * specific function.
50 */
51 if (local)
52 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
53 else
54 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
55}
56
57static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
58 size_t size, enum dma_data_direction dir, unsigned long attrs)
59{
60 unsigned long pfn = PFN_DOWN(handle);
61 /*
62 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
63 * multiple Xen page, it's not possible to have a mix of local and
64 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
65 * foreign mfn will always return false. If the page is local we can
66 * safely call the native dma_ops function, otherwise we call the xen
67 * specific function.
68 */
69 if (pfn_valid(pfn)) {
70 if (__generic_dma_ops(hwdev)->unmap_page)
71 __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
72 } else
73 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
74}
75
76static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
77 dma_addr_t handle, size_t size, enum dma_data_direction dir)
78{
79 unsigned long pfn = PFN_DOWN(handle);
80 if (pfn_valid(pfn)) {
81 if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
82 __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
83 } else
84 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
85}
86
87static inline void xen_dma_sync_single_for_device(struct device *hwdev,
88 dma_addr_t handle, size_t size, enum dma_data_direction dir)
89{
90 unsigned long pfn = PFN_DOWN(handle);
91 if (pfn_valid(pfn)) {
92 if (__generic_dma_ops(hwdev)->sync_single_for_device)
93 __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
94 } else
95 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
96}
97
98#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
new file mode 100644
index 000000000000..415dbc6e43fd
--- /dev/null
+++ b/include/xen/arm/page.h
@@ -0,0 +1,122 @@
1#ifndef _ASM_ARM_XEN_PAGE_H
2#define _ASM_ARM_XEN_PAGE_H
3
4#include <asm/page.h>
5#include <asm/pgtable.h>
6
7#include <linux/pfn.h>
8#include <linux/types.h>
9#include <linux/dma-mapping.h>
10
11#include <xen/xen.h>
12#include <xen/interface/grant_table.h>
13
14#define phys_to_machine_mapping_valid(pfn) (1)
15
16/* Xen machine address */
17typedef struct xmaddr {
18 phys_addr_t maddr;
19} xmaddr_t;
20
21/* Xen pseudo-physical address */
22typedef struct xpaddr {
23 phys_addr_t paddr;
24} xpaddr_t;
25
26#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
27#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
28
29#define INVALID_P2M_ENTRY (~0UL)
30
31/*
32 * The pseudo-physical frame (pfn) used in all the helpers is always based
33 * on Xen page granularity (i.e 4KB).
34 *
35 * A Linux page may be split across multiple non-contiguous Xen page so we
36 * have to keep track with frame based on 4KB page granularity.
37 *
38 * PV drivers should never make a direct usage of those helpers (particularly
39 * pfn_to_gfn and gfn_to_pfn).
40 */
41
42unsigned long __pfn_to_mfn(unsigned long pfn);
43extern struct rb_root phys_to_mach;
44
/* Pseudo-physical <-> Guest conversion */

/*
 * ARM guests are always auto-translated, so pseudo-physical and guest
 * frame numbers coincide.
 */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
	return pfn;
}
50
/* Inverse of pfn_to_gfn(); also an identity on auto-translated ARM. */
static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
	return gfn;
}
55
56/* Pseudo-physical <-> BUS conversion */
57static inline unsigned long pfn_to_bfn(unsigned long pfn)
58{
59 unsigned long mfn;
60
61 if (phys_to_mach.rb_node != NULL) {
62 mfn = __pfn_to_mfn(pfn);
63 if (mfn != INVALID_P2M_ENTRY)
64 return mfn;
65 }
66
67 return pfn;
68}
69
/* Bus frames map back to pseudo-physical frames 1:1. */
static inline unsigned long bfn_to_pfn(unsigned long bfn)
{
	return bfn;
}
74
75#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
76
77/* VIRT <-> GUEST conversion */
78#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
79#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
80
81/* Only used in PV code. But ARM guests are always HVM. */
82static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
83{
84 BUG();
85}
86
87/* TODO: this shouldn't be here but it is because the frontend drivers
88 * are using it (its rolled in headers) even though we won't hit the code path.
89 * So for right now just punt with this.
90 */
91static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
92{
93 BUG();
94 return NULL;
95}
96
97extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
98 struct gnttab_map_grant_ref *kmap_ops,
99 struct page **pages, unsigned int count);
100
101extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
102 struct gnttab_unmap_grant_ref *kunmap_ops,
103 struct page **pages, unsigned int count);
104
105bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
106bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
107 unsigned long nr_pages);
108
109static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
110{
111 return __set_phys_to_machine(pfn, mfn);
112}
113
114#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
115#define xen_unmap(cookie) iounmap((cookie))
116
117bool xen_arch_need_swiotlb(struct device *dev,
118 phys_addr_t phys,
119 dma_addr_t dev_addr);
120unsigned long xen_get_swiotlb_free_pages(unsigned int order);
121
122#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 32b944b7cebd..271ba62503c7 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -151,6 +151,10 @@ __scanf(4, 5)
151int xenbus_scanf(struct xenbus_transaction t, 151int xenbus_scanf(struct xenbus_transaction t,
152 const char *dir, const char *node, const char *fmt, ...); 152 const char *dir, const char *node, const char *fmt, ...);
153 153
154/* Read an (optional) unsigned value. */
155unsigned int xenbus_read_unsigned(const char *dir, const char *node,
156 unsigned int default_val);
157
154/* Single printf and write: returns -errno or 0. */ 158/* Single printf and write: returns -errno or 0. */
155__printf(4, 5) 159__printf(4, 5)
156int xenbus_printf(struct xenbus_transaction t, 160int xenbus_printf(struct xenbus_transaction t,