author     Linus Torvalds <torvalds@linux-foundation.org>   2014-12-11 21:15:33 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-12-11 21:15:33 -0500
commit     9d050966e2eb37a643ac15904b6a8fda7fcfabe9 (patch)
tree       f3a6f9cc93f6dde2e0cd6f4114b8258afb596bc1
parent     c0222ac086669a631814bbf857f8c8023452a4d7 (diff)
parent     4ef8e3f3504808621e594f01852476a1d4e7ef93 (diff)
Merge tag 'stable/for-linus-3.19-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen features and fixes from David Vrabel:

 - Fully support non-coherent devices on ARM by introducing the
   mechanisms to request the hypervisor to perform the required cache
   maintenance operations.

 - A number of pciback bug fixes and cleanups.  Notably a deadlock fix
   if a PCI device was manually unbound and a fix for incorrectly
   restoring state after a function reset.

 - In x86 PVHVM guests, use the APIC for interrupts if this has been
   virtualized by the hardware.  This reduces the number of
   interrupt-related VM exits on such hardware.

* tag 'stable/for-linus-3.19-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (26 commits)
  Revert "swiotlb-xen: pass dev_addr to swiotlb_tbl_unmap_single"
  xen/pci: Use APIC directly when APIC virtualization hardware is available
  xen/pci: Defer initialization of MSI ops on HVM guests
  xen-pciback: drop SR-IOV VFs when PF driver unloads
  xen/pciback: Restore configuration space when detaching from a guest.
  PCI: Expose pci_load_saved_state for public consumption.
  xen/pciback: Remove tons of dereferences
  xen/pciback: Print out the domain owning the device.
  xen/pciback: Include the domain id if removing the device whilst still in use
  driver core: Provide an wrapper around the mutex to do lockdep warnings
  xen/pciback: Don't deadlock when unbinding.
  swiotlb-xen: pass dev_addr to swiotlb_tbl_unmap_single
  swiotlb-xen: call xen_dma_sync_single_for_device when appropriate
  swiotlb-xen: remove BUG_ON in xen_bus_to_phys
  swiotlb-xen: pass dev_addr to xen_dma_unmap_page and xen_dma_sync_single_for_cpu
  xen/arm: introduce GNTTABOP_cache_flush
  xen/arm/arm64: introduce xen_arch_need_swiotlb
  xen/arm/arm64: merge xen/mm32.c into xen/mm.c
  xen/arm: use hypercall to flush caches in map_page
  xen: add a dma_addr_t dev_addr argument to xen_dma_map_page
  ...
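
The cache maintenance mechanism in the first item is the new GNTTABOP_cache_flush
grant-table operation (defined in include/xen/interface/grant_table.h below), which
dom0 uses to ask the hypervisor to clean or invalidate cache lines for grant-mapped
foreign pages it cannot reach through its own mappings. A minimal sketch of the call
pattern, mirroring dma_cache_maint() from this series; bus_addr, offset and len are
illustrative placeholders:

    struct gnttab_cache_flush cflush;

    cflush.a.dev_bus_addr = bus_addr & PAGE_MASK; /* page holding the buffer */
    cflush.offset = offset;                       /* start of the region in the page */
    cflush.length = len;                          /* must not cross a page boundary */
    cflush.op = GNTTAB_CACHE_CLEAN;               /* or GNTTAB_CACHE_INVAL before reads */
    HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
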
 arch/arm/include/asm/device.h              |   1
 arch/arm/include/asm/dma-mapping.h         |   7
 arch/arm/include/asm/xen/page-coherent.h   |  66
 arch/arm/include/asm/xen/page.h            |   4
 arch/arm/xen/Makefile                      |   2
 arch/arm/xen/enlighten.c                   |   5
 arch/arm/xen/mm.c                          | 121
 arch/arm/xen/mm32.c                        | 202
 arch/arm64/include/asm/device.h            |   1
 arch/arm64/include/asm/dma-mapping.h       |   7
 arch/arm64/include/asm/xen/page-coherent.h |  44
 arch/x86/include/asm/xen/cpuid.h           |  91
 arch/x86/include/asm/xen/page-coherent.h   |   4
 arch/x86/include/asm/xen/page.h            |   7
 arch/x86/pci/xen.c                         |  31
 drivers/pci/pci.c                          |   5
 drivers/xen/swiotlb-xen.c                  |  19
 drivers/xen/xen-pciback/passthrough.c      |  14
 drivers/xen/xen-pciback/pci_stub.c         | 112
 drivers/xen/xen-pciback/pciback.h          |   7
 drivers/xen/xen-pciback/vpci.c             |  14
 drivers/xen/xen-pciback/xenbus.c           |   4
 include/linux/device.h                     |   5
 include/linux/pci.h                        |   2
 include/xen/interface/features.h           |   3
 include/xen/interface/grant_table.h        |  19
 26 files changed, 488 insertions, 309 deletions
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index dc662fca9230..4111592f0130 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -17,6 +17,7 @@ struct dev_archdata {
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
         struct dma_iommu_mapping *mapping;
 #endif
+        bool dma_coherent;
 };
 
 struct omap_device;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 85738b200023..e6e3446abdf6 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -123,11 +123,18 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 
 static inline int set_arch_dma_coherent_ops(struct device *dev)
 {
+        dev->archdata.dma_coherent = true;
         set_dma_ops(dev, &arm_coherent_dma_ops);
         return 0;
 }
 #define set_arch_dma_coherent_ops(dev)        set_arch_dma_coherent_ops(dev)
 
+/* do not use this function in a driver */
+static inline bool is_device_dma_coherent(struct device *dev)
+{
+        return dev->archdata.dma_coherent;
+}
+
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
         unsigned int offset = paddr & ~PAGE_MASK;
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index e8275ea88e88..efd562412850 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -5,6 +5,18 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+             dma_addr_t dev_addr, unsigned long offset, size_t size,
+             enum dma_data_direction dir, struct dma_attrs *attrs);
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs);
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
                 dma_addr_t *dma_handle, gfp_t flags,
                 struct dma_attrs *attrs)
@@ -20,20 +32,56 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-             unsigned long offset, size_t size, enum dma_data_direction dir,
-             struct dma_attrs *attrs)
+             dma_addr_t dev_addr, unsigned long offset, size_t size,
+             enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+        bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
+        /* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise
+         * is a foreign page grant-mapped in dom0. If the page is local we
+         * can safely call the native dma_ops function, otherwise we call
+         * the xen specific function. */
+        if (local)
+                __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+        else
+                __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 }
 
-void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir,
-                struct dma_attrs *attrs);
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        unsigned long pfn = PFN_DOWN(handle);
+        /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
+         * always return false. If the page is local we can safely call the
+         * native dma_ops function, otherwise we call the xen specific
+         * function. */
+        if (pfn_valid(pfn)) {
+                if (__generic_dma_ops(hwdev)->unmap_page)
+                        __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+        } else
+                __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
 
-void xen_dma_sync_single_for_cpu(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir);
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        unsigned long pfn = PFN_DOWN(handle);
+        if (pfn_valid(pfn)) {
+                if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+                        __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+        } else
+                __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
 
-void xen_dma_sync_single_for_device(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir);
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        unsigned long pfn = PFN_DOWN(handle);
+        if (pfn_valid(pfn)) {
+                if (__generic_dma_ops(hwdev)->sync_single_for_device)
+                        __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+        } else
+                __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
 
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 135c24a5ba26..68c739b3fdf4 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 #define xen_remap(cookie, size) ioremap_cache((cookie), (size))
 #define xen_unmap(cookie) iounmap((cookie))
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn);
+
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 1f85bfe6b470..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y        := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
+obj-y        := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 0e15f011f9c8..c7ca936ebd99 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -261,11 +261,6 @@ static int __init xen_guest_init(void)
 
         xen_setup_features();
 
-        if (!xen_feature(XENFEAT_grant_map_identity)) {
-                pr_warn("Please upgrade your Xen.\n"
-                        "If your platform has any non-coherent DMA devices, they won't work properly.\n");
-        }
-
         if (xen_feature(XENFEAT_dom0))
                 xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
         else
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de99148..351b24a979d4 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -1,6 +1,10 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/export.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
@@ -8,6 +12,7 @@
 #include <linux/swiotlb.h>
 
 #include <xen/xen.h>
+#include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/swiotlb-xen.h>
 
@@ -16,6 +21,114 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
+enum dma_cache_op {
+        DMA_UNMAP,
+        DMA_MAP,
+};
+static bool hypercall_cflush = false;
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+{
+        struct gnttab_cache_flush cflush;
+        unsigned long pfn;
+        size_t left = size;
+
+        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+        offset %= PAGE_SIZE;
+
+        do {
+                size_t len = left;
+
+                /* buffers in highmem or foreign pages cannot cross page
+                 * boundaries */
+                if (len + offset > PAGE_SIZE)
+                        len = PAGE_SIZE - offset;
+
+                cflush.op = 0;
+                cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+                cflush.offset = offset;
+                cflush.length = len;
+
+                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
+                        cflush.op = GNTTAB_CACHE_INVAL;
+                if (op == DMA_MAP) {
+                        if (dir == DMA_FROM_DEVICE)
+                                cflush.op = GNTTAB_CACHE_INVAL;
+                        else
+                                cflush.op = GNTTAB_CACHE_CLEAN;
+                }
+                if (cflush.op)
+                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
+
+                offset = 0;
+                pfn++;
+                left -= len;
+        } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
+}
+
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+             dma_addr_t dev_addr, unsigned long offset, size_t size,
+             enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+                return;
+
+        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+                return;
+
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn)
+{
+        return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                  unsigned int address_bits,
                                  dma_addr_t *dma_handle)
@@ -56,10 +169,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
 
 int __init xen_mm_init(void)
 {
+        struct gnttab_cache_flush cflush;
         if (!xen_initial_domain())
                 return 0;
         xen_swiotlb_init(1, false);
         xen_dma_ops = &xen_swiotlb_dma_ops;
+
+        cflush.op = 0;
+        cflush.a.dev_bus_addr = 0;
+        cflush.offset = 0;
+        cflush.length = 0;
+        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
+                hypercall_cflush = true;
         return 0;
 }
 arch_initcall(xen_mm_init);
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
deleted file mode 100644
index 3b99860fd7ae..000000000000
--- a/arch/arm/xen/mm32.c
+++ /dev/null
@@ -1,202 +0,0 @@
-#include <linux/cpu.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-
-#include <xen/features.h>
-
-static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
-static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
-
-static int alloc_xen_mm32_scratch_page(int cpu)
-{
-        struct page *page;
-        unsigned long virt;
-        pmd_t *pmdp;
-        pte_t *ptep;
-
-        if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
-                return 0;
-
-        page = alloc_page(GFP_KERNEL);
-        if (page == NULL) {
-                pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
-                return -ENOMEM;
-        }
-
-        virt = (unsigned long)__va(page_to_phys(page));
-        pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
-        ptep = pte_offset_kernel(pmdp, virt);
-
-        per_cpu(xen_mm32_scratch_virt, cpu) = virt;
-        per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
-
-        return 0;
-}
-
-static int xen_mm32_cpu_notify(struct notifier_block *self,
-                               unsigned long action, void *hcpu)
-{
-        int cpu = (long)hcpu;
-        switch (action) {
-        case CPU_UP_PREPARE:
-                if (alloc_xen_mm32_scratch_page(cpu))
-                        return NOTIFY_BAD;
-                break;
-        default:
-                break;
-        }
-        return NOTIFY_OK;
-}
-
-static struct notifier_block xen_mm32_cpu_notifier = {
-        .notifier_call = xen_mm32_cpu_notify,
-};
-
-static void* xen_mm32_remap_page(dma_addr_t handle)
-{
-        unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
-        pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
-
-        *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
-        local_flush_tlb_kernel_page(virt);
-
-        return (void*)virt;
-}
-
-static void xen_mm32_unmap(void *vaddr)
-{
-        put_cpu_var(xen_mm32_scratch_virt);
-}
-
-
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-        size_t size, enum dma_data_direction dir,
-        void (*op)(const void *, size_t, int))
-{
-        unsigned long pfn;
-        size_t left = size;
-
-        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
-        offset %= PAGE_SIZE;
-
-        do {
-                size_t len = left;
-                void *vaddr;
-
-                if (!pfn_valid(pfn))
-                {
-                        /* Cannot map the page, we don't know its physical address.
-                         * Return and hope for the best */
-                        if (!xen_feature(XENFEAT_grant_map_identity))
-                                return;
-                        vaddr = xen_mm32_remap_page(handle) + offset;
-                        op(vaddr, len, dir);
-                        xen_mm32_unmap(vaddr - offset);
-                } else {
-                        struct page *page = pfn_to_page(pfn);
-
-                        if (PageHighMem(page)) {
-                                if (len + offset > PAGE_SIZE)
-                                        len = PAGE_SIZE - offset;
-
-                                if (cache_is_vipt_nonaliasing()) {
-                                        vaddr = kmap_atomic(page);
-                                        op(vaddr + offset, len, dir);
-                                        kunmap_atomic(vaddr);
-                                } else {
-                                        vaddr = kmap_high_get(page);
-                                        if (vaddr) {
-                                                op(vaddr + offset, len, dir);
-                                                kunmap_high(page);
-                                        }
-                                }
-                        } else {
-                                vaddr = page_address(page) + offset;
-                                op(vaddr, len, dir);
-                        }
-                }
-
-                offset = 0;
-                pfn++;
-                left -= len;
-        } while (left);
-}
-
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir)
-{
-        /* Cannot use __dma_page_dev_to_cpu because we don't have a
-         * struct page for handle */
-
-        if (dir != DMA_TO_DEVICE)
-                outer_inv_range(handle, handle + size);
-
-        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
-}
-
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir)
-{
-
-        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
-
-        if (dir == DMA_FROM_DEVICE) {
-                outer_inv_range(handle, handle + size);
-        } else {
-                outer_clean_range(handle, handle + size);
-        }
-}
-
-void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
-
-{
-        if (!__generic_dma_ops(hwdev)->unmap_page)
-                return;
-        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-                return;
-
-        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void xen_dma_sync_single_for_cpu(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-        if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
-                return;
-        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void xen_dma_sync_single_for_device(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-        if (!__generic_dma_ops(hwdev)->sync_single_for_device)
-                return;
-        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
-}
-
-int __init xen_mm32_init(void)
-{
-        int cpu;
-
-        if (!xen_initial_domain())
-                return 0;
-
-        register_cpu_notifier(&xen_mm32_cpu_notifier);
-        get_online_cpus();
-        for_each_online_cpu(cpu) {
-                if (alloc_xen_mm32_scratch_page(cpu)) {
-                        put_online_cpus();
-                        unregister_cpu_notifier(&xen_mm32_cpu_notifier);
-                        return -ENOMEM;
-                }
-        }
-        put_online_cpus();
-
-        return 0;
-}
-arch_initcall(xen_mm32_init);
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index cf98b362094b..243ef256b8c9 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -21,6 +21,7 @@ struct dev_archdata {
 #ifdef CONFIG_IOMMU_API
         void *iommu;                    /* private IOMMU data */
 #endif
+        bool dma_coherent;
 };
 
 struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index adeae3f6f0fc..d34189bceff7 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -54,11 +54,18 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 
 static inline int set_arch_dma_coherent_ops(struct device *dev)
 {
+        dev->archdata.dma_coherent = true;
         set_dma_ops(dev, &coherent_swiotlb_dma_ops);
         return 0;
 }
 #define set_arch_dma_coherent_ops        set_arch_dma_coherent_ops
 
+/* do not use this function in a driver */
+static inline bool is_device_dma_coherent(struct device *dev)
+{
+        return dev->archdata.dma_coherent;
+}
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index dde3fc9c49f0..2052102b4e02 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1,43 +1 @@
-#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
-#define _ASM_ARM64_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-                dma_addr_t *dma_handle, gfp_t flags,
-                struct dma_attrs *attrs)
-{
-        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-                void *cpu_addr, dma_addr_t dma_handle,
-                struct dma_attrs *attrs)
-{
-        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-             unsigned long offset, size_t size, enum dma_data_direction dir,
-             struct dma_attrs *attrs)
-{
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
-{
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-}
-#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
+#include <../../arm/include/asm/xen/page-coherent.h>
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
new file mode 100644
index 000000000000..0d809e9fc975
--- /dev/null
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -0,0 +1,91 @@
+/******************************************************************************
+ * arch-x86/cpuid.h
+ *
+ * CPUID interface to Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007 Citrix Systems, Inc.
+ *
+ * Authors:
+ *    Keir Fraser <keir@xen.org>
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
+#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
+
+/*
+ * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
+ * can be found at the first otherwise unused 0x100 aligned boundary starting
+ * from 0x40000000.
+ *
+ * e.g If viridian extensions are enabled for an HVM domain, the Xen cpuid
+ * leaves will start at 0x40000100
+ */
+
+#define XEN_CPUID_FIRST_LEAF 0x40000000
+#define XEN_CPUID_LEAF(i)    (XEN_CPUID_FIRST_LEAF + (i))
+
+/*
+ * Leaf 1 (0x40000x00)
+ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
+ *      are supported by the Xen host.
+ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
+ *      of a Xen host.
+ */
+#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
+#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
+#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
+
+/*
+ * Leaf 2 (0x40000x01)
+ * EAX[31:16]: Xen major version.
+ * EAX[15: 0]: Xen minor version.
+ * EBX-EDX: Reserved (currently all zeroes).
+ */
+
+/*
+ * Leaf 3 (0x40000x02)
+ * EAX: Number of hypercall transfer pages. This register is always guaranteed
+ *      to specify one hypercall page.
+ * EBX: Base address of Xen-specific MSRs.
+ * ECX: Features 1. Unused bits are set to zero.
+ * EDX: Features 2. Unused bits are set to zero.
+ */
+
+/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
+#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
+#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD  (1u<<0)
+
+/*
+ * Leaf 5 (0x40000x04)
+ * HVM-specific features
+ */
+
+/* EAX Features */
+/* Virtualized APIC registers */
+#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
+/* Virtualized x2APIC accesses */
+#define XEN_HVM_CPUID_X2APIC_VIRT      (1u << 1)
+/* Memory mapped from other domains has valid IOMMU entries */
+#define XEN_HVM_CPUID_IOMMU_MAPPINGS   (1u << 2)
+
+#define XEN_CPUID_MAX_NUM_LEAVES 4
+
+#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
index 7f02fe4e2c7b..acd844c017d3 100644
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -22,8 +22,8 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-             unsigned long offset, size_t size, enum dma_data_direction dir,
-             struct dma_attrs *attrs) { }
+             dma_addr_t dev_addr, unsigned long offset, size_t size,
+             enum dma_data_direction dir, struct dma_attrs *attrs) { }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
              size_t size, enum dma_data_direction dir,
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c949923a5668..f58ef6c0613b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -236,4 +236,11 @@ void make_lowmem_page_readwrite(void *vaddr);
 #define xen_remap(cookie, size) ioremap((cookie), (size));
 #define xen_unmap(cookie) iounmap((cookie))
 
+static inline bool xen_arch_need_swiotlb(struct device *dev,
+                                         unsigned long pfn,
+                                         unsigned long mfn)
+{
+        return false;
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 1819a91bbb9f..c489ef2c1a39 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -23,6 +23,8 @@
 #include <xen/features.h>
 #include <xen/events.h>
 #include <asm/xen/pci.h>
+#include <asm/xen/cpuid.h>
+#include <asm/apic.h>
 #include <asm/i8259.h>
 
 static int xen_pcifront_enable_irq(struct pci_dev *dev)
@@ -423,6 +425,28 @@ int __init pci_xen_init(void)
         return 0;
 }
 
+#ifdef CONFIG_PCI_MSI
+void __init xen_msi_init(void)
+{
+        if (!disable_apic) {
+                /*
+                 * If hardware supports (x2)APIC virtualization (as indicated
+                 * by hypervisor's leaf 4) then we don't need to use pirqs/
+                 * event channels for MSI handling and instead use regular
+                 * APIC processing
+                 */
+                uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);
+
+                if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
+                    ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && cpu_has_apic))
+                        return;
+        }
+
+        x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+}
+#endif
+
 int __init pci_xen_hvm_init(void)
 {
         if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
@@ -437,8 +461,11 @@ int __init pci_xen_hvm_init(void)
 #endif
 
 #ifdef CONFIG_PCI_MSI
-        x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
-        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+        /*
+         * We need to wait until after x2apic is initialized
+         * before we can set MSI IRQ ops.
+         */
+        x86_platform.apic_post_init = xen_msi_init;
 #endif
         return 0;
 }
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a7ac72639c52..cab05f31223f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1138,8 +1138,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state);
  * @dev: PCI device that we're dealing with
  * @state: Saved state returned from pci_store_saved_state()
  */
-static int pci_load_saved_state(struct pci_dev *dev,
-                                struct pci_saved_state *state)
+int pci_load_saved_state(struct pci_dev *dev,
+                         struct pci_saved_state *state)
 {
         struct pci_cap_saved_data *cap;
 
@@ -1167,6 +1167,7 @@ static int pci_load_saved_state(struct pci_dev *dev,
         dev->state_saved = true;
         return 0;
 }
+EXPORT_SYMBOL_GPL(pci_load_saved_state);
 
 /**
  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ebd8f218a788..810ad419e34c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
         dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
         phys_addr_t paddr = dma;
 
-        BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
-
         paddr |= baddr & ~PAGE_MASK;
 
         return paddr;
@@ -399,11 +397,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
          * buffering it.
          */
         if (dma_capable(dev, dev_addr, size) &&
-            !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+            !range_straddles_page_boundary(phys, size) &&
+            !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
+            !swiotlb_force) {
                 /* we are not interested in the dma_addr returned by
                  * xen_dma_map_page, only in the potential cache flushes executed
                  * by the function. */
-                xen_dma_map_page(dev, page, offset, size, dir, attrs);
+                xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
                 return dev_addr;
         }
 
@@ -417,7 +417,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                 return DMA_ERROR_CODE;
 
         xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-                         map & ~PAGE_MASK, size, dir, attrs);
+                         dev_addr, map & ~PAGE_MASK, size, dir, attrs);
         dev_addr = xen_phys_to_bus(map);
 
         /*
@@ -447,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
         BUG_ON(dir == DMA_NONE);
 
-        xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+        xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 
         /* NOTE: We use dev_addr here, not paddr! */
         if (is_xen_swiotlb_buffer(dev_addr)) {
@@ -495,14 +495,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
         BUG_ON(dir == DMA_NONE);
 
         if (target == SYNC_FOR_CPU)
-                xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+                xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
 
         /* NOTE: We use dev_addr here, not paddr! */
         if (is_xen_swiotlb_buffer(dev_addr))
                 swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
 
         if (target == SYNC_FOR_DEVICE)
-                xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+                xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
 
         if (dir != DMA_FROM_DEVICE)
                 return;
@@ -557,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                 dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 
                 if (swiotlb_force ||
+                    xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
                     !dma_capable(hwdev, dev_addr, sg->length) ||
                     range_straddles_page_boundary(paddr, sg->length)) {
                         phys_addr_t map = swiotlb_tbl_map_single(hwdev,
@@ -574,6 +575,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                 return 0;
                         }
                         xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
+                                         dev_addr,
                                          map & ~PAGE_MASK,
                                          sg->length,
                                          dir,
@@ -584,6 +586,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                  * xen_dma_map_page, only in the potential cache flushes executed
                  * by the function. */
                 xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+                                 dev_addr,
                                  paddr & ~PAGE_MASK,
                                  sg->length,
                                  dir,
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
index 828dddc360df..f16a30e2a110 100644
--- a/drivers/xen/xen-pciback/passthrough.c
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 }
 
 static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
-                                        struct pci_dev *dev)
+                                        struct pci_dev *dev, bool lock)
 {
         struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
         struct pci_dev_entry *dev_entry, *t;
@@ -87,8 +87,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
 
         mutex_unlock(&dev_data->lock);
 
-        if (found_dev)
+        if (found_dev) {
+                if (lock)
+                        device_lock(&found_dev->dev);
                 pcistub_put_pci_dev(found_dev);
+                if (lock)
+                        device_unlock(&found_dev->dev);
+        }
 }
 
 static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
@@ -156,8 +161,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
         struct pci_dev_entry *dev_entry, *t;
 
         list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
+                struct pci_dev *dev = dev_entry->dev;
                 list_del(&dev_entry->list);
-                pcistub_put_pci_dev(dev_entry->dev);
+                device_lock(&dev->dev);
+                pcistub_put_pci_dev(dev);
+                device_unlock(&dev->dev);
                 kfree(dev_entry);
         }
 
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 017069a455d4..cc3cbb4435f8 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -105,7 +105,7 @@ static void pcistub_device_release(struct kref *kref)
          */
         __pci_reset_function_locked(dev);
         if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
-                dev_dbg(&dev->dev, "Could not reload PCI state\n");
+                dev_info(&dev->dev, "Could not reload PCI state\n");
         else
                 pci_restore_state(dev);
 
@@ -250,11 +250,15 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
  * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
  *
  * As such we have to be careful.
+ *
+ * To make this easier, the caller has to hold the device lock.
  */
 void pcistub_put_pci_dev(struct pci_dev *dev)
 {
         struct pcistub_device *psdev, *found_psdev = NULL;
         unsigned long flags;
+        struct xen_pcibk_dev_data *dev_data;
+        int ret;
 
         spin_lock_irqsave(&pcistub_devices_lock, flags);
 
@@ -276,13 +280,20 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
         /* Cleanup our device
          * (so it's ready for the next domain)
          */
+        device_lock_assert(&dev->dev);
+        __pci_reset_function_locked(dev);
 
-        /* This is OK - we are running from workqueue context
-         * and want to inhibit the user from fiddling with 'reset'
-         */
-        pci_reset_function(dev);
-        pci_restore_state(dev);
-
+        dev_data = pci_get_drvdata(dev);
+        ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
+        if (!ret) {
+                /*
+                 * The usual sequence is pci_save_state & pci_restore_state
+                 * but the guest might have messed the configuration space up.
+                 * Use the initial version (when device was bound to us).
+                 */
+                pci_restore_state(dev);
+        } else
+                dev_info(&dev->dev, "Could not reload PCI state\n");
         /* This disables the device. */
         xen_pcibk_reset_device(dev);
 
@@ -554,12 +565,14 @@ static void pcistub_remove(struct pci_dev *dev)
         spin_unlock_irqrestore(&pcistub_devices_lock, flags);
 
         if (found_psdev) {
-                dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
-                        found_psdev->pdev);
+                dev_dbg(&dev->dev, "found device to remove %s\n",
+                        found_psdev->pdev ? "- in-use" : "");
 
                 if (found_psdev->pdev) {
-                        pr_warn("****** removing device %s while still in-use! ******\n",
-                               pci_name(found_psdev->dev));
+                        int domid = xen_find_device_domain_owner(dev);
+
+                        pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
+                               pci_name(found_psdev->dev), domid);
                         pr_warn("****** driver domain may still access this device's i/o resources!\n");
                         pr_warn("****** shutdown driver domain before binding device\n");
                         pr_warn("****** to other drivers or domains\n");
@@ -567,7 +580,8 @@ static void pcistub_remove(struct pci_dev *dev)
                         /* N.B. This ends up calling pcistub_put_pci_dev which ends up
                          * doing the FLR. */
                         xen_pcibk_release_pci_dev(found_psdev->pdev,
-                                                  found_psdev->dev);
+                                                  found_psdev->dev,
+                                                  false /* caller holds the lock. */);
                 }
 
                 spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -629,10 +643,12 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
 {
         pci_ers_result_t res = result;
         struct xen_pcie_aer_op *aer_op;
+        struct xen_pcibk_device *pdev = psdev->pdev;
+        struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
         int ret;
 
         /*with PV AER drivers*/
-        aer_op = &(psdev->pdev->sh_info->aer_op);
+        aer_op = &(sh_info->aer_op);
         aer_op->cmd = aer_cmd ;
         /*useful for error_detected callback*/
         aer_op->err = state;
@@ -653,36 +669,36 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
          * this flag to judge whether we need to check pci-front give aer
          * service ack signal
          */
-        set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+        set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
 
         /*It is possible that a pcifront conf_read_write ops request invokes
          * the callback which cause the spurious execution of wake_up.
          * Yet it is harmless and better than a spinlock here
          */
         set_bit(_XEN_PCIB_active,
-                (unsigned long *)&psdev->pdev->sh_info->flags);
+                (unsigned long *)&sh_info->flags);
         wmb();
-        notify_remote_via_irq(psdev->pdev->evtchn_irq);
+        notify_remote_via_irq(pdev->evtchn_irq);
 
         ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
                                  !(test_bit(_XEN_PCIB_active, (unsigned long *)
-                                 &psdev->pdev->sh_info->flags)), 300*HZ);
+                                 &sh_info->flags)), 300*HZ);
 
         if (!ret) {
                 if (test_bit(_XEN_PCIB_active,
-                        (unsigned long *)&psdev->pdev->sh_info->flags)) {
+                        (unsigned long *)&sh_info->flags)) {
                         dev_err(&psdev->dev->dev,
                                 "pcifront aer process not responding!\n");
                         clear_bit(_XEN_PCIB_active,
-                          (unsigned long *)&psdev->pdev->sh_info->flags);
+                          (unsigned long *)&sh_info->flags);
                         aer_op->err = PCI_ERS_RESULT_NONE;
                         return res;
                 }
         }
-        clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+        clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
 
         if (test_bit(_XEN_PCIF_active,
-                (unsigned long *)&psdev->pdev->sh_info->flags)) {
+                (unsigned long *)&sh_info->flags)) {
                 dev_dbg(&psdev->dev->dev,
                         "schedule pci_conf service in " DRV_NAME "\n");
                 xen_pcibk_test_and_schedule_op(psdev->pdev);
@@ -1502,6 +1518,53 @@ parse_error:
 fs_initcall(pcistub_init);
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
+{
+        struct pcistub_device *psdev = NULL;
+        unsigned long flags;
+        bool found = false;
+
+        spin_lock_irqsave(&pcistub_devices_lock, flags);
+        list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+                if (!psdev->pdev && psdev->dev != pdev
+                    && pci_physfn(psdev->dev) == pdev) {
+                        found = true;
+                        break;
+                }
+        }
+        spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+        if (found)
+                return psdev;
+        return NULL;
+}
+
+static int pci_stub_notifier(struct notifier_block *nb,
+                             unsigned long action, void *data)
+{
+        struct device *dev = data;
+        const struct pci_dev *pdev = to_pci_dev(dev);
+
+        if (action != BUS_NOTIFY_UNBIND_DRIVER)
+                return NOTIFY_DONE;
+
+        if (!pdev->is_physfn)
+                return NOTIFY_DONE;
+
+        for (;;) {
+                struct pcistub_device *psdev = find_vfs(pdev);
+                if (!psdev)
+                        break;
+                device_release_driver(&psdev->dev->dev);
+        }
+        return NOTIFY_DONE;
+}
+
+static struct notifier_block pci_stub_nb = {
+        .notifier_call = pci_stub_notifier,
+};
+#endif
+
 static int __init xen_pcibk_init(void)
 {
         int err;
@@ -1523,12 +1586,19 @@ static int __init xen_pcibk_init(void)
         err = xen_pcibk_xenbus_register();
         if (err)
                 pcistub_exit();
+#ifdef CONFIG_PCI_IOV
+        else
+                bus_register_notifier(&pci_bus_type, &pci_stub_nb);
+#endif
 
         return err;
 }
 
 static void __exit xen_pcibk_cleanup(void)
 {
+#ifdef CONFIG_PCI_IOV
+        bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
+#endif
         xen_pcibk_xenbus_unregister();
         pcistub_exit();
 }
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index f72af87640e0..58e38d586f52 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -99,7 +99,8 @@ struct xen_pcibk_backend {
                                unsigned int *domain, unsigned int *bus,
                                unsigned int *devfn);
         int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);
-        void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev);
+        void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
+                        bool lock);
         int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
                    int devid, publish_pci_dev_cb publish_cb);
         struct pci_dev *(*get)(struct xen_pcibk_device *pdev,
@@ -122,10 +123,10 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 }
 
 static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
-                                             struct pci_dev *dev)
+                                             struct pci_dev *dev, bool lock)
 {
         if (xen_pcibk_backend && xen_pcibk_backend->release)
-                return xen_pcibk_backend->release(pdev, dev);
+                return xen_pcibk_backend->release(pdev, dev, lock);
 }
 
 static inline struct pci_dev *
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 51afff96c515..c99f8bb1c56c 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -145,7 +145,7 @@ out:
 }
 
 static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
-                                        struct pci_dev *dev)
+                                        struct pci_dev *dev, bool lock)
 {
         int slot;
         struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -169,8 +169,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
 out:
         mutex_unlock(&vpci_dev->lock);
 
-        if (found_dev)
+        if (found_dev) {
+                if (lock)
+                        device_lock(&found_dev->dev);
                 pcistub_put_pci_dev(found_dev);
+                if (lock)
+                        device_unlock(&found_dev->dev);
+        }
 }
 
 static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
@@ -208,8 +213,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
         struct pci_dev_entry *e, *tmp;
                 list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
                                          list) {
+                        struct pci_dev *dev = e->dev;
                         list_del(&e->list);
-                        pcistub_put_pci_dev(e->dev);
+                        device_lock(&dev->dev);
+                        pcistub_put_pci_dev(dev);
+                        device_unlock(&dev->dev);
                         kfree(e);
                 }
         }
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index ad8d30c088fe..fe17c80ff4b7 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -247,7 +247,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
         if (err)
                 goto out;
 
-        dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
+        dev_info(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
         if (xen_register_device_domain_owner(dev,
                                              pdev->xdev->otherend_id) != 0) {
                 dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
@@ -291,7 +291,7 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
 
         /* N.B. This ends up calling pcistub_put_pci_dev which ends up
          * doing the FLR. */
-        xen_pcibk_release_pci_dev(pdev, dev);
+        xen_pcibk_release_pci_dev(pdev, dev, true /* use the lock. */);
 
 out:
         return err;
diff --git a/include/linux/device.h b/include/linux/device.h
index ce1f21608b16..41d6a7555c6b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev)
         mutex_unlock(&dev->mutex);
 }
 
+static inline void device_lock_assert(struct device *dev)
+{
+        lockdep_assert_held(&dev->mutex);
+}
+
 void driver_init(void);
 
 /*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a523cee3abb5..44a27696ab6c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1004,6 +1004,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
 int pci_save_state(struct pci_dev *dev);
 void pci_restore_state(struct pci_dev *dev);
 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
+int pci_load_saved_state(struct pci_dev *dev,
+                         struct pci_saved_state *state);
 int pci_load_and_free_saved_state(struct pci_dev *dev,
                                   struct pci_saved_state **state);
 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 14334d0161d5..131a6ccdba25 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,9 +53,6 @@
 /* operation as Dom0 is supported */
 #define XENFEAT_dom0                      11
 
-/* Xen also maps grant references at pfn = mfn */
-#define XENFEAT_grant_map_identity        12
-
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index e40fae9bf11a..bcce56439d64 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -479,6 +479,25 @@ struct gnttab_get_version {
 DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
 
 /*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+#define GNTTABOP_cache_flush    12
+struct gnttab_cache_flush {
+        union {
+                uint64_t dev_bus_addr;
+                grant_ref_t ref;
+        } a;
+        uint16_t offset;   /* offset from start of grant */
+        uint16_t length;   /* size within the grant */
+#define GNTTAB_CACHE_CLEAN       (1<<0)
+#define GNTTAB_CACHE_INVAL       (1<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
+        uint32_t op;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
+
+/*
  * Bitfield values for update_pin_status.flags.
  */
  /* Map the grant entry for access by I/O devices. */