diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-11 21:15:33 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-11 21:15:33 -0500 |
commit | 9d050966e2eb37a643ac15904b6a8fda7fcfabe9 (patch) | |
tree | f3a6f9cc93f6dde2e0cd6f4114b8258afb596bc1 /arch/arm/include | |
parent | c0222ac086669a631814bbf857f8c8023452a4d7 (diff) | |
parent | 4ef8e3f3504808621e594f01852476a1d4e7ef93 (diff) |
Merge tag 'stable/for-linus-3.19-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen features and fixes from David Vrabel:
- Fully support non-coherent devices on ARM by introducing the
mechanisms to request the hypervisor to perform the required cache
maintenance operations.
- A number of pciback bug fixes and cleanups. Notably a deadlock fix
if a PCI device was manually unbound and a fix for incorrectly
restoring state after a function reset.
- In x86 PVHVM guests, use the APIC for interrupts if this has been
virtualized by the hardware. This reduces the number of interrupt-
related VM exits on such hardware.
* tag 'stable/for-linus-3.19-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (26 commits)
Revert "swiotlb-xen: pass dev_addr to swiotlb_tbl_unmap_single"
xen/pci: Use APIC directly when APIC virtualization hardware is available
xen/pci: Defer initialization of MSI ops on HVM guests
xen-pciback: drop SR-IOV VFs when PF driver unloads
xen/pciback: Restore configuration space when detaching from a guest.
PCI: Expose pci_load_saved_state for public consumption.
xen/pciback: Remove tons of dereferences
xen/pciback: Print out the domain owning the device.
xen/pciback: Include the domain id if removing the device whilst still in use
driver core: Provide an wrapper around the mutex to do lockdep warnings
xen/pciback: Don't deadlock when unbinding.
swiotlb-xen: pass dev_addr to swiotlb_tbl_unmap_single
swiotlb-xen: call xen_dma_sync_single_for_device when appropriate
swiotlb-xen: remove BUG_ON in xen_bus_to_phys
swiotlb-xen: pass dev_addr to xen_dma_unmap_page and xen_dma_sync_single_for_cpu
xen/arm: introduce GNTTABOP_cache_flush
xen/arm/arm64: introduce xen_arch_need_swiotlb
xen/arm/arm64: merge xen/mm32.c into xen/mm.c
xen/arm: use hypercall to flush caches in map_page
xen: add a dma_addr_t dev_addr argument to xen_dma_map_page
...
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/device.h | 1 | ||||
-rw-r--r-- | arch/arm/include/asm/dma-mapping.h | 7 | ||||
-rw-r--r-- | arch/arm/include/asm/xen/page-coherent.h | 66 | ||||
-rw-r--r-- | arch/arm/include/asm/xen/page.h | 4 |
4 files changed, 69 insertions, 9 deletions
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index dc662fca9230..4111592f0130 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h | |||
@@ -17,6 +17,7 @@ struct dev_archdata { | |||
17 | #ifdef CONFIG_ARM_DMA_USE_IOMMU | 17 | #ifdef CONFIG_ARM_DMA_USE_IOMMU |
18 | struct dma_iommu_mapping *mapping; | 18 | struct dma_iommu_mapping *mapping; |
19 | #endif | 19 | #endif |
20 | bool dma_coherent; | ||
20 | }; | 21 | }; |
21 | 22 | ||
22 | struct omap_device; | 23 | struct omap_device; |
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 85738b200023..e6e3446abdf6 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -123,11 +123,18 @@ static inline unsigned long dma_max_pfn(struct device *dev) | |||
123 | 123 | ||
124 | static inline int set_arch_dma_coherent_ops(struct device *dev) | 124 | static inline int set_arch_dma_coherent_ops(struct device *dev) |
125 | { | 125 | { |
126 | dev->archdata.dma_coherent = true; | ||
126 | set_dma_ops(dev, &arm_coherent_dma_ops); | 127 | set_dma_ops(dev, &arm_coherent_dma_ops); |
127 | return 0; | 128 | return 0; |
128 | } | 129 | } |
129 | #define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) | 130 | #define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) |
130 | 131 | ||
132 | /* do not use this function in a driver */ | ||
133 | static inline bool is_device_dma_coherent(struct device *dev) | ||
134 | { | ||
135 | return dev->archdata.dma_coherent; | ||
136 | } | ||
137 | |||
131 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | 138 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
132 | { | 139 | { |
133 | unsigned int offset = paddr & ~PAGE_MASK; | 140 | unsigned int offset = paddr & ~PAGE_MASK; |
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index e8275ea88e88..efd562412850 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h | |||
@@ -5,6 +5,18 @@ | |||
5 | #include <linux/dma-attrs.h> | 5 | #include <linux/dma-attrs.h> |
6 | #include <linux/dma-mapping.h> | 6 | #include <linux/dma-mapping.h> |
7 | 7 | ||
8 | void __xen_dma_map_page(struct device *hwdev, struct page *page, | ||
9 | dma_addr_t dev_addr, unsigned long offset, size_t size, | ||
10 | enum dma_data_direction dir, struct dma_attrs *attrs); | ||
11 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
12 | size_t size, enum dma_data_direction dir, | ||
13 | struct dma_attrs *attrs); | ||
14 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
15 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | ||
16 | |||
17 | void __xen_dma_sync_single_for_device(struct device *hwdev, | ||
18 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | ||
19 | |||
8 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | 20 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, |
9 | dma_addr_t *dma_handle, gfp_t flags, | 21 | dma_addr_t *dma_handle, gfp_t flags, |
10 | struct dma_attrs *attrs) | 22 | struct dma_attrs *attrs) |
@@ -20,20 +32,56 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | |||
20 | } | 32 | } |
21 | 33 | ||
22 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | 34 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, |
23 | unsigned long offset, size_t size, enum dma_data_direction dir, | 35 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
24 | struct dma_attrs *attrs) | 36 | enum dma_data_direction dir, struct dma_attrs *attrs) |
25 | { | 37 | { |
26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | 38 | bool local = PFN_DOWN(dev_addr) == page_to_pfn(page); |
39 | /* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise | ||
40 | * is a foreign page grant-mapped in dom0. If the page is local we | ||
41 | * can safely call the native dma_ops function, otherwise we call | ||
42 | * the xen specific function. */ | ||
43 | if (local) | ||
44 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | ||
45 | else | ||
46 | __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); | ||
27 | } | 47 | } |
28 | 48 | ||
29 | void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 49 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
30 | size_t size, enum dma_data_direction dir, | 50 | size_t size, enum dma_data_direction dir, |
31 | struct dma_attrs *attrs); | 51 | struct dma_attrs *attrs) |
52 | { | ||
53 | unsigned long pfn = PFN_DOWN(handle); | ||
54 | /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will | ||
55 | * always return false. If the page is local we can safely call the | ||
56 | * native dma_ops function, otherwise we call the xen specific | ||
57 | * function. */ | ||
58 | if (pfn_valid(pfn)) { | ||
59 | if (__generic_dma_ops(hwdev)->unmap_page) | ||
60 | __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
61 | } else | ||
62 | __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); | ||
63 | } | ||
32 | 64 | ||
33 | void xen_dma_sync_single_for_cpu(struct device *hwdev, | 65 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, |
34 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 66 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
67 | { | ||
68 | unsigned long pfn = PFN_DOWN(handle); | ||
69 | if (pfn_valid(pfn)) { | ||
70 | if (__generic_dma_ops(hwdev)->sync_single_for_cpu) | ||
71 | __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | ||
72 | } else | ||
73 | __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); | ||
74 | } | ||
35 | 75 | ||
36 | void xen_dma_sync_single_for_device(struct device *hwdev, | 76 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, |
37 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 77 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
78 | { | ||
79 | unsigned long pfn = PFN_DOWN(handle); | ||
80 | if (pfn_valid(pfn)) { | ||
81 | if (__generic_dma_ops(hwdev)->sync_single_for_device) | ||
82 | __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
83 | } else | ||
84 | __xen_dma_sync_single_for_device(hwdev, handle, size, dir); | ||
85 | } | ||
38 | 86 | ||
39 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ | 87 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ |
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 135c24a5ba26..68c739b3fdf4 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h | |||
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
107 | #define xen_remap(cookie, size) ioremap_cache((cookie), (size)) | 107 | #define xen_remap(cookie, size) ioremap_cache((cookie), (size)) |
108 | #define xen_unmap(cookie) iounmap((cookie)) | 108 | #define xen_unmap(cookie) iounmap((cookie)) |
109 | 109 | ||
110 | bool xen_arch_need_swiotlb(struct device *dev, | ||
111 | unsigned long pfn, | ||
112 | unsigned long mfn); | ||
113 | |||
110 | #endif /* _ASM_ARM_XEN_PAGE_H */ | 114 | #endif /* _ASM_ARM_XEN_PAGE_H */ |