summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>2018-07-20 05:01:46 -0400
committerBoris Ostrovsky <boris.ostrovsky@oracle.com>2018-07-26 23:05:14 -0400
commit975ef7ff81bb000af6e6c8e63e81f89f3468dcf7 (patch)
tree4613c67c6a592e60631d0957a1e30133bf2a839c
parent9bdc7304f536f3f77f0a69e7c3a8f5afda561a68 (diff)
xen/gntdev: Allow mappings for DMA buffers
Allow mappings for DMA-backed buffers if the grant table module supports such: this extends the grant device to map not only buffers made of balloon pages, but also buffers allocated with dma_alloc_xxx.

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-rw-r--r--drivers/xen/gntdev.c99
-rw-r--r--include/uapi/xen/gntdev.h15
2 files changed, 112 insertions, 2 deletions
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bd56653b9bbc..173332f439d8 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -37,6 +37,9 @@
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/highmem.h> 38#include <linux/highmem.h>
39#include <linux/refcount.h> 39#include <linux/refcount.h>
40#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
41#include <linux/of_device.h>
42#endif
40 43
41#include <xen/xen.h> 44#include <xen/xen.h>
42#include <xen/grant_table.h> 45#include <xen/grant_table.h>
@@ -72,6 +75,11 @@ struct gntdev_priv {
72 struct mutex lock; 75 struct mutex lock;
73 struct mm_struct *mm; 76 struct mm_struct *mm;
74 struct mmu_notifier mn; 77 struct mmu_notifier mn;
78
79#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
80 /* Device for which DMA memory is allocated. */
81 struct device *dma_dev;
82#endif
75}; 83};
76 84
77struct unmap_notify { 85struct unmap_notify {
@@ -96,10 +104,27 @@ struct grant_map {
96 struct gnttab_unmap_grant_ref *kunmap_ops; 104 struct gnttab_unmap_grant_ref *kunmap_ops;
97 struct page **pages; 105 struct page **pages;
98 unsigned long pages_vm_start; 106 unsigned long pages_vm_start;
107
108#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
109 /*
110 * If dmabuf_vaddr is not NULL then this mapping is backed by DMA
111 * capable memory.
112 */
113
114 struct device *dma_dev;
115 /* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
116 int dma_flags;
117 void *dma_vaddr;
118 dma_addr_t dma_bus_addr;
119 /* Needed to avoid allocation in gnttab_dma_free_pages(). */
120 xen_pfn_t *frames;
121#endif
99}; 122};
100 123
101static int unmap_grant_pages(struct grant_map *map, int offset, int pages); 124static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
102 125
126static struct miscdevice gntdev_miscdev;
127
103/* ------------------------------------------------------------------ */ 128/* ------------------------------------------------------------------ */
104 129
105static void gntdev_print_maps(struct gntdev_priv *priv, 130static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -121,8 +146,27 @@ static void gntdev_free_map(struct grant_map *map)
121 if (map == NULL) 146 if (map == NULL)
122 return; 147 return;
123 148
149#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
150 if (map->dma_vaddr) {
151 struct gnttab_dma_alloc_args args;
152
153 args.dev = map->dma_dev;
154 args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
155 args.nr_pages = map->count;
156 args.pages = map->pages;
157 args.frames = map->frames;
158 args.vaddr = map->dma_vaddr;
159 args.dev_bus_addr = map->dma_bus_addr;
160
161 gnttab_dma_free_pages(&args);
162 } else
163#endif
124 if (map->pages) 164 if (map->pages)
125 gnttab_free_pages(map->count, map->pages); 165 gnttab_free_pages(map->count, map->pages);
166
167#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
168 kfree(map->frames);
169#endif
126 kfree(map->pages); 170 kfree(map->pages);
127 kfree(map->grants); 171 kfree(map->grants);
128 kfree(map->map_ops); 172 kfree(map->map_ops);
@@ -132,7 +176,8 @@ static void gntdev_free_map(struct grant_map *map)
132 kfree(map); 176 kfree(map);
133} 177}
134 178
135static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) 179static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
180 int dma_flags)
136{ 181{
137 struct grant_map *add; 182 struct grant_map *add;
138 int i; 183 int i;
@@ -155,6 +200,37 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
155 NULL == add->pages) 200 NULL == add->pages)
156 goto err; 201 goto err;
157 202
203#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
204 add->dma_flags = dma_flags;
205
206 /*
207 * Check if this mapping is requested to be backed
208 * by a DMA buffer.
209 */
210 if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
211 struct gnttab_dma_alloc_args args;
212
213 add->frames = kcalloc(count, sizeof(add->frames[0]),
214 GFP_KERNEL);
215 if (!add->frames)
216 goto err;
217
218 /* Remember the device, so we can free DMA memory. */
219 add->dma_dev = priv->dma_dev;
220
221 args.dev = priv->dma_dev;
222 args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
223 args.nr_pages = count;
224 args.pages = add->pages;
225 args.frames = add->frames;
226
227 if (gnttab_dma_alloc_pages(&args))
228 goto err;
229
230 add->dma_vaddr = args.vaddr;
231 add->dma_bus_addr = args.dev_bus_addr;
232 } else
233#endif
158 if (gnttab_alloc_pages(count, add->pages)) 234 if (gnttab_alloc_pages(count, add->pages))
159 goto err; 235 goto err;
160 236
@@ -325,6 +401,14 @@ static int map_grant_pages(struct grant_map *map)
325 map->unmap_ops[i].handle = map->map_ops[i].handle; 401 map->unmap_ops[i].handle = map->map_ops[i].handle;
326 if (use_ptemod) 402 if (use_ptemod)
327 map->kunmap_ops[i].handle = map->kmap_ops[i].handle; 403 map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
404#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
405 else if (map->dma_vaddr) {
406 unsigned long bfn;
407
408 bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
409 map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
410 }
411#endif
328 } 412 }
329 return err; 413 return err;
330} 414}
@@ -548,6 +632,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
548 } 632 }
549 633
550 flip->private_data = priv; 634 flip->private_data = priv;
635#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
636 priv->dma_dev = gntdev_miscdev.this_device;
637
638 /*
639 * The device is not spawn from a device tree, so arch_setup_dma_ops
640 * is not called, thus leaving the device with dummy DMA ops.
641 * Fix this by calling of_dma_configure() with a NULL node to set
642 * default DMA ops.
643 */
644 of_dma_configure(priv->dma_dev, NULL, true);
645#endif
551 pr_debug("priv %p\n", priv); 646 pr_debug("priv %p\n", priv);
552 647
553 return 0; 648 return 0;
@@ -589,7 +684,7 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
589 return -EINVAL; 684 return -EINVAL;
590 685
591 err = -ENOMEM; 686 err = -ENOMEM;
592 map = gntdev_alloc_map(priv, op.count); 687 map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
593 if (!map) 688 if (!map)
594 return err; 689 return err;
595 690
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 6d1163456c03..4b9d498a31d4 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -200,4 +200,19 @@ struct ioctl_gntdev_grant_copy {
200/* Send an interrupt on the indicated event channel */ 200/* Send an interrupt on the indicated event channel */
201#define UNMAP_NOTIFY_SEND_EVENT 0x2 201#define UNMAP_NOTIFY_SEND_EVENT 0x2
202 202
203/*
204 * Flags to be used while requesting memory mapping's backing storage
205 * to be allocated with DMA API.
206 */
207
208/*
209 * The buffer is backed with memory allocated with dma_alloc_wc.
210 */
211#define GNTDEV_DMA_FLAG_WC (1 << 0)
212
213/*
214 * The buffer is backed with memory allocated with dma_alloc_coherent.
215 */
216#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
217
203#endif /* __LINUX_PUBLIC_GNTDEV_H__ */ 218#endif /* __LINUX_PUBLIC_GNTDEV_H__ */