aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/fpga
diff options
context:
space:
mode:
authorWu Hao <hao.wu@intel.com>2018-06-29 20:53:35 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2018-07-15 07:55:47 -0400
commitfa8dda1edef9ebc3af467c644c5533ac97171e12 (patch)
tree967916a0db830844d520da691e6b78d4de5748f8 /drivers/fpga
parent857a26222ff75eecf7d701ef0e91e4fbf6efa663 (diff)
fpga: dfl: afu: add DFL_FPGA_PORT_DMA_MAP/UNMAP ioctls support
DMA memory regions are required for Accelerated Function Unit (AFU) usage. These two ioctls allow user space applications to map user memory regions for dma, and unmap them after use. Iova is returned from driver to user space application via DFL_FPGA_PORT_DMA_MAP ioctl. Application needs to unmap it after use, otherwise, driver will unmap them in device file release operation. Each AFU has its own rb tree to keep track of its mapped DMA regions. Ioctl interfaces: * DFL_FPGA_PORT_DMA_MAP Do the dma mapping per user_addr and length provided by user. Return iova in provided struct dfl_fpga_port_dma_map. * DFL_FPGA_PORT_DMA_UNMAP Unmap the dma region per iova provided by user. Signed-off-by: Tim Whisonant <tim.whisonant@intel.com> Signed-off-by: Enno Luebbers <enno.luebbers@intel.com> Signed-off-by: Shiva Rao <shiva.rao@intel.com> Signed-off-by: Christopher Rauer <christopher.rauer@intel.com> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com> Signed-off-by: Wu Hao <hao.wu@intel.com> Acked-by: Alan Tull <atull@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/fpga')
-rw-r--r--drivers/fpga/Makefile2
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c463
-rw-r--r--drivers/fpga/dfl-afu-main.c61
-rw-r--r--drivers/fpga/dfl-afu.h31
4 files changed, 554 insertions, 3 deletions
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index a44d50dd0b70..7a2d73ba7122 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
38obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o 38obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
39 39
40dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o 40dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
41dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o 41dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
42 42
43# Drivers for FPGAs which implement DFL 43# Drivers for FPGAs which implement DFL
44obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o 44obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
new file mode 100644
index 000000000000..0e81d33af856
--- /dev/null
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -0,0 +1,463 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 */
11
12#include <linux/dma-mapping.h>
13#include <linux/sched/signal.h>
14#include <linux/uaccess.h>
15
16#include "dfl-afu.h"
17
/* Release the page references held in @pages; NULL slots are skipped. */
static void put_all_pages(struct page **pages, int npages)
{
	int idx;

	for (idx = 0; idx < npages; idx++) {
		if (pages[idx])
			put_page(pages[idx]);
	}
}
26
27void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
28{
29 struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
30
31 afu->dma_regions = RB_ROOT;
32}
33
34/**
35 * afu_dma_adjust_locked_vm - adjust locked memory
36 * @dev: port device
37 * @npages: number of pages
38 * @incr: increase or decrease locked memory
39 *
40 * Increase or decrease the locked memory size with npages input.
41 *
42 * Return 0 on success.
43 * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK.
44 */
45static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
46{
47 unsigned long locked, lock_limit;
48 int ret = 0;
49
50 /* the task is exiting. */
51 if (!current->mm)
52 return 0;
53
54 down_write(&current->mm->mmap_sem);
55
56 if (incr) {
57 locked = current->mm->locked_vm + npages;
58 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
59
60 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61 ret = -ENOMEM;
62 else
63 current->mm->locked_vm += npages;
64 } else {
65 if (WARN_ON_ONCE(npages > current->mm->locked_vm))
66 npages = current->mm->locked_vm;
67 current->mm->locked_vm -= npages;
68 }
69
70 dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
71 incr ? '+' : '-', npages << PAGE_SHIFT,
72 current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
73 ret ? "- execeeded" : "");
74
75 up_write(&current->mm->mmap_sem);
76
77 return ret;
78}
79
80/**
81 * afu_dma_pin_pages - pin pages of given dma memory region
82 * @pdata: feature device platform data
83 * @region: dma memory region to be pinned
84 *
85 * Pin all the pages of given dfl_afu_dma_region.
86 * Return 0 for success or negative error code.
87 */
88static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
89 struct dfl_afu_dma_region *region)
90{
91 int npages = region->length >> PAGE_SHIFT;
92 struct device *dev = &pdata->dev->dev;
93 int ret, pinned;
94
95 ret = afu_dma_adjust_locked_vm(dev, npages, true);
96 if (ret)
97 return ret;
98
99 region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
100 if (!region->pages) {
101 ret = -ENOMEM;
102 goto unlock_vm;
103 }
104
105 pinned = get_user_pages_fast(region->user_addr, npages, 1,
106 region->pages);
107 if (pinned < 0) {
108 ret = pinned;
109 goto put_pages;
110 } else if (pinned != npages) {
111 ret = -EFAULT;
112 goto free_pages;
113 }
114
115 dev_dbg(dev, "%d pages pinned\n", pinned);
116
117 return 0;
118
119put_pages:
120 put_all_pages(region->pages, pinned);
121free_pages:
122 kfree(region->pages);
123unlock_vm:
124 afu_dma_adjust_locked_vm(dev, npages, false);
125 return ret;
126}
127
128/**
129 * afu_dma_unpin_pages - unpin pages of given dma memory region
130 * @pdata: feature device platform data
131 * @region: dma memory region to be unpinned
132 *
133 * Unpin all the pages of given dfl_afu_dma_region.
134 * Return 0 for success or negative error code.
135 */
136static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
137 struct dfl_afu_dma_region *region)
138{
139 long npages = region->length >> PAGE_SHIFT;
140 struct device *dev = &pdata->dev->dev;
141
142 put_all_pages(region->pages, npages);
143 kfree(region->pages);
144 afu_dma_adjust_locked_vm(dev, npages, false);
145
146 dev_dbg(dev, "%ld pages unpinned\n", npages);
147}
148
149/**
150 * afu_dma_check_continuous_pages - check if pages are continuous
151 * @region: dma memory region
152 *
153 * Return true if pages of given dma memory region have continuous physical
154 * address, otherwise return false.
155 */
156static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
157{
158 int npages = region->length >> PAGE_SHIFT;
159 int i;
160
161 for (i = 0; i < npages - 1; i++)
162 if (page_to_pfn(region->pages[i]) + 1 !=
163 page_to_pfn(region->pages[i + 1]))
164 return false;
165
166 return true;
167}
168
169/**
170 * dma_region_check_iova - check if memory area is fully contained in the region
171 * @region: dma memory region
172 * @iova: address of the dma memory area
173 * @size: size of the dma memory area
174 *
175 * Compare the dma memory area defined by @iova and @size with given dma region.
176 * Return true if memory area is fully contained in the region, otherwise false.
177 */
178static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
179 u64 iova, u64 size)
180{
181 if (!size && region->iova != iova)
182 return false;
183
184 return (region->iova <= iova) &&
185 (region->length + region->iova >= iova + size);
186}
187
188/**
189 * afu_dma_region_add - add given dma region to rbtree
190 * @pdata: feature device platform data
191 * @region: dma region to be added
192 *
193 * Return 0 for success, -EEXIST if dma region has already been added.
194 *
195 * Needs to be called with pdata->lock heold.
196 */
197static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
198 struct dfl_afu_dma_region *region)
199{
200 struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
201 struct rb_node **new, *parent = NULL;
202
203 dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
204 (unsigned long long)region->iova);
205
206 new = &afu->dma_regions.rb_node;
207
208 while (*new) {
209 struct dfl_afu_dma_region *this;
210
211 this = container_of(*new, struct dfl_afu_dma_region, node);
212
213 parent = *new;
214
215 if (dma_region_check_iova(this, region->iova, region->length))
216 return -EEXIST;
217
218 if (region->iova < this->iova)
219 new = &((*new)->rb_left);
220 else if (region->iova > this->iova)
221 new = &((*new)->rb_right);
222 else
223 return -EEXIST;
224 }
225
226 rb_link_node(&region->node, parent, new);
227 rb_insert_color(&region->node, &afu->dma_regions);
228
229 return 0;
230}
231
232/**
233 * afu_dma_region_remove - remove given dma region from rbtree
234 * @pdata: feature device platform data
235 * @region: dma region to be removed
236 *
237 * Needs to be called with pdata->lock heold.
238 */
239static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
240 struct dfl_afu_dma_region *region)
241{
242 struct dfl_afu *afu;
243
244 dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
245 (unsigned long long)region->iova);
246
247 afu = dfl_fpga_pdata_get_private(pdata);
248 rb_erase(&region->node, &afu->dma_regions);
249}
250
251/**
252 * afu_dma_region_destroy - destroy all regions in rbtree
253 * @pdata: feature device platform data
254 *
255 * Needs to be called with pdata->lock heold.
256 */
257void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
258{
259 struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
260 struct rb_node *node = rb_first(&afu->dma_regions);
261 struct dfl_afu_dma_region *region;
262
263 while (node) {
264 region = container_of(node, struct dfl_afu_dma_region, node);
265
266 dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
267 (unsigned long long)region->iova);
268
269 rb_erase(node, &afu->dma_regions);
270
271 if (region->iova)
272 dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
273 region->iova, region->length,
274 DMA_BIDIRECTIONAL);
275
276 if (region->pages)
277 afu_dma_unpin_pages(pdata, region);
278
279 node = rb_next(node);
280 kfree(region);
281 }
282}
283
284/**
285 * afu_dma_region_find - find the dma region from rbtree based on iova and size
286 * @pdata: feature device platform data
287 * @iova: address of the dma memory area
288 * @size: size of the dma memory area
289 *
290 * It finds the dma region from the rbtree based on @iova and @size:
291 * - if @size == 0, it finds the dma region which starts from @iova
292 * - otherwise, it finds the dma region which fully contains
293 * [@iova, @iova+size)
294 * If nothing is matched returns NULL.
295 *
296 * Needs to be called with pdata->lock held.
297 */
298struct dfl_afu_dma_region *
299afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
300{
301 struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
302 struct rb_node *node = afu->dma_regions.rb_node;
303 struct device *dev = &pdata->dev->dev;
304
305 while (node) {
306 struct dfl_afu_dma_region *region;
307
308 region = container_of(node, struct dfl_afu_dma_region, node);
309
310 if (dma_region_check_iova(region, iova, size)) {
311 dev_dbg(dev, "find region (iova = %llx)\n",
312 (unsigned long long)region->iova);
313 return region;
314 }
315
316 if (iova < region->iova)
317 node = node->rb_left;
318 else if (iova > region->iova)
319 node = node->rb_right;
320 else
321 /* the iova region is not fully covered. */
322 break;
323 }
324
325 dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
326 (unsigned long long)iova, (unsigned long long)size);
327
328 return NULL;
329}
330
331/**
332 * afu_dma_region_find_iova - find the dma region from rbtree by iova
333 * @pdata: feature device platform data
334 * @iova: address of the dma region
335 *
336 * Needs to be called with pdata->lock held.
337 */
338static struct dfl_afu_dma_region *
339afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
340{
341 return afu_dma_region_find(pdata, iova, 0);
342}
343
344/**
345 * afu_dma_map_region - map memory region for dma
346 * @pdata: feature device platform data
347 * @user_addr: address of the memory region
348 * @length: size of the memory region
349 * @iova: pointer of iova address
350 *
351 * Map memory region defined by @user_addr and @length, and return dma address
352 * of the memory region via @iova.
353 * Return 0 for success, otherwise error code.
354 */
355int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
356 u64 user_addr, u64 length, u64 *iova)
357{
358 struct dfl_afu_dma_region *region;
359 int ret;
360
361 /*
362 * Check Inputs, only accept page-aligned user memory region with
363 * valid length.
364 */
365 if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
366 return -EINVAL;
367
368 /* Check overflow */
369 if (user_addr + length < user_addr)
370 return -EINVAL;
371
372 if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr,
373 length))
374 return -EINVAL;
375
376 region = kzalloc(sizeof(*region), GFP_KERNEL);
377 if (!region)
378 return -ENOMEM;
379
380 region->user_addr = user_addr;
381 region->length = length;
382
383 /* Pin the user memory region */
384 ret = afu_dma_pin_pages(pdata, region);
385 if (ret) {
386 dev_err(&pdata->dev->dev, "failed to pin memory region\n");
387 goto free_region;
388 }
389
390 /* Only accept continuous pages, return error else */
391 if (!afu_dma_check_continuous_pages(region)) {
392 dev_err(&pdata->dev->dev, "pages are not continuous\n");
393 ret = -EINVAL;
394 goto unpin_pages;
395 }
396
397 /* As pages are continuous then start to do DMA mapping */
398 region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
399 region->pages[0], 0,
400 region->length,
401 DMA_BIDIRECTIONAL);
402 if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
403 dev_err(&pdata->dev->dev, "failed to map for dma\n");
404 ret = -EFAULT;
405 goto unpin_pages;
406 }
407
408 *iova = region->iova;
409
410 mutex_lock(&pdata->lock);
411 ret = afu_dma_region_add(pdata, region);
412 mutex_unlock(&pdata->lock);
413 if (ret) {
414 dev_err(&pdata->dev->dev, "failed to add dma region\n");
415 goto unmap_dma;
416 }
417
418 return 0;
419
420unmap_dma:
421 dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
422 region->iova, region->length, DMA_BIDIRECTIONAL);
423unpin_pages:
424 afu_dma_unpin_pages(pdata, region);
425free_region:
426 kfree(region);
427 return ret;
428}
429
430/**
431 * afu_dma_unmap_region - unmap dma memory region
432 * @pdata: feature device platform data
433 * @iova: dma address of the region
434 *
435 * Unmap dma memory region based on @iova.
436 * Return 0 for success, otherwise error code.
437 */
438int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
439{
440 struct dfl_afu_dma_region *region;
441
442 mutex_lock(&pdata->lock);
443 region = afu_dma_region_find_iova(pdata, iova);
444 if (!region) {
445 mutex_unlock(&pdata->lock);
446 return -EINVAL;
447 }
448
449 if (region->in_use) {
450 mutex_unlock(&pdata->lock);
451 return -EBUSY;
452 }
453
454 afu_dma_region_remove(pdata, region);
455 mutex_unlock(&pdata->lock);
456
457 dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
458 region->iova, region->length, DMA_BIDIRECTIONAL);
459 afu_dma_unpin_pages(pdata, region);
460 kfree(region);
461
462 return 0;
463}
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index f67a78d7e9ad..02baa6a227c0 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -293,7 +293,11 @@ static int afu_release(struct inode *inode, struct file *filp)
293 293
294 pdata = dev_get_platdata(&pdev->dev); 294 pdata = dev_get_platdata(&pdev->dev);
295 295
296 port_reset(pdev); 296 mutex_lock(&pdata->lock);
297 __port_reset(pdev);
298 afu_dma_region_destroy(pdata);
299 mutex_unlock(&pdata->lock);
300
297 dfl_feature_dev_use_end(pdata); 301 dfl_feature_dev_use_end(pdata);
298 302
299 return 0; 303 return 0;
@@ -364,6 +368,55 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
364 return 0; 368 return 0;
365} 369}
366 370
371static long
372afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
373{
374 struct dfl_fpga_port_dma_map map;
375 unsigned long minsz;
376 long ret;
377
378 minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
379
380 if (copy_from_user(&map, arg, minsz))
381 return -EFAULT;
382
383 if (map.argsz < minsz || map.flags)
384 return -EINVAL;
385
386 ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
387 if (ret)
388 return ret;
389
390 if (copy_to_user(arg, &map, sizeof(map))) {
391 afu_dma_unmap_region(pdata, map.iova);
392 return -EFAULT;
393 }
394
395 dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
396 (unsigned long long)map.user_addr,
397 (unsigned long long)map.length,
398 (unsigned long long)map.iova);
399
400 return 0;
401}
402
403static long
404afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
405{
406 struct dfl_fpga_port_dma_unmap unmap;
407 unsigned long minsz;
408
409 minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
410
411 if (copy_from_user(&unmap, arg, minsz))
412 return -EFAULT;
413
414 if (unmap.argsz < minsz || unmap.flags)
415 return -EINVAL;
416
417 return afu_dma_unmap_region(pdata, unmap.iova);
418}
419
367static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 420static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
368{ 421{
369 struct platform_device *pdev = filp->private_data; 422 struct platform_device *pdev = filp->private_data;
@@ -384,6 +437,10 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
384 return afu_ioctl_get_info(pdata, (void __user *)arg); 437 return afu_ioctl_get_info(pdata, (void __user *)arg);
385 case DFL_FPGA_PORT_GET_REGION_INFO: 438 case DFL_FPGA_PORT_GET_REGION_INFO:
386 return afu_ioctl_get_region_info(pdata, (void __user *)arg); 439 return afu_ioctl_get_region_info(pdata, (void __user *)arg);
440 case DFL_FPGA_PORT_DMA_MAP:
441 return afu_ioctl_dma_map(pdata, (void __user *)arg);
442 case DFL_FPGA_PORT_DMA_UNMAP:
443 return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
387 default: 444 default:
388 /* 445 /*
389 * Let sub-feature's ioctl function to handle the cmd 446 * Let sub-feature's ioctl function to handle the cmd
@@ -460,6 +517,7 @@ static int afu_dev_init(struct platform_device *pdev)
460 mutex_lock(&pdata->lock); 517 mutex_lock(&pdata->lock);
461 dfl_fpga_pdata_set_private(pdata, afu); 518 dfl_fpga_pdata_set_private(pdata, afu);
462 afu_mmio_region_init(pdata); 519 afu_mmio_region_init(pdata);
520 afu_dma_region_init(pdata);
463 mutex_unlock(&pdata->lock); 521 mutex_unlock(&pdata->lock);
464 522
465 return 0; 523 return 0;
@@ -473,6 +531,7 @@ static int afu_dev_destroy(struct platform_device *pdev)
473 mutex_lock(&pdata->lock); 531 mutex_lock(&pdata->lock);
474 afu = dfl_fpga_pdata_get_private(pdata); 532 afu = dfl_fpga_pdata_get_private(pdata);
475 afu_mmio_region_destroy(pdata); 533 afu_mmio_region_destroy(pdata);
534 afu_dma_region_destroy(pdata);
476 dfl_fpga_pdata_set_private(pdata, NULL); 535 dfl_fpga_pdata_set_private(pdata, NULL);
477 mutex_unlock(&pdata->lock); 536 mutex_unlock(&pdata->lock);
478 537
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 11ce2cf99759..0c7630ae3cda 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -41,11 +41,31 @@ struct dfl_afu_mmio_region {
41}; 41};
42 42
43/** 43/**
44 * struct fpga_afu_dma_region - afu DMA region data structure
45 *
46 * @user_addr: region userspace virtual address.
47 * @length: region length.
48 * @iova: region IO virtual address.
49 * @pages: ptr to pages of this region.
50 * @node: rb tree node.
51 * @in_use: flag to indicate if this region is in_use.
52 */
53struct dfl_afu_dma_region {
54 u64 user_addr;
55 u64 length;
56 u64 iova;
57 struct page **pages;
58 struct rb_node node;
59 bool in_use;
60};
61
62/**
44 * struct dfl_afu - afu device data structure 63 * struct dfl_afu - afu device data structure
45 * 64 *
46 * @region_cur_offset: current region offset from start to the device fd. 65 * @region_cur_offset: current region offset from start to the device fd.
47 * @num_regions: num of mmio regions. 66 * @num_regions: num of mmio regions.
48 * @regions: the mmio region linked list of this afu feature device. 67 * @regions: the mmio region linked list of this afu feature device.
68 * @dma_regions: root of dma regions rb tree.
49 * @num_umsgs: num of umsgs. 69 * @num_umsgs: num of umsgs.
50 * @pdata: afu platform device's pdata. 70 * @pdata: afu platform device's pdata.
51 */ 71 */
@@ -54,6 +74,7 @@ struct dfl_afu {
54 int num_regions; 74 int num_regions;
55 u8 num_umsgs; 75 u8 num_umsgs;
56 struct list_head regions; 76 struct list_head regions;
77 struct rb_root dma_regions;
57 78
58 struct dfl_feature_platform_data *pdata; 79 struct dfl_feature_platform_data *pdata;
59}; 80};
@@ -68,4 +89,12 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
68int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata, 89int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
69 u64 offset, u64 size, 90 u64 offset, u64 size,
70 struct dfl_afu_mmio_region *pregion); 91 struct dfl_afu_mmio_region *pregion);
71#endif 92void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
93void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
94int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
95 u64 user_addr, u64 length, u64 *iova);
96int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
97struct dfl_afu_dma_region *
98afu_dma_region_find(struct dfl_feature_platform_data *pdata,
99 u64 iova, u64 size);
100#endif /* __DFL_AFU_H */